diff --git a/.cargo/config.toml b/.cargo/config.toml index bd46659f7991a95d853711672a6a4eed9222c5a1..f113e9114acef51eaae6dd96666cc49781c8d41a 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,41 +1,7 @@ [build] rustdocflags = [ - "-Dwarnings", - "-Arustdoc::redundant_explicit_links", # stylistic -] - -# An auto defined `clippy` feature was introduced, -# but it was found to clash with user defined features, -# so was renamed to `cargo-clippy`. -# -# If you want standard clippy run: -# RUSTFLAGS= cargo clippy -[target.'cfg(feature = "cargo-clippy")'] -rustflags = [ - "-Aclippy::all", - "-Dclippy::correctness", - "-Aclippy::if-same-then-else", - "-Asuspicious_double_ref_op", - "-Dclippy::complexity", - "-Aclippy::zero-prefixed-literal", # 00_1000_000 - "-Aclippy::type_complexity", # raison d'etre - "-Aclippy::nonminimal-bool", # maybe - "-Aclippy::borrowed-box", # Reasonable to fix this one - "-Aclippy::too-many-arguments", # (Turning this on would lead to) - "-Aclippy::unnecessary_cast", # Types may change - "-Aclippy::identity-op", # One case where we do 0 + - "-Aclippy::useless_conversion", # Types may change - "-Aclippy::unit_arg", # styalistic. - "-Aclippy::option-map-unit-fn", # styalistic - "-Aclippy::bind_instead_of_map", # styalistic - "-Aclippy::erasing_op", # E.g. 0 * DOLLARS - "-Aclippy::eq_op", # In tests we test equality. - "-Aclippy::while_immutable_condition", # false positives - "-Aclippy::needless_option_as_deref", # false positives - "-Aclippy::derivable_impls", # false positives - "-Aclippy::stable_sort_primitive", # prefer stable sort - "-Aclippy::extra-unused-type-parameters", # stylistic - "-Aclippy::default_constructed_unit_structs", # stylistic + "-Dwarnings", + "-Arustdoc::redundant_explicit_links", # stylistic ] [env] diff --git a/.config/lychee.toml b/.config/lychee.toml index 9b2ae069931769251fc26b57686fd1dbc5ff0b83..72c1e66a4dfb046a744a066f47b3b5477fbdcf6e 100644 --- a/.config/lychee.toml +++ b/.config/lychee.toml @@ -15,36 +15,36 @@ accept = [ 200, # Rate limited - GitHub likes to throw this. 
- 429 + 429, ] -exclude_path = [ "./target" ] +exclude_path = ["./target"] exclude = [ - # Place holders (no need to fix these): - "http://visitme/", - "https://visitme/", - - # TODO - "https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs", - "https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html", - "https://github.com/ipfs/js-ipfs-bitswap/blob/", - "https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp", - "https://github.com/paritytech/substrate/frame/fast-unstake", - "https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs", - "https://polkadot.network/the-path-of-a-parachain-block/", - "https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results", - "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html", - "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html#-6.-practical-results", - "https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology", - "https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html", - "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html", - "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model", - "https://research.web3.foundation/en/latest/polkadot/slashing/npos.html", - "https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model", - "https://rpc.polkadot.io/", - "https://w3f.github.io/parachain-implementers-guide/node/approval/approval-distribution.html", - "https://w3f.github.io/parachain-implementers-guide/node/index.html", - "https://w3f.github.io/parachain-implementers-guide/protocol-chain-selection.html", - "https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html", + # Place holders (no need to fix these): + "http://visitme/", + "https://visitme/", + + # TODO + "https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs", + "https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html", + "https://github.com/ipfs/js-ipfs-bitswap/blob/", + "https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp", + "https://github.com/paritytech/substrate/frame/fast-unstake", + "https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs", + "https://polkadot.network/the-path-of-a-parachain-block/", + "https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results", + "https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html", + "https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model", + "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html", + "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html#-6.-practical-results", + "https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology", + "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html", + "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model", + "https://research.web3.foundation/en/latest/polkadot/slashing/npos.html", + "https://rpc.polkadot.io/", + "https://w3f.github.io/parachain-implementers-guide/node/approval/approval-distribution.html", + "https://w3f.github.io/parachain-implementers-guide/node/index.html", + 
"https://w3f.github.io/parachain-implementers-guide/protocol-chain-selection.html", + "https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html", ] diff --git a/.config/taplo.toml b/.config/taplo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f5d0b7021ba898ea3ab96323fa3fbc4efdd7b307 --- /dev/null +++ b/.config/taplo.toml @@ -0,0 +1,33 @@ +# all options https://taplo.tamasfe.dev/configuration/formatter-options.html + +# ignore zombienet as they do some deliberate custom toml stuff +exclude = [ + "cumulus/zombienet/**", + "polkadot/node/malus/integrationtests/**", + "polkadot/zombienet_tests/**", + "substrate/zombienet/**", +] + +# global rules +[formatting] +reorder_arrays = true +inline_table_expand = false +array_auto_expand = false +array_auto_collapse = false +indent_string = " " # tab + +# don't re-order order-dependent deb package metadata +[[rule]] +include = ["polkadot/Cargo.toml"] +keys = ["package.metadata.deb"] + +[rule.formatting] +reorder_arrays = false + +# don't re-order order-dependent rustflags +[[rule]] +include = [".cargo/config.toml"] +keys = ["build"] + +[rule.formatting] +reorder_arrays = false diff --git a/.config/zepter.yaml b/.config/zepter.yaml index 33bf3a044cf8069f66fb0e6284078f0083c4e38f..f701392d16b15aab8351b730efa13f3abffe2406 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -19,18 +19,15 @@ workflows: '--left-side-outside-workspace=ignore', # Some features imply that they activate a specific dependency as non-optional. Otherwise the default behaviour with a `?` is used. '--feature-enables-dep=try-runtime:frame-try-runtime,runtime-benchmarks:frame-benchmarking', - # Actually modify the files and not just report the issues: + # Auxillary flags: '--offline', '--locked', '--show-path', '--quiet', ] - # Format the features into canonical format: - - ['format', 'features', '--offline', '--locked', '--quiet'] # Same as `check`, but with the `--fix` flag. 
default: - [ $check.0, '--fix' ] - - [ $check.1, '--fix' ] # Will be displayed when any workflow fails: help: diff --git a/.github/pr-custom-review.yml b/.github/pr-custom-review.yml deleted file mode 100644 index ac13d862a4ac2aee487de33de56fe34d6507c0dc..0000000000000000000000000000000000000000 --- a/.github/pr-custom-review.yml +++ /dev/null @@ -1,63 +0,0 @@ -# 🔒 PROTECTED: Changes to locks-review-team should be approved by the current locks-review-team -locks-review-team: locks-review -team-leads-team: polkadot-review -action-review-team: ci - -rules: - - name: CI files - check_type: changed_files - condition: - include: ^\.gitlab-ci\.yml|^docker/.*|^\.github/.*|^\.gitlab/.*|^\.config/nextest.toml|^\.cargo/.* - exclude: ^\.gitlab/pipeline/zombienet.* - min_approvals: 2 - teams: - - ci - - release-engineering - - - name: Core developers - check_type: changed_files - condition: - include: .* - # excluding files from 'Runtime files' and 'CI files' rules - exclude: ^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$|^cumulus/parachains/common/src/[^/]+\.rs$|^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*))|^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$|^\.gitlab-ci\.yml|^docker/.*|^\.github/.*|^\.gitlab/.*|^\.config/nextest.toml|^\.cargo/.* - min_approvals: 2 - teams: - - core-devs - - # cumulus - - name: Runtime files cumulus - check_type: changed_files - condition: ^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$|^cumulus/parachains/common/src/[^/]+\.rs$ - all_distinct: - - min_approvals: 1 - teams: - - locks-review - - min_approvals: 1 - teams: - - polkadot-review - - # if there are any changes in the bridges subtree (in case of backport changes back to bridges repo) - - name: Bridges subtree files - check_type: changed_files - condition: ^bridges/.* - min_approvals: 1 - teams: - - bridges-core - - # substrate - - - name: FRAME coders substrate - check_type: changed_files - condition: - include: ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) - all: - - min_approvals: 2 - teams: - - core-devs - - min_approvals: 1 - teams: - - frame-coders - -prevent-review-request: - teams: - - core-devs diff --git a/.github/review-bot.yml b/.github/review-bot.yml index b053ead37fb758c5513d2d2f340ed132ff24e29b..aa4ab8a69e02b409992581b34eda714b83e84ca0 100644 --- a/.github/review-bot.yml +++ b/.github/review-bot.yml @@ -1,31 +1,34 @@ rules: - name: CI files + countAuthor: true condition: - include: + include: - ^\.gitlab-ci\.yml - ^docker/.* - ^\.github/.* - ^\.gitlab/.* - ^\.config/nextest.toml - ^\.cargo/.* - exclude: - - ^./gitlab/pipeline/zombienet.* - minApprovals: 2 - type: basic - teams: - - ci - - release-engineering + exclude: + - ^\.gitlab/pipeline/zombienet.* + type: "or" + reviewers: + - minApprovals: 2 + teams: + - ci + - minApprovals: 2 + teams: + - core-devs - name: Audit rules type: basic condition: - include: - 
- ^polkadot/runtime\/(kusama|polkadot|common)\/.* + include: + - ^polkadot/runtime/common/.* - ^polkadot/primitives/src\/.+\.rs$ - ^substrate/primitives/.* - ^substrate/frame/.* - exclude: - - ^polkadot/runtime\/(kusama|polkadot)\/src\/weights\/.+\.rs$ + exclude: - ^substrate\/frame\/.+\.md$ minApprovals: 1 allowedToSkipRule: @@ -41,13 +44,8 @@ rules: - .* # excluding files from 'Runtime files' and 'CI files' rules exclude: - - ^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$ - - ^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$ - - ^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$ - - ^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$ - ^cumulus/parachains/common/src/[^/]+\.rs$ - ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) - - ^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$ - ^\.gitlab-ci\.yml - ^docker/.* - ^\.github/.* @@ -59,28 +57,10 @@ rules: teams: - core-devs - # cumulus - - name: Runtime files cumulus - countAuthor: true - condition: - include: - - ^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$ - - ^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$ - - ^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$ - - ^cumulus/parachains/common/src/[^/]+\.rs$ - type: and-distinct - reviewers: - - minApprovals: 1 - teams: - - locks-review - - minApprovals: 1 - teams: - - polkadot-review - # if there are any changes in the bridges subtree (in case of backport changes back to bridges repo) - name: Bridges subtree files type: basic - condition: + condition: include: - ^bridges/.* minApprovals: 1 @@ -88,10 +68,9 @@ rules: - bridges-core # substrate - - name: FRAME coders substrate condition: - include: + include: - ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) type: "and" reviewers: @@ -104,8 +83,9 @@ rules: # Protection of THIS file - name: Review Bot + countAuthor: true condition: - include: + include: - review-bot\.yml type: "and" reviewers: @@ -115,9 +95,6 @@ rules: - minApprovals: 1 teams: - locks-review - - minApprovals: 1 - teams: - - ci preventReviewRequests: teams: diff --git a/.github/runtime_specs/rococo.json b/.github/runtime_specs/rococo.json new file mode 100644 index 0000000000000000000000000000000000000000..6568b06400c8dab64a397b0b1fbd6d6fb72c2f7a --- /dev/null +++ b/.github/runtime_specs/rococo.json @@ -0,0 +1,17 @@ +{ + "pallets": { + "1": { + "constants": { + "EpochDuration": { + "value": [ 88, 2, 0, 0, 0, 0, 0, 0 ]} + } + }, + + "2": { + "constants": { + "MinimumPeriod": { + "value": [ 184, 11, 0, 0, 0, 0, 0, 0 ]} + } + } + } + } diff --git a/.github/runtime_specs/westend.json b/.github/runtime_specs/westend.json new file mode 100644 index 0000000000000000000000000000000000000000..6568b06400c8dab64a397b0b1fbd6d6fb72c2f7a --- /dev/null +++ b/.github/runtime_specs/westend.json @@ -0,0 +1,17 @@ +{ + "pallets": { + "1": { + "constants": { + "EpochDuration": { + "value": [ 88, 2, 0, 0, 0, 0, 0, 0 ]} + } + }, + + "2": { + "constants": { + "MinimumPeriod": { + "value": [ 184, 11, 0, 0, 0, 0, 0, 0 ]} + } + } + } + } diff --git a/.github/scripts/check-runtime.py b/.github/scripts/check-runtime.py new file mode 100755 index 
0000000000000000000000000000000000000000..9f3d047e01f8619364e20f6c83296e6b1a196f06 --- /dev/null +++ b/.github/scripts/check-runtime.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 + +import json +import sys +import logging +import os + + +def check_constant(spec_pallet_id, spec_pallet_value, meta_constant): + """ + Check a single constant + + :param spec_pallet_id: + :param spec_pallet_value: + :param meta_constant: + :return: + """ + if meta_constant['name'] == list(spec_pallet_value.keys())[0]: + constant = meta_constant['name'] + res = list(spec_pallet_value.values())[0]["value"] == meta_constant["value"] + + logging.debug(f" Checking pallet:{spec_pallet_id}/constants/{constant}") + logging.debug(f" spec_pallet_value: {spec_pallet_value}") + logging.debug(f" meta_constant: {meta_constant}") + logging.info(f"pallet:{spec_pallet_id}/constants/{constant} -> {res}") + return res + else: + # logging.warning(f" Skipping pallet:{spec_pallet_id}/constants/{meta_constant['name']}") + pass + + +def check_pallet(metadata, spec_pallet): + """ + Check one pallet + + :param metadata: + :param spec_pallet_id: + :param spec_pallet_value: + :return: + """ + + spec_pallet_id, spec_pallet_value = spec_pallet + logging.debug(f"Pallet: {spec_pallet_id}") + + metadata_pallets = metadata["pallets"] + metadata_pallet = metadata_pallets[spec_pallet_id] + + res = map(lambda meta_constant_value: check_constant( + spec_pallet_id, spec_pallet_value["constants"], meta_constant_value), + metadata_pallet["constants"].values()) + res = list(filter(lambda item: item is not None, res)) + return all(res) + + +def check_pallets(metadata, specs): + """ + Check all pallets + + :param metadata: + :param specs: + :return: + """ + + res = list(map(lambda spec_pallet: check_pallet(metadata, spec_pallet), + specs['pallets'].items())) + res = list(filter(lambda item: item is not None, res)) + return all(res) + + +def check_metadata(metadata, specs): + """ + Check metadata (json) against a list of expectations + + :param metadata: Metadata in JSON format + :param specs: Expectations + :return: Bool + """ + + res = check_pallets(metadata, specs) + return res + + +def help(): + """ Show some simple help """ + + print(f"You must pass 2 args, you passed {len(sys.argv) - 1}") + print("Sample call:") + print("check-runtime.py ") + + +def load_json(file): + """ Load json from a file """ + + f = open(file) + return json.load(f) + + +def main(): + LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper() + logging.basicConfig(level=LOGLEVEL) + + if len(sys.argv) != 3: + help() + exit(1) + + metadata_file = sys.argv[1] + specs_file = sys.argv[2] + print(f"Checking metadata from: {metadata_file} with specs from: {specs_file}") + + metadata = load_json(metadata_file) + specs = load_json(specs_file) + + res = check_metadata(metadata, specs) + + if res: + logging.info(f"OK") + exit(0) + else: + print("") + logging.info(f"Some errors were found, run again with LOGLEVEL=debug") + exit(1) + +if __name__ == "__main__": + main() diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index b0f9cb32063a43cda58d2e6e8f25e0ff66a134eb..bd12d9c6e6ff773f8513189a381d725243e53eb5 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -202,21 +202,26 @@ fetch_release_artifacts() { echo "Release ID : $RELEASE_ID" echo "Repo : $REPO" echo "Binary : $BINARY" + OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} + echo "OUTPUT_DIR : $OUTPUT_DIR" + echo "Fetching release info..." 
curl -L -s \ -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${GITHUB_TOKEN}" \ -H "X-GitHub-Api-Version: 2022-11-28" \ https://api.github.com/repos/${REPO}/releases/${RELEASE_ID} > release.json - # Get Asset ids + echo "Extract asset ids..." ids=($(jq -r '.assets[].id' < release.json )) + echo "Extract asset count..." count=$(jq '.assets|length' < release.json ) # Fetch artifacts - mkdir -p "./release-artifacts/${BINARY}" - pushd "./release-artifacts/${BINARY}" > /dev/null + mkdir -p "$OUTPUT_DIR" + pushd "$OUTPUT_DIR" > /dev/null + echo "Fetching assets..." iter=1 for id in "${ids[@]}" do @@ -264,3 +269,78 @@ function check_gpg() { echo "Checking GPG Signature for $1" gpg --no-tty --verify -q $1.asc $1 } + +# GITHUB_REF will typically be like: +# - refs/heads/release-v1.2.3 +# - refs/heads/release-polkadot-v1.2.3-rc2 +# This function extracts the version +function get_version_from_ghref() { + GITHUB_REF=$1 + stripped=${GITHUB_REF#refs/heads/release-} + re="v([0-9]+\.[0-9]+\.[0-9]+)" + if [[ $stripped =~ $re ]]; then + echo ${BASH_REMATCH[0]}; + return 0 + else + return 1 + fi +} + +# Get latest rc tag based on the release version and product +function get_latest_rc_tag() { + version=$1 + product=$2 + + if [[ "$product" == "polkadot" ]]; then + last_rc=$(git tag -l "$version-rc*" | sort -V | tail -n 1) + elif [[ "$product" == "polkadot-parachain" ]]; then + last_rc=$(git tag -l "polkadot-parachains-$version-rc*" | sort -V | tail -n 1) + fi + echo "${last_rc}" +} + +# Increment rc tag number based on the value of a suffix of the current rc tag +function increment_rc_tag() { + last_rc=$1 + + suffix=$(echo "$last_rc" | grep -Eo '[0-9]+$') + ((suffix++)) + echo $suffix +} + +function relative_parent() { + echo "$1" | sed -E 's/(.*)\/(.*)\/\.\./\1/g' +} + +# Find all the runtimes, it returns the result as JSON object, compatible to be +# used as Github Workflow Matrix. This call is exposed by the `scan` command and can be used as: +# podman run --rm -it -v /.../fellowship-runtimes:/build docker.io/chevdor/srtool:1.70.0-0.11.1 scan +function find_runtimes() { + libs=($(git grep -I -r --cached --max-depth 20 --files-with-matches 'construct_runtime!' -- '*lib.rs')) + re=".*-runtime$" + JSON=$(jq --null-input '{ "include": [] }') + + # EXCLUDED_RUNTIMES is a space separated list of runtime names (without the -runtime postfix) + # EXCLUDED_RUNTIMES=${EXCLUDED_RUNTIMES:-"substrate-test"} + IFS=' ' read -r -a exclusions <<< "$EXCLUDED_RUNTIMES" + + for lib in "${libs[@]}"; do + crate_dir=$(dirname "$lib") + cargo_toml="$crate_dir/../Cargo.toml" + + name=$(toml get -r $cargo_toml 'package.name') + chain=${name//-runtime/} + + if [[ "$name" =~ $re ]] && ! 
[[ ${exclusions[@]} =~ $chain ]]; then + lib_dir=$(dirname "$lib") + runtime_dir=$(relative_parent "$lib_dir/..") + ITEM=$(jq --null-input \ + --arg chain "$chain" \ + --arg name "$name" \ + --arg runtime_dir "$runtime_dir" \ + '{ "chain": $chain, "crate": $name, "runtime_dir": $runtime_dir }') + JSON=$(echo $JSON | jq ".include += [$ITEM]") + fi + done + echo $JSON +} diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml index c7cd4b34384a8f96cb23202b73baf5c43f2792be..f7003379cf0cd2b2f34101c94e8365eccdcdc704 100644 --- a/.github/workflows/build-and-attach-release-runtimes.yml +++ b/.github/workflows/build-and-attach-release-runtimes.yml @@ -3,7 +3,7 @@ name: Build and Attach Runtimes to Releases/RC on: release: types: - - created + - published env: PROFILE: production @@ -19,9 +19,11 @@ jobs: - { name: asset-hub-westend, package: asset-hub-westend-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-westend } - { name: bridge-hub-rococo, package: bridge-hub-rococo-runtime, path: cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo } - { name: contracts-rococo, package: contracts-rococo-runtime, path: cumulus/parachains/runtimes/contracts/contracts-rococo } + - { name: collectives-westend, package: collectives-westend-runtime, path: cumulus/parachains/runtimes/collectives/collectives-westend } + - { name: glutton-westend, package: glutton-westend-runtime, path: cumulus/parachains/runtimes/glutton/glutton-westend } build_config: # Release build has logging disabled and no dev features - - { type: on-chain-release, opts: --features on-chain-release-build } + - { type: on-chain-release, opts: --features on-chain-release-build } # Debug build has logging enabled and developer features - { type: dev-debug-build, opts: --features try-runtime } @@ -29,11 +31,11 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Build ${{ matrix.runtime.name }} ${{ matrix.build_config.type }} id: srtool_build - uses: chevdor/srtool-actions@v0.9.0 + uses: chevdor/srtool-actions@v0.9.1 env: BUILD_OPTS: ${{ matrix.build_config.opts }} with: @@ -42,12 +44,6 @@ jobs: runtime_dir: ${{ matrix.runtime.path }} profile: ${{ env.PROFILE }} - - name: Build Summary - run: | - echo "${{ steps.srtool_build.outputs.json }}" | jq . 
> ${{ matrix.runtime.name }}-srtool-digest.json - cat ${{ matrix.runtime.name }}-srtool-digest.json - echo "Runtime location: ${{ steps.srtool_build.outputs.wasm }}" - - name: Set up paths and runtime names id: setup run: | diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml index 83b52e82313257affaea41eb66da44b164544116..97562f0da09569931582864bd764e6724900d619 100644 --- a/.github/workflows/check-labels.yml +++ b/.github/workflows/check-labels.yml @@ -3,11 +3,15 @@ name: Check labels on: pull_request: types: [labeled, opened, synchronize, unlabeled] + merge_group: jobs: check-labels: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Pull image env: IMAGE: paritytech/ruled_labels:0.4.0 diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 50dd10a6d3c0ae9a11d548222e2f952468606e3f..e1e92d288ceae235d23fa36c31d592092fe8b0ba 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -2,20 +2,21 @@ name: Check licenses on: pull_request: + merge_group: permissions: packages: read jobs: check-licenses: - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest env: LICENSES: "'Apache-2.0' 'GPL-3.0-only' 'GPL-3.0-or-later WITH Classpath-exception-2.0'" NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/setup-node@v4.0.0 + - uses: actions/setup-node@v4.0.1 with: node-version: "18.x" registry-url: "https://npm.pkg.github.com" diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 3ed6ba84b82f14e2b20cb4ab4553bcef4a11283d..0932d38c9adda4e170745deaecb0cb18bec67ed8 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -8,6 +8,7 @@ on: - ".github/workflows/check-links.yml" - ".config/lychee.toml" types: [opened, synchronize, reopened, ready_for_review] + merge_group: permissions: packages: read diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml index 05b5d898d67122aec7c1ed05df20dac0cd4cb9c9..dc02970a1733ddef89dfacf308bdedd3d746042a 100644 --- a/.github/workflows/check-markdown.yml +++ b/.github/workflows/check-markdown.yml @@ -3,6 +3,7 @@ name: Check Markdown on: pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: permissions: packages: read @@ -15,7 +16,7 @@ jobs: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/setup-node@v4.0.0 + - uses: actions/setup-node@v4.0.1 with: node-version: "18.x" registry-url: "https://npm.pkg.github.com" @@ -23,8 +24,8 @@ jobs: - name: Install tooling run: | - npm install -g markdownlint-cli - markdownlint --version + npm install -g markdownlint-cli + markdownlint --version - name: Check Markdown env: diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 690f7a3f1333366dea41944b5ae81bc74f12b761..f47404744a49b86735b584e5c0f84bda3fe3078e 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -3,25 +3,30 @@ name: Check PRdoc on: pull_request: types: [labeled, opened, synchronize, unlabeled] + merge_group: env: - IMAGE: paritytech/prdoc:v0.0.5 + IMAGE: docker.io/paritytech/prdoc:v0.0.7 API_BASE: https://api.github.com/repos REPO: ${{ github.repository }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_PR: 
${{ github.event.pull_request.number }} - MOUNT: /prdoc ENGINE: docker + PRDOC_DOC: https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/prdoc.md jobs: check-prdoc: runs-on: ubuntu-latest steps: + # we cannot show the version in this step (ie before checking out the repo) + # due to https://github.com/paritytech/prdoc/issues/15 + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Pull image run: | echo "Pulling $IMAGE" - docker pull $IMAGE - docker run --rm $IMAGE --version + $ENGINE pull $IMAGE - name: Check if PRdoc is required id: get-labels @@ -32,18 +37,29 @@ jobs: echo "Labels: ${labels}" echo "labels=${labels}" >> "$GITHUB_OUTPUT" - - name: No PRdoc required + - name: Checkout repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1 + + - name: Check PRDoc version + run: | + $ENGINE run --rm -v $PWD:/repo $IMAGE --version + + - name: Early exit if PR is silent if: ${{ contains(steps.get-labels.outputs.labels, 'R0') }} run: | - echo "PR detected as silent, no PRdoc is required, exiting..." + hits=$(find prdoc -name "pr_$GITHUB_PR*.prdoc" | wc -l) + if (( hits > 0 )); then + echo "PR detected as silent, but a PRDoc was found, checking it as information" + $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} || echo "Ignoring failure" + else + echo "PR detected as silent, no PRDoc found, exiting..." + fi + echo "If you want to add a PRDoc, please refer to $PRDOC_DOC" exit 0 - - name: Checkout repo - if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1 - - name: PRdoc check for PR#${{ github.event.pull_request.number }} if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} run: | - echo "Checking for PR#${GITHUB_PR} in $MOUNT" - $ENGINE run --rm -v $PWD/prdoc:/doc $IMAGE check -n ${GITHUB_PR} || true + echo "Checking for PR#${GITHUB_PR}" + echo "You can find more information about PRDoc at $PRDOC_DOC" + $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} diff --git a/.github/workflows/check-publish.yml b/.github/workflows/check-publish.yml index 9ab47dba51b1ca93829dc43a1494c78e47324559..1941bd9816757210b0d9f238346acb71c54b9a48 100644 --- a/.github/workflows/check-publish.yml +++ b/.github/workflows/check-publish.yml @@ -6,15 +6,13 @@ on: - master pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: jobs: check-publish: - strategy: - matrix: - os: ["ubuntu-latest"] - runs-on: ${{ matrix.os }} + runs-on: ubuntu-latest steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Rust Cache uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 @@ -22,7 +20,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish --profile dev + run: cargo install parity-publish@0.3.0 - name: parity-publish check run: parity-publish check --allow-unpublished diff --git a/.github/workflows/check-runtimes.yml b/.github/workflows/check-runtimes.yml new file mode 100644 index 0000000000000000000000000000000000000000..0e5ad104766a89aaa678cc5436475d95e3ab76fd --- /dev/null +++ b/.github/workflows/check-runtimes.yml @@ -0,0 +1,94 @@ +name: Check Runtimes Specs +# This GH Workflow fetches the runtimes available in a release. +# It then compares their metadata with reference specs located under +# .github/runtime_specs. 
+ +on: + workflow_dispatch: + inputs: + release_id: + description: | + Release ID. + You can find it using the command: + curl -s \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/paritytech/polkadot-sdk/releases | \ + jq '.[] | { name: .name, id: .id }' + required: true + type: string + + # This trigger unfortunately does not work as expected. + # https://github.com/orgs/community/discussions/47794 + # release: + # types: [edited] + +env: + RUNTIME_SPECS_DIR: .github/runtime_specs + DATA_DIR: runtimes + RELEASE_ID: ${{ inputs.release_id }} + REPO: ${{ github.repository }} + +jobs: + find-specs: + name: Fetch runtime specs + outputs: + specs: ${{ steps.get-list.outputs.specs }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Get list + id: get-list + run: | + lst=$(ls $RUNTIME_SPECS_DIR/*.json | xargs -I{} basename "{}" .json | jq -R .| jq -sc .) + echo "Found: $lst" + echo "specs=$lst" >> $GITHUB_OUTPUT + + check-runtimes: + name: Check runtime specs + runs-on: ubuntu-latest + needs: + - find-specs + + strategy: + matrix: + specs: ${{ fromJSON(needs.find-specs.outputs.specs) }} + + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Fetch release artifacts based on release id + env: + OUTPUT_DIR: ${{ env.DATA_DIR }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + . ./.github/scripts/common/lib.sh + fetch_release_artifacts + + - name: Install tooling + env: + SUBWASM_VERSION: v0.20.0 + DL_BASE_URL: https://github.com/chevdor/subwasm/releases/download + run: | + wget $DL_BASE_URL/$SUBWASM_VERSION/subwasm_linux_amd64_$SUBWASM_VERSION.deb \ + -O subwasm.deb + sudo dpkg -i subwasm.deb + subwasm --version + + - name: Extract metadata JSON for ${{ matrix.specs }} + env: + RUNTIME: ${{ matrix.specs }} + run: | + WASM=$(ls ${DATA_DIR}/${RUNTIME}*.wasm) + echo "WASM=$WASM" + subwasm show --json "$WASM" > "${DATA_DIR}/${RUNTIME}.json" + + - name: Check specs for ${{ matrix.specs }} + id: build + env: + RUNTIME: ${{ matrix.specs }} + LOGLEVEL: info + run: | + python --version + .github/scripts/check-runtime.py "${DATA_DIR}/${RUNTIME}.json" "${RUNTIME_SPECS_DIR}/${RUNTIME}.json" diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/claim-crates.yml new file mode 100644 index 0000000000000000000000000000000000000000..9e272266201837fae0ab875186adc89a286e2599 --- /dev/null +++ b/.github/workflows/claim-crates.yml @@ -0,0 +1,26 @@ +name: Claim Crates + +on: + push: + branches: + - master + +jobs: + claim-crates: + runs-on: ubuntu-latest + environment: master + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Rust Cache + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish@0.3.0 + + - name: parity-publish claim + env: + PARITY_PUBLISH_CRATESIO_TOKEN: ${{ secrets.CRATESIO_PUBLISH_CLAIM_TOKEN }} + run: parity-publish claim diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index 7ca4413bb0503093e1d7d34c08749d56cf832512..99ac5120097d1d888b0c9207621433cc93a950c2 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -6,6 +6,7 @@ on: - master pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: jobs: quick_check: @@ -14,7 +15,7 @@ 
jobs: os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} container: - image: paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231025 + image: paritytech/ci-unified:bullseye-1.74.0-2023-11-01-v20231204 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 diff --git a/.github/workflows/gitspiegel-trigger.yml b/.github/workflows/gitspiegel-trigger.yml index dce3aaf2feca59b26283c1a600440483eabfb892..b338f7a3f6254b9db628f8b2b45c88b8094ef390 100644 --- a/.github/workflows/gitspiegel-trigger.yml +++ b/.github/workflows/gitspiegel-trigger.yml @@ -13,6 +13,18 @@ on: - unlocked - ready_for_review - reopened + # the job doesn't check out any code, so it is relatively safe to run it on any event + pull_request_target: + types: + - opened + - synchronize + - unlocked + - ready_for_review + - reopened + merge_group: + +# drop all permissions for GITHUB_TOKEN +permissions: {} jobs: sync: diff --git a/.github/workflows/merge-queue.yml b/.github/workflows/merge-queue.yml new file mode 100644 index 0000000000000000000000000000000000000000..cce326f44931bed8357fb661c8b053f60205f119 --- /dev/null +++ b/.github/workflows/merge-queue.yml @@ -0,0 +1,24 @@ +name: Merge-Queue + +on: + merge_group: + +jobs: + trigger-merge-queue-action: + runs-on: ubuntu-latest + environment: merge-queues + steps: + - name: Generate token + id: app_token + uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + with: + app_id: ${{ secrets.REVIEW_APP_ID }} + private_key: ${{ secrets.REVIEW_APP_KEY }} + - name: Add Merge Queue status check + uses: billyjbryant/create-status-check@3e6fa0ac599d10d9588cf9516ca4330ef669b858 # v2 + with: + authToken: ${{ steps.app_token.outputs.token }} + context: 'review-bot' + description: 'PRs for merge queue gets approved' + state: 'success' + sha: ${{ github.event.merge_group.head_commit.id }} diff --git a/.github/workflows/notif-burnin-label.yml b/.github/workflows/notif-burnin-label.yml new file mode 100644 index 0000000000000000000000000000000000000000..b630cd07440f925e603c920f337d75c48d871b05 --- /dev/null +++ b/.github/workflows/notif-burnin-label.yml @@ -0,0 +1,24 @@ +name: Notify DevOps when burn-in label applied +on: + pull_request: + types: [labeled] + +jobs: + notify-devops: + runs-on: ubuntu-latest + strategy: + matrix: + channel: + - name: 'Team: DevOps' + room: '!lUslSijLMgNcEKcAiE:parity.io' + + steps: + - name: Send Matrix message to ${{ matrix.channel.name }} + if: startsWith(github.event.label.name, 'A0-') + uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 + with: + room_id: ${{ matrix.channel.room }} + access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} + server: m.parity.io + message: | + @room Burn-in request received for [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }}) diff --git a/.github/workflows/pr-custom-review.yml b/.github/workflows/pr-custom-review.yml deleted file mode 100644 index b15d20c696fe83fd5c19cf834328458c8a5c6c3c..0000000000000000000000000000000000000000 --- a/.github/workflows/pr-custom-review.yml +++ /dev/null @@ -1,42 +0,0 @@ -name: Assign reviewers - -on: - pull_request: - branches: - - master - - main - types: - - opened - - reopened - - synchronize - - review_requested - - review_request_removed - - ready_for_review - - converted_to_draft - pull_request_review: - -jobs: - pr-custom-review: - runs-on: ubuntu-latest - steps: - - name: Skip if pull request is in Draft - # `if: github.event.pull_request.draft == true` should 
be kept here, at - # the step level, rather than at the job level. The latter is not - # recommended because when the PR is moved from "Draft" to "Ready to - # review" the workflow will immediately be passing (since it was skipped), - # even though it hasn't actually ran, since it takes a few seconds for - # the workflow to start. This is also disclosed in: - # https://github.community/t/dont-run-actions-on-draft-pull-requests/16817/17 - # That scenario would open an opportunity for the check to be bypassed: - # 1. Get your PR approved - # 2. Move it to Draft - # 3. Push whatever commits you want - # 4. Move it to "Ready for review"; now the workflow is passing (it was - # skipped) and "Check reviews" is also passing (it won't be updated - # until the workflow is finished) - if: github.event.pull_request.draft == true - run: exit 1 - - name: pr-custom-review - uses: paritytech/pr-custom-review@master - with: - checks-reviews-api: http://pcr.parity-prod.parity.io/api/v1/check_reviews diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-10_rc-automation.yml new file mode 100644 index 0000000000000000000000000000000000000000..7231a8b75886d04ce18bb89fcef99029e3ab14c6 --- /dev/null +++ b/.github/workflows/release-10_rc-automation.yml @@ -0,0 +1,113 @@ +name: Release - RC automation +on: + push: + branches: + # Catches release-polkadot-v1.2.3, release-v1.2.3-rc1, etc + - release-v[0-9]+.[0-9]+.[0-9]+* + - release-cumulus-v[0-9]+* + - release-polkadot-v[0-9]+* + + workflow_dispatch: + +jobs: + tag_rc: + runs-on: ubuntu-latest + strategy: + matrix: + channel: + - name: "RelEng: Polkadot Release Coordination" + room: '!cqAmzdIcbOFwrdrubV:parity.io' + environment: release + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + + - name: Get release product + id: get_rel_product + shell: bash + run: | + current_branch=$(git branch --show-current) + echo "Current branch: $current_branch" + if [[ "$current_branch" =~ "release-polkadot" ]]; then + echo "product=polkadot" >> $GITHUB_OUTPUT + elif [[ "$current_branch" =~ "release-cumulus" ]]; then + echo "product=polkadot-parachain" >> $GITHUB_OUTPUT + fi + + + - name: Compute next rc tag for polkadot + if: ${{ steps.get_rel_product.outputs.product == 'polkadot' }} + id: compute_tag_polkadot + shell: bash + run: | + . ./.github/scripts/common/lib.sh + + # Get last rc tag if exists, else set it to {version}-rc1 + version=$(get_version_from_ghref ${GITHUB_REF}) + echo "$version" + echo "version=$version" >> $GITHUB_OUTPUT + + last_rc=$(get_latest_rc_tag $version polkadot) + + if [ -n "$last_rc" ]; then + suffix=$(increment_rc_tag $last_rc) + echo "new_tag=$version-rc$suffix" >> $GITHUB_OUTPUT + echo "first_rc=false" >> $GITHUB_OUTPUT + else + echo "new_tag=$version-rc1" >> $GITHUB_OUTPUT + echo "first_rc=true" >> $GITHUB_OUTPUT + fi + + - name: Compute next rc tag for polkadot-parachain + if: ${{ steps.get_rel_product.outputs.product == 'polkadot-parachain' }} + id: compute_tag_cumulus + shell: bash + run: | + . 
./.github/scripts/common/lib.sh + + # Get last rc tag if exists, else set it to polkadot-parachains-{version}-rc1 + version=$(get_version_from_ghref ${GITHUB_REF}) + echo "$version" + echo "version=$version" >> $GITHUB_OUTPUT + + last_rc=$(get_latest_rc_tag $version polkadot-parachain) + if [ -n "$last_rc" ]; then + suffix=$(increment_rc_tag $last_rc) + echo "new_tag=polkadot-parachains-$version-rc$suffix" >> $GITHUB_OUTPUT + echo "first_rc=false" >> $GITHUB_OUTPUT + else + echo "new_tag=polkadot-parachain-$version-rc1" >> $GITHUB_OUTPUT + echo "first_rc=true" >> $GITHUB_OUTPUT + fi + + - name: Apply new tag + uses: tvdias/github-tagger@ed7350546e3e503b5e942dffd65bc8751a95e49d # v0.0.2 + with: + # We can't use the normal GITHUB_TOKEN for the following reason: + # https://docs.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token + # RELEASE_BRANCH_TOKEN requires public_repo OAuth scope + repo-token: "${{ secrets.RELEASE_BRANCH_TOKEN }}" + tag: ${{ steps.compute_tag_polkadot.outputs.new_tag || steps.compute_tag_cumulus.outputs.new_tag }} + + # - id: create-issue + # uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1 + # # Only create the issue if it's the first release candidate + # if: steps.compute_tag.outputs.first_rc == 'true' + # env: + # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # VERSION: ${{ steps.compute_tag.outputs.version }} + # with: + # filename: .github/ISSUE_TEMPLATE/release.md + + - name: Send Matrix message to ${{ matrix.channel.name }} + uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 + # if: steps.create-issue.outputs.url != '' + with: + room_id: ${{ matrix.channel.room }} + access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} + server: m.parity.io + message: | + Release process for polkadot ${{ steps.compute_tag_polkadot.outputs.new_tag || steps.compute_tag_cumulus.outputs.new_tag }} has been started.
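The rc-automation workflow above computes the next candidate tag with the `get_version_from_ghref`, `get_latest_rc_tag` and `increment_rc_tag` helpers added to `.github/scripts/common/lib.sh` earlier in this diff. A minimal, standalone sketch of that flow follows; the ref and tag values are hypothetical samples, and the inlined functions only mirror the library's behaviour rather than sourcing it:

#!/usr/bin/env bash
# Sketch only: mirrors the lib.sh helpers above with hard-coded sample data.

get_version_from_ghref() {          # "refs/heads/release-polkadot-v1.2.3-rc2" -> "v1.2.3"
  local stripped=${1#refs/heads/release-}
  [[ $stripped =~ v([0-9]+\.[0-9]+\.[0-9]+) ]] && echo "${BASH_REMATCH[0]}"
}

increment_rc_tag() {                # "v1.2.3-rc1" -> "2"
  local suffix
  suffix=$(grep -Eo '[0-9]+$' <<<"$1")
  echo $((suffix + 1))
}

version=$(get_version_from_ghref "refs/heads/release-polkadot-v1.2.3-rc2")
last_rc="$version-rc1"              # stand-in for: git tag -l "$version-rc*" | sort -V | tail -n 1
echo "next tag: $version-rc$(increment_rc_tag "$last_rc")"   # prints: next tag: v1.2.3-rc2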
diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 891f43e605c051a63325deb0638b342a97e2377e..f74fb6a0ad1f9e2acad44e1802e2efd8edc61df1 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -220,6 +220,7 @@ jobs: runs-on: ubuntu-latest outputs: polkadot_apt_version: ${{ steps.fetch-latest-apt.outputs.polkadot_apt_version }} + polkadot_container_tag: ${{ steps.fetch-latest-apt.outputs.polkadot_container_tag }} container: image: paritytech/parity-keyring options: --user root @@ -230,7 +231,9 @@ jobs: apt update apt show polkadot version=$(apt show polkadot 2>/dev/null | grep "Version:" | awk '{print $2}') + tag=$(echo $version | sed 's/-.*//') echo "polkadot_apt_version=v$version" >> $GITHUB_OUTPUT + echo "polkadot_container_tag=v$tag" >> $GITHUB_OUTPUT echo "You passed ${{ inputs.version }} but this is ignored" echo "We use the version from the Debian Package: $version" @@ -268,7 +271,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: push: true file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile @@ -276,7 +279,7 @@ jobs: # TODO: It would be good to get rid of this GHA that we don't really need. tags: | parity/polkadot:latest - parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_apt_version }} + parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} build-args: | VCS_REF=${{ github.ref }} POLKADOT_VERSION=${{ needs.fetch-latest-debian-package-version.outputs.polkadot_apt_version }} diff --git a/.github/workflows/release-99_notif-published.yml b/.github/workflows/release-99_notif-published.yml new file mode 100644 index 0000000000000000000000000000000000000000..b35120ca4e128beaa37047b0ac3f21b02f4da663 --- /dev/null +++ b/.github/workflows/release-99_notif-published.yml @@ -0,0 +1,61 @@ +name: Release - Announce release to Matrix rooms +on: + release: + types: + - published + - prereleased + +jobs: + ping_matrix: + runs-on: ubuntu-latest + strategy: + matrix: + channel: + # Internal + - name: 'RelEng: Cumulus Release Coordination' + room: '!NAEMyPAHWOiOQHsvus:parity.io' + pre-release: true + - name: "RelEng: Polkadot Release Coordination" + room: '!cqAmzdIcbOFwrdrubV:parity.io' + pre-release: true + - name: 'General: Rust, Polkadot, Substrate' + room: '!aJymqQYtCjjqImFLSb:parity.io' + pre-release: false + - name: 'Team: DevOps' + room: '!lUslSijLMgNcEKcAiE:parity.io' + pre-release: true + + # External + - name: 'Ledger <> Polkadot Coordination' + room: '!EoIhaKfGPmFOBrNSHT:web3.foundation' + pre-release: true + + # Public + # - name: '#KusamaValidatorLounge:polkadot.builders' + # room: '!LhjZccBOqFNYKLdmbb:polkadot.builders' + # pre-release: false + # - name: '#kusama-announcements:matrix.parity.io' + # room: '!FMwxpQnYhRCNDRsYGI:matrix.parity.io' + # pre-release: false + # - name: '#polkadotvalidatorlounge:web3.foundation' + # room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io' + # pre-release: false + # - name: '#polkadot-announcements:matrix.parity.io' + # room: '!UqHPWiCBGZWxrmYBkF:matrix.parity.io' + # pre-release: false + + steps: + - name: Matrix notification to ${{ matrix.channel.name }} + if: github.event.release.prerelease == false || matrix.channel.pre-release + uses: 
s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 + with: + room_id: ${{ matrix.channel.room }} + access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} + server: m.parity.io + message: | + A (pre)release has been ${{github.event.action}} in **${{github.event.repository.full_name}}:**
+ Release version: [${{github.event.release.tag_name}}](${{github.event.release.html_url}}) + + ----- + + ${{github.event.release.body}} diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml index 5970989cde09374743bde8ac8890eae192527cec..0a7e80f007c5b643ce183fdca85d91c57b61f53f 100644 --- a/.github/workflows/review-bot.yml +++ b/.github/workflows/review-bot.yml @@ -23,7 +23,7 @@ jobs: app_id: ${{ secrets.REVIEW_APP_ID }} private_key: ${{ secrets.REVIEW_APP_KEY }} - name: "Evaluates PR reviews and assigns reviewers" - uses: paritytech/review-bot@v2.2.0 + uses: paritytech/review-bot@v2.3.0 with: repo-token: ${{ steps.app_token.outputs.token }} team-token: ${{ steps.app_token.outputs.token }} diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 1ae6b79ffbdac065ffda764b0b3fe4e8a3228c8e..e5fcb434fd360bd229cbc9e18a5588c24afac2fb 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -1,6 +1,6 @@ name: Review-Trigger -on: +on: pull_request_target: types: - opened @@ -10,13 +10,18 @@ on: - review_request_removed - ready_for_review pull_request_review: + merge_group: jobs: trigger-review-bot: - if: github.event.pull_request.draft != true + # (It is not a draft) && (it is not a review || it is an approving review) + if: ${{ github.event.pull_request.draft != true && (github.event_name != 'pull_request_review' || (github.event.review && github.event.review.state == 'APPROVED')) }} runs-on: ubuntu-latest name: trigger review bot steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Get PR number env: PR_NUMBER: ${{ github.event.pull_request.number }} diff --git a/.github/workflows/srtool.yml b/.github/workflows/srtool.yml new file mode 100644 index 0000000000000000000000000000000000000000..89659399fc64b643aca6adc5c4d555d5962b8746 --- /dev/null +++ b/.github/workflows/srtool.yml @@ -0,0 +1,135 @@ +name: Srtool build + +env: + SUBWASM_VERSION: 0.20.0 + TOML_CLI_VERSION: 0.2.4 + +on: + push: + tags: + - "*" + branches: + - release-v[0-9]+.[0-9]+.[0-9]+* + - release-cumulus-v[0-9]+* + - release-polkadot-v[0-9]+* + + schedule: + - cron: "00 02 * * 1" # 2AM weekly on monday + + workflow_dispatch: + +jobs: + find-runtimes: + name: Scan repo paritytech/polkadot-sdk + outputs: + runtime: ${{ steps.get_runtimes_list.outputs.runtime }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + with: + fetch-depth: 0 + + - name: Install tooling + run: | + URL=https://github.com/chevdor/toml-cli/releases/download/v${{ env.TOML_CLI_VERSION }}/toml_linux_amd64_v${{ env.TOML_CLI_VERSION }}.deb + curl -L $URL --output toml.deb + sudo dpkg -i toml.deb + toml --version; jq --version + + - name: Scan runtimes + env: + EXCLUDED_RUNTIMES: "substrate-test" + run: | + . 
./.github/scripts/common/lib.sh + + echo "Github workspace: ${{ github.workspace }}" + echo "Current folder: $(pwd)"; ls -al + ls -al + + MATRIX=$(find_runtimes | tee runtimes_list.json) + echo $MATRIX + + - name: Get runtimes list + id: get_runtimes_list + run: | + ls -al + MATRIX=$(cat runtimes_list.json) + echo $MATRIX + echo "runtime=$MATRIX" >> $GITHUB_OUTPUT + + srtool: + runs-on: ubuntu-latest + needs: + - find-runtimes + strategy: + fail-fast: false + matrix: ${{ fromJSON(needs.find-runtimes.outputs.runtime) }} + + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + with: + fetch-depth: 0 + + - name: Srtool build + id: srtool_build + uses: chevdor/srtool-actions@v0.9.1 + with: + chain: ${{ matrix.chain }} + runtime_dir: ${{ matrix.runtime_dir }} + + - name: Summary + run: | + echo '${{ steps.srtool_build.outputs.json }}' | jq > ${{ matrix.chain }}-srtool-digest.json + cat ${{ matrix.chain }}-srtool-digest.json + echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}" + echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}" + + # it takes a while to build the runtime, so let's save the artifact as soon as we have it + - name: Archive Artifacts for ${{ matrix.chain }} + uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + with: + name: ${{ matrix.chain }}-runtime + path: | + ${{ steps.srtool_build.outputs.wasm }} + ${{ steps.srtool_build.outputs.wasm_compressed }} + ${{ matrix.chain }}-srtool-digest.json + + # We now get extra information thanks to subwasm + - name: Install subwasm + run: | + wget https://github.com/chevdor/subwasm/releases/download/v${{ env.SUBWASM_VERSION }}/subwasm_linux_amd64_v${{ env.SUBWASM_VERSION }}.deb + sudo dpkg -i subwasm_linux_amd64_v${{ env.SUBWASM_VERSION }}.deb + subwasm --version + + - name: Show Runtime information + shell: bash + run: | + subwasm info ${{ steps.srtool_build.outputs.wasm }} + subwasm info ${{ steps.srtool_build.outputs.wasm_compressed }} + subwasm --json info ${{ steps.srtool_build.outputs.wasm }} > ${{ matrix.chain }}-info.json + subwasm --json info ${{ steps.srtool_build.outputs.wasm_compressed }} > ${{ matrix.chain }}-compressed-info.json + + - name: Extract the metadata + shell: bash + run: | + subwasm meta ${{ steps.srtool_build.outputs.wasm }} + subwasm --json meta ${{ steps.srtool_build.outputs.wasm }} > ${{ matrix.chain }}-metadata.json + + - name: Check the metadata diff + shell: bash + # the following subwasm call will error for chains that are not known and/or live, that includes shell for instance + run: | + subwasm diff ${{ steps.srtool_build.outputs.wasm }} --chain-b ${{ matrix.chain }} || \ + echo "Subwasm call failed, check the logs. 
This is likely because ${{ matrix.chain }} is not known by subwasm" | \ + tee ${{ matrix.chain }}-diff.txt + + - name: Archive Subwasm results + uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + with: + name: ${{ matrix.chain }}-runtime + path: | + ${{ matrix.chain }}-info.json + ${{ matrix.chain }}-compressed-info.json + ${{ matrix.chain }}-metadata.json + ${{ matrix.chain }}-diff.txt diff --git a/.gitignore b/.gitignore index 581c417cb85408bee2e13bedefe5aa1edf0d3d33..2f1631fb4b9d14496021907cca96b4cdf4902eb8 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ bin/node-template/Cargo.lock nohup.out polkadot_argument_parsing polkadot.* +!docs/sdk/src/polkadot_sdk/polkadot.rs pwasm-alloc/Cargo.lock pwasm-libc/Cargo.lock release-artifacts diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f507afda23e304d341739841f0d16d5400097e58..dc4b3cf162e177a2ce15e1cf943c788237db2f81 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,6 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.79" DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" default: @@ -70,7 +69,6 @@ default: .common-before-script: before_script: - !reference [.job-switcher, before_script] - - !reference [.timestamp, before_script] - !reference [.pipeline-stopper-vars, script] .job-switcher: @@ -108,27 +106,37 @@ default: .docker-env: image: "${CI_IMAGE}" + variables: + FL_FORKLIFT_VERSION: !reference [.forklift, variables, FL_FORKLIFT_VERSION] before_script: - !reference [.common-before-script, before_script] - !reference [.prepare-env, before_script] - !reference [.rust-info-script, script] - - !reference [.rusty-cachier, before_script] + - !reference [.forklift-cache, before_script] tags: - linux-docker -# rusty-cachier's hidden job. Parts of this job are used to instrument the pipeline's other real jobs with rusty-cachier -# rusty-cachier's commands are described here: https://gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client#description -.rusty-cachier: +# +.forklift-cache: before_script: - # - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash - # - mkdir -p cargo_home cargo_target_dir - # - export CARGO_HOME=$CI_PROJECT_DIR/cargo_home - # - export CARGO_TARGET_DIR=$CI_PROJECT_DIR/cargo_target_dir - # - find . 
\( -path ./cargo_target_dir -o -path ./cargo_home \) -prune -o -type f -exec touch -t 202005260100 {} + - # - git restore-mtime - # - rusty-cachier --version - # - rusty-cachier project touch-changed - - echo tbd + - 'curl --header "PRIVATE-TOKEN: $FL_CI_GROUP_TOKEN" -o forklift -L "${CI_API_V4_URL}/projects/676/packages/generic/forklift/${FL_FORKLIFT_VERSION}/forklift_${FL_FORKLIFT_VERSION}_linux_amd64"' + - chmod +x forklift + - mkdir .forklift + - cp $FL_FORKLIFT_CONFIG .forklift/config.toml + - export FORKLIFT_PACKAGE_SUFFIX=${CI_JOB_NAME/ [0-9 \/]*} + - shopt -s expand_aliases + - export PATH=$PATH:$(pwd) + - | + if [ "$FORKLIFT_BYPASS" != "true" ]; then + echo "FORKLIFT_BYPASS not set, creating alias cargo='forklift cargo'" + alias cargo="forklift cargo" + fi + - ls -al + - rm -f forklift.sock + - forklift clean + # + - echo "FL_FORKLIFT_VERSION ${FL_FORKLIFT_VERSION}" + - echo "FORKLIFT_PACKAGE_SUFFIX $FORKLIFT_PACKAGE_SUFFIX" .common-refs: rules: @@ -136,11 +144,13 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 .test-pr-refs: rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues # handle the specific case where benches could store incorrect bench data because of the downstream staging runs # exclude cargo-check-benches from such runs @@ -152,6 +162,7 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 .test-refs-no-trigger: @@ -162,6 +173,7 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ @@ -172,6 +184,7 @@ default: - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues .publish-refs: rules: @@ -192,9 +205,7 @@ default: - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - -.zombienet-refs: - extends: .build-refs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues include: # check jobs @@ -209,14 +220,13 @@ include: - .gitlab/pipeline/publish.yml # zombienet jobs - .gitlab/pipeline/zombienet.yml - # timestamp handler - - project: parity/infrastructure/ci_cd/shared - ref: v0.2 - file: /common/timestamp.yml # ci image - project: parity/infrastructure/ci_cd/shared ref: main file: /common/ci-unified.yml + - project: parity/infrastructure/ci_cd/shared + ref: main + file: /common/forklift.yml # This job cancels the whole pipeline if any of provided jobs fail. # In a DAG, every jobs chain is executed independently of others. The `fail_fast` principle suggests # to fail the pipeline as soon as possible to shorten the feedback loop. 
diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 5c13045706c40e2a6049e67e3a5bb2a1b140fddb..20aa4a5c2a2835cdb859a0768be055064594b6b3 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -125,7 +125,7 @@ build-rustdoc: find "$path" -name '*.html' | xargs -I {} -P "$(nproc)" bash -c 'process_file "$@"' _ {} } inject_simple_analytics "./crate-docs" - - echo "" > ./crate-docs/index.html + - echo "" > ./crate-docs/index.html build-implementers-guide: stage: build @@ -220,6 +220,7 @@ build-test-parachain: # DAG: build-runtime-assets -> build-runtime-collectives -> build-runtime-bridge-hubs # DAG: build-runtime-assets -> build-runtime-collectives -> build-runtime-contracts +# DAG: build-runtime-assets -> build-runtime-coretime # DAG: build-runtime-assets -> build-runtime-starters -> build-runtime-testing build-runtime-assets: <<: *build-runtime-template @@ -235,6 +236,15 @@ build-runtime-collectives: - job: build-runtime-assets artifacts: false +build-runtime-coretime: + <<: *build-runtime-template + variables: + RUNTIME_PATH: "cumulus/parachains/runtimes/coretime" + # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs + needs: + - job: build-runtime-assets + artifacts: false + build-runtime-bridge-hubs: <<: *build-runtime-template variables: diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index cbb3baf277cdc3ce78fd84d26df81e1f34e77e9b..1ed12e68c2ce19b67dd5aca03cec85702351c039 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -4,8 +4,11 @@ cargo-clippy: - .docker-env - .common-refs - .pipeline-stopper-artifacts + variables: + RUSTFLAGS: "-D warnings" script: - - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --all-targets --locked --workspace + - SKIP_WASM_BUILD=1 cargo clippy --all-targets --locked --workspace + - SKIP_WASM_BUILD=1 cargo clippy --all-targets --all-features --locked --workspace check-try-runtime: stage: check @@ -65,11 +68,7 @@ test-rust-features: - .kubernetes-env - .test-refs-no-trigger-prs-only script: - - git clone - --depth=1 - --branch="master" - https://github.com/paritytech/pipeline-scripts - - bash ./pipeline-scripts/rust-features.sh . + - bash .gitlab/rust-features.sh . 
job-starter: stage: check @@ -87,9 +86,17 @@ check-rust-feature-propagation: - .kubernetes-env - .common-refs script: - - cargo install --locked --version 0.13.3 -q -f zepter && zepter --version - zepter run check +check-toml-format: + stage: check + extends: + - .kubernetes-env + - .common-refs + script: + - taplo format --check --config .config/taplo.toml + - echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues" + # More info can be found here: https://github.com/paritytech/polkadot/pull/5865 .check-runtime-migration: stage: check @@ -101,16 +108,17 @@ check-rust-feature-propagation: export RUST_LOG=remote-ext=debug,runtime=debug echo "---------- Downloading try-runtime CLI ----------" - curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.3.3/try-runtime-x86_64-unknown-linux-musl -o try-runtime + curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.0/try-runtime-x86_64-unknown-linux-musl -o try-runtime chmod +x ./try-runtime echo "---------- Building ${PACKAGE} runtime ----------" time cargo build --release --locked -p "$PACKAGE" --features try-runtime echo "---------- Executing on-runtime-upgrade for ${NETWORK} ----------" - time ./try-runtime \ + time ./try-runtime ${COMMAND_EXTRA_ARGS} \ --runtime ./target/release/wbuild/"$PACKAGE"/"$WASM" \ - on-runtime-upgrade --checks=pre-and-post ${EXTRA_ARGS} live --uri ${URI} + on-runtime-upgrade --disable-spec-version-check --checks=all ${SUBCOMMAND_EXTRA_ARGS} live --uri ${URI} + sleep 5 # Check runtime migrations for Parity managed relay chains check-runtime-migration-westend: @@ -124,7 +132,7 @@ check-runtime-migration-westend: PACKAGE: "westend-runtime" WASM: "westend_runtime.compact.compressed.wasm" URI: "wss://westend-try-runtime-node.parity-chains.parity.io:443" - EXTRA_ARGS: "--no-weight-warnings" + SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" check-runtime-migration-rococo: stage: check @@ -137,7 +145,7 @@ check-runtime-migration-rococo: PACKAGE: "rococo-runtime" WASM: "rococo_runtime.compact.compressed.wasm" URI: "wss://rococo-try-runtime-node.parity-chains.parity.io:443" - EXTRA_ARGS: "--no-weight-warnings" + SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" # Check runtime migrations for Parity managed asset hub chains check-runtime-migration-asset-hub-westend: @@ -152,6 +160,31 @@ check-runtime-migration-asset-hub-westend: WASM: "asset_hub_westend_runtime.compact.compressed.wasm" URI: "wss://westend-asset-hub-rpc.polkadot.io:443" +check-runtime-migration-asset-hub-rococo: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "asset-hub-rococo" + PACKAGE: "asset-hub-rococo-runtime" + WASM: "asset_hub_rococo_runtime.compact.compressed.wasm" + URI: "wss://rococo-asset-hub-rpc.polkadot.io:443" + +# Check runtime migrations for Parity managed bridge hub chains +check-runtime-migration-bridge-hub-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "bridge-hub-westend" + PACKAGE: "bridge-hub-westend-runtime" + WASM: "bridge_hub_westend_runtime.compact.compressed.wasm" + URI: "wss://westend-bridge-hub-rpc.polkadot.io:443" + check-runtime-migration-bridge-hub-rococo: stage: check extends: @@ -177,6 +210,33 @@ check-runtime-migration-contracts-rococo: WASM: "contracts_rococo_runtime.compact.compressed.wasm" URI: "wss://rococo-contracts-rpc.polkadot.io:443" +# Check runtime migrations for Parity managed collectives chains 
+check-runtime-migration-collectives-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "collectives-westend" + PACKAGE: "collectives-westend-runtime" + WASM: "collectives_westend_runtime.compact.compressed.wasm" + URI: "wss://westend-collectives-rpc.polkadot.io:443" + COMMAND_EXTRA_ARGS: "--disable-spec-name-check" + +# Check runtime migrations for Parity managed coretime chain +check-runtime-migration-coretime-rococo: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "coretime-rococo" + PACKAGE: "coretime-rococo-runtime" + WASM: "coretime_rococo_runtime.compact.compressed.wasm" + URI: "wss://rococo-coretime-rpc.polkadot.io:443" + find-fail-ci-phrase: stage: check variables: diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index a03d407c040904a6a1a4fd7095052a039d0019e0..92ebc9eea1faad8a6ce87b1bb322431de1126aa4 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -63,16 +63,16 @@ publish-rustdoc: after_script: - rm -rf .git/ ./* -# cumulus - +# note: images are used not only in zombienet but also in rococo, wococo and versi .build-push-image: image: $BUILDAH_IMAGE variables: DOCKERFILE: "" # docker/path-to.Dockerfile IMAGE_NAME: "" # docker.io/paritypr/image_name script: - # - test "$PARITYPR_USER" -a "$PARITYPR_PASS" || - # ( echo "no docker credentials provided"; exit 1 ) + # Dockertag should differ in a merge queue + # TODO: test this + # - if [[ $CI_COMMIT_REF_NAME == *"gh-readonly-queue"* ]]; export DOCKER_IMAGES_VERSION="${CI_COMMIT_SHORT_SHA}"; fi - $BUILDAH_COMMAND build --format=docker --build-arg VCS_REF="${CI_COMMIT_SHA}" @@ -112,59 +112,6 @@ build-push-image-test-parachain: variables: DOCKERFILE: "docker/dockerfiles/test-parachain_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/test-parachain" -# publish-s3: -# stage: publish -# extends: -# - .kubernetes-env -# - .publish-refs -# image: paritytech/awscli:latest -# needs: -# - job: build-linux-stable-cumulus -# artifacts: true -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/${ARCH}-${DOCKER_OS}" -# script: -# - echo "___Publishing a binary with debug assertions!___" -# - echo "___VERSION = $(cat ./artifacts/VERSION) ___" -# - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/ -# - echo "___Updating objects in latest path___" -# - aws s3 sync s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/ s3://${BUCKET}/${PREFIX}/latest/ -# after_script: -# - aws s3 ls s3://${BUCKET}/${PREFIX}/latest/ -# --recursive --human-readable --summarize - -# publish-benchmarks-assets-s3: &publish-benchmarks -# stage: publish -# extends: -# - .kubernetes-env -# - .benchmarks-refs -# image: paritytech/awscli:latest -# needs: -# - job: benchmarks-assets -# artifacts: true -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/$CI_COMMIT_REF_NAME/benchmarks-assets" -# script: -# - echo "___Publishing benchmark results___" -# - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/ -# after_script: -# - aws s3 ls s3://${BUCKET}/${PREFIX}/ --recursive --human-readable --summarize - -# publish-benchmarks-collectives-s3: -# <<: *publish-benchmarks -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/$CI_COMMIT_REF_NAME/benchmarks-collectives" -# needs: -# - job: benchmarks-collectives -# artifacts: true - -### Polkadot 
build-push-image-polkadot-debug: stage: publish @@ -217,143 +164,3 @@ build-push-image-substrate-pr: variables: DOCKERFILE: "docker/dockerfiles/substrate_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/substrate" -# old way - -# .build-push-image-polkadot: -# before_script: -# # - test -s ./artifacts/VERSION || exit 1 -# # - test -s ./artifacts/EXTRATAG || exit 1 -# - VERSION="$(cat ./artifacts/VERSION)" -# - EXTRATAG="$(cat ./artifacts/EXTRATAG)" -# - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})" -# script: -# # - test "$DOCKER_USER" -a "$DOCKER_PASS" || -# # ( echo "no docker credentials provided"; exit 1 ) -# - cd ./artifacts -# - $BUILDAH_COMMAND build -# --format=docker -# --build-arg VCS_REF="${CI_COMMIT_SHA}" -# --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" -# --build-arg IMAGE_NAME="${IMAGE_NAME}" -# --tag "$IMAGE_NAME:$VERSION" -# --tag "$IMAGE_NAME:$EXTRATAG" -# --file ${DOCKERFILE} . -# # The job will success only on the protected branch -# # - echo "$DOCKER_PASS" | -# # buildah login --username "$DOCKER_USER" --password-stdin docker.io -# # - $BUILDAH_COMMAND info -# # - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$VERSION" -# # - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$EXTRATAG" -# after_script: -# - buildah logout --all - -# publish-polkadot-debug-image: -# stage: publish -# image: ${BUILDAH_IMAGE} -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# rules: -# - if: $CI_PIPELINE_SOURCE == "web" -# - if: $CI_PIPELINE_SOURCE == "schedule" -# - if: $CI_COMMIT_REF_NAME == "master" -# - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs -# - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 -# variables: -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile -# DOCKERFILE: polkadot_injected_debug.Dockerfile -# IMAGE_NAME: docker.io/paritypr/polkadot-debug -# needs: -# - job: build-linux-stable -# artifacts: true -# after_script: -# # pass artifacts to the zombienet-tests job -# # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance -# - echo "PARACHAINS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/parachains.env -# - echo "PARACHAINS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/parachains.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/parachains.env -# expire_in: 1 days - -# publish-test-collators-image: -# # service image for zombienet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: ${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/collator_injected.Dockerfile -# DOCKERFILE: collator_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/colander -# needs: -# - job: build-test-collators -# artifacts: true -# after_script: -# - buildah logout --all -# # pass artifacts to the zombienet-tests job -# - echo "COLLATOR_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/collator.env -# - echo "COLLATOR_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/collator.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/collator.env - -# publish-malus-image: -# # service image for Simnet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: 
${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/malus_injected.Dockerfile -# DOCKERFILE: malus_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/malus -# needs: -# - job: build-malus -# artifacts: true -# after_script: -# - buildah logout "$IMAGE_NAME" -# # pass artifacts to the zombienet-tests job -# - echo "MALUS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/malus.env -# - echo "MALUS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/malus.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/malus.env - -# substrate - -# publish-substrate-image-pr: -# # service image for zombienet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: ${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# DOCKERFILE: substrate_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/substrate -# needs: -# - job: build-linux-substrate -# artifacts: true -# after_script: -# - buildah logout "$IMAGE_NAME" diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 76c75e815ce54d76b21ec0e7bf01926ea07c1b5e..e9dbe20088116721470e57a02b9b3d1353634c06 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -49,16 +49,6 @@ short-benchmark-westend: &short-bench script: - ./artifacts/polkadot-parachain benchmark pallet --chain $RUNTIME_CHAIN --pallet "*" --extrinsic "*" --steps 2 --repeat 1 -short-benchmark-asset-hub-polkadot: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: asset-hub-polkadot-dev - -short-benchmark-asset-hub-kusama: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: asset-hub-kusama-dev - short-benchmark-asset-hub-rococo: <<: *short-bench-cumulus variables: @@ -69,32 +59,32 @@ short-benchmark-asset-hub-westend: variables: RUNTIME_CHAIN: asset-hub-westend-dev -short-benchmark-bridge-hub-polkadot: +short-benchmark-bridge-hub-rococo: <<: *short-bench-cumulus variables: - RUNTIME_CHAIN: bridge-hub-polkadot-dev + RUNTIME_CHAIN: bridge-hub-rococo-dev -short-benchmark-bridge-hub-kusama: +short-benchmark-bridge-hub-westend: <<: *short-bench-cumulus variables: - RUNTIME_CHAIN: bridge-hub-kusama-dev + RUNTIME_CHAIN: bridge-hub-westend-dev -short-benchmark-bridge-hub-rococo: +short-benchmark-collectives-westend: <<: *short-bench-cumulus variables: - RUNTIME_CHAIN: bridge-hub-rococo-dev + RUNTIME_CHAIN: collectives-westend-dev -short-benchmark-bridge-hub-westend: +short-benchmark-coretime-rococo: <<: *short-bench-cumulus variables: - RUNTIME_CHAIN: bridge-hub-westend-dev + RUNTIME_CHAIN: coretime-rococo-dev -short-benchmark-collectives-polkadot: +short-benchmark-coretime-westend: <<: *short-bench-cumulus variables: - RUNTIME_CHAIN: collectives-polkadot-dev + RUNTIME_CHAIN: coretime-westend-dev -short-benchmark-glutton-kusama: +short-benchmark-glutton-westend: <<: *short-bench-cumulus variables: - RUNTIME_CHAIN: glutton-kusama-dev-1300 + RUNTIME_CHAIN: glutton-westend-dev-1300 diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 4ed3ec19c48a72af86f98efd2d9973e838ed821a..bbe9b612bc37bb2c1d76653c8784bb8b0d953f50 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -29,7 +29,7 @@ test-linux-stable: --locked \ --release \ --no-fail-fast \ - --features try-runtime,experimental \ + --features try-runtime,experimental,ci-only-tests 
\ --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} # Upload tests results to Elasticsearch - echo "Upload test results to Elasticsearch" @@ -270,7 +270,7 @@ cargo-check-benches: SKIP_WASM_BUILD=1 time cargo check --locked --benches --all; cargo run --locked --release -p node-bench -- ::trie::read::small --json | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json; - echo "___Uploading cache for rusty-cachier___"; + echo "___Cache could be uploaded___"; ;; 2) cargo run --locked --release -p node-bench -- ::node::import::sr25519::transfer_keep_alive::paritydb::small --json @@ -303,8 +303,15 @@ node-bench-regression-guard: artifacts: true variables: CI_IMAGE: "paritytech/node-bench-regression-guard:latest" + # current git limit is 20, set to 100 to avoid failures (gitlab removes old artifacts) + GIT_DEPTH: 100 + GIT_STRATEGY: fetch before_script: [""] script: + - if [ $(ls -la artifacts/benches/ | grep master | wc -l) == 0 ]; then + echo "Couldn't find master artifacts, consider increasing GIT_LIMIT variable"; + exit 1; + fi - echo "------- IMPORTANT -------" - echo "node-bench-regression-guard depends on the results of a cargo-check-benches job" - echo "In case of this job failure, check your pipeline's cargo-check-benches" @@ -313,7 +320,7 @@ node-bench-regression-guard: after_script: [""] # if this fails run `bot update-ui` in the Pull Request or "./scripts/update-ui-tests.sh" locally -# see ./docs/CONTRIBUTING.md#ui-tests +# see ./docs/contributor/CONTRIBUTING.md#ui-tests test-frame-ui: stage: test extends: @@ -395,7 +402,6 @@ test-linux-stable-int: RUN_UI_TESTS: 1 script: - WASM_BUILD_NO_COLOR=1 - RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace time cargo test -p staging-node-cli --release --locked -- --ignored # more information about this job can be found here: @@ -439,7 +445,8 @@ cargo-check-each-crate: - .run-immediately # - .collect-artifacts variables: - # $CI_JOB_NAME is set manually so that rusty-cachier can share the cache for all + RUSTFLAGS: "-D warnings" + # $CI_JOB_NAME is set manually so that cache could be shared for all jobs # "cargo-check-each-crate I/N" jobs CI_JOB_NAME: cargo-check-each-crate timeout: 2h @@ -462,10 +469,10 @@ cargo-check-each-crate-macos: variables: SKIP_WASM_BUILD: 1 script: - # TODO: enable rusty-cachier once it supports Mac # TODO: use parallel jobs, as per cargo-check-each-crate, once more Mac runners are available # - time ./scripts/ci/gitlab/check-each-crate.py 1 1 - time cargo check --workspace --locked + timeout: 2h tags: - osx @@ -488,7 +495,7 @@ cargo-hfuzz: # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr - # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling rusty-cachier's absolute CARGO_TARGET_DIR + # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR HFUZZ_BUILD_ARGS: > --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" @@ -520,6 +527,6 @@ test-syscalls: - ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls | diff -u prepare-worker-syscalls - after_script: - if [[ "$CI_JOB_STATUS" == "failed" ]]; then - printf 
"The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; + printf "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; fi - allow_failure: true # TODO: remove this once we have an idea how often the syscall lists will change + allow_failure: false # this rarely triggers in practice diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 64210d6a00ab5b83b6b018ca3e0c27d827f74de1..d5845611c60d14f619c5a27d68822967a23474e4 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,3 +1,8 @@ +.zombienet-refs: + extends: .build-refs + variables: + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.86" + include: # substrate tests - .gitlab/pipeline/zombienet/substrate.yml diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 3f2c6f64fbfe120ae15bf394982d6dbdd29405b0..409c0aba68e7546b896d35ebd01bb26bc4fec992 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -3,6 +3,8 @@ .zombienet-before-script: before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${POLKADOT_IMAGE}" @@ -25,8 +27,10 @@ needs: - job: build-push-image-test-parachain artifacts: true + - job: build-push-image-polkadot-debug + artifacts: true variables: - POLKADOT_IMAGE: "docker.io/paritypr/polkadot-debug:master" + POLKADOT_IMAGE: "docker.io/paritypr/polkadot-debug:${DOCKER_IMAGES_VERSION}" GH_DIR: "https://github.com/paritytech/cumulus/tree/${CI_COMMIT_SHORT_SHA}/zombienet/tests" LOCAL_DIR: "/builds/parity/mirrors/polkadot-sdk/cumulus/zombienet/tests" COL_IMAGE: "docker.io/paritypr/test-parachain:${DOCKER_IMAGES_VERSION}" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 8fc8b280bba8c11e614ed0a97946d7d0e76bce44..6b89648c4e36ee8b804ab5771e7375d36f089bf3 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -4,6 +4,8 @@ # common settings for all zombienet jobs .zombienet-polkadot-common: before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" # from build-linux-stable job - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} @@ -12,12 +14,12 @@ - export MALUS_IMAGE="${MALUS_IMAGE}":${PIPELINE_IMAGE_TAG} - IMAGE_AVAILABLE=$(curl -o /dev/null -w "%{http_code}" -I -L -s https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/${BUILD_RELEASE_VERSION}) - if [ $IMAGE_AVAILABLE -eq 200 ]; then - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"; + export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"; else - echo "Getting the image to use as SECONDARY, using ${BUILD_RELEASE_VERSION} as base"; - VERSIONS=$(curl -L -s 'https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/' | jq -r '.results[].name'| 
grep -E "v[0-9]" |grep -vE "[0-9]-"); - VERSION_TO_USE=$(echo "${BUILD_RELEASE_VERSION}\n$VERSIONS"|sort -r|grep -A1 "${BUILD_RELEASE_VERSION}"|tail -1); - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${VERSION_TO_USE}"; + echo "Getting the image to use as SECONDARY, using ${BUILD_RELEASE_VERSION} as base"; + VERSIONS=$(curl -L -s 'https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/' | jq -r '.results[].name'| grep -E "v[0-9]" |grep -vE "[0-9]-"); + VERSION_TO_USE=$(echo "${BUILD_RELEASE_VERSION}\n$VERSIONS"|sort -r|grep -A1 "${BUILD_RELEASE_VERSION}"|tail -1); + export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${VERSION_TO_USE}"; fi - echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" @@ -113,10 +115,36 @@ zombienet-polkadot-functional-0006-parachains-max-tranche0: --local-dir="${LOCAL_DIR}/functional" --test="0006-parachains-max-tranche0.zndsl" +zombienet-polkadot-functional-0007-dispute-freshly-finalized: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0007-dispute-freshly-finalized.zndsl" + +zombienet-polkadot-functional-0008-dispute-old-finalized: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0008-dispute-old-finalized.zndsl" + +zombienet-polkadot-functional-0009-approval-voting-coalescing: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0009-approval-voting-coalescing.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" @@ -134,6 +162,8 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: extends: - .zombienet-polkadot-common before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - echo "Zombienet Tests Config" @@ -176,8 +206,10 @@ zombienet-polkadot-misc-0002-upgrade-node: - job: build-linux-stable artifacts: true before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="docker.io/parity/polkadot:latest" - - echo "Overrided poladot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - echo "Overridden polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - BUILD_LINUX_JOB_ID="$(cat ./artifacts/BUILD_LINUX_JOB_ID)" - export POLKADOT_PR_ARTIFACTS_URL="https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/${BUILD_LINUX_JOB_ID}/artifacts/raw/artifacts" diff --git a/.gitlab/pipeline/zombienet/substrate.yml 
b/.gitlab/pipeline/zombienet/substrate.yml index 9fb2f161ad73367a86279a6e7268f9116eae8f54..b687576267de5b40bab9fb1f544bb0afbb1959a0 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -4,6 +4,8 @@ # common settings for all zombienet jobs .zombienet-substrate-common: before_script: + # Exit if the job is not merge queue + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombienet Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${GH_DIR}" diff --git a/.gitlab/rust-features.sh b/.gitlab/rust-features.sh new file mode 100755 index 0000000000000000000000000000000000000000..c0ac192a6ec69ba16abb3bad2ec49de7e9cebb61 --- /dev/null +++ b/.gitlab/rust-features.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +############################################################################## +# +# This script checks that crates do not carelessly enable features that +# should stay disabled. It's important to check that since features +# are used to gate specific functionality which should only be enabled +# when the feature is explicitly enabled. +# +# Invocation scheme: +# ./rust-features.sh +# +# Example: +# ./rust-features.sh path/to/substrate +# +# The steps of this script: +# 1. Check that all required dependencies are installed. +# 2. Check that all rules are fulfilled for the whole workspace. If not: +# 3. Check all crates to find the offending ones. +# 4. Print all offending crates and exit with code 1. +# +############################################################################## + +set -eu + +# Check that cargo and grep are installed - otherwise abort. +command -v cargo >/dev/null 2>&1 || { echo >&2 "cargo is required but not installed. Aborting."; exit 1; } +command -v grep >/dev/null 2>&1 || { echo >&2 "grep is required but not installed. Aborting."; exit 1; } + +# Enter the workspace root folder. +cd "$1" +echo "Workspace root is $PWD" + +function main() { + feature_does_not_imply 'default' 'runtime-benchmarks' + feature_does_not_imply 'std' 'runtime-benchmarks' + feature_does_not_imply 'default' 'try-runtime' + feature_does_not_imply 'std' 'try-runtime' +} + +# Accepts two feature names as arguments. +# Checks that the first feature does not imply the second one. +function feature_does_not_imply() { + ENABLED=$1 + STAYS_DISABLED=$2 + echo "📏 Checking that $ENABLED does not imply $STAYS_DISABLED ..." + + # Check if the forbidden feature is enabled anywhere in the workspace. + # But only check "normal" dependencies, so no "dev" or "build" dependencies. + if cargo tree --no-default-features --locked --workspace -e features,normal --features "$ENABLED" | grep -qF "feature \"$STAYS_DISABLED\""; then + echo "❌ $ENABLED implies $STAYS_DISABLED in the workspace" + else + echo "✅ $ENABLED does not imply $STAYS_DISABLED in the workspace" + return + fi + + # Find all Cargo.toml files but exclude the root one since we already know that it is broken. + CARGOS=`find . -name Cargo.toml -not -path ./Cargo.toml` + NUM_CRATES=`echo "$CARGOS" | wc -l` + FAILED=0 + PASSED=0 + echo "🔍 Checking all $NUM_CRATES crates - this takes some time." + + for CARGO in $CARGOS; do + OUTPUT=$(cargo tree --no-default-features --locked --offline -e features,normal --features $ENABLED --manifest-path $CARGO 2>&1 || true) + + if echo "$OUTPUT" | grep -qF "not supported for packages in this workspace"; then + # This case just means that the pallet does not support the + # requested feature which is fine. 
+ PASSED=$((PASSED+1)) + elif echo "$OUTPUT" | grep -qF "feature \"$STAYS_DISABLED\""; then + echo "❌ Violation in $CARGO by dependency:" + # Best effort hint for which dependency needs to be fixed. + echo "$OUTPUT" | grep -wF "feature \"$STAYS_DISABLED\"" | head -n 1 + FAILED=$((FAILED+1)) + else + PASSED=$((PASSED+1)) + fi + done + + echo "Checked $NUM_CRATES crates in total of which $FAILED failed and $PASSED passed." + echo "Exiting with code 1" + exit 1 +} + +main "$@" + diff --git a/.gitlab/spellcheck.toml b/.gitlab/spellcheck.toml index 025c7a0a461b05050d8cc559e103d4e8f1dc43d7..8c60bf6915d16c8ea091119bfa70940411157509 100644 --- a/.gitlab/spellcheck.toml +++ b/.gitlab/spellcheck.toml @@ -8,20 +8,20 @@ use_builtin = true [hunspell.quirks] # He tagged it as 'TheGreatestOfAllTimes' transform_regex = [ -# `Type`'s + # `Type`'s "^'([^\\s])'$", -# 5x -# 10.7% + # 5x + # 10.7% "^[0-9_]+(?:\\.[0-9]*)?(x|%)$", -# Transforms' + # Transforms' "^(.*)'$", -# backslashes - "^\\+$", + # backslashes "^[0-9]*+k|MB|Mb|ms|Mbit|nd|th|rd$", -# single char `=` `>` `%` .. + "^\\+$", + # single char `=` `>` `%` .. "^=|>|<|%$", -# 22_100 - "^(?:[0-9]+_)+[0-9]+$" + # 22_100 + "^(?:[0-9]+_)+[0-9]+$", ] allow_concatenation = true allow_dashes = true diff --git a/.prdoc.toml b/.prdoc.toml new file mode 100644 index 0000000000000000000000000000000000000000..01e2eebe54b83a263dfa85cc28af7d041fd57bd0 --- /dev/null +++ b/.prdoc.toml @@ -0,0 +1,7 @@ +# Config file for prdoc, see https://github.com/paritytech/prdoc + +version = 1 +schema = "prdoc/schema_user.json" +output_dir = "prdoc" +prdoc_folders = ["prdoc"] +template = "prdoc/.template.prdoc" diff --git a/Cargo.lock b/Cargo.lock index 2a091ce6817da957cc7a486dc935add4277e36b6..81af9f584ed024a6d3d446d21b5205aee3df6296 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,15 +42,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "aead" version = "0.4.3" @@ -58,7 +49,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", ] [[package]] @@ -71,17 +61,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" -dependencies = [ - "aes-soft", - "aesni", - "cipher 0.2.5", -] - [[package]] name = "aes" version = "0.7.5" @@ -133,26 +112,6 @@ dependencies = [ "subtle 2.4.1", ] -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher 0.2.5", - "opaque-debug 0.3.0", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher 0.2.5", - "opaque-debug 0.3.0", -] - [[package]] name = "ahash" version = "0.7.6" @@ -191,12 +150,93 @@ version = "0.2.16" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +[[package]] +name = "alloy-primitives" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0628ec0ba5b98b3370bb6be17b12f23bfce8ee4ad83823325a20546d9b03b78" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "hex-literal", + "itoa", + "proptest", + "rand 0.8.5", + "ruint", + "serde", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc0fac0fc16baf1f63f78b47c3d24718f3619b0714076f6a02957d808d52cbef" +dependencies = [ + "alloy-rlp-derive", + "arrayvec 0.7.4", + "bytes", + "smol_str", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0391754c09fab4eae3404d19d0d297aa1c670c1775ab51d8a5312afeca23157" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.41", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a98ad1696a2e17f010ae8e43e9f2a1e930ed176a8e3ff77acfeff6dfb07b42c" +dependencies = [ + "const-hex", + "dunce", + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.41", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-types" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98d7107bed88e8f09f0ddcc3335622d87bfb6821f3e0c7473329fb1cfad5e015" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + [[package]] name = "always-assert" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4436e0292ab1bb631b42973c61205e704475fe8126af845c8d923c0996328127" +[[package]] +name = "amcl" +version = "0.3.0" +source = "git+https://github.com/snowfork/milagro_bls?rev=a6d66e4eb89015e352fb1c9f7b661ecdbb5b2176#a6d66e4eb89015e352fb1c9f7b661ecdbb5b2176" +dependencies = [ + "parity-scale-codec", + "scale-info", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -310,12 +350,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" -[[package]] -name = "arc-swap" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" - [[package]] name = "ark-bls12-377" version = "0.4.0" @@ -323,8 +357,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb00293ba84f51ce3bd026bd0de55899c4e68f0a39a5728cebae3a73ffdc0a4f" dependencies = [ "ark-ec", - "ark-ff", - "ark-std", + "ark-ff 0.4.2", + "ark-std 0.4.0", ] [[package]] @@ -336,7 +370,7 @@ dependencies = [ "ark-bls12-377", "ark-ec", "ark-models-ext", - "ark-std", + "ark-std 0.4.0", ] [[package]] @@ -346,9 +380,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488" dependencies = [ "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", ] [[package]] @@ -359,10 +393,10 @@ checksum = "b1dc4b3d08f19e8ec06e949712f95b8361e43f1391d94f65e4234df03480631c" dependencies = [ "ark-bls12-381", "ark-ec", - "ark-ff", + "ark-ff 
0.4.2", "ark-models-ext", - "ark-serialize", - "ark-std", + "ark-serialize 0.4.2", + "ark-std 0.4.0", ] [[package]] @@ -373,8 +407,8 @@ checksum = "2e0605daf0cc5aa2034b78d008aaf159f56901d92a52ee4f6ecdfdac4f426700" dependencies = [ "ark-bls12-377", "ark-ec", - "ark-ff", - "ark-std", + "ark-ff 0.4.2", + "ark-std 0.4.0", ] [[package]] @@ -385,9 +419,9 @@ checksum = "ccee5fba47266f460067588ee1bf070a9c760bf2050c1c509982c5719aadb4f2" dependencies = [ "ark-bw6-761", "ark-ec", - "ark-ff", + "ark-ff 0.4.2", "ark-models-ext", - "ark-std", + "ark-std 0.4.0", ] [[package]] @@ -396,10 +430,10 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" dependencies = [ - "ark-ff", + "ark-ff 0.4.2", "ark-poly", - "ark-serialize", - "ark-std", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "derivative", "hashbrown 0.13.2", "itertools 0.10.5", @@ -416,8 +450,8 @@ checksum = "b10d901b9ac4b38f9c32beacedfadcdd64e46f8d7f8e88c1ae1060022cf6f6c6" dependencies = [ "ark-bls12-377", "ark-ec", - "ark-ff", - "ark-std", + "ark-ff 0.4.2", + "ark-std 0.4.0", ] [[package]] @@ -428,9 +462,9 @@ checksum = "524a4fb7540df2e1a8c2e67a83ba1d1e6c3947f4f9342cc2359fc2e789ad731d" dependencies = [ "ark-ec", "ark-ed-on-bls12-377", - "ark-ff", + "ark-ff 0.4.2", "ark-models-ext", - "ark-std", + "ark-std 0.4.0", ] [[package]] @@ -441,8 +475,8 @@ checksum = "f9cde0f2aa063a2a5c28d39b47761aa102bda7c13c84fc118a61b87c7b2f785c" dependencies = [ "ark-bls12-381", "ark-ec", - "ark-ff", - "ark-std", + "ark-ff 0.4.2", + "ark-std 0.4.0", ] [[package]] @@ -453,9 +487,27 @@ checksum = "d15185f1acb49a07ff8cbe5f11a1adc5a93b19e211e325d826ae98e98e124346" dependencies = [ "ark-ec", "ark-ed-on-bls12-381-bandersnatch", - "ark-ff", + "ark-ff 0.4.2", "ark-models-ext", - "ark-std", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", ] [[package]] @@ -464,10 +516,10 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" dependencies = [ - "ark-ff-asm", - "ark-ff-macros", - "ark-serialize", - "ark-std", + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "derivative", "digest 0.10.7", "itertools 0.10.5", @@ -478,6 +530,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "ark-ff-asm" version = "0.4.2" @@ -488,6 +550,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + [[package]] name = "ark-ff-macros" version = "0.4.2" @@ -508,9 +582,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3e9eab5d4b5ff2f228b763d38442adc9b084b0a465409b059fac5c2308835ec2" dependencies = [ "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "derivative", ] @@ -520,9 +594,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" dependencies = [ - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "derivative", "hashbrown 0.13.2", ] @@ -534,9 +608,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51bd73bb6ddb72630987d37fa963e99196896c0d0ea81b7c894567e74a2f83af" dependencies = [ "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "parity-scale-codec", + "scale-info", +] + +[[package]] +name = "ark-scale" +version = "0.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f69c00b3b529be29528a6f2fd5fa7b1790f8bed81b9cdca17e326538545a179" +dependencies = [ + "ark-ec", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "parity-scale-codec", "scale-info", ] @@ -544,18 +632,28 @@ dependencies = [ [[package]] name = "ark-secret-scalar" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=e9782f9#e9782f938629c90f3adb3fff2358bc8d1386af3e" dependencies = [ "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "ark-transcript", "digest 0.10.7", - "rand_core 0.6.4", + "getrandom_or_panic", "zeroize", ] +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + [[package]] name = "ark-serialize" version = "0.4.2" @@ -563,7 +661,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" dependencies = [ "ark-serialize-derive", - "ark-std", + "ark-std 0.4.0", "digest 0.10.7", "num-bigint", ] @@ -579,6 +677,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "ark-std" version = "0.4.0" @@ -593,11 +701,11 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=e9782f9#e9782f938629c90f3adb3fff2358bc8d1386af3e" dependencies = [ - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "digest 0.10.7", "rand_core 0.6.4", "sha3", @@ -642,48 +750,20 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "asn1-rs" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" -dependencies = [ - 
"asn1-rs-derive 0.1.0", - "asn1-rs-impl", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time 0.3.27", -] - [[package]] name = "asn1-rs" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive 0.4.0", + "asn1-rs-derive", "asn1-rs-impl", "displaydoc", "nom", "num-traits", "rusticata-macros", "thiserror", - "time 0.3.27", -] - -[[package]] -name = "asn1-rs-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", + "time", ] [[package]] @@ -731,13 +811,54 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] -name = "asset-hub-kusama-runtime" +name = "asset-hub-rococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "asset-hub-rococo-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "rococo-emulated-chain", + "serde_json", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "asset-hub-rococo-integration-tests" +version = "1.0.0" +dependencies = [ + "assert_matches", + "asset-hub-rococo-runtime", + "asset-test-utils", + "emulated-integration-tests-common", + "frame-support", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "rococo-runtime", + "rococo-system-emulated-network", + "sp-runtime", + "staging-xcm", + "staging-xcm-executor", +] + +[[package]] +name = "asset-hub-rococo-runtime" version = "0.9.420" dependencies = [ "asset-test-utils", "assets-common", + "bp-asset-hub-rococo", + "bp-asset-hub-westend", + "bp-bridge-hub-rococo", + "bp-bridge-hub-westend", "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", @@ -775,14 +896,18 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub-router", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-runtime-common", "primitive-types", + "rococo-runtime-constants", "scale-info", "smallvec", + "snowbridge-rococo-common", + "snowbridge-router-primitives", "sp-api", "sp-block-builder", "sp-consensus-aura", @@ -805,219 +930,31 @@ dependencies = [ ] [[package]] -name = "asset-hub-polkadot-runtime" -version = "0.9.420" +name = "asset-hub-westend-emulated-chain" +version = "0.0.0" dependencies = [ + "asset-hub-westend-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", + "westend-emulated-chain", +] + +[[package]] +name = "asset-hub-westend-integration-tests" +version = "1.0.0" +dependencies = [ + "assert_matches", + "asset-hub-westend-runtime", "asset-test-utils", - "assets-common", - "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - 
"frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-nfts", - "pallet-nfts-runtime-api", - "pallet-proxy", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-uniques", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "sp-weights", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - -[[package]] -name = "asset-hub-rococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "asset-hub-rococo-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "rococo-emulated-chain", - "serde_json", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "asset-hub-rococo-integration-tests" -version = "1.0.0" -dependencies = [ - "assert_matches", - "asset-hub-rococo-runtime", - "asset-test-utils", - "emulated-integration-tests-common", - "frame-support", - "pallet-asset-conversion", - "pallet-assets", - "pallet-balances", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "rococo-runtime", - "rococo-system-emulated-network", - "sp-runtime", - "staging-xcm", - "staging-xcm-executor", -] - -[[package]] -name = "asset-hub-rococo-runtime" -version = "0.9.420" -dependencies = [ - "asset-test-utils", - "assets-common", - "bp-asset-hub-rococo", - "bp-asset-hub-westend", - "bp-asset-hub-wococo", - "bp-bridge-hub-rococo", - "bp-bridge-hub-westend", - "bp-bridge-hub-wococo", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-asset-conversion", - "pallet-asset-conversion-tx-payment", - "pallet-assets", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-nft-fractionalization", - "pallet-nfts", - "pallet-nfts-runtime-api", - "pallet-proxy", - "pallet-session", - "pallet-state-trie-migration", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-uniques", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "pallet-xcm-bridge-hub-router", - "parachains-common", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "primitive-types", - "rococo-runtime-constants", - "scale-info", - "smallvec", - "sp-api", - "sp-block-builder", - 
"sp-consensus-aura", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "sp-weights", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - -[[package]] -name = "asset-hub-westend-emulated-chain" -version = "0.0.0" -dependencies = [ - "asset-hub-westend-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "serde_json", - "sp-core", - "sp-runtime", - "westend-emulated-chain", -] - -[[package]] -name = "asset-hub-westend-integration-tests" -version = "1.0.0" -dependencies = [ - "assert_matches", - "asset-hub-westend-runtime", - "asset-test-utils", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "emulated-integration-tests-common", + "emulated-integration-tests-common", "frame-support", "frame-system", "pallet-asset-conversion", @@ -1050,7 +987,6 @@ dependencies = [ "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", @@ -1117,22 +1053,6 @@ dependencies = [ "westend-runtime-constants", ] -[[package]] -name = "asset-hub-wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "asset-hub-rococo-emulated-chain", - "asset-hub-rococo-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "serde_json", - "sp-core", - "sp-runtime", - "wococo-emulated-chain", -] - [[package]] name = "asset-test-utils" version = "1.0.0" @@ -1306,7 +1226,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -1317,13 +1237,13 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -1362,6 +1282,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "autocfg" version = "1.1.0" @@ -1385,15 +1317,15 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" -version = "0.0.3" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +version = "0.0.4" +source = "git+https://github.com/w3f/ring-vrf?rev=e9782f9#e9782f938629c90f3adb3fff2358bc8d1386af3e" dependencies = [ "ark-bls12-381", "ark-ec", "ark-ed-on-bls12-381-bandersnatch", - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "dleq_vrf", "fflonk", "merlin 3.0.0", @@ -1412,12 +1344,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" -[[package]] 
-name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -1499,7 +1425,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -1515,6 +1441,21 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitcoin_hashes" version = "0.11.0" @@ -1541,6 +1482,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", + "serde", "tap", "wyz", ] @@ -1600,16 +1542,15 @@ dependencies = [ [[package]] name = "blake3" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" +checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", "arrayvec 0.7.4", "cc", "cfg-if", "constant_time_eq 0.3.0", - "digest 0.10.7", ] [[package]] @@ -1618,7 +1559,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -1642,16 +1583,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.2.5", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -1661,12 +1592,6 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "blocking" version = "1.3.1" @@ -1684,13 +1609,14 @@ dependencies = [ [[package]] name = "bounded-collections" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b05133427c07c4776906f673ccf36c21b102c9829c641a5b56bd151d44fd6" +checksum = "ca548b6163b872067dc5eb82fd130c56881435e30367d2073594a3d9744120dd" dependencies = [ "log", "parity-scale-codec", "scale-info", + "schemars", "serde", ] @@ -1704,7 +1630,7 @@ dependencies = [ ] [[package]] -name = "bp-asset-hub-kusama" +name = "bp-asset-hub-rococo" version = "0.1.0" dependencies = [ "bp-xcm-bridge-hub-router", @@ -1714,48 +1640,17 @@ dependencies = [ ] [[package]] -name = "bp-asset-hub-polkadot" +name = "bp-asset-hub-westend" version = "0.1.0" dependencies = [ "bp-xcm-bridge-hub-router", "frame-support", "parity-scale-codec", "scale-info", - "sp-runtime", ] [[package]] -name = "bp-asset-hub-rococo" -version = "0.1.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = 
"bp-asset-hub-westend" -version = "0.1.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = "bp-asset-hub-wococo" -version = "0.1.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = "bp-bridge-hub-cumulus" +name = "bp-bridge-hub-cumulus" version = "0.1.0" dependencies = [ "bp-messages", @@ -1820,19 +1715,6 @@ dependencies = [ "sp-std 8.0.0", ] -[[package]] -name = "bp-bridge-hub-wococo" -version = "0.1.0" -dependencies = [ - "bp-bridge-hub-cumulus", - "bp-messages", - "bp-runtime", - "frame-support", - "sp-api", - "sp-runtime", - "sp-std 8.0.0", -] - [[package]] name = "bp-header-chain" version = "0.1.0" @@ -2025,15 +1907,9 @@ dependencies = [ ] [[package]] -name = "bp-wococo" +name = "bp-xcm-bridge-hub" version = "0.1.0" dependencies = [ - "bp-header-chain", - "bp-polkadot-core", - "bp-rococo", - "bp-runtime", - "frame-support", - "sp-api", "sp-std 8.0.0", ] @@ -2048,143 +1924,37 @@ dependencies = [ ] [[package]] -name = "bridge-hub-kusama-runtime" -version = "0.1.0" -dependencies = [ - "bridge-hub-test-utils", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "serde", - "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - -[[package]] -name = "bridge-hub-polkadot-runtime" +name = "bridge-hub-common" version = "0.1.0" dependencies = [ - "bridge-hub-test-utils", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", "pallet-message-queue", - "pallet-multisig", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - 
"polkadot-runtime-common", "scale-info", - "serde", - "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", + "snowbridge-core", "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-io", - "sp-offchain", "sp-runtime", - "sp-session", "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", ] [[package]] name = "bridge-hub-rococo-emulated-chain" version = "0.0.0" dependencies = [ + "bridge-hub-common", "bridge-hub-rococo-runtime", "cumulus-primitives-core", "emulated-integration-tests-common", "frame-support", "parachains-common", "serde_json", + "snowbridge-core", + "snowbridge-inbound-queue", + "snowbridge-outbound-queue", + "snowbridge-router-primitives", + "snowbridge-system", "sp-core", "sp-runtime", ] @@ -2193,6 +1963,7 @@ dependencies = [ name = "bridge-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ + "asset-hub-rococo-runtime", "asset-test-utils", "bp-messages", "bridge-hub-rococo-runtime", @@ -2200,12 +1971,27 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", + "hex", + "hex-literal", + "pallet-assets", + "pallet-balances", "pallet-bridge-messages", "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", - "rococo-wococo-system-emulated-network", + "penpal-runtime", + "rococo-system-emulated-network", + "rococo-westend-system-emulated-network", + "scale-info", + "snowbridge-core", + "snowbridge-inbound-queue", + "snowbridge-outbound-queue", + "snowbridge-rococo-common", + "snowbridge-router-primitives", + "snowbridge-system", + "sp-core", + "sp-runtime", "staging-xcm", "staging-xcm-executor", ] @@ -2216,23 +2002,22 @@ version = "0.1.0" dependencies = [ "bp-asset-hub-rococo", "bp-asset-hub-westend", - "bp-asset-hub-wococo", + "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-bridge-hub-wococo", "bp-header-chain", "bp-messages", "bp-parachains", + "bp-polkadot-bulletin", "bp-polkadot-core", "bp-relayers", "bp-rococo", "bp-runtime", "bp-westend", - "bp-wococo", + "bridge-hub-common", "bridge-hub-test-utils", "bridge-runtime-common", "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", @@ -2265,6 +2050,7 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -2274,6 +2060,17 @@ dependencies = [ "scale-info", "serde", "smallvec", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-ethereum-beacon-client", + "snowbridge-inbound-queue", + "snowbridge-outbound-queue", + "snowbridge-outbound-queue-runtime-api", + "snowbridge-rococo-common", + "snowbridge-router-primitives", + "snowbridge-runtime-common", + "snowbridge-system", + "snowbridge-system-runtime-api", "sp-api", "sp-block-builder", "sp-consensus-aura", @@ -2302,8 +2099,6 @@ name = "bridge-hub-test-utils" version = "0.1.0" dependencies = [ "asset-test-utils", - "bp-bridge-hub-rococo", - "bp-bridge-hub-wococo", "bp-header-chain", "bp-messages", "bp-parachains", @@ -2318,6 +2113,7 @@ dependencies = [ "frame-executive", "frame-support", "frame-system", + "impl-trait-for-tuples", "log", "pallet-balances", "pallet-bridge-grandpa", @@ -2336,6 +2132,7 @@ dependencies = [ "sp-io", "sp-keyring", "sp-runtime", + "sp-std 
8.0.0", "sp-tracing 10.0.0", "staging-parachain-info", "staging-xcm", @@ -2347,6 +2144,7 @@ dependencies = [ name = "bridge-hub-westend-emulated-chain" version = "0.0.0" dependencies = [ + "bridge-hub-common", "bridge-hub-westend-runtime", "cumulus-primitives-core", "emulated-integration-tests-common", @@ -2368,20 +2166,24 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", + "pallet-assets", + "pallet-balances", "pallet-bridge-messages", "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", + "rococo-westend-system-emulated-network", + "sp-runtime", "staging-xcm", "staging-xcm-executor", - "westend-system-emulated-network", ] [[package]] name = "bridge-hub-westend-runtime" version = "0.1.0" dependencies = [ + "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", @@ -2393,10 +2195,10 @@ dependencies = [ "bp-rococo", "bp-runtime", "bp-westend", + "bridge-hub-common", "bridge-hub-test-utils", "bridge-runtime-common", "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", @@ -2429,6 +2231,7 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -2461,21 +2264,6 @@ dependencies = [ "westend-runtime-constants", ] -[[package]] -name = "bridge-hub-wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "bridge-hub-rococo-emulated-chain", - "bridge-hub-rococo-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "serde_json", - "sp-core", - "sp-runtime", -] - [[package]] name = "bridge-runtime-common" version = "0.1.0" @@ -2487,6 +2275,7 @@ dependencies = [ "bp-relayers", "bp-runtime", "bp-test-utils", + "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "frame-support", "frame-system", @@ -2652,17 +2441,6 @@ dependencies = [ "libc", ] -[[package]] -name = "ccm" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" -dependencies = [ - "aead 0.3.2", - "cipher 0.2.5", - "subtle 2.4.1", -] - [[package]] name = "cexpr" version = "0.6.0" @@ -2741,15 +2519,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.27" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f56b4c72906975ca04becb8a30e102dfecddd0c06181e3e95ddc444be28881f8" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", - "time 0.1.45", "wasm-bindgen", "windows-targets 0.48.5", ] @@ -2789,7 +2566,7 @@ checksum = "b9b68e3193982cd54187d71afdb2a271ad4cf8af157858e9cb911b91321de143" dependencies = [ "core2", "multibase", - "multihash", + "multihash 0.17.0", "serde", "unsigned-varint", ] @@ -2861,23 +2638,32 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.6" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" dependencies = [ "clap_builder", - "clap_derive 4.4.2", + "clap_derive 4.4.7", +] + +[[package]] +name = "clap-num" +version = "1.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "488557e97528174edaa2ee268b23a809e0c598213a4bbcb4f34575a46fda147e" +dependencies = [ + "num-traits", ] [[package]] name = "clap_builder" -version = "4.4.6" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" dependencies = [ "anstream", "anstyle", - "clap_lex 0.5.1", + "clap_lex 0.6.0", "strsim", "terminal_size", ] @@ -2888,7 +2674,7 @@ version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "586a385f7ef2f8b4d86bddaa0c094794e7ccbfe5ffef1f434fe928143fc783a5" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", ] [[package]] @@ -2906,14 +2692,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -2927,9 +2713,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "coarsetime" @@ -2954,11 +2740,25 @@ dependencies = [ ] [[package]] -name = "collectives-polkadot-runtime" +name = "collectives-westend-emulated-chain" +version = "0.0.0" +dependencies = [ + "collectives-westend-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", + "westend-emulated-chain", +] + +[[package]] +name = "collectives-westend-runtime" version = "1.0.0" dependencies = [ "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", @@ -2975,6 +2775,7 @@ dependencies = [ "hex-literal", "log", "pallet-alliance", + "pallet-asset-rate", "pallet-aura", "pallet-authorship", "pallet-balances", @@ -2994,6 +2795,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", + "pallet-treasury", "pallet-utility", "pallet-xcm", "parachains-common", @@ -3023,6 +2825,7 @@ dependencies = [ "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", + "westend-runtime-constants", ] [[package]] @@ -3065,6 +2868,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "colored" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" +dependencies = [ + "is-terminal", + "lazy_static", + "windows-sys 0.48.0", +] + [[package]] name = "comfy-table" version = "7.0.1" @@ -3079,14 +2893,15 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#edd1e90b847e560bf60fc2e8712235ccfa11a9a9" +source = "git+https://github.com/w3f/ring-proof#b273d33f9981e2bb3375ab45faeb537f7ee35224" 
dependencies = [ "ark-ec", - "ark-ff", + "ark-ff 0.4.2", "ark-poly", - "ark-serialize", - "ark-std", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "fflonk", + "getrandom_or_panic", "merlin 3.0.0", "rand_chacha 0.3.1", ] @@ -3119,6 +2934,29 @@ dependencies = [ "windows-sys 0.45.0", ] +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if", + "wasm-bindgen", +] + +[[package]] +name = "const-hex" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5104de16b218eddf8e34ffe2f86f74bfa4e61e95a1b89732fccf6325efd0557" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + [[package]] name = "const-oid" version = "0.9.5" @@ -3197,7 +3035,6 @@ dependencies = [ "pallet-balances", "pallet-collator-selection", "pallet-contracts", - "pallet-contracts-primitives", "pallet-insecure-randomness-collective-flip", "pallet-message-queue", "pallet-multisig", @@ -3268,13 +3105,140 @@ dependencies = [ ] [[package]] -name = "cpp_demangle" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeaa953eaad386a53111e47172c2fedba671e5684c8dd601a5f474f4f118710f" +name = "coretime-rococo-runtime" +version = "0.1.0" dependencies = [ - "cfg-if", -] + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-broker", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "rococo-runtime-constants", + "scale-info", + "serde", + "smallvec", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", +] + +[[package]] +name = "coretime-westend-runtime" +version = "0.1.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + 
"pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "scale-info", + "serde", + "smallvec", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "westend-runtime-constants", +] + +[[package]] +name = "cpp_demangle" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeaa953eaad386a53111e47172c2fedba671e5684c8dd601a5f474f4f118710f" +dependencies = [ + "cfg-if", +] [[package]] name = "cpp_demangle" @@ -3402,21 +3366,6 @@ dependencies = [ "wasmtime-types", ] -[[package]] -name = "crc" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" - [[package]] name = "crc32fast" version = "1.3.2" @@ -3463,7 +3412,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.6", + "clap 4.4.11", "criterion-plot", "futures", "is-terminal", @@ -3551,18 +3500,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", - "subtle 2.4.1", - "zeroize", -] - [[package]] name = "crypto-bigint" version = "0.5.2" @@ -3638,12 +3575,13 @@ dependencies = [ name = "cumulus-client-cli" version = "0.1.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "parity-scale-codec", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-service", + "sp-blockchain", "sp-core", "sp-runtime", "url", @@ -3859,6 +3797,7 @@ dependencies = [ "cumulus-client-network", "cumulus-client-pov-recovery", "cumulus-primitives-core", + "cumulus-primitives-proof-size-hostfunction", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", @@ -3928,6 +3867,7 @@ dependencies = [ "cumulus-pallet-parachain-system-proc-macro", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-client", "cumulus-test-relay-sproof-builder", "environmental", @@ -3959,16 +3899,17 @@ dependencies = [ "sp-version", "staging-xcm", "trie-db", + "trie-standardmap", ] [[package]] name = "cumulus-pallet-parachain-system-proc-macro" version = "0.1.0" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -4107,6 +4048,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "cumulus-primitives-proof-size-hostfunction" +version = "0.1.0" +dependencies = [ + "sp-core", + "sp-externalities 0.19.0", + "sp-io", + 
"sp-runtime-interface 17.0.0", + "sp-state-machine", + "sp-trie", +] + [[package]] name = "cumulus-primitives-timestamp" version = "0.1.0" @@ -4193,11 +4146,13 @@ dependencies = [ "cumulus-relay-chain-interface", "cumulus-relay-chain-rpc-interface", "futures", + "parking_lot 0.12.1", "polkadot-availability-recovery", "polkadot-collator-protocol", "polkadot-core-primitives", "polkadot-network-bridge", "polkadot-node-collation-generation", + "polkadot-node-core-chain-api", "polkadot-node-core-prospective-parachains", "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", @@ -4205,16 +4160,19 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "sc-authority-discovery", + "sc-client-api", "sc-network", "sc-network-common", "sc-service", "sc-tracing", "sc-utils", "sp-api", + "sp-blockchain", "sp-consensus", "sp-consensus-babe", "sp-runtime", "substrate-prometheus-endpoint", + "tokio", "tracing", ] @@ -4248,6 +4206,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-storage 13.0.0", + "sp-version", "thiserror", "tokio", "tokio-util", @@ -4261,6 +4220,7 @@ version = "0.1.0" dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", @@ -4337,7 +4297,7 @@ name = "cumulus-test-service" version = "0.1.0" dependencies = [ "async-trait", - "clap 4.4.6", + "clap 4.4.11", "criterion 0.5.1", "cumulus-client-cli", "cumulus-client-consensus-common", @@ -4437,9 +4397,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ "cfg-if", "cpufeatures", @@ -4460,7 +4420,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -4500,7 +4460,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -4517,42 +4477,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core", - "quote", - "syn 1.0.109", + "syn 2.0.41", ] [[package]] @@ -4603,17 +4528,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "pem-rfc7468", - 
"zeroize", -] - [[package]] name = "der" version = "0.7.8" @@ -4624,27 +4538,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "der-parser" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" -dependencies = [ - "asn1-rs 0.3.1", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - [[package]] name = "der-parser" version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs 0.5.2", + "asn1-rs", "displaydoc", "nom", "num-bigint", @@ -4680,37 +4580,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "derive_builder" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder_macro" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" -dependencies = [ - "derive_builder_core", - "syn 1.0.109", -] - [[package]] name = "derive_more" version = "0.99.17" @@ -4816,7 +4685,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -4828,17 +4697,16 @@ checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" [[package]] name = "dleq_vrf" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=e9782f9#e9782f938629c90f3adb3fff2358bc8d1386af3e" dependencies = [ "ark-ec", - "ark-ff", - "ark-scale", + "ark-ff 0.4.2", + "ark-scale 0.0.12", "ark-secret-scalar", - "ark-serialize", - "ark-std", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "ark-transcript", "arrayvec 0.7.4", - "rand_core 0.6.4", "zeroize", ] @@ -4878,9 +4746,9 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.38", + "syn 2.0.41", "termcolor", - "toml 0.7.6", + "toml 0.7.8", "walkdir", ] @@ -4902,6 +4770,12 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +[[package]] +name = "dunce" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" + [[package]] name = "dyn-clonable" version = "0.9.0" @@ -4925,21 +4799,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.13" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" - -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 
0.3.1", - "signature 1.6.4", -] +checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" [[package]] name = "ecdsa" @@ -4947,12 +4809,12 @@ version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der 0.7.8", + "der", "digest 0.10.7", - "elliptic-curve 0.13.5", - "rfc6979 0.4.0", - "signature 2.1.0", - "spki 0.7.2", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] @@ -4961,21 +4823,22 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ - "pkcs8 0.10.2", - "signature 2.1.0", + "pkcs8", + "signature", ] [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ - "curve25519-dalek 4.0.0", + "curve25519-dalek 4.1.1", "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.7", + "subtle 2.4.1", "zeroize", ] @@ -4995,11 +4858,11 @@ dependencies = [ [[package]] name = "ed25519-zebra" -version = "4.0.2" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e83e509bcd060ca4b54b72bde5bb306cb2088cb01e14797ebae90a24f70f5f7" +checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ - "curve25519-dalek 4.0.0", + "curve25519-dalek 4.1.1", "ed25519", "hashbrown 0.14.0", "hex", @@ -5016,41 +4879,19 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" -version = "0.12.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", + "base16ct", + "crypto-bigint", "digest 0.10.7", - "ff 0.12.1", + "ff", "generic-array 0.14.7", - "group 0.12.1", - "hkdf", - "pem-rfc7468", - "pkcs8 0.9.0", + "group", + "pkcs8", "rand_core 0.6.4", - "sec1 0.3.0", - "subtle 2.4.1", - "zeroize", -] - -[[package]] -name = "elliptic-curve" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" -dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.2", - "digest 0.10.7", - "ff 0.13.0", - "generic-array 0.14.7", - "group 0.13.0", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "sec1 0.7.3", + "sec1", "subtle 2.4.1", "zeroize", ] @@ -5133,7 +4974,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -5144,7 +4985,17 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", +] + +[[package]] +name = "env_logger" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +dependencies = [ + "log", + "regex", ] [[package]] @@ -5162,9 +5013,9 @@ dependencies = [ [[package]] name = 
"env_logger" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece" dependencies = [ "humantime", "is-terminal", @@ -5225,6 +5076,15 @@ dependencies = [ "libc", ] +[[package]] +name = "ethabi-decode" +version = "1.4.0" +source = "git+https://github.com/snowfork/ethabi-decode.git?branch=master#7d215837b626650bd9a076821e57ad488101301f" +dependencies = [ + "ethereum-types", + "tiny-keccak", +] + [[package]] name = "ethbloom" version = "0.13.0" @@ -5233,8 +5093,10 @@ checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ "crunchy", "fixed-hash", + "impl-codec", "impl-rlp", "impl-serde", + "scale-info", "tiny-keccak", ] @@ -5246,9 +5108,11 @@ checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" dependencies = [ "ethbloom", "fixed-hash", + "impl-codec", "impl-rlp", "impl-serde", "primitive-types", + "scale-info", "uint", ] @@ -5289,7 +5153,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -5329,6 +5193,17 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec 0.7.4", + "auto_impl", + "bytes", +] + [[package]] name = "fatality" version = "0.0.6" @@ -5347,7 +5222,7 @@ checksum = "f5aa1e3ae159e592ad222dc90c5acbad632b527779ba88486abe92782ab268bd" dependencies = [ "expander 0.0.4", "indexmap 1.9.3", - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -5356,11 +5231,12 @@ dependencies = [ [[package]] name = "fdlimit" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c4c9e43643f5a3be4ca5b67d26b98031ff9db6806c3440ae32e02e3ceac3f1b" +checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", + "thiserror", ] [[package]] @@ -5379,16 +5255,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "ff" version = "0.13.0" @@ -5402,21 +5268,21 @@ dependencies = [ [[package]] name = "fflonk" version = "0.1.0" -source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40" +source = "git+https://github.com/w3f/fflonk#1e854f35e9a65d08b11a86291405cdc95baa0a35" dependencies = [ "ark-ec", - "ark-ff", + "ark-ff 0.4.2", "ark-poly", - "ark-serialize", - "ark-std", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "merlin 3.0.0", ] [[package]] name = "fiat-crypto" -version = "0.1.20" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" [[package]] name = "file-per-thread-logger" @@ -5424,7 +5290,7 @@ version = "0.1.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "84f2e425d9790201ba4af4630191feac6dcc98765b118d4d18e91d23c2353866" dependencies = [ - "env_logger 0.10.0", + "env_logger 0.10.1", "log", ] @@ -5609,7 +5475,7 @@ dependencies = [ "Inflector", "array-bytes 6.1.0", "chrono", - "clap 4.4.6", + "clap 4.4.11", "comfy-table", "frame-benchmarking", "frame-support", @@ -5670,12 +5536,12 @@ dependencies = [ "frame-election-provider-support", "frame-support", "parity-scale-codec", - "proc-macro-crate", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", "scale-info", "sp-arithmetic", - "syn 2.0.38", + "syn 2.0.41", "trybuild", ] @@ -5701,7 +5567,7 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-support", @@ -5826,8 +5692,9 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", + "regex", "sp-core-hashing", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -5835,10 +5702,10 @@ name = "frame-support-procedural-tools" version = "4.0.0-dev" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -5847,7 +5714,7 @@ version = "3.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -5918,6 +5785,7 @@ version = "4.0.0-dev" dependencies = [ "cfg-if", "criterion 0.4.0", + "docify", "frame-support", "log", "parity-scale-codec", @@ -5987,9 +5855,9 @@ dependencies = [ [[package]] name = "fs4" -version = "0.6.6" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eeb4ed9e12f43b7fa0baae3f9cdda28352770132ef2e09a23760c29cae8bd47" +checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ "rustix 0.38.21", "windows-sys 0.48.0", @@ -6079,7 +5947,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -6090,7 +5958,7 @@ checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", "rustls 0.20.8", - "webpki 0.22.0", + "webpki", ] [[package]] @@ -6203,6 +6071,16 @@ dependencies = [ "wasi 0.11.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom_or_panic" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" +dependencies = [ + "rand 0.8.5", + "rand_core 0.6.4", +] + [[package]] name = "ghash" version = "0.4.4" @@ -6260,7 +6138,7 @@ dependencies = [ ] [[package]] -name = "glutton-runtime" +name = "glutton-westend-runtime" version = "1.0.0" dependencies = [ "cumulus-pallet-aura-ext", @@ -6304,24 +6182,13 @@ dependencies = [ "substrate-wasm-builder", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -6633,12 +6500,6 @@ 
dependencies = [ "cc", ] -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" version = "0.2.3" @@ -6836,25 +6697,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "interceptor" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8a11ae2da61704edada656798b61c94b35ecac2c58eb955156987d5e6be90b" -dependencies = [ - "async-trait", - "bytes", - "log", - "rand 0.8.5", - "rtcp", - "rtp", - "thiserror", - "tokio", - "waitgroup", - "webrtc-srtp", - "webrtc-util", -] - [[package]] name = "io-lifetimes" version = "1.0.11" @@ -7059,7 +6901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44e8ab85614a08792b9bff6c8feee23be78c98d0182d4c622c05256ab553892a" dependencies = [ "heck", - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -7120,8 +6962,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", - "ecdsa 0.16.8", - "elliptic-curve 0.13.5", + "ecdsa", + "elliptic-curve", "once_cell", "sha2 0.10.7", ] @@ -7183,13 +7025,13 @@ dependencies = [ "pallet-child-bounties", "pallet-collective", "pallet-contracts", - "pallet-contracts-primitives", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-democracy", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", + "pallet-example-tasks", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", @@ -7224,6 +7066,7 @@ dependencies = [ "pallet-scheduler", "pallet-session", "pallet-session-benchmarking", + "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", @@ -7375,6 +7218,17 @@ dependencies = [ "rle-decode-fast", ] +[[package]] +name = "libfuzzer-sys" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a96cfd5557eb82f2b83fed4955246c988d331975a002961b07c81584d107e7f7" +dependencies = [ + "arbitrary", + "cc", + "once_cell", +] + [[package]] name = "libloading" version = "0.7.4" @@ -7393,9 +7247,9 @@ checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libp2p" -version = "0.51.3" +version = "0.51.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f210d259724eae82005b5c48078619b7745edb7b76de370b03f8ba59ea103097" +checksum = "f35eae38201a993ece6bdc823292d6abd1bffed1c4d0f4a3517d2bd8e1d917fe" dependencies = [ "bytes", "futures", @@ -7418,7 +7272,6 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-wasm-ext", - "libp2p-webrtc", "libp2p-websocket", "libp2p-yamux", "multiaddr", @@ -7463,7 +7316,7 @@ dependencies = [ "libp2p-identity", "log", "multiaddr", - "multihash", + "multihash 0.17.0", "multistream-select", "once_cell", "parking_lot 0.12.1", @@ -7523,7 +7376,7 @@ dependencies = [ "ed25519-dalek", "log", "multiaddr", - "multihash", + "multihash 0.17.0", "quick-protobuf", "rand 0.8.5", "sha2 0.10.7", @@ -7730,12 +7583,12 @@ dependencies = [ "futures-rustls", "libp2p-core", "libp2p-identity", - "rcgen 0.10.0", + "rcgen", "ring 0.16.20", "rustls 0.20.8", "thiserror", - "webpki 0.22.0", - "x509-parser 0.14.0", + "webpki", + "x509-parser", "yasna", ] @@ -7753,37 +7606,6 @@ dependencies = [ "wasm-bindgen-futures", ] 
-[[package]] -name = "libp2p-webrtc" -version = "0.4.0-alpha.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba48592edbc2f60b4bc7c10d65445b0c3964c07df26fdf493b6880d33be36f8" -dependencies = [ - "async-trait", - "asynchronous-codec", - "bytes", - "futures", - "futures-timer", - "hex", - "if-watch", - "libp2p-core", - "libp2p-identity", - "libp2p-noise", - "log", - "multihash", - "quick-protobuf", - "quick-protobuf-codec", - "rand 0.8.5", - "rcgen 0.9.3", - "serde", - "stun", - "thiserror", - "tinytemplate", - "tokio", - "tokio-util", - "webrtc", -] - [[package]] name = "libp2p-websocket" version = "0.41.0" @@ -8062,7 +7884,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -8076,7 +7898,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -8087,7 +7909,7 @@ checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -8098,7 +7920,7 @@ checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -8138,20 +7960,11 @@ dependencies = [ "rawpointer", ] -[[package]] -name = "md-5" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memfd" @@ -8171,15 +7984,6 @@ dependencies = [ "libc", ] -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] - [[package]] name = "memoffset" version = "0.7.1" @@ -8251,6 +8055,20 @@ dependencies = [ "thrift", ] +[[package]] +name = "milagro_bls" +version = "1.5.0" +source = "git+https://github.com/snowfork/milagro_bls?rev=a6d66e4eb89015e352fb1c9f7b661ecdbb5b2176#a6d66e4eb89015e352fb1c9f7b661ecdbb5b2176" +dependencies = [ + "amcl", + "hex", + "lazy_static", + "parity-scale-codec", + "rand 0.8.5", + "scale-info", + "zeroize", +] + [[package]] name = "mime" version = "0.3.17" @@ -8267,7 +8085,7 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" name = "minimal-node" version = "4.0.0-dev" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "frame", "futures", "futures-timer", @@ -8346,7 +8164,7 @@ dependencies = [ "bitflags 1.3.2", "blake2 0.10.6", "c2-chacha", - "curve25519-dalek 4.0.0", + "curve25519-dalek 4.1.1", "either", "hashlink", "lioness", @@ -8437,7 +8255,7 @@ dependencies = [ "data-encoding", "log", "multibase", - "multihash", + "multihash 0.17.0", "percent-encoding", "serde", "static_assertions", @@ -8467,19 +8285,87 @@ dependencies = [ "blake3", "core2", "digest 0.10.7", - "multihash-derive", + "multihash-derive 0.8.0", "sha2 0.10.7", "sha3", "unsigned-varint", ] +[[package]] +name = "multihash" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" +dependencies = [ + "core2", + "digest 0.10.7", + "multihash-derive 0.8.0", + "sha2 0.10.7", + "unsigned-varint", +] + +[[package]] +name = "multihash" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +dependencies = [ + "core2", + "unsigned-varint", +] + +[[package]] +name = "multihash-codetable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6d815ecb3c8238d00647f8630ede7060a642c9f704761cd6082cb4028af6935" +dependencies = [ + "blake2b_simd", + "blake2s_simd", + "blake3", + "core2", + "digest 0.10.7", + "multihash-derive 0.9.0", + "ripemd", + "serde", + "sha1", + "sha2 0.10.7", + "sha3", + "strobe-rs", +] + [[package]] name = "multihash-derive" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "multihash-derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "890e72cb7396cb99ed98c1246a97b243cc16394470d94e0bc8b0c2c11d84290e" +dependencies = [ + "core2", + "multihash 0.19.1", + "multihash-derive-impl", +] + +[[package]] +name = "multihash-derive-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38685e08adb338659871ecfc6ee47ba9b22dcc8abcf6975d379cc49145c3040" +dependencies = [ + "proc-macro-crate 1.3.1", "proc-macro-error", "proc-macro2", "quote", @@ -8534,15 +8420,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "names" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7d66043b25d4a6cccb23619d10c19c25304b355a7dccd4a8e11423dd2382146" -dependencies = [ - "rand 0.8.5", -] - [[package]] name = "names" version = "0.14.0" @@ -8634,7 +8511,6 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset 0.6.5", ] [[package]] @@ -8651,6 +8527,17 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.4.0", + "cfg-if", + "libc", +] + [[package]] name = "no-std-net" version = "0.6.0" @@ -8662,7 +8549,7 @@ name = "node-bench" version = "0.9.0-dev" dependencies = [ "array-bytes 6.1.0", - "clap 4.4.6", + "clap 4.4.11", "derive_more", "fs_extra", "futures", @@ -8737,7 +8624,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "generate-bags", "kitchensink-runtime", ] @@ -8746,7 +8633,7 @@ dependencies = [ name = "node-template" version = "4.0.0-dev" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -8790,14 +8677,14 @@ dependencies = [ name = "node-template-release" version = "3.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "flate2", "fs_extra", "glob", "itertools 0.10.5", "tar", "tempfile", - "toml_edit", + "toml_edit 0.19.15", ] [[package]] @@ -8853,6 +8740,7 @@ dependencies = [ "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", + 
"pallet-skip-feeless-payment", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -8870,7 +8758,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-timestamp", - "staging-node-executor", + "staging-node-cli", "substrate-test-client", "tempfile", ] @@ -8982,9 +8870,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", "libm", @@ -9027,22 +8915,13 @@ dependencies = [ "memchr", ] -[[package]] -name = "oid-registry" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" -dependencies = [ - "asn1-rs 0.3.1", -] - [[package]] name = "oid-registry" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs 0.5.2", + "asn1-rs", ] [[package]] @@ -9111,7 +8990,7 @@ dependencies = [ "itertools 0.11.0", "layout-rs", "petgraph", - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -9126,6 +9005,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "os_pipe" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae859aa07428ca9a929b936690f8b12dc5f11dd8c6992a18ca93919f28bc177" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "os_str_bytes" version = "6.5.1" @@ -9138,28 +9027,6 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" -[[package]] -name = "p256" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" -dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.7", -] - -[[package]] -name = "p384" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" -dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.7", -] - [[package]] name = "pallet-alliance" version = "4.0.0-dev" @@ -9543,7 +9410,6 @@ dependencies = [ "pallet-balances", "parity-scale-codec", "scale-info", - "sp-core", "sp-io", "sp-runtime", "sp-std 8.0.0", @@ -9588,7 +9454,6 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-arithmetic", - "sp-core", "sp-io", "sp-runtime", "sp-std 8.0.0", @@ -9700,11 +9565,13 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", + "pallet-assets", "pallet-balances", "pallet-contracts-fixtures", - "pallet-contracts-primitives", "pallet-contracts-proc-macro", + "pallet-contracts-uapi", "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", "pallet-proxy", "pallet-timestamp", "pallet-utility", @@ -9721,6 +9588,9 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-std 8.0.0", + "sp-tracing 10.0.0", + "staging-xcm", + "staging-xcm-builder", "wasm-instrument 0.4.0", "wasmi", "wat", @@ -9730,21 +9600,57 @@ dependencies = [ name = "pallet-contracts-fixtures" version = "1.0.0" dependencies = [ + "anyhow", + "cfg-if", "frame-system", + "parity-wasm", "sp-runtime", + 
"tempfile", + "toml 0.8.2", + "twox-hash", "wat", ] [[package]] -name = "pallet-contracts-primitives" -version = "24.0.0" +name = "pallet-contracts-fixtures-common" +version = "1.0.0" + +[[package]] +name = "pallet-contracts-mock-network" +version = "1.0.0" dependencies = [ - "bitflags 1.3.2", + "assert_matches", + "frame-support", + "frame-system", + "pallet-assets", + "pallet-balances", + "pallet-contracts", + "pallet-contracts-fixtures", + "pallet-contracts-proc-macro", + "pallet-contracts-uapi", + "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", + "pallet-proxy", + "pallet-timestamp", + "pallet-utility", + "pallet-xcm", "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", + "pretty_assertions", "scale-info", + "sp-api", + "sp-core", + "sp-io", + "sp-keystore", "sp-runtime", "sp-std 8.0.0", - "sp-weights", + "sp-tracing 10.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", ] [[package]] @@ -9753,7 +9659,17 @@ version = "4.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", +] + +[[package]] +name = "pallet-contracts-uapi" +version = "4.0.0-dev" +dependencies = [ + "bitflags 1.3.2", + "parity-scale-codec", + "paste", + "scale-info", ] [[package]] @@ -10002,6 +9918,22 @@ dependencies = [ "sp-std 8.0.0", ] +[[package]] +name = "pallet-example-tasks" +version = "1.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 8.0.0", +] + [[package]] name = "pallet-examples" version = "4.0.0-dev" @@ -10013,6 +9945,7 @@ dependencies = [ "pallet-example-kitchensink", "pallet-example-offchain-worker", "pallet-example-split", + "pallet-example-tasks", ] [[package]] @@ -10194,6 +10127,7 @@ dependencies = [ name = "pallet-message-queue" version = "7.0.0-dev" dependencies = [ + "environmental", "frame-benchmarking", "frame-support", "frame-system", @@ -10312,6 +10246,7 @@ dependencies = [ "pallet-nfts", "parity-scale-codec", "sp-api", + "sp-std 8.0.0", ] [[package]] @@ -10684,6 +10619,7 @@ dependencies = [ name = "pallet-safe-mode" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10716,6 +10652,24 @@ dependencies = [ "sp-std 8.0.0", ] +[[package]] +name = "pallet-sassafras" +version = "0.3.5-dev" +dependencies = [ + "array-bytes 6.1.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 8.0.0", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -10795,6 +10749,18 @@ dependencies = [ "sp-std 8.0.0", ] +[[package]] +name = "pallet-skip-feeless-payment" +version = "1.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std 8.0.0", +] + [[package]] name = "pallet-society" version = "4.0.0-dev" @@ -10849,11 +10815,11 @@ dependencies = [ name = "pallet-staking-reward-curve" version = "4.0.0-dev" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", "sp-runtime", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -11075,6 +11041,7 @@ dependencies = [ name = "pallet-tx-pause" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -11169,6 +11136,7 @@ 
dependencies = [ "frame-support", "frame-system", "log", + "pallet-assets", "pallet-balances", "parity-scale-codec", "polkadot-parachain-primitives", @@ -11209,6 +11177,31 @@ dependencies = [ "staging-xcm-executor", ] +[[package]] +name = "pallet-xcm-bridge-hub" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-messages", + "bp-runtime", + "bp-xcm-bridge-hub", + "bridge-runtime-common", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "pallet-bridge-messages", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 8.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + [[package]] name = "pallet-xcm-bridge-hub-router" version = "0.1.0" @@ -11232,7 +11225,7 @@ dependencies = [ name = "parachain-template-node" version = "0.1.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "color-print", "cumulus-client-cli", "cumulus-client-collator", @@ -11411,11 +11404,17 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "parity-bytes" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67" + [[package]] name = "parity-db" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78f19d20a0d2cc52327a88d131fa1c4ea81ea4a04714aedcfeca2dd410049cf8" +checksum = "59e9ab494af9e6e813c72170f0d3c1de1500990d62c97cc05cc7576f91aa402f" dependencies = [ "blake2 0.10.6", "crc32fast", @@ -11433,9 +11432,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" +checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -11448,11 +11447,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" +checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -11598,15 +11597,6 @@ dependencies = [ "base64 0.13.1", ] -[[package]] -name = "pem-rfc7468" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" -dependencies = [ - "base64ct", -] - [[package]] name = "penpal-emulated-chain" version = "0.0.0" @@ -11616,15 +11606,18 @@ dependencies = [ "frame-support", "parachains-common", "penpal-runtime", + "rococo-emulated-chain", "serde_json", "sp-core", "sp-runtime", + "westend-emulated-chain", ] [[package]] name = "penpal-runtime" version = "0.9.27" dependencies = [ + "assets-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", @@ -11662,6 +11655,7 @@ dependencies = [ "polkadot-runtime-common", "scale-info", "smallvec", + "snowbridge-rococo-common", "sp-api", "sp-block-builder", "sp-consensus-aura", @@ -11718,7 +11712,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -11759,7 +11753,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -11780,24 +11774,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.8", - "spki 0.7.2", + "der", + "spki", ] [[package]] @@ -11842,7 +11826,7 @@ dependencies = [ [[package]] name = "polkadot" -version = "1.1.0" +version = "1.5.0" dependencies = [ "assert_cmd", "color-eyre", @@ -11883,8 +11867,8 @@ dependencies = [ "polkadot-primitives-test-helpers", "rand 0.8.5", "rand_chacha 0.3.1", - "rand_core 0.5.1", - "schnorrkel 0.9.1", + "rand_core 0.6.4", + "schnorrkel 0.11.4", "sp-authority-discovery", "sp-core", "tracing-gum", @@ -11973,6 +11957,7 @@ dependencies = [ "sp-core", "sp-keyring", "thiserror", + "tokio", "tracing-gum", ] @@ -11980,7 +11965,8 @@ dependencies = [ name = "polkadot-cli" version = "1.1.0" dependencies = [ - "clap 4.4.6", + "cfg-if", + "clap 4.4.11", "frame-benchmarking-cli", "futures", "log", @@ -12106,6 +12092,7 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", + "quickcheck", "rand 0.8.5", "rand_chacha 0.3.1", "sc-network", @@ -12185,7 +12172,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "log", - "merlin 2.0.1", + "merlin 3.0.0", "parity-scale-codec", "parking_lot 0.12.1", "polkadot-node-jaeger", @@ -12198,10 +12185,10 @@ dependencies = [ "polkadot-primitives-test-helpers", "rand 0.8.5", "rand_chacha 0.3.1", - "rand_core 0.5.1", + "rand_core 0.6.4", "sc-keystore", "schnellru", - "schnorrkel 0.9.1", + "schnorrkel 0.11.4", "sp-application-crypto", "sp-consensus", "sp-consensus-babe", @@ -12322,6 +12309,7 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", "polkadot-primitives", "sc-client-api", "sc-consensus-babe", @@ -12446,6 +12434,7 @@ version = "1.0.0" dependencies = [ "always-assert", "assert_matches", + "blake3", "cfg-if", "criterion 0.4.0", "futures", @@ -12462,12 +12451,14 @@ dependencies = [ "polkadot-node-core-pvf-prepare-worker", "polkadot-node-metrics", "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-parachain-primitives", "polkadot-primitives", "procfs", "rand 0.8.5", "rococo-runtime", "rusty-fork", + "sc-sysinfo", "slotmap", "sp-core", "sp-maybe-compressed-blob", @@ -12475,6 +12466,7 @@ dependencies = [ "tempfile", "test-parachain-adder", "test-parachain-halt", + "thiserror", "tokio", "tracing-gum", ] @@ -12523,6 +12515,7 @@ dependencies = [ "sp-externalities 0.19.0", "sp-io", "sp-tracing 10.0.0", + "substrate-build-script-utils", "tempfile", "thiserror", "tracing-gum", @@ -12533,6 +12526,9 @@ name = "polkadot-node-core-pvf-execute-worker" version = "1.0.0" dependencies = [ "cpu-time", + "libc", + "nix 0.27.1", + "os_pipe", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-parachain-primitives", @@ -12544,9 +12540,12 @@ dependencies = [ name = "polkadot-node-core-pvf-prepare-worker" version = "1.0.0" dependencies = [ + 
"blake3", "cfg-if", "criterion 0.4.0", "libc", + "nix 0.27.1", + "os_pipe", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-primitives", @@ -12660,7 +12659,7 @@ dependencies = [ "polkadot-erasure-coding", "polkadot-parachain-primitives", "polkadot-primitives", - "schnorrkel 0.9.1", + "schnorrkel 0.11.4", "serde", "sp-application-crypto", "sp-consensus-babe", @@ -12688,6 +12687,8 @@ dependencies = [ "async-trait", "futures", "parking_lot 0.12.1", + "polkadot-erasure-coding", + "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", @@ -12720,7 +12721,9 @@ dependencies = [ "smallvec", "sp-api", "sp-authority-discovery", + "sp-blockchain", "sp-consensus-babe", + "sp-runtime", "substrate-prometheus-endpoint", "thiserror", ] @@ -12796,22 +12799,20 @@ dependencies = [ [[package]] name = "polkadot-parachain-bin" -version = "1.1.0" +version = "1.5.0" dependencies = [ "assert_cmd", - "asset-hub-kusama-runtime", - "asset-hub-polkadot-runtime", "asset-hub-rococo-runtime", "asset-hub-westend-runtime", "async-trait", - "bridge-hub-kusama-runtime", - "bridge-hub-polkadot-runtime", "bridge-hub-rococo-runtime", "bridge-hub-westend-runtime", - "clap 4.4.6", - "collectives-polkadot-runtime", + "clap 4.4.11", + "collectives-westend-runtime", "color-print", "contracts-rococo-runtime", + "coretime-rococo-runtime", + "coretime-westend-runtime", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", @@ -12825,13 +12826,18 @@ dependencies = [ "cumulus-relay-chain-interface", "frame-benchmarking", "frame-benchmarking-cli", + "frame-support", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "futures", - "glutton-runtime", + "glutton-westend-runtime", "hex-literal", "jsonrpsee", "log", "nix 0.26.2", + "pallet-transaction-payment", "pallet-transaction-payment-rpc", + "pallet-transaction-payment-rpc-runtime-api", "parachains-common", "parity-scale-codec", "penpal-runtime", @@ -12863,14 +12869,18 @@ dependencies = [ "sp-blockchain", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", + "sp-inherents", "sp-io", "sp-keystore", "sp-offchain", "sp-runtime", "sp-session", + "sp-std 8.0.0", "sp-timestamp", "sp-tracing 10.0.0", "sp-transaction-pool", + "sp-version", "staging-xcm", "substrate-build-script-utils", "substrate-frame-rpc-system", @@ -12983,8 +12993,10 @@ dependencies = [ "pallet-authorship", "pallet-babe", "pallet-balances", + "pallet-broker", "pallet-election-provider-multi-phase", "pallet-fast-unstake", + "pallet-identity", "pallet-session", "pallet-staking", "pallet-staking-reward-fn", @@ -13052,6 +13064,7 @@ dependencies = [ "pallet-authorship", "pallet-babe", "pallet-balances", + "pallet-broker", "pallet-message-queue", "pallet-session", "pallet-staking", @@ -13072,6 +13085,7 @@ dependencies = [ "serde_json", "sp-api", "sp-application-crypto", + "sp-arithmetic", "sp-core", "sp-inherents", "sp-io", @@ -13088,6 +13102,45 @@ dependencies = [ "thousands", ] +[[package]] +name = "polkadot-sdk-docs" +version = "0.0.1" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "docify", + "frame", + "kitchensink-runtime", + "pallet-aura", + "pallet-default-config-example", + "pallet-examples", + "pallet-timestamp", + "parity-scale-codec", + "sc-cli", + "sc-client-db", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-consensus-beefy", + "sc-consensus-grandpa", + "sc-consensus-manual-seal", + "sc-consensus-pow", + "sc-network", + "sc-rpc", + "sc-rpc-api", + "scale-info", + 
"simple-mermaid", + "sp-api", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "staging-chain-spec-builder", + "staging-node-cli", + "staging-parachain-info", + "subkey", + "substrate-wasm-builder", +] + [[package]] name = "polkadot-service" version = "1.0.0" @@ -13114,6 +13167,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "parity-db", "parity-scale-codec", + "parking_lot 0.12.1", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", @@ -13229,7 +13283,6 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", - "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", @@ -13257,48 +13310,96 @@ dependencies = [ ] [[package]] -name = "polkadot-test-client" -version = "1.0.0" -dependencies = [ - "frame-benchmarking", - "futures", - "parity-scale-codec", - "polkadot-node-subsystem", - "polkadot-primitives", - "polkadot-test-runtime", - "polkadot-test-service", - "sc-block-builder", - "sc-consensus", - "sc-offchain", - "sc-service", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-consensus-babe", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-runtime", - "sp-state-machine", - "sp-timestamp", - "substrate-test-client", -] - -[[package]] -name = "polkadot-test-malus" +name = "polkadot-subsystem-bench" version = "1.0.0" dependencies = [ "assert_matches", "async-trait", - "clap 4.4.6", + "clap 4.4.11", + "clap-num", "color-eyre", + "colored", + "env_logger 0.9.3", "futures", "futures-timer", - "polkadot-cli", - "polkadot-erasure-coding", - "polkadot-node-core-backing", - "polkadot-node-core-candidate-validation", + "itertools 0.11.0", + "log", + "orchestra", + "parity-scale-codec", + "paste", + "polkadot-availability-recovery", + "polkadot-erasure-coding", + "polkadot-node-metrics", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", + "polkadot-node-subsystem-util", + "polkadot-overseer", + "polkadot-primitives", + "polkadot-primitives-test-helpers", + "prometheus", + "pyroscope", + "pyroscope_pprofrs", + "rand 0.8.5", + "sc-keystore", + "sc-network", + "sc-service", + "serde", + "serde_yaml", + "sp-application-crypto", + "sp-core", + "sp-keyring", + "sp-keystore", + "substrate-prometheus-endpoint", + "tokio", + "tracing-gum", +] + +[[package]] +name = "polkadot-test-client" +version = "1.0.0" +dependencies = [ + "frame-benchmarking", + "futures", + "parity-scale-codec", + "polkadot-node-subsystem", + "polkadot-primitives", + "polkadot-test-runtime", + "polkadot-test-service", + "sc-block-builder", + "sc-consensus", + "sc-offchain", + "sc-service", + "sp-api", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-state-machine", + "sp-timestamp", + "substrate-test-client", +] + +[[package]] +name = "polkadot-test-malus" +version = "1.0.0" +dependencies = [ + "assert_matches", + "async-trait", + "clap 4.4.11", + "color-eyre", + "futures", + "futures-timer", + "polkadot-cli", + "polkadot-erasure-coding", + "polkadot-node-core-backing", + "polkadot-node-core-candidate-validation", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", @@ -13438,7 +13539,7 @@ dependencies = [ name = 
"polkadot-voter-bags" version = "1.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "generate-bags", "sp-io", "westend-runtime", @@ -13616,7 +13717,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -13657,7 +13758,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.19.15", +] + +[[package]] +name = "proc-macro-crate" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +dependencies = [ + "toml_datetime", + "toml_edit 0.20.2", ] [[package]] @@ -13698,7 +13809,7 @@ checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -13770,7 +13881,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -13785,6 +13896,26 @@ dependencies = [ "regex", ] +[[package]] +name = "proptest" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.4.0", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax 0.8.2", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "prost" version = "0.11.9" @@ -13858,7 +13989,7 @@ dependencies = [ "libc", "libflate", "log", - "names 0.14.0", + "names", "prost", "reqwest", "thiserror", @@ -13912,6 +14043,8 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ + "env_logger 0.8.4", + "log", "rand 0.8.5", ] @@ -13941,7 +14074,7 @@ dependencies = [ "thiserror", "tinyvec", "tracing", - "webpki 0.22.0", + "webpki", ] [[package]] @@ -14049,6 +14182,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rawpointer" version = "0.2.1" @@ -14077,19 +14219,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "rcgen" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" -dependencies = [ - "pem", - "ring 0.16.20", - "time 0.3.27", - "x509-parser 0.13.2", - "yasna", -] - [[package]] name = "rcgen" version = "0.10.0" @@ -14098,7 +14227,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring 0.16.20", - "time 0.3.27", + "time", "yasna", ] @@ -14120,6 +14249,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] 
name = "redox_users" version = "0.4.3" @@ -14161,7 +14299,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -14178,14 +14316,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -14202,10 +14340,16 @@ name = "regex-automata" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.2", ] [[package]] @@ -14216,15 +14360,15 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "remote-ext-tests-bags-list" version = "1.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "frame-system", "log", "pallet-bags-list-remote-tests", @@ -14284,17 +14428,6 @@ dependencies = [ "quick-error", ] -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac 0.12.1", - "zeroize", -] - [[package]] name = "rfc6979" version = "0.4.0" @@ -14308,13 +14441,13 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#edd1e90b847e560bf60fc2e8712235ccfa11a9a9" +source = "git+https://github.com/w3f/ring-proof#b273d33f9981e2bb3375ab45faeb537f7ee35224" dependencies = [ "ark-ec", - "ark-ff", + "ark-ff 0.4.2", "ark-poly", - "ark-serialize", - "ark-std", + "ark-serialize 0.4.2", + "ark-std 0.4.0", "blake2 0.10.6", "common", "fflonk", @@ -14336,6 +14469,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -14391,6 +14533,7 @@ dependencies = [ "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-utility", "frame-benchmarking", @@ -14546,6 +14689,7 @@ dependencies = [ "sp-runtime", "sp-weights", "staging-xcm", + "staging-xcm-builder", ] [[package]] @@ -14560,16 +14704,17 @@ dependencies = [ ] [[package]] -name = "rococo-wococo-system-emulated-network" +name = "rococo-westend-system-emulated-network" version = "0.0.0" dependencies = [ "asset-hub-rococo-emulated-chain", - 
"asset-hub-wococo-emulated-chain", + "asset-hub-westend-emulated-chain", "bridge-hub-rococo-emulated-chain", - "bridge-hub-wococo-emulated-chain", + "bridge-hub-westend-emulated-chain", "emulated-integration-tests-common", + "penpal-emulated-chain", "rococo-emulated-chain", - "wococo-emulated-chain", + "westend-emulated-chain", ] [[package]] @@ -14583,17 +14728,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rtcp" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1919efd6d4a6a85d13388f9487549bb8e359f17198cc03ffd72f79b553873691" -dependencies = [ - "bytes", - "thiserror", - "webrtc-util", -] - [[package]] name = "rtnetlink" version = "0.10.1" @@ -14620,19 +14754,35 @@ dependencies = [ ] [[package]] -name = "rtp" -version = "0.6.8" +name = "ruint" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a095411ff00eed7b12e4c6a118ba984d113e1079582570d56a5ee723f11f80" +checksum = "608a5726529f2f0ef81b8fde9873c4bb829d6b5b5ca6be4d97345ddf0749c825" dependencies = [ - "async-trait", + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", "bytes", + "fastrlp", + "num-bigint", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", "rand 0.8.5", + "rlp", + "ruint-macro", "serde", - "thiserror", - "webrtc-util", + "valuable", + "zeroize", ] +[[package]] +name = "ruint-macro" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e666a5496a0b2186dbcd0ff6106e29e093c15591bde62c20d3842007c6978a09" + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -14660,6 +14810,15 @@ dependencies = [ "semver 0.9.0", ] +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.0" @@ -14719,19 +14878,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "rustls" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64 0.13.1", - "log", - "ring 0.16.20", - "sct 0.6.1", - "webpki 0.21.4", -] - [[package]] name = "rustls" version = "0.20.8" @@ -14740,8 +14886,8 @@ checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring 0.16.20", - "sct 0.7.0", - "webpki 0.22.0", + "sct", + "webpki", ] [[package]] @@ -14753,7 +14899,7 @@ dependencies = [ "log", "ring 0.16.20", "rustls-webpki 0.101.4", - "sct 0.7.0", + "sct", ] [[package]] @@ -14890,7 +15036,8 @@ dependencies = [ "ip_network", "libp2p", "log", - "multihash", + "multihash 0.18.1", + "multihash-codetable", "parity-scale-codec", "prost", "prost-build", @@ -14947,6 +15094,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-trie", "substrate-test-runtime-client", ] @@ -14982,10 +15130,10 @@ dependencies = [ name = "sc-chain-spec-derive" version = "4.0.0-dev" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -14995,14 +15143,14 @@ dependencies = [ "array-bytes 6.1.0", "bip39", "chrono", - "clap 4.4.6", + "clap 4.4.11", "fdlimit", "futures", "futures-timer", "itertools 0.10.5", "libp2p-identity", "log", - "names 0.13.0", + "names", "parity-scale-codec", "rand 0.8.5", "regex", @@ 
-15567,6 +15715,7 @@ dependencies = [ "array-bytes 4.2.0", "arrayvec 0.7.4", "blake2 0.10.6", + "bytes", "futures", "futures-timer", "libp2p-identity", @@ -15632,6 +15781,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", + "tokio-stream", "tokio-test", "tokio-util", "unsigned-varint", @@ -15687,10 +15837,12 @@ name = "sc-network-gossip" version = "0.10.0-dev" dependencies = [ "ahash 0.8.3", + "async-trait", "futures", "futures-timer", "libp2p", "log", + "parity-scale-codec", "quickcheck", "sc-network", "sc-network-common", @@ -15977,7 +16129,9 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-externalities 0.19.0", "sp-maybe-compressed-blob", + "sp-rpc", "sp-runtime", "sp-version", "substrate-test-runtime", @@ -16133,7 +16287,7 @@ dependencies = [ name = "sc-storage-monitor" version = "0.1.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "fs4", "log", "sc-client-db", @@ -16203,12 +16357,13 @@ name = "sc-tracing" version = "4.0.0-dev" dependencies = [ "ansi_term", - "atty", "chrono", "criterion 0.4.0", + "is-terminal", "lazy_static", "libc", "log", + "parity-scale-codec", "parking_lot 0.12.1", "regex", "rustc-hash", @@ -16231,10 +16386,10 @@ dependencies = [ name = "sc-tracing-proc-macro" version = "4.0.0-dev" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -16321,7 +16476,7 @@ version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -16336,6 +16491,30 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "schemars" +version = "0.8.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "763f8cd0d4c71ed8389c90cb8100cba87e763bd01a8e614d4f0af97bcd50a161" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 1.0.109", +] + [[package]] name = "schnellru" version = "0.2.1" @@ -16381,6 +16560,31 @@ dependencies = [ "zeroize", ] +[[package]] +name = "schnorrkel" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" +dependencies = [ + "aead 0.5.2", + "arrayref", + "arrayvec 0.7.4", + "curve25519-dalek 4.1.1", + "getrandom_or_panic", + "merlin 3.0.0", + "rand_core 0.6.4", + "serde_bytes", + "sha2 0.10.7", + "subtle 2.4.1", + "zeroize", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -16395,87 +16599,51 @@ checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring 0.16.20", "untrusted", 
] [[package]] -name = "sct" -version = "0.7.0" +name = "sec1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "ring 0.16.20", - "untrusted", + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle 2.4.1", + "zeroize", ] [[package]] -name = "sdp" -version = "0.5.3" +name = "seccompiler" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" -dependencies = [ - "rand 0.8.5", - "substring", - "thiserror", - "url", -] - -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array 0.14.7", - "pkcs8 0.9.0", - "subtle 2.4.1", - "zeroize", -] - -[[package]] -name = "sec1" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" -dependencies = [ - "base16ct 0.2.0", - "der 0.7.8", - "generic-array 0.14.7", - "pkcs8 0.10.2", - "subtle 2.4.1", - "zeroize", -] - -[[package]] -name = "seccompiler" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345a3e4dddf721a478089d4697b83c6c0a8f5bf16086f6c13397e4534eb6e2e5" +checksum = "345a3e4dddf721a478089d4697b83c6c0a8f5bf16086f6c13397e4534eb6e2e5" dependencies = [ "libc", ] [[package]] name = "secp256k1" -version = "0.24.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" +checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" +checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" dependencies = [ "cc", ] @@ -16553,7 +16721,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", ] [[package]] @@ -16562,7 +16730,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser 0.10.2", ] [[package]] @@ -16580,6 +16757,15 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + 
[[package]] name = "separator" version = "0.4.1" @@ -16588,22 +16774,51 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd31f59f6fe2b0c055371bb2f16d7f0aa7d8881676c04a55b1596d1a17cd10a4" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_bytes" +version = "0.11.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +dependencies = [ + "serde", +] + [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.41", +] + +[[package]] +name = "serde_derive_internals" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 1.0.109", ] [[package]] @@ -16628,9 +16843,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" dependencies = [ "serde", ] @@ -16647,6 +16862,19 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_yaml" +version = "0.9.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cc7a1570e38322cfe4154732e5110f887ea57e22b76f4bfd32b5bdd3368666c" +dependencies = [ + "indexmap 2.0.0", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "serial_test" version = "2.0.0" @@ -16669,7 +16897,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -16824,16 +17052,6 @@ dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - [[package]] name = "signature" version = "2.1.0" @@ -16926,6 +17144,15 @@ dependencies = [ "futures-lite", ] +[[package]] +name = "smol_str" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74212e6bbe9a4352329b2f68ba3130c15a3f26fe88ff22dbdc6cdd58fa85e99c" +dependencies = [ + "serde", +] + [[package]] name = "smoldot" version = "0.11.0" @@ -16942,7 +17169,7 @@ dependencies = [ "chacha20 0.9.1", "crossbeam-queue", "derive_more", - "ed25519-zebra 4.0.2", + "ed25519-zebra 4.0.3", "either", "event-listener", "fnv", @@ -17008,35 +17235,387 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "serde", - "serde_json", - 
"siphasher", - "slab", - "smol", - "smoldot", - "zeroize", + "serde_json", + "siphasher", + "slab", + "smol", + "smoldot", + "zeroize", +] + +[[package]] +name = "snap" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" + +[[package]] +name = "snow" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +dependencies = [ + "aes-gcm 0.9.4", + "blake2 0.10.6", + "chacha20poly1305", + "curve25519-dalek 4.1.1", + "rand_core 0.6.4", + "ring 0.16.20", + "rustc_version 0.4.0", + "sha2 0.10.7", + "subtle 2.4.1", +] + +[[package]] +name = "snowbridge-beacon-primitives" +version = "0.0.1" +dependencies = [ + "byte-slice-cast", + "frame-support", + "frame-system", + "hex", + "hex-literal", + "milagro_bls", + "parity-scale-codec", + "rlp", + "scale-info", + "serde", + "snowbridge-ethereum", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 8.0.0", + "ssz_rs", + "ssz_rs_derive", + "static_assertions", +] + +[[package]] +name = "snowbridge-core" +version = "0.1.1" +dependencies = [ + "ethabi-decode", + "frame-support", + "frame-system", + "hex", + "hex-literal", + "parity-scale-codec", + "polkadot-parachain-primitives", + "scale-info", + "serde", + "snowbridge-beacon-primitives", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 8.0.0", + "staging-xcm", + "staging-xcm-builder", +] + +[[package]] +name = "snowbridge-ethereum" +version = "0.1.0" +dependencies = [ + "ethabi-decode", + "ethbloom", + "ethereum-types", + "hex-literal", + "parity-bytes", + "parity-scale-codec", + "rand 0.8.5", + "rlp", + "rustc-hex", + "scale-info", + "serde", + "serde-big-array", + "serde_json", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 8.0.0", + "wasm-bindgen-test", +] + +[[package]] +name = "snowbridge-ethereum-beacon-client" +version = "0.0.1" +dependencies = [ + "bp-runtime", + "byte-slice-cast", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "log", + "pallet-timestamp", + "parity-scale-codec", + "rand 0.8.5", + "rlp", + "scale-info", + "serde", + "serde_json", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-ethereum", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-std 8.0.0", + "ssz_rs", + "ssz_rs_derive", + "static_assertions", +] + +[[package]] +name = "snowbridge-inbound-queue" +version = "0.1.1" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-sol-types", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "log", + "num-traits", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "serde", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-ethereum", + "snowbridge-ethereum-beacon-client", + "snowbridge-router-primitives", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-std 8.0.0", + "staging-xcm", + "staging-xcm-builder", +] + +[[package]] +name = "snowbridge-outbound-queue" +version = "0.1.1" +dependencies = [ + "bridge-hub-common", + "ethabi-decode", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "pallet-message-queue", + "parity-scale-codec", + "scale-info", + "serde", + "snowbridge-core", + "snowbridge-outbound-queue-merkle-tree", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-std 8.0.0", + "staging-xcm", +] + +[[package]] +name = 
"snowbridge-outbound-queue-merkle-tree" +version = "0.1.1" +dependencies = [ + "array-bytes 4.2.0", + "env_logger 0.9.3", + "hex", + "hex-literal", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "snowbridge-outbound-queue-runtime-api" +version = "0.1.0" +dependencies = [ + "frame-support", + "parity-scale-codec", + "snowbridge-core", + "snowbridge-outbound-queue-merkle-tree", + "sp-api", + "sp-core", + "sp-std 8.0.0", + "staging-xcm", +] + +[[package]] +name = "snowbridge-rococo-common" +version = "0.0.1" +dependencies = [ + "frame-support", + "log", + "staging-xcm", +] + +[[package]] +name = "snowbridge-router-primitives" +version = "0.1.1" +dependencies = [ + "ethabi-decode", + "frame-support", + "frame-system", + "hex-literal", + "log", + "parity-scale-codec", + "rustc-hex", + "scale-info", + "serde", + "snowbridge-core", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 8.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + +[[package]] +name = "snowbridge-runtime-common" +version = "0.1.1" +dependencies = [ + "frame-support", + "frame-system", + "log", + "snowbridge-core", + "sp-arithmetic", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + +[[package]] +name = "snowbridge-runtime-tests" +version = "0.1.0" +dependencies = [ + "asset-hub-rococo-runtime", + "assets-common", + "bridge-hub-rococo-runtime", + "bridge-hub-test-utils", + "bridge-runtime-common", + "cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parachains-runtimes-test-utils", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "rococo-runtime-constants", + "scale-info", + "serde", + "smallvec", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-ethereum-beacon-client", + "snowbridge-inbound-queue", + "snowbridge-outbound-queue", + "snowbridge-outbound-queue-runtime-api", + "snowbridge-router-primitives", + "snowbridge-system", + "snowbridge-system-runtime-api", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "static_assertions", ] [[package]] -name = "snap" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" +name = "snowbridge-system" +version = "0.1.1" +dependencies = [ + "ethabi-decode", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex", + "hex-literal", + "log", + 
"pallet-balances", + "pallet-message-queue", + "parity-scale-codec", + "polkadot-primitives", + "scale-info", + "snowbridge-core", + "snowbridge-outbound-queue", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-std 8.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] [[package]] -name = "snow" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +name = "snowbridge-system-runtime-api" +version = "0.1.0" dependencies = [ - "aes-gcm 0.9.4", - "blake2 0.10.6", - "chacha20poly1305", - "curve25519-dalek 4.0.0", - "rand_core 0.6.4", - "ring 0.16.20", - "rustc_version 0.4.0", - "sha2 0.10.7", - "subtle 2.4.1", + "parity-scale-codec", + "snowbridge-core", + "sp-api", + "sp-core", + "sp-std 8.0.0", + "staging-xcm", ] [[package]] @@ -17105,10 +17684,10 @@ dependencies = [ "assert_matches", "blake2 0.10.6", "expander 2.0.0", - "proc-macro-crate", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -17387,7 +17966,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "merlin 2.0.1", + "merlin 3.0.0", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -17395,7 +17974,7 @@ dependencies = [ "rand 0.8.5", "regex", "scale-info", - "schnorrkel 0.9.1", + "schnorrkel 0.11.4", "secp256k1", "secrecy", "serde", @@ -17415,6 +17994,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sp-core-fuzz" +version = "0.0.0" +dependencies = [ + "lazy_static", + "libfuzzer-sys", + "regex", + "sp-core", +] + [[package]] name = "sp-core-hashing" version = "9.0.0" @@ -17433,7 +18022,7 @@ version = "9.0.0" dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -17451,7 +18040,7 @@ dependencies = [ "ark-ed-on-bls12-377-ext", "ark-ed-on-bls12-381-bandersnatch", "ark-ed-on-bls12-381-bandersnatch-ext", - "ark-scale", + "ark-scale 0.0.12", "sp-runtime-interface 17.0.0", "sp-std 8.0.0", ] @@ -17459,7 +18048,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -17472,7 +18061,7 @@ dependencies = [ "ark-ed-on-bls12-377-ext", "ark-ed-on-bls12-381-bandersnatch", "ark-ed-on-bls12-381-bandersnatch-ext", - "ark-scale", + "ark-scale 0.0.11", "sp-runtime-interface 17.0.0 (git+https://github.com/paritytech/polkadot-sdk)", "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", ] @@ -17491,17 +18080,17 @@ version = "8.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -17517,7 +18106,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "environmental", "parity-scale-codec", @@ -17576,7 +18165,6 @@ 
dependencies = [ name = "sp-keyring" version = "24.0.0" dependencies = [ - "lazy_static", "sp-core", "sp-runtime", "strum", @@ -17588,7 +18176,7 @@ version = "0.27.0" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", - "rand 0.7.3", + "rand 0.8.5", "rand_chacha 0.2.2", "sp-core", "sp-externalities 0.19.0", @@ -17661,7 +18249,7 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "honggfuzz", "rand 0.8.5", "sp-npos-elections", @@ -17700,6 +18288,7 @@ dependencies = [ name = "sp-runtime" version = "24.0.0" dependencies = [ + "docify", "either", "hash256-std-hasher", "impl-trait-for-tuples", @@ -17710,6 +18299,7 @@ dependencies = [ "scale-info", "serde", "serde_json", + "simple-mermaid", "sp-api", "sp-application-crypto", "sp-arithmetic", @@ -17749,7 +18339,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -17769,22 +18359,23 @@ name = "sp-runtime-interface-proc-macro" version = "11.0.0" dependencies = [ "Inflector", - "proc-macro-crate", + "expander 2.0.0", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "Inflector", - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -17881,7 +18472,7 @@ name = "sp-statement-store" version = "4.0.0-dev" dependencies = [ "aes-gcm 0.10.3", - "curve25519-dalek 4.0.0", + "curve25519-dalek 4.1.1", "ed25519-dalek", "hkdf", "parity-scale-codec", @@ -17906,7 +18497,7 @@ version = "8.0.0" [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" [[package]] name = "sp-storage" @@ -17923,7 +18514,7 @@ dependencies = [ [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "impl-serde", "parity-scale-codec", @@ -17972,7 +18563,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "parity-scale-codec", "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", @@ -18011,7 +18602,6 @@ dependencies = [ "array-bytes 6.1.0", "criterion 0.4.0", "hash-db", - "hashbrown 0.13.2", "lazy_static", "memory-db", "nohash-hasher", @@ -18021,6 +18611,7 @@ dependencies = [ "scale-info", "schnellru", "sp-core", + "sp-externalities 0.19.0", "sp-runtime", "sp-std 8.0.0", "thiserror", @@ -18055,7 +18646,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.38", + "syn 2.0.41", ] 
[[package]] @@ -18073,7 +18664,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -18087,12 +18678,13 @@ dependencies = [ name = "sp-weights" version = "20.0.0" dependencies = [ + "bounded-collections", "parity-scale-codec", "scale-info", + "schemars", "serde", "smallvec", "sp-arithmetic", - "sp-core", "sp-debug-derive 8.0.0", "sp-std 8.0.0", ] @@ -18120,16 +18712,6 @@ dependencies = [ "strum", ] -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] - [[package]] name = "spki" version = "0.7.2" @@ -18137,7 +18719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.8", + "der", ] [[package]] @@ -18155,6 +18737,29 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "ssz_rs" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" +dependencies = [ + "bitvec", + "num-bigint", + "sha2 0.9.9", + "ssz_rs_derive", +] + +[[package]] +name = "ssz_rs_derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -18165,18 +18770,11 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" name = "staging-chain-spec-builder" version = "2.0.0" dependencies = [ - "ansi_term", - "clap 4.4.6", - "kitchensink-runtime", + "clap 4.4.11", "log", - "rand 0.8.5", "sc-chain-spec", - "sc-keystore", "serde_json", - "sp-core", - "sp-keystore", "sp-tracing 10.0.0", - "staging-node-cli", ] [[package]] @@ -18185,10 +18783,12 @@ version = "3.0.0-dev" dependencies = [ "array-bytes 6.1.0", "assert_cmd", - "clap 4.4.6", + "clap 4.4.11", "clap_complete", "criterion 0.4.0", + "frame-benchmarking", "frame-benchmarking-cli", + "frame-support", "frame-system", "frame-system-rpc-runtime-api", "futures", @@ -18198,12 +18798,20 @@ dependencies = [ "nix 0.26.2", "node-primitives", "node-rpc", + "node-testing", "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", "pallet-balances", + "pallet-contracts", + "pallet-glutton", "pallet-im-online", + "pallet-root-testing", + "pallet-skip-feeless-payment", + "pallet-sudo", "pallet-timestamp", + "pallet-transaction-payment", + "pallet-treasury", "parity-scale-codec", "platforms", "rand 0.8.5", @@ -18238,27 +18846,31 @@ dependencies = [ "sc-telemetry", "sc-transaction-pool", "sc-transaction-pool-api", + "scale-info", "serde", "serde_json", "soketto", "sp-api", + "sp-application-crypto", "sp-authority-discovery", "sp-blockchain", "sp-consensus", "sp-consensus-babe", "sp-consensus-grandpa", "sp-core", + "sp-externalities 0.19.0", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-mixnet", "sp-runtime", + "sp-state-machine", "sp-statement-store", "sp-timestamp", "sp-tracing 10.0.0", 
"sp-transaction-storage-proof", - "staging-node-executor", + "sp-trie", "staging-node-inspect", "substrate-build-script-utils", "substrate-cli-test-utils", @@ -18269,44 +18881,6 @@ dependencies = [ "tokio-util", "try-runtime-cli", "wait-timeout", -] - -[[package]] -name = "staging-node-executor" -version = "3.0.0-dev" -dependencies = [ - "criterion 0.4.0", - "frame-benchmarking", - "frame-support", - "frame-system", - "futures", - "kitchensink-runtime", - "node-primitives", - "node-testing", - "pallet-balances", - "pallet-contracts", - "pallet-glutton", - "pallet-im-online", - "pallet-root-testing", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-treasury", - "parity-scale-codec", - "sc-executor", - "scale-info", - "serde_json", - "sp-application-crypto", - "sp-consensus-babe", - "sp-core", - "sp-externalities 0.19.0", - "sp-keyring", - "sp-keystore", - "sp-runtime", - "sp-state-machine", - "sp-statement-store", - "sp-tracing 10.0.0", - "sp-trie", "wat", ] @@ -18314,14 +18888,16 @@ dependencies = [ name = "staging-node-inspect" version = "0.9.0-dev" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "parity-scale-codec", "sc-cli", "sc-client-api", "sc-service", "sp-blockchain", "sp-core", + "sp-io", "sp-runtime", + "sp-statement-store", "thiserror", ] @@ -18355,6 +18931,7 @@ dependencies = [ "log", "parity-scale-codec", "scale-info", + "schemars", "serde", "sp-io", "sp-weights", @@ -18401,6 +18978,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", + "scale-info", "sp-arithmetic", "sp-core", "sp-io", @@ -18469,6 +19047,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "strobe-rs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fabb238a1cccccfa4c4fb703670c0d157e1256c1ba695abf1b93bd2bb14bab2d" +dependencies = [ + "bitflags 1.3.2", + "byteorder", + "keccak", + "subtle 2.4.1", + "zeroize", +] + [[package]] name = "strsim" version = "0.10.0" @@ -18497,51 +19088,12 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "stun" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" -dependencies = [ - "base64 0.13.1", - "crc", - "lazy_static", - "md-5", - "rand 0.8.5", - "ring 0.16.20", - "subtle 2.4.1", - "thiserror", - "tokio", - "url", - "webrtc-util", -] - [[package]] name = "subkey" version = "3.0.0" dependencies = [ - "clap 4.4.6", - "sc-cli", -] - -[[package]] -name = "substrate" -version = "1.0.0" -dependencies = [ - "frame-support", - "sc-chain-spec", + "clap 4.4.11", "sc-cli", - "sc-consensus-aura", - "sc-consensus-babe", - "sc-consensus-beefy", - "sc-consensus-grandpa", - "sc-consensus-manual-seal", - "sc-consensus-pow", - "sc-service", - "simple-mermaid", - "sp-runtime", - "staging-chain-spec-builder", - "subkey", ] [[package]] @@ -18582,7 +19134,7 @@ dependencies = [ name = "substrate-frame-cli" version = "4.0.0-dev" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "frame-support", "frame-system", "sc-cli", @@ -18801,20 +19353,11 @@ dependencies = [ "sp-maybe-compressed-blob", "strum", "tempfile", - "toml 0.7.6", + "toml 0.8.2", "walkdir", "wasm-opt", ] -[[package]] -name = "substring" -version = "1.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ee6433ecef213b2e72f587ef64a2f5943e7cd16fbd82dbe8bc07486c534c86" -dependencies = [ - "autocfg", -] - [[package]] name = "subtle" version = "1.0.0" @@ -18937,15 +19480,27 @@ 
dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b837ef12ab88835251726eb12237655e61ec8dc8a280085d1961cdc3dfd047" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.41", +] + [[package]] name = "synstructure" version = "0.12.6" @@ -19004,13 +19559,13 @@ checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand 2.0.0", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -19057,7 +19612,7 @@ dependencies = [ name = "test-parachain-adder-collator" version = "1.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "futures", "futures-timer", "log", @@ -19105,7 +19660,7 @@ dependencies = [ name = "test-parachain-undying-collator" version = "1.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.11", "futures", "futures-timer", "log", @@ -19159,9 +19714,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] @@ -19188,13 +19743,13 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -19266,17 +19821,6 @@ dependencies = [ "tikv-jemalloc-sys", ] -[[package]] -name = "time" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - [[package]] name = "time" version = "0.3.27" @@ -19341,9 +19885,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ "backtrace", "bytes", @@ -19366,7 +19910,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -19453,14 +19997,26 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.6" 
+version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.19.15", +] + +[[package]] +name = "toml" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.20.2", ] [[package]] @@ -19474,9 +20030,22 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ "indexmap 2.0.0", "serde", @@ -19547,14 +20116,14 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -19586,10 +20155,10 @@ version = "1.0.0" dependencies = [ "assert_matches", "expander 2.0.0", - "proc-macro-crate", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] @@ -19742,7 +20311,7 @@ version = "0.10.0-dev" dependencies = [ "assert_cmd", "async-trait", - "clap 4.4.6", + "clap 4.4.11", "frame-remote-externalities", "frame-try-runtime", "hex", @@ -19818,25 +20387,6 @@ dependencies = [ "utf-8", ] -[[package]] -name = "turn" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" -dependencies = [ - "async-trait", - "base64 0.13.1", - "futures", - "log", - "md-5", - "rand 0.8.5", - "ring 0.16.20", - "stun", - "thiserror", - "tokio", - "webrtc-util", -] - [[package]] name = "twox-hash" version = "1.6.3" @@ -19873,6 +20423,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicode-bidi" version = "0.3.13" @@ -19926,6 +20482,12 @@ dependencies = [ "subtle 2.4.1", ] +[[package]] +name = "unsafe-libyaml" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" + [[package]] name = "unsigned-varint" version = "0.7.1" @@ -19972,9 +20534,6 @@ name = "uuid" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" 
-dependencies = [ - "getrandom 0.2.10", -] [[package]] name = "valuable" @@ -20045,8 +20604,8 @@ dependencies = [ "ark-bls12-377", "ark-bls12-381", "ark-ec", - "ark-ff", - "ark-serialize", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", "ark-serialize-derive", "arrayref", "constcat", @@ -20069,15 +20628,6 @@ dependencies = [ "libc", ] -[[package]] -name = "waitgroup" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1f50000a783467e6c0200f9d10642f4bc424e39efc1b770203e88b488f79292" -dependencies = [ - "atomic-waker", -] - [[package]] name = "waker-fn" version = "1.1.0" @@ -20109,12 +20659,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -20144,7 +20688,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", "wasm-bindgen-shared", ] @@ -20178,7 +20722,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -20189,6 +20733,30 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +[[package]] +name = "wasm-bindgen-test" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e6e302a7ea94f83a6d09e78e7dc7d9ca7b186bc2829c24a22d0753efd680671" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "wasm-encoder" version = "0.31.1" @@ -20547,16 +21115,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring 0.16.20", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -20573,7 +21131,7 @@ version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ - "webpki 0.22.0", + "webpki", ] [[package]] @@ -20591,214 +21149,6 @@ version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" -[[package]] -name = "webrtc" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3bc9049bdb2cea52f5fd4f6f728184225bdb867ed0dc2410eab6df5bdd67bb" -dependencies = [ - "arc-swap", - "async-trait", - "bytes", - "hex", - "interceptor", - "lazy_static", - "log", - "rand 0.8.5", - "rcgen 0.9.3", - "regex", - "ring 0.16.20", - "rtcp", - "rtp", - "rustls 0.19.1", - "sdp", - "serde", 
- "serde_json", - "sha2 0.10.7", - "stun", - "thiserror", - "time 0.3.27", - "tokio", - "turn", - "url", - "waitgroup", - "webrtc-data", - "webrtc-dtls", - "webrtc-ice", - "webrtc-mdns", - "webrtc-media", - "webrtc-sctp", - "webrtc-srtp", - "webrtc-util", -] - -[[package]] -name = "webrtc-data" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef36a4d12baa6e842582fe9ec16a57184ba35e1a09308307b67d43ec8883100" -dependencies = [ - "bytes", - "derive_builder", - "log", - "thiserror", - "tokio", - "webrtc-sctp", - "webrtc-util", -] - -[[package]] -name = "webrtc-dtls" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267" -dependencies = [ - "aes 0.6.0", - "aes-gcm 0.10.3", - "async-trait", - "bincode", - "block-modes", - "byteorder", - "ccm", - "curve25519-dalek 3.2.0", - "der-parser 8.2.0", - "elliptic-curve 0.12.3", - "hkdf", - "hmac 0.12.1", - "log", - "p256", - "p384", - "rand 0.8.5", - "rand_core 0.6.4", - "rcgen 0.10.0", - "ring 0.16.20", - "rustls 0.19.1", - "sec1 0.3.0", - "serde", - "sha1", - "sha2 0.10.7", - "signature 1.6.4", - "subtle 2.4.1", - "thiserror", - "tokio", - "webpki 0.21.4", - "webrtc-util", - "x25519-dalek 2.0.0", - "x509-parser 0.13.2", -] - -[[package]] -name = "webrtc-ice" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" -dependencies = [ - "arc-swap", - "async-trait", - "crc", - "log", - "rand 0.8.5", - "serde", - "serde_json", - "stun", - "thiserror", - "tokio", - "turn", - "url", - "uuid", - "waitgroup", - "webrtc-mdns", - "webrtc-util", -] - -[[package]] -name = "webrtc-mdns" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" -dependencies = [ - "log", - "socket2 0.4.9", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-media" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" -dependencies = [ - "byteorder", - "bytes", - "rand 0.8.5", - "rtp", - "thiserror", -] - -[[package]] -name = "webrtc-sctp" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d47adcd9427eb3ede33d5a7f3424038f63c965491beafcc20bc650a2f6679c0" -dependencies = [ - "arc-swap", - "async-trait", - "bytes", - "crc", - "log", - "rand 0.8.5", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-srtp" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6183edc4c1c6c0175f8812eefdce84dfa0aea9c3ece71c2bf6ddd3c964de3da5" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "aes-gcm 0.9.4", - "async-trait", - "byteorder", - "bytes", - "ctr 0.8.0", - "hmac 0.11.0", - "log", - "rtcp", - "rtp", - "sha-1 0.9.8", - "subtle 2.4.1", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-util" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" -dependencies = [ - "async-trait", - "bitflags 1.3.2", - "bytes", - "cc", - "ipnet", - "lazy_static", - "libc", - "log", - "nix 0.24.3", - "rand 0.8.5", - "thiserror", - "tokio", - "winapi", -] - [[package]] name 
= "westend-emulated-chain" version = "0.0.0" @@ -20942,6 +21292,7 @@ dependencies = [ "sp-runtime", "sp-weights", "staging-xcm", + "staging-xcm-builder", ] [[package]] @@ -20950,6 +21301,7 @@ version = "0.0.0" dependencies = [ "asset-hub-westend-emulated-chain", "bridge-hub-westend-emulated-chain", + "collectives-westend-emulated-chain", "emulated-integration-tests-common", "penpal-emulated-chain", "westend-emulated-chain", @@ -21216,37 +21568,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "emulated-integration-tests-common", - "pallet-im-online", - "parachains-common", - "polkadot-primitives", - "rococo-emulated-chain", - "rococo-runtime", - "rococo-runtime-constants", - "sc-consensus-grandpa", - "serde_json", - "sp-authority-discovery", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "wococo-system-emulated-network" -version = "0.0.0" -dependencies = [ - "asset-hub-wococo-emulated-chain", - "bridge-hub-wococo-emulated-chain", - "emulated-integration-tests-common", - "penpal-emulated-chain", - "wococo-emulated-chain", -] - [[package]] name = "wyz" version = "0.5.1" @@ -21273,47 +21594,28 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 4.0.0", + "curve25519-dalek 4.1.1", "rand_core 0.6.4", "serde", "zeroize", ] -[[package]] -name = "x509-parser" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" -dependencies = [ - "asn1-rs 0.3.1", - "base64 0.13.1", - "data-encoding", - "der-parser 7.0.0", - "lazy_static", - "nom", - "oid-registry 0.4.0", - "ring 0.16.20", - "rusticata-macros", - "thiserror", - "time 0.3.27", -] - [[package]] name = "x509-parser" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" dependencies = [ - "asn1-rs 0.5.2", + "asn1-rs", "base64 0.13.1", "data-encoding", - "der-parser 8.2.0", + "der-parser", "lazy_static", "nom", - "oid-registry 0.6.1", + "oid-registry", "rusticata-macros", "thiserror", - "time 0.3.27", + "time", ] [[package]] @@ -21364,6 +21666,7 @@ dependencies = [ "frame-support", "frame-system", "futures", + "pallet-transaction-payment", "pallet-xcm", "parity-scale-codec", "polkadot-test-client", @@ -21385,7 +21688,8 @@ dependencies = [ "Inflector", "proc-macro2", "quote", - "syn 2.0.38", + "staging-xcm", + "syn 2.0.41", "trybuild", ] @@ -21485,7 +21789,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.27", + "time", ] [[package]] @@ -21505,7 +21809,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.41", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 42bbac37a6cac1d2d8aec4085fb78cc1844d279f..983b3bdf2059d0cf2e7abd2e5e529bd5d5552782 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,24 +13,20 @@ members = [ "bridges/modules/messages", "bridges/modules/parachains", "bridges/modules/relayers", + "bridges/modules/xcm-bridge-hub", "bridges/modules/xcm-bridge-hub-router", - 
"bridges/primitives/chain-asset-hub-kusama", - "bridges/primitives/chain-asset-hub-polkadot", "bridges/primitives/chain-asset-hub-rococo", "bridges/primitives/chain-asset-hub-westend", - "bridges/primitives/chain-asset-hub-wococo", "bridges/primitives/chain-bridge-hub-cumulus", "bridges/primitives/chain-bridge-hub-kusama", "bridges/primitives/chain-bridge-hub-polkadot", "bridges/primitives/chain-bridge-hub-rococo", "bridges/primitives/chain-bridge-hub-westend", - "bridges/primitives/chain-bridge-hub-wococo", "bridges/primitives/chain-kusama", "bridges/primitives/chain-polkadot", "bridges/primitives/chain-polkadot-bulletin", "bridges/primitives/chain-rococo", "bridges/primitives/chain-westend", - "bridges/primitives/chain-wococo", "bridges/primitives/header-chain", "bridges/primitives/messages", "bridges/primitives/parachains", @@ -38,7 +34,22 @@ members = [ "bridges/primitives/relayers", "bridges/primitives/runtime", "bridges/primitives/test-utils", + "bridges/primitives/xcm-bridge-hub", "bridges/primitives/xcm-bridge-hub-router", + "bridges/snowbridge/parachain/pallets/ethereum-beacon-client", + "bridges/snowbridge/parachain/pallets/inbound-queue", + "bridges/snowbridge/parachain/pallets/outbound-queue", + "bridges/snowbridge/parachain/pallets/outbound-queue/merkle-tree", + "bridges/snowbridge/parachain/pallets/outbound-queue/runtime-api", + "bridges/snowbridge/parachain/pallets/system", + "bridges/snowbridge/parachain/pallets/system/runtime-api", + "bridges/snowbridge/parachain/primitives/beacon", + "bridges/snowbridge/parachain/primitives/core", + "bridges/snowbridge/parachain/primitives/ethereum", + "bridges/snowbridge/parachain/primitives/router", + "bridges/snowbridge/parachain/runtime/rococo-common", + "bridges/snowbridge/parachain/runtime/runtime-common", + "bridges/snowbridge/parachain/runtime/tests", "cumulus/client/cli", "cumulus/client/collator", "cumulus/client/consensus/aura", @@ -65,40 +76,37 @@ members = [ "cumulus/parachain-template/pallets/template", "cumulus/parachain-template/runtime", "cumulus/parachains/common", - "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo", - "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", - "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo", - "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", - "cumulus/parachains/integration-tests/emulated/common", - "cumulus/parachains/integration-tests/emulated/chains/relays/rococo", - "cumulus/parachains/integration-tests/emulated/chains/relays/wococo", - "cumulus/parachains/integration-tests/emulated/chains/relays/westend", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", - "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend", + "cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend", + "cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal", + "cumulus/parachains/integration-tests/emulated/chains/relays/rococo", + "cumulus/parachains/integration-tests/emulated/chains/relays/westend", + "cumulus/parachains/integration-tests/emulated/common", 
"cumulus/parachains/integration-tests/emulated/networks/rococo-system", - "cumulus/parachains/integration-tests/emulated/networks/wococo-system", - "cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system", + "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system", "cumulus/parachains/integration-tests/emulated/networks/westend-system", + "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", + "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", "cumulus/parachains/pallets/collective-content", "cumulus/parachains/pallets/parachain-info", "cumulus/parachains/pallets/ping", - "cumulus/parachains/runtimes/assets/asset-hub-kusama", - "cumulus/parachains/runtimes/assets/asset-hub-polkadot", "cumulus/parachains/runtimes/assets/asset-hub-rococo", "cumulus/parachains/runtimes/assets/asset-hub-westend", "cumulus/parachains/runtimes/assets/common", "cumulus/parachains/runtimes/assets/test-utils", - "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama", - "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot", "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo", "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", "cumulus/parachains/runtimes/bridge-hubs/test-utils", - "cumulus/parachains/runtimes/collectives/collectives-polkadot", + "cumulus/parachains/runtimes/collectives/collectives-westend", "cumulus/parachains/runtimes/contracts/contracts-rococo", - "cumulus/parachains/runtimes/glutton/glutton-kusama", + "cumulus/parachains/runtimes/coretime/coretime-rococo", + "cumulus/parachains/runtimes/coretime/coretime-westend", + "cumulus/parachains/runtimes/glutton/glutton-westend", "cumulus/parachains/runtimes/starters/seedling", "cumulus/parachains/runtimes/starters/shell", "cumulus/parachains/runtimes/test-utils", @@ -108,6 +116,7 @@ members = [ "cumulus/primitives/aura", "cumulus/primitives/core", "cumulus/primitives/parachain-inherent", + "cumulus/primitives/proof-size-hostfunction", "cumulus/primitives/timestamp", "cumulus/primitives/utility", "cumulus/test/client", @@ -115,6 +124,7 @@ members = [ "cumulus/test/runtime", "cumulus/test/service", "cumulus/xcm/xcm-emulator", + "docs/sdk", "polkadot", "polkadot/cli", "polkadot/core-primitives", @@ -132,8 +142,8 @@ members = [ "polkadot/node/core/parachains-inherent", "polkadot/node/core/prospective-parachains", "polkadot/node/core/provisioner", - "polkadot/node/core/pvf-checker", "polkadot/node/core/pvf", + "polkadot/node/core/pvf-checker", "polkadot/node/core/pvf/common", "polkadot/node/core/pvf/execute-worker", "polkadot/node/core/pvf/prepare-worker", @@ -156,12 +166,14 @@ members = [ "polkadot/node/overseer", "polkadot/node/primitives", "polkadot/node/service", + "polkadot/node/subsystem", + "polkadot/node/subsystem-bench", "polkadot/node/subsystem-test-helpers", "polkadot/node/subsystem-types", "polkadot/node/subsystem-util", - "polkadot/node/subsystem", "polkadot/node/test/client", "polkadot/node/test/service", + "polkadot/node/tracking-allocator", "polkadot/node/zombienet-backchannel", "polkadot/parachain", "polkadot/parachain/test-parachains", @@ -187,8 +199,8 @@ members = [ "polkadot/utils/generate-bags", "polkadot/utils/remote-ext-tests/bags-list", "polkadot/xcm", - "polkadot/xcm/pallet-xcm-benchmarks", "polkadot/xcm/pallet-xcm", + 
"polkadot/xcm/pallet-xcm-benchmarks", "polkadot/xcm/procedural", "polkadot/xcm/xcm-builder", "polkadot/xcm/xcm-executor", @@ -198,13 +210,11 @@ members = [ "polkadot/xcm/xcm-simulator/fuzzer", "substrate/bin/minimal/node", "substrate/bin/minimal/runtime", - "substrate", "substrate/bin/node-template/node", "substrate/bin/node-template/pallets/template", "substrate/bin/node-template/runtime", "substrate/bin/node/bench", "substrate/bin/node/cli", - "substrate/bin/node/executor", "substrate/bin/node/inspect", "substrate/bin/node/primitives", "substrate/bin/node/rpc", @@ -242,8 +252,8 @@ members = [ "substrate/client/merkle-mountain-range", "substrate/client/merkle-mountain-range/rpc", "substrate/client/mixnet", - "substrate/client/network-gossip", "substrate/client/network", + "substrate/client/network-gossip", "substrate/client/network/bitswap", "substrate/client/network/common", "substrate/client/network/light", @@ -253,10 +263,10 @@ members = [ "substrate/client/network/transactions", "substrate/client/offchain", "substrate/client/proposer-metrics", + "substrate/client/rpc", "substrate/client/rpc-api", "substrate/client/rpc-servers", "substrate/client/rpc-spec-v2", - "substrate/client/rpc", "substrate/client/service", "substrate/client/service/test", "substrate/client/state-db", @@ -284,8 +294,8 @@ members = [ "substrate/frame/bags-list/fuzzer", "substrate/frame/bags-list/remote-tests", "substrate/frame/balances", - "substrate/frame/beefy-mmr", "substrate/frame/beefy", + "substrate/frame/beefy-mmr", "substrate/frame/benchmarking", "substrate/frame/benchmarking/pov", "substrate/frame/bounties", @@ -294,8 +304,10 @@ members = [ "substrate/frame/collective", "substrate/frame/contracts", "substrate/frame/contracts/fixtures", - "substrate/frame/contracts/primitives", + "substrate/frame/contracts/fixtures/contracts/common", + "substrate/frame/contracts/mock-network", "substrate/frame/contracts/proc-macro", + "substrate/frame/contracts/uapi", "substrate/frame/conviction-voting", "substrate/frame/core-fellowship", "substrate/frame/democracy", @@ -310,9 +322,11 @@ members = [ "substrate/frame/examples/basic", "substrate/frame/examples/default-config", "substrate/frame/examples/dev-mode", + "substrate/frame/examples/frame-crate", "substrate/frame/examples/kitchensink", "substrate/frame/examples/offchain-worker", "substrate/frame/examples/split", + "substrate/frame/examples/tasks", "substrate/frame/executive", "substrate/frame/fast-unstake", "substrate/frame/glutton", @@ -352,6 +366,7 @@ members = [ "substrate/frame/root-testing", "substrate/frame/safe-mode", "substrate/frame/salary", + "substrate/frame/sassafras", "substrate/frame/scheduler", "substrate/frame/scored-pool", "substrate/frame/session", @@ -382,6 +397,7 @@ members = [ "substrate/frame/transaction-payment/asset-tx-payment", "substrate/frame/transaction-payment/rpc", "substrate/frame/transaction-payment/rpc/runtime-api", + "substrate/frame/transaction-payment/skip-feeless-payment", "substrate/frame/transaction-storage", "substrate/frame/treasury", "substrate/frame/try-runtime", @@ -409,6 +425,7 @@ members = [ "substrate/primitives/consensus/sassafras", "substrate/primitives/consensus/slots", "substrate/primitives/core", + "substrate/primitives/core/fuzz", "substrate/primitives/core/hashing", "substrate/primitives/core/hashing/proc-macro", "substrate/primitives/crypto/ec-utils", @@ -429,12 +446,12 @@ members = [ "substrate/primitives/offchain", "substrate/primitives/panic-handler", "substrate/primitives/rpc", + 
"substrate/primitives/runtime", "substrate/primitives/runtime-interface", "substrate/primitives/runtime-interface/proc-macro", - "substrate/primitives/runtime-interface/test-wasm-deprecated", - "substrate/primitives/runtime-interface/test-wasm", "substrate/primitives/runtime-interface/test", - "substrate/primitives/runtime", + "substrate/primitives/runtime-interface/test-wasm", + "substrate/primitives/runtime-interface/test-wasm-deprecated", "substrate/primitives/session", "substrate/primitives/staking", "substrate/primitives/state-machine", @@ -474,15 +491,43 @@ members = [ "substrate/utils/prometheus", "substrate/utils/wasm-builder", ] -default-members = [ "polkadot", "substrate/bin/node/cli" ] +default-members = ["polkadot", "substrate/bin/node/cli"] + +[workspace.lints.rust] +suspicious_double_ref_op = { level = "allow", priority = 2 } + +[workspace.lints.clippy] +all = { level = "allow", priority = 0 } +correctness = { level = "warn", priority = 1 } +complexity = { level = "warn", priority = 1 } +if-same-then-else = { level = "allow", priority = 2 } +zero-prefixed-literal = { level = "allow", priority = 2 } # 00_1000_000 +type_complexity = { level = "allow", priority = 2 } # raison d'etre +nonminimal-bool = { level = "allow", priority = 2 } # maybe +borrowed-box = { level = "allow", priority = 2 } # Reasonable to fix this one +too-many-arguments = { level = "allow", priority = 2 } # (Turning this on would lead to) +needless-lifetimes = { level = "allow", priority = 2 } # generated code +unnecessary_cast = { level = "allow", priority = 2 } # Types may change +identity-op = { level = "allow", priority = 2 } # One case where we do 0 + +useless_conversion = { level = "allow", priority = 2 } # Types may change +unit_arg = { level = "allow", priority = 2 } # stylistic +option-map-unit-fn = { level = "allow", priority = 2 } # stylistic +bind_instead_of_map = { level = "allow", priority = 2 } # stylistic +erasing_op = { level = "allow", priority = 2 } # E.g. 0 * DOLLARS +eq_op = { level = "allow", priority = 2 } # In tests we test equality. +while_immutable_condition = { level = "allow", priority = 2 } # false positives +needless_option_as_deref = { level = "allow", priority = 2 } # false positives +derivable_impls = { level = "allow", priority = 2 } # false positives +stable_sort_primitive = { level = "allow", priority = 2 } # prefer stable sort +extra-unused-type-parameters = { level = "allow", priority = 2 } # stylistic +default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic [profile.release] # Polkadot runtime requires unwinding. panic = "unwind" opt-level = 3 -# make sure dev builds with backtrace do -# not slow us down +# make sure dev builds with backtrace do not slow us down [profile.dev.package.backtrace] inherits = "release" diff --git a/README.md b/README.md index 56b3481bafc038f7d3dd0606558e9e37243dcd53..1f255823b5b695baf63196304448ae1d9ee23c13 100644 --- a/README.md +++ b/README.md @@ -46,12 +46,12 @@ Below are the primary upstream dependencies utilized in this project: ## Security -The security policy and procedures can be found in [docs/SECURITY.md](./docs/SECURITY.md). +The security policy and procedures can be found in [docs/contributor/SECURITY.md](./docs/contributor/SECURITY.md). ## Contributing & Code of Conduct -Ensure you follow our [contribution guidelines](./docs/CONTRIBUTING.md). In every interaction and contribution, this -project adheres to the [Contributor Covenant Code of Conduct](./docs/CODE_OF_CONDUCT.md). 
+Ensure you follow our [contribution guidelines](./docs/contributor/CONTRIBUTING.md). In every interaction and +contribution, this project adheres to the [Contributor Covenant Code of Conduct](./docs/contributor/CODE_OF_CONDUCT.md). ## Additional Resources diff --git a/bridges/README.md b/bridges/README.md index da46fe67d924acb2afffcf971bacb60b560f0cd5..a2ce213d2541c346361eb28125a06e3079e1c269 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -68,7 +68,7 @@ For example, consider the case below where we want to bridge two Substrate based ``` +---------------+ +---------------+ | | | | -| Rialto | | Millau | +| Rococo | | Westend | | | | | +-------+-------+ +-------+-------+ ^ ^ @@ -79,9 +79,9 @@ For example, consider the case below where we want to bridge two Substrate based +---------------+ ``` -The Millau chain must be able to accept Rialto headers and verify their integrity. It does this by using a runtime +The Rococo chain must be able to accept Westend headers and verify their integrity. It does this by using a runtime module designed to track GRANDPA finality. Since two blockchains can't interact directly they need an external service, -called a relayer, to communicate. The relayer will subscribe to new Rialto headers via RPC and submit them to the Millau +called a relayer, to communicate. The relayer will subscribe to new Rococo headers via RPC and submit them to the Westend chain for verification. Take a look at [Bridge High Level Documentation](./docs/high-level-overview.md) for more in-depth description of the @@ -94,164 +94,23 @@ Here's an overview of how the project is laid out. The main bits are the `bin`, messages between chains. ``` -├── bin // Node and Runtime for the various Substrate chains -│ └── ... -├── deployments // Useful tools for deploying test networks +├── modules // Substrate Runtime Modules (a.k.a Pallets) +│ ├── beefy // On-Chain BEEFY Light Client (in progress) +│ ├── grandpa // On-Chain GRANDPA Light Client +│ ├── messages // Cross Chain Message Passing +│ ├── parachains // On-Chain Parachains Light Client +│ ├── relayers // Relayer Rewards Registry +│ ├── xcm-bridge-hub // Multiple Dynamic Bridges Support +│ ├── xcm-bridge-hub-router // XCM Router that may be used to Connect to XCM Bridge Hub +├── primitives // Code shared between modules, runtimes, and relays │ └── ... -├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── beefy // On-Chain BEEFY Light Client (in progress) -│ ├── grandpa // On-Chain GRANDPA Light Client -│ ├── messages // Cross Chain Message Passing -│ ├── parachains // On-Chain Parachains Light Client -│ ├── relayers // Relayer rewards registry +├── relays // Application for sending finality proofs and messages between chains │ └── ... -├── primitives // Code shared between modules, runtimes, and relays -│ └── ... -├── relays // Application for sending finality proofs and messages between chains -│ └── ... -└── scripts // Useful development and maintenance scripts +└── scripts // Useful development and maintenance scripts ``` ## Running the Bridge -To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes on each side of the -bridge (source and target chain). 
- -There are 2 ways to run the bridge, described below: - -- building & running from source: with this option, you'll be able to run the bridge between two standalone chains that -are running GRANDPA finality gadget to achieve finality; - -- running a Docker Compose setup: this is a recommended option, where you'll see bridges with parachains, complex relays -and more. - -### Using the Source - -First you'll need to build the bridge nodes and relay. This can be done as follows: - -```bash -# In `parity-bridges-common` folder -cargo build -p rialto-bridge-node -cargo build -p millau-bridge-node -cargo build -p substrate-relay -``` - -### Running a Dev network - -We will launch a dev network to demonstrate how to relay a message between two Substrate based chains (named Rialto and -Millau). - -To do this we will need two nodes, two relayers which will relay headers, and two relayers which will relay messages. - -#### Running from local scripts - -To run a simple dev network you can use the scripts located in the [`deployments/local-scripts` -folder](./deployments/local-scripts). - -First, we must run the two Substrate nodes. - -```bash -# In `parity-bridges-common` folder -./deployments/local-scripts/run-rialto-node.sh -./deployments/local-scripts/run-millau-node.sh -``` - -After the nodes are up we can run the header relayers. - -```bash -./deployments/local-scripts/relay-millau-to-rialto.sh -./deployments/local-scripts/relay-rialto-to-millau.sh -``` - -At this point you should see the relayer submitting headers from the Millau Substrate chain to the Rialto Substrate -chain. - -``` -# Header Relayer Logs -[Millau_to_Rialto_Sync] [date] DEBUG bridge Going to submit finality proof of Millau header #147 to Rialto -[...] [date] INFO bridge Synced 147 of 147 headers -[...] [date] DEBUG bridge Going to submit finality proof of Millau header #148 to Rialto -[...] [date] INFO bridge Synced 148 of 149 headers -``` - -Finally, we can run the message relayers. - -```bash -./deployments/local-scripts/relay-messages-millau-to-rialto.sh -./deployments/local-scripts/relay-messages-rialto-to-millau.sh -``` - -You will also see the message lane relayers listening for new messages. - -``` -# Message Relayer Logs -[Millau_to_Rialto_MessageLane_00000000] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about best message nonces -[...] [date] INFO bridge Synced Some(2) of Some(3) nonces in Millau::MessagesDelivery -> Rialto::MessagesDelivery race -[...] [date] DEBUG bridge Asking Millau::MessagesDelivery about message nonces -[...] [date] DEBUG bridge Received best nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { - latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about finalized message nonces -[...] [date] DEBUG bridge Received finalized nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { - latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Received nonces from Millau::MessagesDelivery: SourceClientNonces { new_nonces: {}, confirmed_nonce: Some(0) } -[...] [date] DEBUG bridge Asking Millau node about its state -[...] [date] DEBUG bridge Received state from Millau node: ClientState { best_self: HeaderId(1593, 0xacac***), best_finalized_self: - HeaderId(1590, 0x0be81d...), best_finalized_peer_at_best_self: HeaderId(0, 0xdcdd89...) } -``` - -To send a message see the ["How to send a message" section](#how-to-send-a-message). 
- -### How to send a message - -In this section we'll show you how to quickly send a bridge message. The message is just an encoded XCM `Trap(43)` -message. - -```bash -# In `parity-bridges-common` folder -./scripts/send-message-from-millau-rialto.sh -``` - -After sending a message you will see the following logs showing a message was successfully sent: - -``` -INFO bridge Sending message to Rialto. Size: 11. -TRACE bridge Sent transaction to Millau node: 0x5e68... -``` - -And at the Rialto node logs you'll something like this: - -``` -... runtime::bridge-messages: Received messages: total=1, valid=1. Weight used: Weight(ref_time: 1215065371, proof_size: - 48559)/Weight(ref_time: 1215065371, proof_size: 54703). -``` - -It means that the message has been delivered and dispatched. Message may be dispatched with an error, though - the goal -of our test bridge is to ensure that messages are successfully delivered and all involved components are working. - -## Full Network Docker Compose Setup - -For a more sophisticated deployment which includes bidirectional header sync, message passing, monitoring dashboards, -etc. see the [Deployments README](./deployments/README.md). - -You should note that you can find images for all the bridge components published on [Docker -Hub](https://hub.docker.com/u/paritytech). - -To run a Rialto node for example, you can use the following command: - -```bash -docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \ - -it paritytech/rialto-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external -``` - -## Community - -Main hangout for the community is [Element](https://element.io/) (formerly Riot). Element is a chat server like, for -example, Discord. Most discussions around Polkadot and Substrate happen in various Element "rooms" (channels). So, -joining Element might be a good idea, anyway. - -If you are interested in information exchange and development of Polkadot related bridges please feel free to join the -[Polkadot Bridges](https://app.element.io/#/room/#bridges:web3.foundation) Element channel. - -The [Substrate Technical](https://app.element.io/#/room/#substrate-technical:matrix.org) Element channel is most suited -for discussions regarding Substrate itself. +Apart from the live Rococo <> Westend bridge, you may spin up local networks and see how the bridge works locally. More +details may be found in +[this document](https://github.com/paritytech/polkadot-sdk/tree/master//cumulus/parachains/runtimes/bridge-hubs/README.md).
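The header-relay flow described in the bridge README above (an on-chain GRANDPA light client on the target chain, fed by an off-chain relayer that watches finalized source-chain headers and submits them for verification) can be pictured with a small, self-contained sketch. Everything below is illustrative only: `FinalizedHeader`, `SourceClient`, `TargetClient` and `run_header_relay` are hypothetical names invented for this example and are not part of the polkadot-sdk or `substrate-relay` APIs.

```rust
/// A finalized source-chain header together with its GRANDPA justification,
/// as a relayer would fetch it over RPC. Hypothetical type for illustration.
struct FinalizedHeader {
    number: u64,
    encoded_header: Vec<u8>,
    encoded_justification: Vec<u8>,
}

/// Read side: what the relayer needs from the source chain.
trait SourceClient {
    /// Best finalized block number on the source chain.
    fn best_finalized_number(&self) -> u64;
    /// Header and justification for the given finalized block number.
    fn finalized_header(&self, number: u64) -> FinalizedHeader;
}

/// Write side: what the relayer needs from the target chain, i.e. the ability
/// to submit a finality proof to the on-chain light client.
trait TargetClient {
    /// Best source-chain block number already verified by the target chain.
    fn best_synced_number(&self) -> u64;
    /// Submit a header and its justification for on-chain verification.
    fn submit_finality_proof(&self, header: &FinalizedHeader);
}

/// One pass of the relay loop: deliver every finalized source-chain header
/// that the target chain has not verified yet.
fn run_header_relay<S: SourceClient, T: TargetClient>(source: &S, target: &T) {
    let best_source = source.best_finalized_number();
    let mut next = target.best_synced_number() + 1;
    while next <= best_source {
        let header = source.finalized_header(next);
        // A real relayer also cares about proof size, since oversized
        // submissions affect fees and refunds on the target chain.
        let _proof_size = header.encoded_header.len() + header.encoded_justification.len();
        target.submit_finality_proof(&header);
        next = header.number + 1;
    }
}
```

In the real bridge the `submit_finality_proof` step is a `pallet-bridge-grandpa` call on the target chain, which verifies the justification before importing the header, and the loop is driven by the relayer binaries referenced in the linked bridge-hub README.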
diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 0ccf30987e822be4bcc739c2806246caafe28ef0..8c3e8c989dbcd0938e42356eca9681221654459c 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -7,6 +7,9 @@ edition.workspace = true repository.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } hash-db = { version = "0.16.0", default-features = false } @@ -22,6 +25,7 @@ bp-parachains = { path = "../../primitives/parachains", default-features = false bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } bp-relayers = { path = "../../primitives/relayers", default-features = false } bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-xcm-bridge-hub = { path = "../../primitives/xcm-bridge-hub", default-features = false } bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } pallet-bridge-grandpa = { path = "../../modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../modules/messages", default-features = false } @@ -50,7 +54,7 @@ bp-test-utils = { path = "../../primitives/test-utils" } pallet-balances = { path = "../../../substrate/frame/balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-messages/std", @@ -59,6 +63,7 @@ std = [ "bp-relayers/std", "bp-runtime/std", "bp-xcm-bridge-hub-router/std", + "bp-xcm-bridge-hub/std", "codec/std", "frame-support/std", "frame-system/std", @@ -92,4 +97,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", ] -integrity-test = [ "static_assertions" ] +integrity-test = ["static_assertions"] diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index ae6f40b142145dc265a69e8c78ef313f5ce9340f..d3b3b21061d05ab1e120ca3c17f8e9d12aaefe39 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -22,7 +22,6 @@ use crate::messages_call_ext::MessagesCallSubType; use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType; use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype; use sp_runtime::transaction_validity::TransactionValidity; -use xcm::v3::NetworkId; pub mod messages; pub mod messages_api; @@ -92,8 +91,8 @@ where /// ```nocompile /// generate_bridge_reject_obsolete_headers_and_messages!{ /// Call, AccountId -/// BridgeRialtoGrandpa, BridgeWestendGrandpa, -/// BridgeRialtoParachains +/// BridgeRococoGrandpa, BridgeRococoMessages, +/// BridgeRococoParachains /// } /// ``` /// @@ -147,42 +146,6 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { }; } -/// A mapping over `NetworkId`. -/// Since `NetworkId` doesn't include `Millau`, `Rialto` and `RialtoParachain`, we create some -/// synthetic associations between these chains and `NetworkId` chains. -pub enum CustomNetworkId { - /// The Millau network ID, associated with Kusama. - Millau, - /// The Rialto network ID, associated with Polkadot. - Rialto, - /// The RialtoParachain network ID, associated with Westend. 
- RialtoParachain, -} - -impl TryFrom<bp_runtime::ChainId> for CustomNetworkId { - type Error = (); - - fn try_from(chain: bp_runtime::ChainId) -> Result<Self, Self::Error> { - Ok(match chain { - bp_runtime::MILLAU_CHAIN_ID => Self::Millau, - bp_runtime::RIALTO_CHAIN_ID => Self::Rialto, - bp_runtime::RIALTO_PARACHAIN_CHAIN_ID => Self::RialtoParachain, - _ => return Err(()), - }) - } -} - -impl CustomNetworkId { - /// Converts self to XCM' network id. - pub const fn as_network_id(&self) -> NetworkId { - match *self { - CustomNetworkId::Millau => NetworkId::Kusama, - CustomNetworkId::Rialto => NetworkId::Polkadot, - CustomNetworkId::RialtoParachain => NetworkId::Westend, - } - } -} - #[cfg(test)] mod tests { use crate::BridgeRuntimeFilterCall; diff --git a/bridges/bin/runtime-common/src/messages_xcm_extension.rs b/bridges/bin/runtime-common/src/messages_xcm_extension.rs index 77c23db3b2ba2d01aef0d2c45a20377eaf8ea129..53c0579c4cd0456b62fb6355af6d34bd492ac2b9 100644 --- a/bridges/bin/runtime-common/src/messages_xcm_extension.rs +++ b/bridges/bin/runtime-common/src/messages_xcm_extension.rs @@ -22,26 +22,23 @@ //! `XcmRouter` <- `MessageDispatch` <- `InboundMessageQueue` use bp_messages::{ - source_chain::{MessagesBridge, OnMessagesDelivered}, + source_chain::OnMessagesDelivered, target_chain::{DispatchMessage, MessageDispatch}, LaneId, MessageNonce, }; use bp_runtime::messages::MessageDispatchResult; +pub use bp_xcm_bridge_hub::XcmAsPlainPayload; use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; use codec::{Decode, Encode}; use frame_support::{traits::Get, weights::Weight, CloneNoBound, EqNoBound, PartialEqNoBound}; use pallet_bridge_messages::{ - Config as MessagesConfig, OutboundLanesCongestedSignals, Pallet as MessagesPallet, - WeightInfoExt as MessagesPalletWeights, + Config as MessagesConfig, OutboundLanesCongestedSignals, WeightInfoExt as MessagesPalletWeights, }; use scale_info::TypeInfo; use sp_runtime::SaturatedConversion; use sp_std::{fmt::Debug, marker::PhantomData}; use xcm::prelude::*; -use xcm_builder::{DispatchBlob, DispatchBlobError, HaulBlob, HaulBlobError}; - -/// Plain "XCM" payload, which we transfer through bridge -pub type XcmAsPlainPayload = sp_std::prelude::Vec<u8>; +use xcm_builder::{DispatchBlob, DispatchBlobError}; /// Message dispatch result type for single message #[derive(CloneNoBound, EqNoBound, PartialEqNoBound, Encode, Decode, Debug, TypeInfo)] @@ -123,6 +120,7 @@ impl< /// A pair of sending chain location and message lane, used by this chain to send messages /// over the bridge. +#[cfg_attr(feature = "std", derive(Debug, Eq, PartialEq))] pub struct SenderAndLane { /// Sending chain relative location. pub location: MultiLocation, @@ -144,8 +142,6 @@ pub trait XcmBlobHauler { type Runtime: MessagesConfig<Self::MessagesInstance>; /// Instance of the messages pallet that is used to send messages. type MessagesInstance: 'static; - /// Returns lane used by this hauler. - type SenderAndLane: Get<SenderAndLane>; /// Actual XCM message sender (`HRMP` or `UMP`) to the source chain /// location (`Self::SenderAndLane::get().location`). @@ -166,54 +162,25 @@ pub trait XcmBlobHauler { /// makes sure that XCM blob is sent to the outbound lane to be relayed. /// /// It needs to be used at the source bridge hub.
-pub struct XcmBlobHaulerAdapter<XcmBlobHauler>(sp_std::marker::PhantomData<XcmBlobHauler>); +pub struct XcmBlobHaulerAdapter<XcmBlobHauler, Lanes>( + sp_std::marker::PhantomData<(XcmBlobHauler, Lanes)>, +); -impl<H: XcmBlobHauler> HaulBlob for XcmBlobHaulerAdapter<H> -where - H::Runtime: MessagesConfig<H::MessagesInstance, OutboundPayload = XcmAsPlainPayload>, +impl< + H: XcmBlobHauler, + Lanes: Get<sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorMultiLocation))>>, + > OnMessagesDelivered for XcmBlobHaulerAdapter<H, Lanes> { - fn haul_blob(blob: sp_std::prelude::Vec<u8>) -> Result<(), HaulBlobError> { - let sender_and_lane = H::SenderAndLane::get(); - MessagesPallet::<H::Runtime, H::MessagesInstance>::send_message(sender_and_lane.lane, blob) - .map(|artifacts| { - log::info!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "haul_blob result - ok: {:?} on lane: {:?}. Enqueued messages: {}", - artifacts.nonce, - sender_and_lane.lane, - artifacts.enqueued_messages, - ); - - // notify XCM queue manager about updated lane state - LocalXcmQueueManager::<H>::on_bridge_message_enqueued( - &sender_and_lane, - artifacts.enqueued_messages, - ); - }) - .map_err(|error| { - log::error!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "haul_blob result - error: {:?} on lane: {:?}", - error, - sender_and_lane.lane, - ); - HaulBlobError::Transport("MessageSenderError") - }) - } -} - -impl<H: XcmBlobHauler> OnMessagesDelivered for XcmBlobHaulerAdapter<H> { fn on_messages_delivered(lane: LaneId, enqueued_messages: MessageNonce) { - let sender_and_lane = H::SenderAndLane::get(); - if sender_and_lane.lane != lane { - return + if let Some(sender_and_lane) = + Lanes::get().iter().find(|link| link.0.lane == lane).map(|link| &link.0) + { + // notify XCM queue manager about updated lane state + LocalXcmQueueManager::<H>::on_bridge_messages_delivered( + sender_and_lane, + enqueued_messages, + ); } - - // notify XCM queue manager about updated lane state - LocalXcmQueueManager::<H>::on_bridge_messages_delivered( - &sender_and_lane, - enqueued_messages, - ); } } @@ -342,6 +309,28 @@ impl<H: XcmBlobHauler> LocalXcmQueueManager<H> { } } +/// Adapter for the implementation of `GetVersion`, which attempts to find the minimal +/// configured XCM version between the destination `dest` and the bridge hub location provided as +/// `Get<MultiLocation>`.
+pub struct XcmVersionOfDestAndRemoteBridge<Version, RemoteBridge>( + sp_std::marker::PhantomData<(Version, RemoteBridge)>, +); +impl<Version: GetVersion, RemoteBridge: Get<MultiLocation>> GetVersion + for XcmVersionOfDestAndRemoteBridge<Version, RemoteBridge> +{ + fn get_version_for(dest: &MultiLocation) -> Option<XcmVersion> { + let dest_version = Version::get_version_for(dest); + let bridge_hub_version = Version::get_version_for(&RemoteBridge::get()); + + match (dest_version, bridge_hub_version) { + (Some(dv), Some(bhv)) => Some(sp_std::cmp::min(dv, bhv)), + (Some(dv), None) => Some(dv), + (None, Some(bhv)) => Some(bhv), + (None, None) => None, + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -356,6 +345,9 @@ mod tests { location: MultiLocation::new(1, X1(Parachain(1000))), lane: TEST_LANE_ID, }; + pub TestLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorMultiLocation))> = sp_std::vec![ + (TestSenderAndLane::get(), (NetworkId::ByGenesis([0; 32]), InteriorMultiLocation::Here)) + ]; pub DummyXcmMessage: Xcm<()> = Xcm::new(); } @@ -389,37 +381,44 @@ impl XcmBlobHauler for TestBlobHauler { type Runtime = TestRuntime; type MessagesInstance = (); - type SenderAndLane = TestSenderAndLane; type ToSourceChainSender = DummySendXcm; type CongestedMessage = DummyXcmMessage; type UncongestedMessage = DummyXcmMessage; } - type TestBlobHaulerAdapter = XcmBlobHaulerAdapter<TestBlobHauler>; + type TestBlobHaulerAdapter = XcmBlobHaulerAdapter<TestBlobHauler, TestLanes>; - fn fill_up_lane_to_congestion() { + fn fill_up_lane_to_congestion() -> MessageNonce { + let latest_generated_nonce = OUTBOUND_LANE_CONGESTED_THRESHOLD; OutboundLanes::<TestRuntime, ()>::insert( TEST_LANE_ID, OutboundLaneData { oldest_unpruned_nonce: 0, latest_received_nonce: 0, - latest_generated_nonce: OUTBOUND_LANE_CONGESTED_THRESHOLD, + latest_generated_nonce, }, ); + latest_generated_nonce } #[test] fn congested_signal_is_not_sent_twice() { run_test(|| { - fill_up_lane_to_congestion(); + let enqueued = fill_up_lane_to_congestion(); // next sent message leads to congested signal - TestBlobHaulerAdapter::haul_blob(vec![42]).unwrap(); + LocalXcmQueueManager::<TestBlobHauler>::on_bridge_message_enqueued( + &TestSenderAndLane::get(), + enqueued + 1, + ); assert_eq!(DummySendXcm::messages_sent(), 1); // next sent message => we don't sent another congested signal - TestBlobHaulerAdapter::haul_blob(vec![42]).unwrap(); + LocalXcmQueueManager::<TestBlobHauler>::on_bridge_message_enqueued( + &TestSenderAndLane::get(), + enqueued, + ); assert_eq!(DummySendXcm::messages_sent(), 1); }); } @@ -427,7 +426,10 @@ mod tests { #[test] fn congested_signal_is_not_sent_when_outbound_lane_is_not_congested() { run_test(|| { - TestBlobHaulerAdapter::haul_blob(vec![42]).unwrap(); + LocalXcmQueueManager::<TestBlobHauler>::on_bridge_message_enqueued( + &TestSenderAndLane::get(), + 1, + ); assert_eq!(DummySendXcm::messages_sent(), 0); }); } @@ -435,10 +437,13 @@ mod tests { #[test] fn congested_signal_is_sent_when_outbound_lane_is_congested() { run_test(|| { - fill_up_lane_to_congestion(); + let enqueued = fill_up_lane_to_congestion(); // next sent message leads to congested signal - TestBlobHaulerAdapter::haul_blob(vec![42]).unwrap(); + LocalXcmQueueManager::<TestBlobHauler>::on_bridge_message_enqueued( + &TestSenderAndLane::get(), + enqueued + 1, + ); assert_eq!(DummySendXcm::messages_sent(), 1); assert!(LocalXcmQueueManager::<TestBlobHauler>::is_congested_signal_sent(TEST_LANE_ID)); }); diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 67ae974668e7728cee08b73d0be59d46c7b3c47b..bd47d37fc07d0ce7ccce84547ed71599bc3a2641 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -14,12 -7
@@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! A mock runtime for testing different stuff in the crate. We've been using Millau -//! runtime for that before, but it has two drawbacks: -//! -//! - circular dependencies between this crate and Millau runtime; -//! -//! - we can't use (e.g. as git subtree or by copying) this crate in repo without Millau. +//! A mock runtime for testing different stuff in the crate. #![cfg(test)] @@ -44,13 +39,13 @@ use bp_runtime::{ }; use codec::{Decode, Encode}; use frame_support::{ - parameter_types, + derive_impl, parameter_types, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight}, }; use pallet_transaction_payment::Multiplier; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8, IdentityLookup}, + traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8}, FixedPointNumber, Perquintill, }; @@ -146,30 +141,14 @@ parameter_types! { pub const ReserveId: [u8; 8] = *b"brdgrlrs"; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Hash = ThisChainHash; type Hashing = ThisChainHasher; type AccountId = ThisChainAccountId; - type Lookup = IdentityLookup; type Block = ThisChainBlock; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type BlockHashCount = ConstU32<250>; } impl pallet_utility::Config for TestRuntime { @@ -179,21 +158,10 @@ impl pallet_utility::Config for TestRuntime { type WeightInfo = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type Balance = ThisChainBalance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - type MaxLocks = ConstU32<50>; - type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type AccountStore = System; } impl pallet_transaction_payment::Config for TestRuntime { @@ -408,8 +376,8 @@ impl ChainWithGrandpa for BridgedUnderlyingChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; - const MAX_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 64; + const MAX_MANDATORY_HEADER_SIZE: u32 = 256; + const AVERAGE_HEADER_SIZE: u32 = 64; } impl Chain for BridgedUnderlyingParachain { diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index fd10344812517bab17ad33e5d276a5627e01c0c0..a597fb9e2f49289360acfd7ee305b44eb7874a3e 100644 --- 
a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -27,6 +27,7 @@ use frame_support::traits::Get; use sp_runtime::transaction_validity::TransactionPriority; // reexport everything from `integrity_tests` module +#[allow(unused_imports)] pub use integrity_tests::*; /// Compute priority boost for message delivery transaction that delivers diff --git a/bridges/docs/high-level-overview.md b/bridges/docs/high-level-overview.md index 42efc8100bd080763c22ea3e4e813f3c3c87db37..d6d6fb3f0996dd15d4fc2987deacf79e4ecd4e5f 100644 --- a/bridges/docs/high-level-overview.md +++ b/bridges/docs/high-level-overview.md @@ -1,7 +1,7 @@ # High-Level Bridge Documentation This document gives a brief, abstract description of main components that may be found in this repository. If you want -to see how we're using them to build Rococo <> Wococo (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> +to see how we're using them to build Rococo <> Westend (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> Kusama Bridge](./polkadot-kusama-bridge-overview.md). ## Purpose diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index dbbe18febc618a0c8d16c6cd5edc5fd207a4a6ba..e346f2061e2e59d8cef9075ef164e6b203068a90 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -37,7 +40,7 @@ sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-runtime/std", diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index f238064f92bcacde89971479a501b60469f01691..c1585020be13ca710178b59aefde4a0cde2ab87a 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -15,7 +15,10 @@ // along with Parity Bridges Common. If not, see . use crate::{weights::WeightInfo, BridgedBlockNumber, BridgedHeader, Config, Error, Pallet}; -use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; +use bp_header_chain::{ + justification::GrandpaJustification, max_expected_submit_finality_proof_arguments_size, + ChainWithGrandpa, GrandpaConsensusLogReader, +}; use bp_runtime::{BlockNumberOf, OwnedBridgeModule}; use codec::Encode; use frame_support::{dispatch::CallableCallFor, traits::IsSubType, weights::Weight}; @@ -169,28 +172,28 @@ pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( Weight::zero() }; + // check if the `finality_target` is a mandatory header. 
If so, we are ready to refund larger + // size + let is_mandatory_finality_target = + GrandpaConsensusLogReader::>::find_scheduled_change( + finality_target.digest(), + ) + .is_some(); + // we can estimate extra call size easily, without any additional significant overhead let actual_call_size: u32 = finality_target .encoded_size() .saturating_add(justification.encoded_size()) .saturated_into(); - let max_expected_call_size = max_expected_call_size::(required_precommits); + let max_expected_call_size = max_expected_submit_finality_proof_arguments_size::( + is_mandatory_finality_target, + required_precommits, + ); let extra_size = actual_call_size.saturating_sub(max_expected_call_size); SubmitFinalityProofInfo { block_number, extra_weight, extra_size } } -/// Returns maximal expected size of `submit_finality_proof` call arguments. -fn max_expected_call_size, I: 'static>(required_precommits: u32) -> u32 { - let max_expected_justification_size = - GrandpaJustification::>::max_reasonable_size::( - required_precommits, - ); - - // call arguments are header and justification - T::BridgedChain::MAX_HEADER_SIZE.saturating_add(max_expected_justification_size) -} - #[cfg(test)] mod tests { use crate::{ diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index f88a0a3e6a6ee187222a6fb8eb1628e22b37abbe..a54f56c4a624951a84e65d8f3b593afa9f661fac 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -20,16 +20,9 @@ use bp_header_chain::ChainWithGrandpa; use bp_runtime::Chain; use frame_support::{ - construct_runtime, parameter_types, - traits::{ConstU32, ConstU64, Hooks}, - weights::Weight, + construct_runtime, derive_impl, parameter_types, traits::Hooks, weights::Weight, }; use sp_core::sr25519::Signature; -use sp_runtime::{ - testing::H256, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; pub type AccountId = u64; pub type TestHeader = sp_runtime::testing::Header; @@ -49,43 +42,14 @@ construct_runtime! { } } -parameter_types! { - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! 
{ pub const MaxFreeMandatoryHeadersPerBlock: u32 = 2; pub const HeadersToKeep: u32 = 5; - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; } impl grandpa::Config for TestRuntime { @@ -122,8 +86,8 @@ impl ChainWithGrandpa for TestBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = MAX_BRIDGED_AUTHORITIES; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; - const MAX_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 64; + const MAX_MANDATORY_HEADER_SIZE: u32 = 256; + const AVERAGE_HEADER_SIZE: u32 = 64; } /// Return test externalities to use in tests. diff --git a/bridges/modules/grandpa/src/weights.rs b/bridges/modules/grandpa/src/weights.rs index 89ed70d13ac3359f23fa6b756d3a8e07ff8b7092..a75e7b5a8e4ada8ce880a040492c904d8035642c 100644 --- a/bridges/modules/grandpa/src/weights.rs +++ b/bridges/modules/grandpa/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev @@ -58,39 +58,39 @@ pub trait WeightInfo { /// Those weights are test only and must never be used in production. pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoGrandpa PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa RequestCount (r:1 w:1) + /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: 499, - /// mode: MaxEncodedLen) + /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: + /// 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa BestFinalized (r:1 w:1) + /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: + /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: /// 531, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa CurrentAuthoritySet (r:1 w:0) + /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), + /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), /// added: 704, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashesPointer (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), + /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), /// added: 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashes (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), + /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), /// added: 2016, mode: 
MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:0 w:2) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 4]`. @@ -113,39 +113,39 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoGrandpa PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa RequestCount (r:1 w:1) + /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: 499, - /// mode: MaxEncodedLen) + /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: + /// 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa BestFinalized (r:1 w:1) + /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: + /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: /// 531, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa CurrentAuthoritySet (r:1 w:0) + /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), + /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), /// added: 704, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashesPointer (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), + /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), /// added: 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashes (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), + /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), /// added: 2016, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:0 w:2) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 4]`. 
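Note on the GRANDPA call-size refund change above: the old `max_expected_call_size` helper is replaced by `max_expected_submit_finality_proof_arguments_size`, which additionally receives an `is_mandatory_finality_target` flag (set when the finality target's digest schedules an authority-set change). The standalone sketch below only illustrates the intent of that split; the constant values mirror the test values from `mock.rs`, and the justification estimate is an assumed placeholder rather than the real `GrandpaJustification::max_reasonable_size` formula.

// Illustrative sketch only: constants mirror the mock.rs test values above,
// and the per-precommit size is an assumption, not the real SCALE encoding.
const MAX_MANDATORY_HEADER_SIZE: u32 = 256;
const AVERAGE_HEADER_SIZE: u32 = 64;

// Rough stand-in for `GrandpaJustification::max_reasonable_size`: each required
// precommit carries a vote plus a 64-byte signature (sizes assumed).
fn assumed_max_justification_size(required_precommits: u32) -> u32 {
    const ASSUMED_PRECOMMIT_SIZE: u32 = 104;
    required_precommits
        .saturating_mul(ASSUMED_PRECOMMIT_SIZE)
        .saturating_add(AVERAGE_HEADER_SIZE)
}

// Mandatory finality targets (headers that schedule an authority-set change) may
// legitimately be larger, so they get the bigger header allowance and the submitter
// is not charged `extra_size` for them.
fn max_expected_submit_finality_proof_arguments_size(
    is_mandatory_finality_target: bool,
    required_precommits: u32,
) -> u32 {
    let max_expected_header_size = if is_mandatory_finality_target {
        MAX_MANDATORY_HEADER_SIZE
    } else {
        AVERAGE_HEADER_SIZE
    };
    max_expected_header_size.saturating_add(assumed_max_justification_size(required_precommits))
}

fn main() {
    let regular = max_expected_submit_finality_proof_arguments_size(false, 5);
    let mandatory = max_expected_submit_finality_proof_arguments_size(true, 5);
    // A mandatory target gets a larger refundable allowance than a regular one.
    assert!(mandatory > regular);
    println!("regular: {regular}, mandatory: {mandatory}");
}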
diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index d3d68b338026360fe4156302c1d6f71ee06304e4..4d9371448df8a855db986095d77584c19559379c 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { version = "0.4.20", default-features = false } @@ -22,7 +25,6 @@ bp-runtime = { path = "../../primitives/runtime", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } @@ -32,7 +34,7 @@ pallet-balances = { path = "../../../substrate/frame/balances" } sp-io = { path = "../../../substrate/primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-runtime/std", @@ -43,7 +45,6 @@ std = [ "log/std", "num-traits/std", "scale-info/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", ] diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/mock.rs index e98f9e1f5def5789cf6f363fad29e9a1de9d1d7c..648acad772d7a04b2985f69d038d5c52634c4708 100644 --- a/bridges/modules/messages/src/mock.rs +++ b/bridges/modules/messages/src/mock.rs @@ -34,16 +34,11 @@ use bp_messages::{ use bp_runtime::{messages::MessageDispatchResult, Size}; use codec::{Decode, Encode}; use frame_support::{ - parameter_types, - traits::ConstU64, + derive_impl, parameter_types, weights::{constants::RocksDbWeight, Weight}, }; use scale_info::TypeInfo; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, Perbill, -}; +use sp_runtime::BuildStorage; use std::{ collections::{BTreeMap, VecDeque}, ops::RangeInclusive, @@ -84,56 +79,19 @@ frame_support::construct_runtime! { } } -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - pub type DbWeight = RocksDbWeight; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = (); - type ReserveIdentifier = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type ReserveIdentifier = [u8; 8]; + type AccountStore = System; } parameter_types! { diff --git a/bridges/modules/messages/src/weights.rs b/bridges/modules/messages/src/weights.rs index 5b6863984ec78c0e250c54a0ed3363d1278f196f..5bf7d56756079df8a5e469b9c50ba7607b65d983 100644 --- a/bridges/modules/messages/src/weights.rs +++ b/bridges/modules/messages/src/weights.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Autogenerated weights for RialtoMessages +//! Autogenerated weights for pallet_bridge_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-03-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -23,13 +23,13 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev // --steps=50 // --repeat=20 -// --pallet=RialtoMessages +// --pallet=pallet_bridge_messages // --extrinsic=* // --execution=wasm // --wasm-execution=Compiled @@ -48,7 +48,7 @@ use frame_support::{ }; use sp_std::marker::PhantomData; -/// Weight functions needed for RialtoMessages. +/// Weight functions needed for pallet_bridge_messages. pub trait WeightInfo { fn receive_single_message_proof() -> Weight; fn receive_two_messages_proof() -> Weight; @@ -61,24 +61,24 @@ pub trait WeightInfo { fn receive_single_message_proof_with_dispatch(i: u32) -> Weight; } -/// Weights for `RialtoMessages` that are generated using one of the Bridge testnets. +/// Weights for `pallet_bridge_messages` that are generated using one of the Bridge testnets. 
/// /// Those weights are test only and must never be used in production. pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: @@ -89,19 +89,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: @@ -112,19 +112,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 
w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: @@ -135,19 +135,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: @@ -158,19 +158,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: @@ -181,19 +181,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: 
MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -209,19 +209,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -237,19 +237,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) @@ -265,19 +265,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: 
BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// /// The range of component `i` is `[128, 2048]`. @@ -296,19 +296,19 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: @@ -319,19 +319,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: 
None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: @@ -342,19 +342,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: @@ -365,19 +365,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: @@ -388,19 +388,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: 
BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: @@ -411,19 +411,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -439,19 +439,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -467,19 +467,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages 
PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) @@ -495,19 +495,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// /// The range of component `i` is `[128, 2048]`. 
diff --git a/bridges/modules/messages/src/weights_ext.rs b/bridges/modules/messages/src/weights_ext.rs index aeb3a581a69ee6ebb233ca6ec5e0f0bd4d25a408..c12e04f692bf8304fb58d7c97ec50d1b860ccb56 100644 --- a/bridges/modules/messages/src/weights_ext.rs +++ b/bridges/modules/messages/src/weights_ext.rs @@ -60,7 +60,8 @@ pub fn ensure_weights_are_correct() { // W::receive_messages_delivery_proof_messages_overhead(1).ref_time() may be zero because: // there's no code that iterates over confirmed messages in confirmation transaction assert_eq!(W::receive_messages_delivery_proof_messages_overhead(1).proof_size(), 0); - assert_ne!(W::receive_messages_delivery_proof_relayers_overhead(1).ref_time(), 0); + // W::receive_messages_delivery_proof_relayers_overhead(1).ref_time() may be zero because: + // runtime **can** choose not to pay any rewards to relayers // W::receive_messages_delivery_proof_relayers_overhead(1).proof_size() is an exception // it may or may not cause additional db reads, so proof size may vary assert_ne!(W::storage_proof_size_overhead(1).ref_time(), 0); diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index 0d1b61ddea8022db5b9aaafee03624e3be3959d6..77a5366c78daedd368d9a4f412075ac803a89530 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { version = "0.4.20", default-features = false } @@ -35,7 +38,7 @@ sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-parachains/std", diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index 14afe38417105a789f9eb13ca913c1be91e6f789..1c7851364d1c047dae3e8e8213708ffc6db9a128 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -17,17 +17,18 @@ use bp_header_chain::ChainWithGrandpa; use bp_polkadot_core::parachains::ParaId; use bp_runtime::{Chain, Parachain}; -use frame_support::{construct_runtime, parameter_types, traits::ConstU32, weights::Weight}; +use frame_support::{ + construct_runtime, derive_impl, parameter_types, traits::ConstU32, weights::Weight, +}; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, Header as HeaderT, IdentityLookup}, - MultiSignature, Perbill, + traits::{BlakeTwo256, Header as HeaderT}, + MultiSignature, }; use crate as pallet_bridge_parachains; pub type AccountId = u64; -pub type TestNumber = u64; pub type RelayBlockHeader = sp_runtime::generic::Header; @@ -152,42 +153,12 @@ construct_runtime! { } } -parameter_types! 
{ - pub const BlockHashCount: TestNumber = 250; - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = RegularParachainHasher; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; pub const HeadersToKeep: u32 = 5; } @@ -281,8 +252,8 @@ impl ChainWithGrandpa for TestBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; - const MAX_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 64; + const MAX_MANDATORY_HEADER_SIZE: u32 = 256; + const AVERAGE_HEADER_SIZE: u32 = 64; } #[derive(Debug)] @@ -312,8 +283,8 @@ impl ChainWithGrandpa for OtherBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; - const MAX_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 64; + const MAX_MANDATORY_HEADER_SIZE: u32 = 256; + const AVERAGE_HEADER_SIZE: u32 = 64; } /// Return test externalities to use in tests. diff --git a/bridges/modules/parachains/src/weights.rs b/bridges/modules/parachains/src/weights.rs index 9182ec466117b2c642910148a1240a10ae438b8a..abddc8768947006e574bf6bca4d2301c2047199a 100644 --- a/bridges/modules/parachains/src/weights.rs +++ b/bridges/modules/parachains/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev @@ -60,29 +60,29 @@ pub trait WeightInfo { /// Those weights are test only and must never be used in production. 
pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. 
@@ -97,29 +97,29 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: @@ -130,29 +130,29 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: 
BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: @@ -167,29 +167,29 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. 
@@ -204,29 +204,29 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: @@ -237,29 +237,29 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: 
BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: diff --git a/bridges/modules/parachains/src/weights_ext.rs b/bridges/modules/parachains/src/weights_ext.rs index 13bc9ad2bbce0f26d5946cf8198e689597e5f968..393086a85690fcc2846c1708bc788e1d67a61d66 100644 --- a/bridges/modules/parachains/src/weights_ext.rs +++ b/bridges/modules/parachains/src/weights_ext.rs @@ -31,7 +31,7 @@ use frame_support::weights::{RuntimeDbWeight, Weight}; pub const DEFAULT_PARACHAIN_HEAD_SIZE: u32 = 384; /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// the Rialto chain. +/// some generic chain. pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; /// Extended weight info. diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 10b60c3006b0d657cf10dbc5a00b5a4db542d7f8..8c8305ef64c9f7e419aead17a9989063ec28d290 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { version = "0.4.20", default-features = false } @@ -30,12 +33,11 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false [dev-dependencies] bp-runtime = { path = "../../primitives/runtime" } pallet-balances = { path = "../../../substrate/frame/balances" } -sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } sp-runtime = { path = "../../../substrate/primitives/runtime" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-relayers/std", diff --git a/bridges/modules/relayers/src/benchmarking.rs b/bridges/modules/relayers/src/benchmarking.rs index 2d74ab38f9dbd1711b62df5e6bebd697fda1b988..00c3814a4c38d9bf0f18b70c0eedc75c239b8ad0 100644 --- a/bridges/modules/relayers/src/benchmarking.rs +++ b/bridges/modules/relayers/src/benchmarking.rs @@ -104,7 +104,7 @@ benchmarks! { // create slash destination account let lane = LaneId([0, 0, 0, 0]); let slash_destination = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); - T::prepare_rewards_account(slash_destination.clone(), Zero::zero()); + T::prepare_rewards_account(slash_destination, Zero::zero()); }: { crate::Pallet::::slash_and_deregister(&relayer, slash_destination) } @@ -121,7 +121,7 @@ benchmarks! 
{ let account_params = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); }: { - crate::Pallet::::register_relayer_reward(account_params.clone(), &relayer, One::one()); + crate::Pallet::::register_relayer_reward(account_params, &relayer, One::one()); } verify { assert_eq!(RelayerRewards::::get(relayer, &account_params), Some(One::one())); diff --git a/bridges/modules/relayers/src/mock.rs b/bridges/modules/relayers/src/mock.rs index d19d47eec5cd7ece86ce9b8770d1b3ee304ac7f0..667b10e5c125ed74bad2aa7796756f372578c2ce 100644 --- a/bridges/modules/relayers/src/mock.rs +++ b/bridges/modules/relayers/src/mock.rs @@ -22,12 +22,10 @@ use bp_messages::LaneId; use bp_relayers::{ PayRewardFromAccount, PaymentProcedure, RewardsAccountOwner, RewardsAccountParams, }; -use frame_support::{parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, +use frame_support::{ + derive_impl, parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight, }; +use sp_runtime::BuildStorage; pub type AccountId = u64; pub type Balance = u64; @@ -61,47 +59,17 @@ parameter_types! { pub const Lease: BlockNumber = 8; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = ConstU32<1>; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type AccountStore = System; } impl pallet_bridge_relayers::Config for TestRuntime { diff --git a/bridges/modules/relayers/src/weights.rs b/bridges/modules/relayers/src/weights.rs index 2e064a3936df3a0661fc08af48be87520c04dba0..c2c065b0c0a270a254a60dccb62465d6c2fa4aa6 100644 --- a/bridges/modules/relayers/src/weights.rs +++ b/bridges/modules/relayers/src/weights.rs @@ -23,7 +23,7 @@ //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/rip-bridge-node // benchmark // pallet // --chain=dev diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 56b9139d7d5f6771dd0dd048ff7a791a7e099c4c..1d84f723ee9d490359c32bef05e9c9547abeb31d 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { version = "0.4.20", default-features = false } @@ -34,7 +37,7 @@ sp-io = { path = "../../../substrate/primitives/io" } sp-std = { path = "../../../substrate/primitives/std" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-xcm-bridge-hub-router/std", "codec/std", diff --git a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs index c4d1e3971e74777668b8bfa4dbcfdb88fbac3779..922e4bf94ba8a947f1fcc2f83db675f539cfc295 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs @@ -21,7 +21,7 @@ use crate::{Bridge, Call}; use bp_xcm_bridge_hub_router::{BridgeState, MINIMAL_DELIVERY_FEE_FACTOR}; -use frame_benchmarking::benchmarks_instance_pallet; +use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError}; use frame_support::traits::{EnsureOrigin, Get, Hooks, UnfilteredDispatchable}; use sp_runtime::traits::Zero; use xcm::prelude::*; @@ -37,11 +37,11 @@ pub trait Config: crate::Config { /// Returns destination which is valid for this router instance. /// (Needs to pass `T::Bridges`) /// Make sure that `SendXcm` will pass. - fn ensure_bridged_target_destination() -> MultiLocation { - MultiLocation::new( + fn ensure_bridged_target_destination() -> Result { + Ok(MultiLocation::new( Self::UniversalLocation::get().len() as u8, X1(GlobalConsensus(Self::BridgedNetworkId::get().unwrap())), - ) + )) } } @@ -61,7 +61,7 @@ benchmarks_instance_pallet! { delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, }); - let _ = T::ensure_bridged_target_destination(); + let _ = T::ensure_bridged_target_destination()?; T::make_congested(); }: { crate::Pallet::::on_initialize(Zero::zero()) @@ -81,7 +81,7 @@ benchmarks_instance_pallet! { } send_message { - let dest = T::ensure_bridged_target_destination(); + let dest = T::ensure_bridged_target_destination()?; let xcm = sp_std::vec![].into(); // make local queue congested, because it means additional db write diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs index cf51ef82412fbeb99ed9b1b77b693ebe3a288dd4..229628aedcb8a67f1bc8b652a55fc967f106dbe0 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs @@ -89,6 +89,8 @@ pub mod pallet { /// **possible fee**. Allows to externalize better control over allowed **bridged /// networks/locations**. type Bridges: ExporterFor; + /// Checks the XCM version for the destination. + type DestinationVersion: GetVersion; /// Origin of the sibling bridge hub that is allowed to report bridge status. 
type BridgeHubOrigin: EnsureOrigin; @@ -319,12 +321,13 @@ impl, I: 'static> SendXcm for Pallet { dest: &mut Option, xcm: &mut Option>, ) -> SendResult { - // we won't have an access to `dest` and `xcm` in the `delvier` method, so precompute + // `dest` and `xcm` are required here + let dest_ref = dest.as_ref().ok_or(SendError::MissingArgument)?; + let xcm_ref = xcm.as_ref().ok_or(SendError::MissingArgument)?; + + // we won't have an access to `dest` and `xcm` in the `deliver` method, so precompute // everything required here - let message_size = xcm - .as_ref() - .map(|xcm| xcm.encoded_size() as _) - .ok_or(SendError::MissingArgument)?; + let message_size = xcm_ref.encoded_size() as _; // bridge doesn't support oversized/overweight messages now. So it is better to drop such // messages here than at the bridge hub. Let's check the message size. @@ -332,6 +335,18 @@ impl, I: 'static> SendXcm for Pallet { return Err(SendError::ExceedsMaxMessageSize) } + // We need to ensure that the known `dest`'s XCM version can comprehend the current `xcm` + // program. This may seem like an additional, unnecessary check, but it is not. A similar + // check is probably performed by the `ViaBridgeHubExporter`, which attempts to send a + // versioned message to the sibling bridge hub. However, the local bridge hub may have a + // higher XCM version than the remote `dest`. Once again, it is better to discard such + // messages here than at the bridge hub (e.g., to avoid losing funds). + let destination_version = T::DestinationVersion::get_version_for(dest_ref) + .ok_or(SendError::DestinationUnsupported)?; + let _ = VersionedXcm::from(xcm_ref.clone()) + .into_version(destination_version) + .map_err(|()| SendError::DestinationUnsupported)?; + // just use exporter to validate destination and insert instructions to pay message fee // at the sibling/child bridge hub // @@ -358,6 +373,7 @@ impl, I: 'static> SendXcm for Pallet { #[cfg(test)] mod tests { use super::*; + use frame_support::assert_ok; use mock::*; use frame_support::traits::Hooks; @@ -451,6 +467,19 @@ mod tests { }); } + #[test] + fn destination_unsupported_if_wrap_version_fails() { + run_test(|| { + assert_eq!( + send_xcm::( + UnknownXcmVersionLocation::get(), + vec![ClearOrigin].into(), + ), + Err(SendError::DestinationUnsupported), + ); + }); + } + #[test] fn returns_proper_delivery_price() { run_test(|| { @@ -488,17 +517,14 @@ mod tests { fn sent_message_doesnt_increase_factor_if_xcm_channel_is_uncongested() { run_test(|| { let old_bridge = XcmBridgeHubRouter::bridge(); - assert_eq!( - send_xcm::( - MultiLocation::new( - 2, - X2(GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)) - ), - vec![ClearOrigin].into(), - ) - .map(drop), - Ok(()), - ); + assert_ok!(send_xcm::( + MultiLocation::new( + 2, + X2(GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)) + ), + vec![ClearOrigin].into(), + ) + .map(drop)); assert!(TestToBridgeHubSender::is_message_sent()); assert_eq!(old_bridge, XcmBridgeHubRouter::bridge()); @@ -511,17 +537,14 @@ mod tests { TestWithBridgeHubChannel::make_congested(); let old_bridge = XcmBridgeHubRouter::bridge(); - assert_eq!( - send_xcm::( - MultiLocation::new( - 2, - X2(GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)) - ), - vec![ClearOrigin].into(), - ) - .map(drop), - Ok(()), - ); + assert_ok!(send_xcm::( + MultiLocation::new( + 2, + X2(GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)) + ), + vec![ClearOrigin].into(), + ) + .map(drop)); assert!(TestToBridgeHubSender::is_message_sent()); assert!( @@ 
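// A self-contained sketch of the version gate that `validate` now applies: look up the
// XCM version known for the destination and try to re-encode the message into it, refusing
// the send if either step fails. `FixedVersion` is a made-up checker standing in for
// `T::DestinationVersion`.
use xcm::{prelude::*, GetVersion, VersionedXcm};

struct FixedVersion;
impl GetVersion for FixedVersion {
	fn get_version_for(_dest: &MultiLocation) -> Option<XcmVersion> {
		Some(2) // pretend the destination only understands XCM v2
	}
}

fn ensure_destination_can_decode(dest: &MultiLocation, xcm: Xcm<()>) -> Result<(), SendError> {
	let version = FixedVersion::get_version_for(dest).ok_or(SendError::DestinationUnsupported)?;
	VersionedXcm::from(xcm)
		.into_version(version)
		.map(drop)
		.map_err(|()| SendError::DestinationUnsupported)
}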
-536,17 +559,14 @@ mod tests { Bridge::::put(congested_bridge(MINIMAL_DELIVERY_FEE_FACTOR)); let old_bridge = XcmBridgeHubRouter::bridge(); - assert_eq!( - send_xcm::( - MultiLocation::new( - 2, - X2(GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)) - ), - vec![ClearOrigin].into(), - ) - .map(drop), - Ok(()), - ); + assert_ok!(send_xcm::( + MultiLocation::new( + 2, + X2(GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)) + ), + vec![ClearOrigin].into(), + ) + .map(drop)); assert!(TestToBridgeHubSender::is_message_sent()); assert!( diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index 2152b4eb28f336d86fcd531077c8ea65a3138fe5..9079f4b9c4c64e980f5be66e9cd99fe8dd7e20fa 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -19,13 +19,12 @@ use crate as pallet_xcm_bridge_hub_router; use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; -use frame_support::{construct_runtime, parameter_types}; -use frame_system::EnsureRoot; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU128, IdentityLookup}, - BuildStorage, +use frame_support::{ + construct_runtime, derive_impl, parameter_types, + traits::{Contains, Equals}, }; +use frame_system::EnsureRoot; +use sp_runtime::{traits::ConstU128, BuildStorage}; use xcm::prelude::*; use xcm_builder::{NetworkExportTable, NetworkExportTableItem}; @@ -62,32 +61,12 @@ parameter_types! { Some((BridgeFeeAsset::get(), BASE_FEE).into()) ) ]; + pub UnknownXcmVersionLocation: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(BridgedNetworkId::get()), Parachain(9999))); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { @@ -96,6 +75,8 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { type UniversalLocation = UniversalLocation; type BridgedNetworkId = BridgedNetworkId; type Bridges = NetworkExportTable; + type DestinationVersion = + LatestOrNoneForLocationVersionChecker>; type BridgeHubOrigin = EnsureRoot; type ToBridgeHubSender = TestToBridgeHubSender; @@ -105,6 +86,18 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { type FeeAsset = BridgeFeeAsset; } +pub struct LatestOrNoneForLocationVersionChecker(sp_std::marker::PhantomData); +impl> GetVersion + for LatestOrNoneForLocationVersionChecker +{ + fn get_version_for(dest: &MultiLocation) -> Option { + if Location::contains(dest) { + return None + } + Some(XCM_VERSION) + } +} + pub struct TestToBridgeHubSender; impl TestToBridgeHubSender { diff --git a/bridges/modules/xcm-bridge-hub-router/src/weights.rs 
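// The generic argument of the checker above is dropped by this rendering; it is presumably
// used in the router config as `LatestOrNoneForLocationVersionChecker<Equals<UnknownXcmVersionLocation>>`.
// A small test-style sketch of its behaviour, assuming the mock's parameter types:
#[test]
fn version_checker_only_rejects_the_unknown_location() {
	type Checker = LatestOrNoneForLocationVersionChecker<Equals<UnknownXcmVersionLocation>>;
	assert_eq!(Checker::get_version_for(&UnknownXcmVersionLocation::get()), None);
	assert_eq!(
		Checker::get_version_for(&MultiLocation::new(
			2,
			X2(GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)),
		)),
		Some(XCM_VERSION),
	);
}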
b/bridges/modules/xcm-bridge-hub-router/src/weights.rs index 62936e997f3cd0aaea17859b49d7c0edf6b65989..b0c8fc6252cd5e6eaa968cce06636a308e1c7e05 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/weights.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/rip-bridge-node // benchmark // pallet // --chain=dev diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..061d4b7ced881d6ac7638fdcc0e56bccb08a7be3 --- /dev/null +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -0,0 +1,77 @@ +[package] +name = "pallet-xcm-bridge-hub" +description = "Module that adds dynamic bridges/lanes support to XCM infrastructure at the bridge hub." +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +log = { version = "0.4.20", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } + +# Bridge Dependencies +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-xcm-bridge-hub = { path = "../../primitives/xcm-bridge-hub", default-features = false } +pallet-bridge-messages = { path = "../messages", default-features = false } +bridge-runtime-common = { path = "../../bin/runtime-common", default-features = false } + +# Substrate Dependencies +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } + +# Polkadot Dependencies +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } + +[dev-dependencies] +bp-header-chain = { path = "../../primitives/header-chain" } +pallet-balances = { path = "../../../substrate/frame/balances" } +sp-io = { path = "../../../substrate/primitives/io" } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-runtime/std", + "bp-xcm-bridge-hub/std", + "bridge-runtime-common/std", + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-bridge-messages/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "bridge-runtime-common/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-bridge-messages/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +try-runtime = [
"frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-bridge-messages/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/bridges/modules/xcm-bridge-hub/src/exporter.rs b/bridges/modules/xcm-bridge-hub/src/exporter.rs new file mode 100644 index 0000000000000000000000000000000000000000..5318b222c5452e05aded6151eb8a43b806c405cb --- /dev/null +++ b/bridges/modules/xcm-bridge-hub/src/exporter.rs @@ -0,0 +1,209 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The code that allows to use the pallet (`pallet-xcm-bridge-hub`) as XCM message +//! exporter at the sending bridge hub. Internally, it just enqueues outbound blob +//! in the messages pallet queue. +//! +//! This code is executed at the source bridge hub. + +use crate::{Config, Pallet, LOG_TARGET}; + +use bp_messages::source_chain::MessagesBridge; +use bp_xcm_bridge_hub::XcmAsPlainPayload; +use bridge_runtime_common::messages_xcm_extension::{LocalXcmQueueManager, SenderAndLane}; +use pallet_bridge_messages::{Config as BridgeMessagesConfig, Pallet as BridgeMessagesPallet}; +use xcm::prelude::*; +use xcm_builder::{HaulBlob, HaulBlobError, HaulBlobExporter}; +use xcm_executor::traits::ExportXcm; + +/// An easy way to access `HaulBlobExporter`. +pub type PalletAsHaulBlobExporter = HaulBlobExporter< + DummyHaulBlob, + >::BridgedNetwork, + >::DestinationVersion, + >::MessageExportPrice, +>; +/// An easy way to access associated messages pallet. +type MessagesPallet = BridgeMessagesPallet>::BridgeMessagesPalletInstance>; + +impl, I: 'static> ExportXcm for Pallet +where + T: BridgeMessagesConfig< + >::BridgeMessagesPalletInstance, + OutboundPayload = XcmAsPlainPayload, + >, +{ + type Ticket = (SenderAndLane, XcmAsPlainPayload, XcmHash); + + fn validate( + network: NetworkId, + channel: u32, + universal_source: &mut Option, + destination: &mut Option, + message: &mut Option>, + ) -> Result<(Self::Ticket, MultiAssets), SendError> { + // Find supported lane_id. + let sender_and_lane = Self::lane_for( + universal_source.as_ref().ok_or(SendError::MissingArgument)?, + (&network, destination.as_ref().ok_or(SendError::MissingArgument)?), + ) + .ok_or(SendError::NotApplicable)?; + + // check if we are able to route the message. We use existing `HaulBlobExporter` for that. 
+ // It will make all required changes and will encode message properly, so that the + // `DispatchBlob` at the bridged bridge hub will be able to decode it + let ((blob, id), price) = PalletAsHaulBlobExporter::::validate( + network, + channel, + universal_source, + destination, + message, + )?; + + Ok(((sender_and_lane, blob, id), price)) + } + + fn deliver( + (sender_and_lane, blob, id): (SenderAndLane, XcmAsPlainPayload, XcmHash), + ) -> Result { + let lane_id = sender_and_lane.lane; + let send_result = MessagesPallet::::send_message(lane_id, blob); + + match send_result { + Ok(artifacts) => { + log::info!( + target: LOG_TARGET, + "XCM message {:?} has been enqueued at bridge {:?} with nonce {}", + id, + lane_id, + artifacts.nonce, + ); + + // notify XCM queue manager about updated lane state + LocalXcmQueueManager::::on_bridge_message_enqueued( + &sender_and_lane, + artifacts.enqueued_messages, + ); + }, + Err(error) => { + log::debug!( + target: LOG_TARGET, + "XCM message {:?} has been dropped because of bridge error {:?} on bridge {:?}", + id, + error, + lane_id, + ); + return Err(SendError::Transport("BridgeSendError")) + }, + } + + Ok(id) + } +} + +/// Dummy implementation of the `HaulBlob` trait that is never called. +/// +/// We are using `HaulBlobExporter`, which requires `HaulBlob` implementation. It assumes that +/// there's a single channel between two bridge hubs - `HaulBlob` only accepts the blob and nothing +/// else. But bridge messages pallet may have a dedicated channel (lane) for every pair of bridged +/// chains. So we are using our own `ExportXcm` implementation, but to utilize `HaulBlobExporter` we +/// still need this `DummyHaulBlob`. +pub struct DummyHaulBlob; + +impl HaulBlob for DummyHaulBlob { + fn haul_blob(_blob: XcmAsPlainPayload) -> Result<(), HaulBlobError> { + Err(HaulBlobError::Transport("DummyHaulBlob")) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::*; + use frame_support::assert_ok; + use xcm_executor::traits::export_xcm; + + fn universal_source() -> InteriorMultiLocation { + X2(GlobalConsensus(RelayNetwork::get()), Parachain(SIBLING_ASSET_HUB_ID)) + } + + fn universal_destination() -> InteriorMultiLocation { + BridgedDestination::get() + } + + #[test] + fn export_works() { + run_test(|| { + assert_ok!(export_xcm::( + BridgedRelayNetwork::get(), + 0, + universal_source(), + universal_destination(), + vec![Instruction::ClearOrigin].into(), + )); + }) + } + + #[test] + fn export_fails_if_argument_is_missing() { + run_test(|| { + assert_eq!( + XcmOverBridge::validate( + BridgedRelayNetwork::get(), + 0, + &mut None, + &mut Some(universal_destination()), + &mut Some(Vec::new().into()), + ), + Err(SendError::MissingArgument), + ); + + assert_eq!( + XcmOverBridge::validate( + BridgedRelayNetwork::get(), + 0, + &mut Some(universal_source()), + &mut None, + &mut Some(Vec::new().into()), + ), + Err(SendError::MissingArgument), + ); + }) + } + + #[test] + fn exporter_computes_correct_lane_id() { + run_test(|| { + let expected_lane_id = TEST_LANE_ID; + + assert_eq!( + XcmOverBridge::validate( + BridgedRelayNetwork::get(), + 0, + &mut Some(universal_source()), + &mut Some(universal_destination()), + &mut Some(Vec::new().into()), + ) + .unwrap() + .0 + .0 + .lane, + expected_lane_id, + ); + }) + } +} diff --git a/bridges/modules/xcm-bridge-hub/src/lib.rs b/bridges/modules/xcm-bridge-hub/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..44f6903b018b839fa3f4c97a0ba2c84c7d239c89 --- /dev/null +++ 
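// Why `DummyHaulBlob` exists, shown on the trait itself: `HaulBlob` receives nothing but the
// encoded blob, so a per-lane bridge cannot tell it which lane to use. A sketch of an
// ordinary (hypothetical) implementation, to contrast with the pallet's `deliver`, which
// routes through the messages pallet instead:
use xcm_builder::{HaulBlob, HaulBlobError};

struct LoggingHaulBlob;
impl HaulBlob for LoggingHaulBlob {
	fn haul_blob(blob: Vec<u8>) -> Result<(), HaulBlobError> {
		// No lane parameter here, only the blob; that is exactly the limitation the pallet
		// works around by sending through `pallet-bridge-messages` in `deliver`.
		log::trace!("hauling {} bytes", blob.len());
		Ok(())
	}
}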
b/bridges/modules/xcm-bridge-hub/src/lib.rs @@ -0,0 +1,118 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Module that adds XCM support to bridge pallets. + +#![warn(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +use bridge_runtime_common::messages_xcm_extension::XcmBlobHauler; +use pallet_bridge_messages::Config as BridgeMessagesConfig; +use xcm::prelude::*; + +pub use exporter::PalletAsHaulBlobExporter; +pub use pallet::*; + +mod exporter; +mod mock; + +/// The target that will be used when publishing logs related to this pallet. +pub const LOG_TARGET: &str = "runtime::bridge-xcm"; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use bridge_runtime_common::messages_xcm_extension::SenderAndLane; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: + BridgeMessagesConfig + { + /// Runtime's universal location. + type UniversalLocation: Get; + // TODO: https://github.com/paritytech/parity-bridges-common/issues/1666 remove `ChainId` and + // replace it with the `NetworkId` - then we'll be able to use + // `T as pallet_bridge_messages::Config::BridgedChain::NetworkId` + /// Bridged network as relative location of bridged `GlobalConsensus`. + #[pallet::constant] + type BridgedNetwork: Get; + /// Associated messages pallet instance that bridges us with the + /// `BridgedNetworkId` consensus. + type BridgeMessagesPalletInstance: 'static; + + /// Price of single message export to the bridged consensus (`Self::BridgedNetworkId`). + type MessageExportPrice: Get; + /// Checks the XCM version for the destination. + type DestinationVersion: GetVersion; + + /// Get point-to-point links with bridged consensus (`Self::BridgedNetworkId`). + /// (this will be replaced with dynamic on-chain bridges - `Bridges V2`) + type Lanes: Get>; + /// Support for point-to-point links + /// (this will be replaced with dynamic on-chain bridges - `Bridges V2`) + type LanesSupport: XcmBlobHauler; + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + fn integrity_test() { + assert!( + Self::bridged_network_id().is_some(), + "Configured `T::BridgedNetwork`: {:?} does not contain `GlobalConsensus` junction with `NetworkId`", + T::BridgedNetwork::get() + ) + } + } + + impl, I: 'static> Pallet { + /// Returns dedicated/configured lane identifier. + pub(crate) fn lane_for( + source: &InteriorMultiLocation, + dest: (&NetworkId, &InteriorMultiLocation), + ) -> Option { + let source = source.relative_to(&T::UniversalLocation::get()); + + // Check that we have configured a point-to-point lane for 'source' and `dest`. 
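// The matching rule used below, distilled into a free-standing helper (a sketch, not the
// pallet method itself; it skips the extra `bridged_network_id` consistency check):
use bp_messages::LaneId;
use bridge_runtime_common::messages_xcm_extension::SenderAndLane;
use xcm::prelude::*;

fn resolve_lane(
	universal_location: InteriorMultiLocation,
	lanes: Vec<(SenderAndLane, (NetworkId, InteriorMultiLocation))>,
	source: InteriorMultiLocation,
	dest: (NetworkId, InteriorMultiLocation),
) -> Option<LaneId> {
	// Configured sender locations are relative to this chain, so first express the universal
	// `source` in the same frame of reference.
	let source = source.relative_to(&universal_location);
	lanes.into_iter().find_map(|(lane_source, (lane_dest_network, lane_dest))| {
		(lane_source.location == source && lane_dest_network == dest.0 && lane_dest == dest.1)
			.then_some(lane_source.lane)
	})
}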
+ T::Lanes::get() + .into_iter() + .find_map(|(lane_source, (lane_dest_network, lane_dest))| { + if lane_source.location == source && + &lane_dest_network == dest.0 && + Self::bridged_network_id().as_ref() == Some(dest.0) && + &lane_dest == dest.1 + { + Some(lane_source) + } else { + None + } + }) + } + + /// Returns some `NetworkId` if contains `GlobalConsensus` junction. + fn bridged_network_id() -> Option { + match T::BridgedNetwork::get().take_first_interior() { + Some(GlobalConsensus(network)) => Some(network), + _ => None, + } + } + } +} diff --git a/bridges/modules/xcm-bridge-hub/src/mock.rs b/bridges/modules/xcm-bridge-hub/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..8edd4b1f7aa9b7890305302093d462785a64a6f8 --- /dev/null +++ b/bridges/modules/xcm-bridge-hub/src/mock.rs @@ -0,0 +1,334 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg(test)] + +use crate as pallet_xcm_bridge_hub; + +use bp_messages::{ + source_chain::LaneMessageVerifier, + target_chain::{DispatchMessage, MessageDispatch}, + LaneId, OutboundLaneData, VerificationError, +}; +use bp_runtime::{messages::MessageDispatchResult, Chain, UnderlyingChainProvider}; +use bridge_runtime_common::{ + messages::{ + source::TargetHeaderChainAdapter, target::SourceHeaderChainAdapter, + BridgedChainWithMessages, HashOf, MessageBridge, ThisChainWithMessages, + }, + messages_xcm_extension::{SenderAndLane, XcmBlobHauler}, +}; +use codec::Encode; +use frame_support::{derive_impl, parameter_types, traits::ConstU32, weights::RuntimeDbWeight}; +use sp_core::H256; +use sp_runtime::{ + testing::Header as SubstrateHeader, + traits::{BlakeTwo256, IdentityLookup}, + AccountId32, BuildStorage, +}; +use xcm::prelude::*; + +pub type AccountId = AccountId32; +pub type Balance = u64; + +type Block = frame_system::mocking::MockBlock; + +pub const SIBLING_ASSET_HUB_ID: u32 = 2001; +pub const THIS_BRIDGE_HUB_ID: u32 = 2002; +pub const BRIDGED_ASSET_HUB_ID: u32 = 1001; +pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 1]); + +frame_support::construct_runtime! { + pub enum TestRuntime { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Event}, + Messages: pallet_bridge_messages::{Pallet, Call, Event}, + XcmOverBridge: pallet_xcm_bridge_hub::{Pallet}, + } +} + +parameter_types! 
{ + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 }; + pub const ExistentialDeposit: Balance = 1; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for TestRuntime { + type AccountId = AccountId; + type AccountData = pallet_balances::AccountData; + type Block = Block; + type Lookup = IdentityLookup; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for TestRuntime { + type AccountStore = System; +} + +/// Lane message verifier that is used in tests. +#[derive(Debug, Default)] +pub struct TestLaneMessageVerifier; + +impl LaneMessageVerifier> for TestLaneMessageVerifier { + fn verify_message( + _lane: &LaneId, + _lane_outbound_data: &OutboundLaneData, + _payload: &Vec, + ) -> Result<(), VerificationError> { + Ok(()) + } +} + +parameter_types! { + pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID]; +} + +impl pallet_bridge_messages::Config for TestRuntime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = TestMessagesWeights; + + type BridgedChainId = (); + type ActiveOutboundLanes = ActiveOutboundLanes; + type MaxUnrewardedRelayerEntriesAtInboundLane = (); + type MaxUnconfirmedMessagesAtInboundLane = (); + type MaximalOutboundPayloadSize = ConstU32<2048>; + type OutboundPayload = Vec; + type InboundPayload = Vec; + type InboundRelayer = (); + type DeliveryPayments = (); + type TargetHeaderChain = TargetHeaderChainAdapter; + type LaneMessageVerifier = TestLaneMessageVerifier; + type DeliveryConfirmationPayments = (); + type OnMessagesDelivered = (); + type SourceHeaderChain = SourceHeaderChainAdapter; + type MessageDispatch = TestMessageDispatch; +} + +pub struct TestMessagesWeights; + +impl pallet_bridge_messages::WeightInfo for TestMessagesWeights { + fn receive_single_message_proof() -> Weight { + Weight::zero() + } + fn receive_single_message_proof_with_outbound_lane_state() -> Weight { + Weight::zero() + } + fn receive_delivery_proof_for_single_message() -> Weight { + Weight::zero() + } + fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { + Weight::zero() + } + fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { + Weight::zero() + } + + fn receive_two_messages_proof() -> Weight { + Weight::zero() + } + + fn receive_single_message_proof_1_kb() -> Weight { + Weight::zero() + } + + fn receive_single_message_proof_16_kb() -> Weight { + Weight::zero() + } + + fn receive_single_message_proof_with_dispatch(_: u32) -> Weight { + Weight::from_parts(1, 0) + } +} + +impl pallet_bridge_messages::WeightInfoExt for TestMessagesWeights { + fn expected_extra_storage_proof_size() -> u32 { + 0 + } + + fn receive_messages_proof_overhead_from_runtime() -> Weight { + Weight::zero() + } + + fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { + Weight::zero() + } +} + +parameter_types! 
{ + pub const RelayNetwork: NetworkId = NetworkId::Kusama; + pub const BridgedRelayNetwork: NetworkId = NetworkId::Polkadot; + pub const BridgedRelayNetworkLocation: MultiLocation = MultiLocation { + parents: 1, + interior: X1(GlobalConsensus(BridgedRelayNetwork::get())) + }; + pub const NonBridgedRelayNetwork: NetworkId = NetworkId::Rococo; + pub const BridgeReserve: Balance = 100_000; + pub UniversalLocation: InteriorMultiLocation = X2( + GlobalConsensus(RelayNetwork::get()), + Parachain(THIS_BRIDGE_HUB_ID), + ); + pub const Penalty: Balance = 1_000; +} + +impl pallet_xcm_bridge_hub::Config for TestRuntime { + type UniversalLocation = UniversalLocation; + type BridgedNetwork = BridgedRelayNetworkLocation; + type BridgeMessagesPalletInstance = (); + + type MessageExportPrice = (); + type DestinationVersion = AlwaysLatest; + + type Lanes = TestLanes; + type LanesSupport = TestXcmBlobHauler; +} + +parameter_types! { + pub TestSenderAndLane: SenderAndLane = SenderAndLane { + location: MultiLocation::new(1, X1(Parachain(SIBLING_ASSET_HUB_ID))), + lane: TEST_LANE_ID, + }; + pub const BridgedDestination: InteriorMultiLocation = X1( + Parachain(BRIDGED_ASSET_HUB_ID) + ); + pub TestLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorMultiLocation))> = sp_std::vec![ + (TestSenderAndLane::get(), (BridgedRelayNetwork::get(), BridgedDestination::get())) + ]; +} + +pub struct TestXcmBlobHauler; +impl XcmBlobHauler for TestXcmBlobHauler { + type Runtime = TestRuntime; + type MessagesInstance = (); + type ToSourceChainSender = (); + type CongestedMessage = (); + type UncongestedMessage = (); +} + +pub struct ThisChain; + +impl Chain for ThisChain { + type BlockNumber = u64; + type Hash = H256; + type Hasher = BlakeTwo256; + type Header = SubstrateHeader; + type AccountId = AccountId; + type Balance = Balance; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + + fn max_extrinsic_size() -> u32 { + u32::MAX + } + + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } +} + +pub struct BridgedChain; +pub type BridgedHeaderHash = H256; +pub type BridgedChainHeader = SubstrateHeader; + +impl Chain for BridgedChain { + type BlockNumber = u64; + type Hash = BridgedHeaderHash; + type Hasher = BlakeTwo256; + type Header = BridgedChainHeader; + type AccountId = AccountId; + type Balance = Balance; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + + fn max_extrinsic_size() -> u32 { + 4096 + } + + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } +} + +/// Test message dispatcher. 
+pub struct TestMessageDispatch; + +impl TestMessageDispatch { + pub fn deactivate(lane: LaneId) { + frame_support::storage::unhashed::put(&(b"inactive", lane).encode()[..], &false); + } +} + +impl MessageDispatch for TestMessageDispatch { + type DispatchPayload = Vec; + type DispatchLevelResult = (); + + fn is_active() -> bool { + frame_support::storage::unhashed::take::(&(b"inactive").encode()[..]) != Some(false) + } + + fn dispatch_weight(_message: &mut DispatchMessage) -> Weight { + Weight::zero() + } + + fn dispatch( + _: DispatchMessage, + ) -> MessageDispatchResult { + MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () } + } +} + +pub struct WrappedThisChain; +impl UnderlyingChainProvider for WrappedThisChain { + type Chain = ThisChain; +} +impl ThisChainWithMessages for WrappedThisChain { + type RuntimeOrigin = RuntimeOrigin; +} + +pub struct WrappedBridgedChain; +impl UnderlyingChainProvider for WrappedBridgedChain { + type Chain = BridgedChain; +} +impl BridgedChainWithMessages for WrappedBridgedChain {} + +pub struct BridgedHeaderChain; +impl bp_header_chain::HeaderChain for BridgedHeaderChain { + fn finalized_header_state_root( + _hash: HashOf, + ) -> Option> { + unreachable!() + } +} + +/// Bridge that is deployed on `ThisChain` and allows sending/receiving messages to/from +/// `BridgedChain`. +#[derive(Debug, PartialEq, Eq)] +pub struct OnThisChainBridge; + +impl MessageBridge for OnThisChainBridge { + const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; + + type ThisChain = WrappedThisChain; + type BridgedChain = WrappedBridgedChain; + type BridgedHeaderChain = BridgedHeaderChain; +} + +/// Run pallet test. +pub fn run_test(test: impl FnOnce() -> T) -> T { + sp_io::TestExternalities::new( + frame_system::GenesisConfig::::default().build_storage().unwrap(), + ) + .execute_with(test) +} diff --git a/bridges/primitives/chain-asset-hub-kusama/Cargo.toml b/bridges/primitives/chain-asset-hub-kusama/Cargo.toml deleted file mode 100644 index 3e53f9407ffcee75dee896f45565eea1757a7640..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-asset-hub-kusama/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "bp-asset-hub-kusama" -description = "Primitives of AssetHubKusama parachain runtime." -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", -] diff --git a/bridges/primitives/chain-asset-hub-kusama/src/lib.rs b/bridges/primitives/chain-asset-hub-kusama/src/lib.rs deleted file mode 100644 index 94016c1da0cb1235dec6717bd6da79ff4dab74f4..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-asset-hub-kusama/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubKusama runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubKusama` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubKusama` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubKusama` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToPolkadotXcmRouter` bridge pallet. - #[codec(index = 43)] - ToPolkadotXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); - - /// Base delivery fee to `BridgeHubKusama`. - /// (initially was calculated `170733333` + `10%` by test `BridgeHubKusama::can_calculate_weight_for_paid_export_message_with_reserve_transfer`) - pub const BridgeHubKusamaBaseFeeInDots: u128 = 187806666; -} diff --git a/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml b/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml deleted file mode 100644 index 9c1b1a1f326cd77daebca36ed64bbc496b673837..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "bp-asset-hub-polkadot" -description = "Primitives of AssetHubPolkadot parachain runtime." 
-version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", - "sp-runtime/std", -] diff --git a/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs b/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs deleted file mode 100644 index 486fba60e1f8836d0eec2feece7919205cf6c372..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubPolkadot runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubPolkadot` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubPolkadot` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubPolkadot` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToKusamaXcmRouter` bridge pallet. - #[codec(index = 43)] - ToKusamaXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); - - /// Base delivery fee to `BridgeHubPolkadot`. 
- /// (initially was calculated `51220000` + `10%` by test `BridgeHubPolkadot::can_calculate_weight_for_paid_export_message_with_reserve_transfer`) - pub const BridgeHubPolkadotBaseFeeInDots: u128 = 56342000; -} diff --git a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml b/bridges/primitives/chain-asset-hub-rococo/Cargo.toml index 088510adcec63b6d30c76b965100823c0d466827..d5f724e581fbf44c237b4634863f0d9c2437837b 100644 --- a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/primitives/chain-asset-hub-rococo/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } @@ -17,7 +20,7 @@ frame-support = { path = "../../../substrate/frame/support", default-features = bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-xcm-bridge-hub-router/std", "codec/std", diff --git a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs b/bridges/primitives/chain-asset-hub-rococo/src/lib.rs index 6216b24d75c907236d1f274073374d68dec94576..de2e9ae856d1f8756f0a2a6b9cae3da3e265e76e 100644 --- a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-asset-hub-rococo/src/lib.rs @@ -34,9 +34,6 @@ pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; #[allow(clippy::large_enum_variant)] #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum Call { - /// `ToWococoXcmRouter` bridge pallet. - #[codec(index = 43)] - ToWococoXcmRouter(XcmBridgeHubRouterCall), /// `ToWestendXcmRouter` bridge pallet. #[codec(index = 45)] ToWestendXcmRouter(XcmBridgeHubRouterCall), diff --git a/bridges/primitives/chain-asset-hub-westend/Cargo.toml b/bridges/primitives/chain-asset-hub-westend/Cargo.toml index c880f159ac1c1bca0f43c0beec4a06822f44b76f..d309e50bfbfeafbf0e32103695e004bd1bf0f7a8 100644 --- a/bridges/primitives/chain-asset-hub-westend/Cargo.toml +++ b/bridges/primitives/chain-asset-hub-westend/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } @@ -17,7 +20,7 @@ frame-support = { path = "../../../substrate/frame/support", default-features = bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-xcm-bridge-hub-router/std", "codec/std", diff --git a/bridges/primitives/chain-asset-hub-wococo/Cargo.toml b/bridges/primitives/chain-asset-hub-wococo/Cargo.toml deleted file mode 100644 index e1a5a262157afd77415ef928ba6bf0e61e070820..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-asset-hub-wococo/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "bp-asset-hub-wococo" -description = "Primitives of AssetHubWococo parachain runtime." 
-version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", -] diff --git a/bridges/primitives/chain-asset-hub-wococo/src/lib.rs b/bridges/primitives/chain-asset-hub-wococo/src/lib.rs deleted file mode 100644 index c04eb04cce70b105ca06e4ebc1075a9099d73d07..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-asset-hub-wococo/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubWococo runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubWococo` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubWococo` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubWococo` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToRococoXcmRouter` bridge pallet. - #[codec(index = 44)] - ToRococoXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); -} - -/// Identifier of AssetHubWococo in the Wococo relay chain. 
-pub const ASSET_HUB_WOCOCO_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml index 24cf7236d45333254dd62073dfebcd85ec526362..73aaa53269fee9ae6e50c240043cc125fcf44a74 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml @@ -1,11 +1,14 @@ [package] name = "bp-bridge-hub-cumulus" -description = "Primitives of BridgeHubRococo parachain runtime." +description = "Primitives for BridgeHub parachain runtimes." version = "0.1.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -24,7 +27,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml index 387f5e8ade6e7dc501159266a94168dd7055a8c1..ea09712ae304738ec1b468317ee5aa28eb1d0cca 100644 --- a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml @@ -1,11 +1,14 @@ [package] name = "bp-bridge-hub-kusama" -description = "Primitives of BridgeHubRococo parachain runtime." +description = "Primitives of BridgeHubKusama parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -21,7 +24,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-bridge-hub-cumulus/std", "bp-messages/std", diff --git a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml index 40b386e22d224da198043f44210a8c1de65af3ac..de208895fb4362c59705a6d1f9911f534d0d3669 100644 --- a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml @@ -1,11 +1,14 @@ [package] name = "bp-bridge-hub-polkadot" -description = "Primitives of BridgeHubWococo parachain runtime." +description = "Primitives of BridgeHubPolkadot parachain runtime." 
version = "0.1.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -22,7 +25,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-bridge-hub-cumulus/std", "bp-messages/std", diff --git a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml b/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml index 05b8163e9fcaacc8f32c0b45bfafffc882e0f1be..281e1f7426178c1c1924ca40e382ea63f7a75963 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -21,7 +24,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-bridge-hub-cumulus/std", "bp-messages/std", diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs index e72e711de92701a5cf6427d5a36c53142cd64693..f79b8a8afb32173a12f7b02e93f8df7060478d71 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs @@ -74,11 +74,10 @@ pub const WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME: &str = "BridgeRococoMessa /// chains. pub const WITH_BRIDGE_HUB_ROCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; -/// Pallet index of `BridgeWococoMessages: pallet_bridge_messages::`. -pub const WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX: u8 = 46; - /// Pallet index of `BridgeWestendMessages: pallet_bridge_messages::`. pub const WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX: u8 = 51; +/// Pallet index of `BridgePolkadotBulletinMessages: pallet_bridge_messages::`. +pub const WITH_BRIDGE_ROCOCO_TO_BULLETIN_MESSAGES_PALLET_INDEX: u8 = 61; decl_bridge_finality_runtime_apis!(bridge_hub_rococo); decl_bridge_messages_runtime_apis!(bridge_hub_rococo); @@ -87,13 +86,13 @@ frame_support::parameter_types! { /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Rococo /// BridgeHub. /// (initially was calculated by test `BridgeHubRococo::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) - pub const BridgeHubRococoBaseXcmFeeInRocs: u128 = 1628875538; + pub const BridgeHubRococoBaseXcmFeeInRocs: u128 = 1_640_102_205; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 6417262881; + pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 5_651_581_649; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. 
/// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 6159996668; + pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 4_045_736_577; } diff --git a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml b/bridges/primitives/chain-bridge-hub-westend/Cargo.toml index 22daf280868de14d5555631add5f57e0f3bbe6ad..beebfa8f1a04a8b46b0529218d0f18fb7b649f6e 100644 --- a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-westend/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -22,7 +25,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-bridge-hub-cumulus/std", "bp-messages/std", diff --git a/bridges/primitives/chain-bridge-hub-westend/src/lib.rs b/bridges/primitives/chain-bridge-hub-westend/src/lib.rs index 0124e05bf8871c35d68a64f00d9b34719b900c66..f4524f719f9fda3643a43a5c9509d989e4f4a777 100644 --- a/bridges/primitives/chain-bridge-hub-westend/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-westend/src/lib.rs @@ -78,13 +78,13 @@ frame_support::parameter_types! { /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Westend /// BridgeHub. /// (initially was calculated by test `BridgeHubWestend::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) - pub const BridgeHubWestendBaseXcmFeeInWnds: u128 = 488662666666; + pub const BridgeHubWestendBaseXcmFeeInWnds: u128 = 492_077_333_333; /// Transaction fee that is paid at the Westend BridgeHub for delivering single inbound message. /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 1925196628010; + pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 1_695_489_961_344; /// Transaction fee that is paid at the Westend BridgeHub for delivering single outbound message confirmation. /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 1848016628010; + pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 1_618_309_961_344; } diff --git a/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml b/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml deleted file mode 100644 index 17c134f4412f7d74a45e089c920bd260bcd95008..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "bp-bridge-hub-wococo" -description = "Primitives of BridgeHubWococo parachain runtime." 
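// The fee constants above are measured values bumped by a 33% safety margin, as their
// comments note. The arithmetic, spelled out with a made-up measured fee (not one of the
// real benchmarked numbers):
fn add_33_percent_margin(measured: u128) -> u128 {
	measured + measured * 33 / 100
}

#[test]
fn margin_math() {
	assert_eq!(add_33_percent_margin(3_000_000_000), 3_990_000_000);
}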
-version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } - -# Substrate Based Dependencies - -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-bridge-hub-cumulus/std", - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs deleted file mode 100644 index c8bd397cec561684350c30e407e55915f294204b..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects BridgeHubWococo runtime setup -//! (AccountId, Headers, Hashes...) - -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_bridge_hub_cumulus::*; -use bp_messages::*; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, Parachain, -}; -use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; - -/// BridgeHubWococo parachain. -#[derive(RuntimeDebug)] -pub struct BridgeHubWococo; - -impl Chain for BridgeHubWococo { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl Parachain for BridgeHubWococo { - const PARACHAIN_ID: u32 = BRIDGE_HUB_WOCOCO_PARACHAIN_ID; -} - -/// Identifier of BridgeHubWococo in the Wococo relay chain. -pub const BRIDGE_HUB_WOCOCO_PARACHAIN_ID: u32 = 1014; - -/// Name of the With-BridgeHubWococo messages pallet instance that is deployed at bridged chains. -pub const WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME: &str = "BridgeWococoMessages"; - -/// Name of the With-BridgeHubWococo bridge-relayers pallet instance that is deployed at bridged -/// chains. 
-pub const WITH_BRIDGE_HUB_WOCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; - -/// Pallet index of `BridgeRococoMessages: pallet_bridge_messages::`. -pub const WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX: u8 = 45; - -decl_bridge_finality_runtime_apis!(bridge_hub_wococo); -decl_bridge_messages_runtime_apis!(bridge_hub_wococo); - -frame_support::parameter_types! { - /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Wococo - /// BridgeHub. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) - pub const BridgeHubWococoBaseXcmFeeInWocs: u128 = 1624803349; - - /// Transaction fee that is paid at the Wococo BridgeHub for delivering single inbound message. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubWococoBaseDeliveryFeeInWocs: u128 = 6417262881; - - /// Transaction fee that is paid at the Wococo BridgeHub for delivering single outbound message confirmation. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubWococoBaseConfirmationFeeInWocs: u128 = 6159996668; -} diff --git a/bridges/primitives/chain-kusama/Cargo.toml b/bridges/primitives/chain-kusama/Cargo.toml index 2d63c3f374fb50c94e2572c09511bf20d63b61e4..6ca4f051f1c15d8794ed50626588a72093206038 100644 --- a/bridges/primitives/chain-kusama/Cargo.toml +++ b/bridges/primitives/chain-kusama/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -21,7 +24,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/primitives/chain-kusama/src/lib.rs index d5748aa132cea6caddeabd102b62345bbdc6153f..5f089fbc589f6de3921d30ea47e05aebc9762992 100644 --- a/bridges/primitives/chain-kusama/src/lib.rs +++ b/bridges/primitives/chain-kusama/src/lib.rs @@ -52,8 +52,8 @@ impl ChainWithGrandpa for Kusama { const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; + const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; + const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } // The SignedExtension used by Kusama. 
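Note on the updated `BridgeHub*Base*Fee*` constants earlier in this diff: their doc comments describe them as "value calculated by the named `can_calculate_*` test, plus 33%". The sketch below only illustrates that arithmetic; the helper name, the sample input, and the exact rounding (`fee * 133 / 100`) are assumptions for illustration, not tooling from this repository, which updates the constants by hand from test output.

```rust
/// Illustrative only: apply the `+ 33%` safety margin mentioned in the
/// `BridgeHub*Base*Fee*` comments. Assumes the margin is `fee * 133 / 100`.
fn with_33_percent_margin(measured_fee: u128) -> u128 {
    // Saturating multiply keeps the sketch safe for very large fee values.
    measured_fee.saturating_mul(133) / 100
}

fn main() {
    // A made-up measured fee of 3_000_000_000 plancks would become:
    assert_eq!(with_33_percent_margin(3_000_000_000), 3_990_000_000);
}
```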
diff --git a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml b/bridges/primitives/chain-polkadot-bulletin/Cargo.toml index 1dd45ba95fd84820fc44cb5392cdd24668c689ee..98633847462e4653a57cd1a282b74e43cc253511 100644 --- a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/primitives/chain-polkadot-bulletin/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } @@ -26,7 +29,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-messages/std", diff --git a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs b/bridges/primitives/chain-polkadot-bulletin/src/lib.rs index fcc6e90eb1b298e703b7c4b1a83c914f0a018031..fe82c9644b6735393ab55f6053e5d35d963d36d9 100644 --- a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/primitives/chain-polkadot-bulletin/src/lib.rs @@ -42,8 +42,8 @@ use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidi // This chain reuses most of Polkadot primitives. pub use bp_polkadot_core::{ AccountAddress, AccountId, Balance, Block, BlockNumber, Hash, Hasher, Header, Nonce, Signature, - SignedBlock, UncheckedExtrinsic, AVERAGE_HEADER_SIZE_IN_JUSTIFICATION, - EXTRA_STORAGE_PROOF_SIZE, MAX_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + SignedBlock, UncheckedExtrinsic, AVERAGE_HEADER_SIZE, EXTRA_STORAGE_PROOF_SIZE, + MAX_MANDATORY_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, }; /// Maximal number of GRANDPA authorities at Polkadot Bulletin chain. 
@@ -207,8 +207,8 @@ impl ChainWithGrandpa for PolkadotBulletin { const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; + const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; + const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } decl_bridge_finality_runtime_apis!(polkadot_bulletin, grandpa); diff --git a/bridges/primitives/chain-polkadot/Cargo.toml b/bridges/primitives/chain-polkadot/Cargo.toml index 539b10ef9c68f490756d4f7e0453b25c9b7fc5c7..361901b7ae09c121d27329b2f2a238dc61dd7f2e 100644 --- a/bridges/primitives/chain-polkadot/Cargo.toml +++ b/bridges/primitives/chain-polkadot/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -21,7 +24,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/primitives/chain-polkadot/src/lib.rs index 61c8ca927d807ac6c169bb30d324f9720118e010..9a5b8970accb2338db542c91300ca8568c79cd65 100644 --- a/bridges/primitives/chain-polkadot/src/lib.rs +++ b/bridges/primitives/chain-polkadot/src/lib.rs @@ -52,8 +52,8 @@ impl ChainWithGrandpa for Polkadot { const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; + const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; + const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } /// The SignedExtension used by Polkadot. 
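The same rename (`MAX_HEADER_SIZE` to `MAX_MANDATORY_HEADER_SIZE`, `AVERAGE_HEADER_SIZE_IN_JUSTIFICATION` to `AVERAGE_HEADER_SIZE`) is applied to every bundled chain definition in this diff (Kusama above, Polkadot Bulletin, Polkadot, and the Rococo/Westend files that follow). As a rough, self-contained sketch of why the split matters for refund sizing: only mandatory headers are budgeted at the maximal size, everything else uses the much smaller average size. The constants below mirror the Polkadot-like defaults introduced later in this diff and are not taken from any runtime.

```rust
// Rough sketch, not code from this diff: the per-header byte budget a refund
// calculation would use after the rename. Values mirror the Polkadot-like
// defaults (`MAX_MANDATORY_HEADER_SIZE = 120 * 1024`, `AVERAGE_HEADER_SIZE = 1024`).
const MAX_MANDATORY_HEADER_SIZE: u32 = 120 * 1024;
const AVERAGE_HEADER_SIZE: u32 = 1_024;

fn expected_header_size(is_mandatory: bool) -> u32 {
    if is_mandatory {
        MAX_MANDATORY_HEADER_SIZE
    } else {
        AVERAGE_HEADER_SIZE
    }
}

fn main() {
    // A mandatory header is budgeted at ~120x the average header, so a relayer
    // submitting an oversized non-mandatory header is only refunded against the
    // small average-size budget.
    assert_eq!(expected_header_size(true), 122_880);
    assert_eq!(expected_header_size(false), 1_024);
}
```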
diff --git a/bridges/primitives/chain-rococo/Cargo.toml b/bridges/primitives/chain-rococo/Cargo.toml index 469be1dbd336db0299df445989a117dafc4166f3..d59a00cfd147d4e5d3110e44cab906a72836a06a 100644 --- a/bridges/primitives/chain-rococo/Cargo.toml +++ b/bridges/primitives/chain-rococo/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -21,7 +24,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/primitives/chain-rococo/src/lib.rs index 5436ad846468cda632aab9ffcf46748f4c1546f8..7f3e762715f3283d83fbdc91b0e69704071b55ee 100644 --- a/bridges/primitives/chain-rococo/src/lib.rs +++ b/bridges/primitives/chain-rococo/src/lib.rs @@ -52,8 +52,8 @@ impl ChainWithGrandpa for Rococo { const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; + const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; + const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } parameter_types! { diff --git a/bridges/primitives/chain-westend/Cargo.toml b/bridges/primitives/chain-westend/Cargo.toml index 797621bbce2ea6a432082b148485140ea87865f5..6b6d2748aff7411d5247c8336aa2ac1251d11ea8 100644 --- a/bridges/primitives/chain-westend/Cargo.toml +++ b/bridges/primitives/chain-westend/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # Bridge Dependencies @@ -21,7 +24,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/primitives/chain-westend/src/lib.rs index 45c13d600601fae14f48bcfb61dda225682f300a..7fa5e140d5707eb761ae5408fae729de43c1827e 100644 --- a/bridges/primitives/chain-westend/src/lib.rs +++ b/bridges/primitives/chain-westend/src/lib.rs @@ -52,8 +52,8 @@ impl ChainWithGrandpa for Westend { const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; + const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; + const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } parameter_types! { diff --git a/bridges/primitives/chain-wococo/Cargo.toml b/bridges/primitives/chain-wococo/Cargo.toml deleted file mode 100644 index 05901821b366b71d40d9c28392150172be44a0b7..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-wococo/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "bp-wococo" -description = "Primitives of Wococo runtime." 
-version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-rococo = { path = "../chain-rococo", default-features = false } - -# Substrate Based Dependencies - -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-rococo/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-std/std", -] diff --git a/bridges/primitives/chain-wococo/src/lib.rs b/bridges/primitives/chain-wococo/src/lib.rs deleted file mode 100644 index b1df65630beffdaad80ce8f22ba357dbf766ea0f..0000000000000000000000000000000000000000 --- a/bridges/primitives/chain-wococo/src/lib.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -pub use bp_polkadot_core::*; -pub use bp_rococo::{ - SS58Prefix, MAX_AUTHORITIES_COUNT, MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE, PARAS_PALLET_NAME, -}; - -use bp_header_chain::ChainWithGrandpa; -use bp_runtime::{decl_bridge_finality_runtime_apis, Chain}; -use frame_support::weights::Weight; - -/// Wococo Chain -pub struct Wococo; - -impl Chain for Wococo { - type BlockNumber = ::BlockNumber; - type Hash = ::Hash; - type Hasher = ::Hasher; - type Header = ::Header; - - type AccountId = ::AccountId; - type Balance = ::Balance; - type Nonce = ::Nonce; - type Signature = ::Signature; - - fn max_extrinsic_size() -> u32 { - PolkadotLike::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - PolkadotLike::max_extrinsic_weight() - } -} - -impl ChainWithGrandpa for Wococo { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WOCOCO_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; -} - -// The SignedExtension used by Wococo. -pub use bp_rococo::CommonSignedExtension as SignedExtension; - -/// Name of the With-Wococo GRANDPA pallet instance that is deployed at bridged chains. 
-pub const WITH_WOCOCO_GRANDPA_PALLET_NAME: &str = "BridgeWococoGrandpa"; - -decl_bridge_finality_runtime_apis!(wococo, grandpa); diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index 19b2819bddce804f1db5e058871941db980c7bf5..7338996d69f2231f9f1d9c576a1a245263ef414a 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } @@ -30,7 +33,7 @@ hex = "0.4" hex-literal = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "bp-runtime/std", "codec/std", diff --git a/bridges/primitives/header-chain/src/justification/mod.rs b/bridges/primitives/header-chain/src/justification/mod.rs index 72a5f68918d9703babe1e9c263f9148c57df4340..b32d8bdb5f1d8ce05722c938a083d7f582139835 100644 --- a/bridges/primitives/header-chain/src/justification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/mod.rs @@ -82,8 +82,8 @@ impl GrandpaJustification { .saturating_add(BlockNumberOf::::max_encoded_len().saturated_into()) .saturating_add(HashOf::::max_encoded_len().saturated_into()); - let max_expected_votes_ancestries_size = C::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY - .saturating_mul(C::AVERAGE_HEADER_SIZE_IN_JUSTIFICATION); + let max_expected_votes_ancestries_size = + C::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY.saturating_mul(C::AVERAGE_HEADER_SIZE); // justification is round number (u64=8b), a signed GRANDPA commit and the // `votes_ancestries` vector diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index d2c7ec0759e884713cb37a871bbcc65cabd256af..1459b1c1994bcd867cb0bc4aaeeb3983d3102be8 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -266,23 +266,28 @@ pub trait ChainWithGrandpa: Chain { /// to submitter. const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32; - /// Maximal size of the chain header. The header may be the header that enacts new GRANDPA - /// authorities set (so it has large digest inside). + /// Maximal size of the mandatory chain header. Mandatory header is the header that enacts new + /// GRANDPA authorities set (so it has large digest inside). /// /// This isn't a strict limit. The relay may submit larger headers and the pallet will accept /// the call. The limit is only used to compute maximal refund amount and doing calls which /// exceed the limit, may be costly to submitter. - const MAX_HEADER_SIZE: u32; + const MAX_MANDATORY_HEADER_SIZE: u32; - /// Average size of the chain header from justification ancestry. We don't expect to see there - /// headers that change GRANDPA authorities set (GRANDPA will probably be able to finalize at - /// least one additional header per session on non test chains), so this is average size of - /// headers that aren't changing the set. + /// Average size of the chain header. We don't expect to see there headers that change GRANDPA + /// authorities set (GRANDPA will probably be able to finalize at least one additional header + /// per session on non test chains), so this is average size of headers that aren't changing the + /// set. /// - /// This isn't a strict limit. 
The relay may submit justifications with larger headers in its - /// ancestry and the pallet will accept the call. The limit is only used to compute maximal - /// refund amount and doing calls which exceed the limit, may be costly to submitter. - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32; + /// This isn't a strict limit. The relay may submit justifications with larger headers and the + /// pallet will accept the call. However, if the total size of all `submit_finality_proof` + /// arguments exceeds the maximal size, computed using this average size, relayer will only get + /// partial refund. + /// + /// We expect some headers on production chains that are above this size. But they are rare and + /// if relayer cares about its profitability, we expect it'll select other headers for + /// submission. + const AVERAGE_HEADER_SIZE: u32; } impl<T> ChainWithGrandpa for T where @@ -295,7 +300,67 @@ where const MAX_AUTHORITIES_COUNT: u32 = <T::Chain as ChainWithGrandpa>::MAX_AUTHORITIES_COUNT; const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = <T::Chain as ChainWithGrandpa>::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = <T::Chain as ChainWithGrandpa>::MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = - <T::Chain as ChainWithGrandpa>::AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; + const MAX_MANDATORY_HEADER_SIZE: u32 = + <T::Chain as ChainWithGrandpa>::MAX_MANDATORY_HEADER_SIZE; + const AVERAGE_HEADER_SIZE: u32 = <T::Chain as ChainWithGrandpa>::AVERAGE_HEADER_SIZE; +} + +/// Returns maximal expected size of `submit_finality_proof` call arguments. +pub fn max_expected_submit_finality_proof_arguments_size<C: ChainWithGrandpa>( + is_mandatory_finality_target: bool, + precommits: u32, +) -> u32 { + let max_expected_justification_size = + GrandpaJustification::<HeaderOf<C>>::max_reasonable_size::<C>(precommits); + + // call arguments are header and justification + let max_expected_finality_target_size = if is_mandatory_finality_target { + C::MAX_MANDATORY_HEADER_SIZE + } else { + C::AVERAGE_HEADER_SIZE + }; + max_expected_finality_target_size.saturating_add(max_expected_justification_size) +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::weights::Weight; + use sp_runtime::{testing::H256, traits::BlakeTwo256, MultiSignature}; + + struct TestChain; + + impl Chain for TestChain { + type BlockNumber = u32; + type Hash = H256; + type Hasher = BlakeTwo256; + type Header = sp_runtime::generic::Header<u32, BlakeTwo256>; + type AccountId = u64; + type Balance = u64; + type Nonce = u64; + type Signature = MultiSignature; + + fn max_extrinsic_size() -> u32 { + 0 + } + fn max_extrinsic_weight() -> Weight { + Weight::zero() + } + } + + impl ChainWithGrandpa for TestChain { + const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "Test"; + const MAX_AUTHORITIES_COUNT: u32 = 128; + const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; + const MAX_MANDATORY_HEADER_SIZE: u32 = 100_000; + const AVERAGE_HEADER_SIZE: u32 = 1_024; + } + + #[test] + fn max_expected_submit_finality_proof_arguments_size_respects_mandatory_argument() { + assert!( + max_expected_submit_finality_proof_arguments_size::<TestChain>(true, 100) > + max_expected_submit_finality_proof_arguments_size::<TestChain>(false, 100), + ); + } } diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 7a61643a0bc5d38524d1177310613101909739ac..6333000a71ae8c7e0817362c03da0d17fc5739e6 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -6,8 +6,11 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5",
default-features = false, features = ["derive", "bit-vec"] } +codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } @@ -27,7 +30,7 @@ hex = "0.4" hex-literal = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-runtime/std", diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index 11e9336f66af7dc8a5673fed37922a8aad35047f..99b447f6c0aa92d9613aa6241c672e3a63808c72 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2" @@ -25,7 +28,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index e2bd4c295225d0346ff321f01543bd9ccc249fd3..80382b3289faf94eed3adc58cb0500b4ee8d47be 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } @@ -29,7 +32,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false hex = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-runtime/std", diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index af39b5ab9babae2b2e6858bff83eaf8c29ef74bb..586cbf8cb9b47dffe66ea683306f41d31b7aa83a 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -64,30 +64,28 @@ pub const MAX_AUTHORITIES_COUNT: u32 = 1_256; /// /// See [`bp-header-chain::ChainWithGrandpa`] for more details. /// -/// This value comes from recent (February, 2023) Kusama and Polkadot headers. There are no +/// This value comes from recent (December, 2023) Kusama and Polkadot headers. There are no /// justifications with any additional headers in votes ancestry, so reasonable headers may /// be set to zero. But we assume that there may be small GRANDPA lags, so we're leaving some /// reserve here. pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; -/// Approximate average header size in `votes_ancestries` field of justification on Polkadot-like +/// Average header size in `votes_ancestries` field of justification on Polkadot-like /// chains. /// /// See [`bp-header-chain::ChainWithGrandpa`] for more details. /// -/// This value comes from recent (February, 2023) Kusama headers. 
Average is `336` there, but some -/// non-mandatory headers has size `40kb` (they contain the BABE epoch descriptor with all -/// authorities - just like our mandatory header). Since we assume `2` headers in justification -/// votes ancestry, let's set average header to `40kb / 2`. -pub const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 20 * 1024; +/// This value comes from recent (December, 2023) Kusama headers. Most of headers are `327` bytes +/// there, but let's have some reserve and make it 1024. +pub const AVERAGE_HEADER_SIZE: u32 = 1024; /// Approximate maximal header size on Polkadot-like chains. /// /// See [`bp-header-chain::ChainWithGrandpa`] for more details. /// -/// This value comes from recent (February, 2023) Kusama headers. Maximal header is a mandatory -/// header. In its SCALE-encoded form it is `80348` bytes. Let's have some reserve here. -pub const MAX_HEADER_SIZE: u32 = 90_000; +/// This value comes from recent (December, 2023) Kusama headers. Maximal header is a mandatory +/// header. In its SCALE-encoded form it is `113407` bytes. Let's have some reserve here. +pub const MAX_MANDATORY_HEADER_SIZE: u32 = 120 * 1024; /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at /// Polkadot-like chain. This mostly depends on number of entries in the storage trie. diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index ffed2debbe68ec4b1354e90692e094d9adab038d..563d27c91c9eb9ac45c54784961d494b6f66b616 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -6,8 +6,11 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } +codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } # Bridge Dependencies @@ -26,7 +29,7 @@ hex = "0.4" hex-literal = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-runtime/std", diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 48f6722c982b71ae88e7958fcdebd9d70ec88bd3..779030b5278ad2fd1d14352ede6e1c31e2087bca 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } hash-db = { version = "0.16.0", default-features = false } @@ -31,7 +34,7 @@ trie-db = { version = "0.28.0", default-features = false } hex-literal = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index e1809e145248f1cb37022ed4c2bb29d1547b2b2a..81a2070bece5f471eccbf61ab971d604becc9041 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -15,7 +15,7 @@ // along with Parity Bridges Common. If not, see . 
use crate::HeaderIdProvider; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Codec, Decode, Encode, MaxEncodedLen}; use frame_support::{weights::Weight, Parameter}; use num_traits::{AsPrimitive, Bounded, CheckedSub, Saturating, SaturatingAdd, Zero}; use sp_runtime::{ @@ -39,7 +39,7 @@ pub enum EncodedOrDecodedCall { Decoded(ChainCall), } -impl EncodedOrDecodedCall { +impl EncodedOrDecodedCall { /// Returns decoded call. pub fn to_decoded(&self) -> Result { match self { @@ -57,6 +57,14 @@ impl EncodedOrDecodedCall { Self::Decoded(decoded_call) => Ok(decoded_call), } } + + /// Converts self to encoded call. + pub fn into_encoded(self) -> Vec { + match self { + Self::Encoded(encoded_call) => encoded_call, + Self::Decoded(decoded_call) => decoded_call.encode(), + } + } } impl From for EncodedOrDecodedCall { @@ -191,7 +199,7 @@ pub trait Chain: Send + Sync + 'static { } /// A trait that provides the type of the underlying chain. -pub trait UnderlyingChainProvider { +pub trait UnderlyingChainProvider: Send + Sync + 'static { /// Underlying chain type. type Chain: Chain; } @@ -280,7 +288,7 @@ pub type TransactionEraOf = crate::TransactionEra, HashOf /// - constants that are stringified names of runtime API methods: /// - `BEST_FINALIZED__HEADER_METHOD` /// - `_ACCEPTED__FINALITY_PROOFS_METHOD` -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_finality_runtime_apis { ($chain: ident $(, $consensus: ident => $justification_type: ty)?) => { @@ -332,7 +340,7 @@ macro_rules! decl_bridge_finality_runtime_apis { /// - `FromInboundLaneApi` /// - constants that are stringified names of runtime API methods: /// - `FROM__MESSAGE_DETAILS_METHOD`, -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_messages_runtime_apis { ($chain: ident) => { @@ -390,7 +398,7 @@ macro_rules! decl_bridge_messages_runtime_apis { /// Convenience macro that declares bridge finality runtime apis, bridge messages runtime apis /// and related constants for a chain. -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_runtime_apis { ($chain: ident $(, $consensus: ident)?) => { diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs index 44eeaad93c91603b34713ca477b5623f4e07aad0..8a618721b23a6665a14da11684553803170676be 100644 --- a/bridges/primitives/runtime/src/extensions.rs +++ b/bridges/primitives/runtime/src/extensions.rs @@ -88,7 +88,7 @@ pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<( /// wildcard/placeholder, which relies on the scale encoding for `()` or `((), ())`, or `((), (), /// ())` is the same. 
So runtime can contains any kind of tuple: /// `(BridgeRefundBridgeHubRococoMessages)` -/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWococoMessages)` +/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` /// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>; diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index e5277d8db6a8484a7ccba0c8e64326269e837e8c..0513cfa2a6c75e2509edfe78b0a2c8043ea828bc 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -61,15 +61,6 @@ pub use sp_runtime::paste; /// Use this when something must be shared among all instances. pub const NO_INSTANCE_ID: ChainId = [0, 0, 0, 0]; -/// Rialto chain id. -pub const RIALTO_CHAIN_ID: ChainId = *b"rlto"; - -/// RialtoParachain chain id. -pub const RIALTO_PARACHAIN_CHAIN_ID: ChainId = *b"rlpa"; - -/// Millau chain id. -pub const MILLAU_CHAIN_ID: ChainId = *b"mlau"; - /// Polkadot chain id. pub const POLKADOT_CHAIN_ID: ChainId = *b"pdot"; @@ -88,15 +79,9 @@ pub const ASSET_HUB_WESTEND_CHAIN_ID: ChainId = *b"ahwe"; /// Rococo chain id. pub const ROCOCO_CHAIN_ID: ChainId = *b"roco"; -/// Wococo chain id. -pub const WOCOCO_CHAIN_ID: ChainId = *b"woco"; - /// BridgeHubRococo chain id. pub const BRIDGE_HUB_ROCOCO_CHAIN_ID: ChainId = *b"bhro"; -/// BridgeHubWococo chain id. -pub const BRIDGE_HUB_WOCOCO_CHAIN_ID: ChainId = *b"bhwo"; - /// BridgeHubWestend chain id. pub const BRIDGE_HUB_WESTEND_CHAIN_ID: ChainId = *b"bhwd"; @@ -277,18 +262,6 @@ pub fn storage_map_final_key( StorageKey(final_key) } -/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false; -/// }`) is computed. -/// -/// Copied from `frame_support::parameter_types` macro. -pub fn storage_parameter_key(parameter_name: &str) -> StorageKey { - let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1); - buffer.push(b':'); - buffer.extend_from_slice(parameter_name.as_bytes()); - buffer.push(b':'); - StorageKey(sp_io::hashing::twox_128(&buffer).to_vec()) -} - /// This is how a storage key of storage value is computed. /// /// Copied from `frame_support::storage::storage_prefix`. 
@@ -574,14 +547,6 @@ where mod tests { use super::*; - #[test] - fn storage_parameter_key_works() { - assert_eq!( - storage_parameter_key("MillauToRialtoConversionRate"), - StorageKey(hex_literal::hex!("58942375551bb0af1682f72786b59d04").to_vec()), - ); - } - #[test] fn storage_value_key_works() { assert_eq!( diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index 9836c1877f00239f72dd97c74b6b5fa8698b12e6..3ccec9d9033d782d73631a7594b98f8e38b61461 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -6,13 +6,16 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -bp-header-chain = { path = "../header-chain", default-features = false } +bp-header-chain = { path = "../header-chain", default-features = false } bp-parachains = { path = "../parachains", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -ed25519-dalek = { version = "2.0", default-features = false } +ed25519-dalek = { version = "2.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false } @@ -22,7 +25,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-parachains/std", diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index fb079b48e42a7aca9ae9846ef9eaac383cc06649..fa537bda960a4ac2b14c974df1e37fa9e6095489 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -6,8 +6,11 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } +codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies @@ -15,5 +18,5 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-core = { path = "../../../substrate/primitives/core", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std" ] +default = ["std"] +std = ["codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std"] diff --git a/bridges/primitives/xcm-bridge-hub/Cargo.toml b/bridges/primitives/xcm-bridge-hub/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f9f44fd0f8d974e7aea66477ab818d8f96595031 --- /dev/null +++ b/bridges/primitives/xcm-bridge-hub/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = 
"bp-xcm-bridge-hub" +description = "Primitives of the xcm-bridge-hub pallet." +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[lints] +workspace = true + +[dependencies] + +# Substrate Dependencies +sp-std = { path = "../../../substrate/primitives/std", default-features = false } + +[features] +default = ["std"] +std = ["sp-std/std"] diff --git a/bridges/primitives/xcm-bridge-hub/src/lib.rs b/bridges/primitives/xcm-bridge-hub/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..9745011c902d2c3949b81886c872f438678a11b8 --- /dev/null +++ b/bridges/primitives/xcm-bridge-hub/src/lib.rs @@ -0,0 +1,24 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Primitives of the xcm-bridge-hub pallet. + +#![warn(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +/// Encoded XCM blob. We expect the bridge messages pallet to use this blob type for both inbound +/// and outbound payloads. +pub type XcmAsPlainPayload = sp_std::vec::Vec; diff --git a/bridges/scripts/verify-pallets-build.sh b/bridges/scripts/verify-pallets-build.sh index e797f77d02657fd0a65e38cb0acc80d3dd23b483..b96bbf1833b6b3ce2bb34d2dc34aa5b8f54eb528 100755 --- a/bridges/scripts/verify-pallets-build.sh +++ b/bridges/scripts/verify-pallets-build.sh @@ -61,19 +61,12 @@ trap revert_to_clean_state EXIT rm -rf $BRIDGES_FOLDER/.config rm -rf $BRIDGES_FOLDER/.github rm -rf $BRIDGES_FOLDER/.maintain -rm -rf $BRIDGES_FOLDER/bin/millau -rm -rf $BRIDGES_FOLDER/bin/rialto -rm -rf $BRIDGES_FOLDER/bin/rialto-parachain -rm -rf $BRIDGES_FOLDER/bin/.keep rm -rf $BRIDGES_FOLDER/deployments rm -f $BRIDGES_FOLDER/docs/dockerhub-* rm -rf $BRIDGES_FOLDER/fuzz rm -rf $BRIDGES_FOLDER/modules/beefy rm -rf $BRIDGES_FOLDER/modules/shift-session-manager rm -rf $BRIDGES_FOLDER/primitives/beefy -rm -rf $BRIDGES_FOLDER/primitives/chain-millau -rm -rf $BRIDGES_FOLDER/primitives/chain-rialto -rm -rf $BRIDGES_FOLDER/primitives/chain-rialto-parachain rm -rf $BRIDGES_FOLDER/relays rm -rf $BRIDGES_FOLDER/scripts/add_license.sh rm -rf $BRIDGES_FOLDER/scripts/build-containers.sh @@ -81,8 +74,6 @@ rm -rf $BRIDGES_FOLDER/scripts/ci-cache.sh rm -rf $BRIDGES_FOLDER/scripts/dump-logs.sh rm -rf $BRIDGES_FOLDER/scripts/license_header rm -rf $BRIDGES_FOLDER/scripts/regenerate_runtimes.sh -rm -rf $BRIDGES_FOLDER/scripts/send-message-from-millau-rialto.sh -rm -rf $BRIDGES_FOLDER/scripts/send-message-from-rialto-millau.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights-setup.sh rm -rf $BRIDGES_FOLDER/scripts/update_substrate.sh diff --git a/bridges/snowbridge/LICENSE b/bridges/snowbridge/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ 
b/bridges/snowbridge/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/bridges/snowbridge/README.md b/bridges/snowbridge/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a38910da3164e853f54b284f8d38795d4220aafe --- /dev/null +++ b/bridges/snowbridge/README.md @@ -0,0 +1,127 @@ +# Snowbridge · +[![codecov](https://codecov.io/gh/Snowfork/snowbridge/branch/main/graph/badge.svg?token=9hvgSws4rN)] +(https://codecov.io/gh/Snowfork/snowbridge) +![GitHub](https://img.shields.io/github/license/Snowfork/snowbridge) + +Snowbridge is a trustless bridge between Polkadot and Ethereum. For documentation, visit https://docs.snowbridge.network. + +## Components + +### Parachain + +Polkadot parachain and our pallets. See [parachain/README.md](https://github.com/Snowfork/snowbridge/blob/main/parachain/README.md). + +### Contracts + +Ethereum contracts and unit tests. See [contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) + +### Relayer + +Off-chain relayer services for relaying messages between Polkadot and Ethereum. See +[relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) + +### Local Testnet + +Scripts to provision a local testnet, running the above services to bridge between local deployments of Polkadot and +Ethereum. See [web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). + +### Smoke Tests + +Integration tests for our local testnet. See [smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). + +## Development + +We use the Nix package manager to provide a reproducible and maintainable developer environment. + +After [installing nix](https://nixos.org/download.html) Nix, enable [flakes](https://nixos.wiki/wiki/Flakes): + +```sh +mkdir -p ~/.config/nix +echo 'experimental-features = nix-command flakes' >> ~/.config/nix/nix.conf +``` + +Then activate a developer shell in the root of our repo, where +[`flake.nix`](https://github.com/Snowfork/snowbridge/blob/main/flake.nix) is located: + +```sh +nix develop +``` + +Also make sure to run this initialization script once: +```sh +scripts/init.sh +``` + +### Support for code editors + +To ensure your code editor (such as VS Code) can execute tools in the nix shell, startup your editor within the +interactive shell. + +Example for VS Code: + +```sh +nix develop +code . +``` + +### Custom shells + +The developer shell is bash by default. To preserve your existing shell: + +```sh +nix develop --command $SHELL +``` + +### Automatic developer shells + +To automatically enter the developer shell whenever you open the project, install +[`direnv`](https://direnv.net/docs/installation.html) and use the template `.envrc`: + +```sh +cp .envrc.example .envrc +direnv allow +``` + +### Upgrading the Rust toolchain + +Sometimes we would like to upgrade rust toolchain. First update `parachain/rust-toolchain.toml` as required and then +update `flake.lock` running +```sh +nix flake lock --update-input rust-overlay +``` + +## Troubleshooting + +Check the contents of all `.envrc` files. 
+ +Remove untracked files: +```sh +git clean -idx +``` + +Ensure that the current Rust toolchain is the one selected in `scripts/init.sh`. + +Ensure submodules are up-to-date: +```sh +git submodule update +``` + +Check untracked files & directories: +```sh +git clean -ndx | awk '{print $3}' +``` +After removing `node_modules` directories (eg. with `git clean above`), clear the pnpm cache: +```sh +pnpm store prune +``` + +Check Nix config in `~/.config/nix/nix.conf`. + +Run a pure developer shell (note that this removes access to your local tools): +```sh +nix develop -i --pure-eval +``` + +## Security + +The security policy and procedures can be found in SECURITY.md. diff --git a/bridges/snowbridge/parachain/LICENSE b/bridges/snowbridge/parachain/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/bridges/snowbridge/parachain/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/bridges/snowbridge/parachain/README.md b/bridges/snowbridge/parachain/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ddcbedab0c635983dff18fb00d207a9408d353db
--- /dev/null
+++ b/bridges/snowbridge/parachain/README.md
@@ -0,0 +1,155 @@
+# Parachain modules
+
+## Configuration
+
+Note: This section is not necessary for local development, as there are scripts to auto-configure the parachain in the
+[test directory](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test).
+
+For a fully operational chain, further configuration of the initial chain spec is required. The specific configuration will
+depend heavily on your environment, so this guide will remain high-level.
+
+After completing a release build of the parachain, build an initial spec for the snowbase runtime:
+
+```bash
+target/release/snowbridge build-spec --chain snowbase --disable-default-bootnode > spec.json
+```
+
+Now edit the spec and configure the following:
+1. A recently finalized Ethereum header and difficulty for the Ethereum light client.
+2. Contract addresses for the Ether, Erc20, and Dot apps.
+3. The authorized principal for the basic channel.
+
+For an example configuration, consult the [setup script](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/scripts/start-services.sh)
+for our local development stack, specifically the `start_polkadot_launch` bash function.
+
+## Tests
+
+To run the parachain tests locally, use `cargo test --workspace`. For the full suite of tests, use
+`cargo test --workspace --features runtime-benchmarks`.
+
+Optionally exclude the top-level and runtime crates:
+
+```bash
+cargo test --workspace \
+ --features runtime-benchmarks \
+ --exclude snowbridge \
+ --exclude snowbridge-runtime \
+ --exclude snowblink-runtime \
+ --exclude snowbase-runtime
+```
+
+### Updating test data for inbound channel unit tests
+
+To regenerate the test data, use a test with multiple `submit` calls in `ethereum/test/test_basic_outbound_channel.js`, e.g.
+"should increment nonces correctly".
+
+Add the following preamble:
+
+```javascript
+const rlp = require("rlp");
+const contract = BasicOutboundChannel;
+const signature = 'Message(address,address,uint64,uint64,bytes)';
+```
+
+For each encoded log you want to create, find a transaction object `tx` returned from a `submit` call and run this:
+
+```javascript
+const rawLog = tx.receipt.rawLogs[0];
+const encodedLog = rlp.encode([rawLog.address, rawLog.topics, rawLog.data]).toString("hex");
+console.log(`encodedLog: ${encodedLog}`);
+const iface = new ethers.utils.Interface(contract.abi);
+const decodedEventLog = iface.decodeEventLog(
+ signature,
+ rawLog.data,
+ rawLog.topics,
+);
+console.log(`decoded rawLog.data: ${JSON.stringify(decodedEventLog)}`);
+```
+
+Place the `encodedLog` string in the `message.data` field in the test data. Use the `decoded rawLog.data` field to
+update the comments with the decoded log data.
+
+## Generating pallet weights from benchmarks
+
+Build the parachain with the runtime benchmark flags for the chosen runtime:
+
+```bash
+runtime=snowbase
+cargo build \
+ --release \
+ --no-default-features \
+ --features "$runtime-native,rococo-native,runtime-benchmarks,$runtime-runtime-benchmarks" \
+ --bin snowbridge
+```
+
+List available pallets and their benchmarks:
+
+```bash
+./target/release/snowbridge benchmark pallet --chain $runtime --list
+```
+
+Run a benchmark for a pallet, generating weights:
+
+```bash
+target/release/snowbridge benchmark pallet \
+ --chain=$runtime \
+ --execution=wasm \
+ --wasm-execution=compiled \
+ --pallet=basic_channel_inbound \
+ --extra \
+ --extrinsic=* \
+ --repeat=20 \
+ --steps=50 \
+ --output=pallets/basic-channel/src/inbound/weights.rs \
+ --template=templates/module-weight-template.hbs
+```
+
+## Generating beacon test fixtures and benchmarking data
+
+### Minimal Spec
+
+To generate `minimal` test data and benchmarking data, make sure to start the local E2E setup to spin up a local beacon
+node instance to connect to:
+
+```bash
+cd web/packages/test
+./scripts/start-services.sh
+```
+
+Wait for output `Testnet has been initialized`.
+
+In a separate terminal, from the `snowbridge` directory, run:
+
+```bash
+mage -d relayer build && relayer/build/snowbridge-relay generate-beacon-data --spec "minimal" && cd parachain &&
+cargo +nightly fmt -- --config-path rustfmt.toml && cd -
+```
+
+### Mainnet Spec
+
+We only use the mainnet spec for generating fixtures for pallet weight benchmarks.
+
+To generate the data we can connect to the Lodestar Goerli public node. The script already connects to the Lodestar node,
+so no need to start up additional services. In the event of the Lodestar node not being available, you can start up your
+own stack with these commands:
+
+```bash
+cd web/packages/test
+./scripts/start-goerli.sh
+```
+
+From the `snowbridge` directory, run:
+
+```bash
+mage -d relayer build && relayer/build/snowbridge-relay generate-beacon-data --spec "mainnet" && cd parachain &&
+cargo +nightly fmt -- --config-path rustfmt.toml && cd -
+```
+
+### Benchmarking tests
+
+To run the benchmark tests:
+
+```bash
+cd parachain/pallets/ethereum-beacon-client
+cargo test --release --features runtime-benchmarks
+```
diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/Cargo.toml b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..5c4acda13d8da9104f545dcfe19d69496d7d7dac
--- /dev/null
+++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/Cargo.toml
@@ -0,0 +1,95 @@
+[package]
+name = "snowbridge-ethereum-beacon-client"
+description = "Snowbridge Beacon Client Pallet"
+version = "0.0.1"
+edition = "2021"
+authors = ["Snowfork "]
+repository = "https://github.com/Snowfork/snowbridge"
+license = "Apache-2.0"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+serde = { version = "1.0.188", optional = true }
+serde_json = { version = "1.0.96", optional = true }
+codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] }
+scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
+ssz_rs = { version = "0.9.0", default-features = false }
+ssz_rs_derive = { version = "0.9.0", default-features = false }
+byte-slice-cast = { version = "1.2.1", default-features = false }
+rlp = { version = "0.5.2", default-features = false }
+hex-literal = { version = "0.4.1", optional = true }
+log = { version = "0.4.20", default-features = false }
+
+frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
+frame-support = { path = "../../../../../substrate/frame/support", default-features = false }
+frame-system = { path = "../../../../../substrate/frame/system", default-features = false }
+sp-core = { path = "../../../../../substrate/primitives/core", default-features = false }
+sp-std = { path = "../../../../../substrate/primitives/std", default-features = false }
+sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false }
+sp-io = { path = "../../../../../substrate/primitives/io", default-features = false, optional = true }
+
+snowbridge-core = { path = "../../primitives/core", default-features = false }
+snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false }
+primitives = { package = "snowbridge-beacon-primitives", path = "../../primitives/beacon", default-features = false }
+static_assertions = { version = "1.1.0", default-features = false }
+bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false }
+pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false, optional = true }
+
+[dev-dependencies]
+rand = "0.8.5"
+sp-keyring = { path = "../../../../../substrate/primitives/keyring" }
+serde_json = "1.0.96"
+hex-literal = "0.4.1"
+pallet-timestamp = { path = "../../../../../substrate/frame/timestamp" }
+sp-io = { path = "../../../../../substrate/primitives/io" }
+serde = "1.0.188"
+
+[features]
+default = ["std"]
+fuzzing = [
+ "hex-literal", + "pallet-timestamp", + "serde", + "serde_json", + "sp-io", +] +std = [ + "bp-runtime/std", + "byte-slice-cast/std", + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-timestamp/std", + "primitives/std", + "rlp/std", + "scale-info/std", + "serde", + "snowbridge-core/std", + "snowbridge-ethereum/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "ssz_rs/std", + 'frame-benchmarking/std', +] +runtime-benchmarks = [ + "beacon-spec-mainnet", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "hex-literal", + "pallet-timestamp?/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-timestamp?/try-runtime", + "sp-runtime/try-runtime", +] +beacon-spec-mainnet = [] diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/benchmark.md b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/benchmark.md new file mode 100644 index 0000000000000000000000000000000000000000..de976e121496b773fcdb9a55c1ae77f9de86542f --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/benchmark.md @@ -0,0 +1,88 @@ +# Motivation +Demonstrate that +[FastAggregateVerify](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-3.3.4) is the most +expensive call in ethereum beacon light client, though in [#13031](https://github.com/paritytech/substrate/pull/13031) +Parity team has wrapped some low level host functions for `bls-12381` but adding a high level host function specific +for it is super helpful. + +# Benchmark +We add several benchmarks +[here](https://github.com/Snowfork/snowbridge/blob/8891ca3cdcf2e04d8118c206588c956541ae4710/parachain/pallets/ethereum-beacon-client/src/benchmarking/mod.rs#L98-L124) +as following to demonstrate +[bls_fast_aggregate_verify](https://github.com/Snowfork/snowbridge/blob/8891ca3cdcf2e04d8118c206588c956541ae4710/parachain/pallets/ethereum-beacon-client/src/lib.rs#L764) +is the main bottleneck. Test data +[here](https://github.com/Snowfork/snowbridge/blob/8891ca3cdcf2e04d8118c206588c956541ae4710/parachain/pallets/ethereum-beacon-client/src/benchmarking/data_mainnet.rs#L553-L1120) +is real from goerli network which contains 512 public keys from sync committee. + +## sync_committee_period_update +Base line benchmark for extrinsic [sync_committee_period_update](https://github.com/Snowfork/snowbridge/blob/8891ca3cdcf2e04d8118c206588c956541ae4710/parachain/pallets/ethereum-beacon-client/src/lib.rs#L233) + +## bls_fast_aggregate_verify +Subfunction of extrinsic `sync_committee_period_update` which does what +[FastAggregateVerify](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-3.3.4) requires. + +## bls_aggregate_pubkey +Subfunction of `bls_fast_aggregate_verify` which decompress and instantiate G1 pubkeys only. + +## bls_verify_message +Subfunction of `bls_fast_aggregate_verify` which verify the prepared signature only. 
+
+
+# Results
+
+## Hardware spec
+Run the benchmark on an EC2 instance:
+```
+cargo run --release --bin polkadot-parachain --features runtime-benchmarks -- benchmark machine --base-path /mnt/scratch/benchmark
+
++----------+----------------+-------------+-------------+-------------------+
+| Category | Function       | Score       | Minimum     | Result            |
++===========================================================================+
+| CPU      | BLAKE2-256     | 1.08 GiBs   | 1.00 GiBs   | ✅ Pass (107.5 %) |
+|----------+----------------+-------------+-------------+-------------------|
+| CPU      | SR25519-Verify | 568.87 KiBs | 666.00 KiBs | ❌ Fail ( 85.4 %) |
+|----------+----------------+-------------+-------------+-------------------|
+| Memory   | Copy           | 13.67 GiBs  | 14.32 GiBs  | ✅ Pass ( 95.4 %) |
+|----------+----------------+-------------+-------------+-------------------|
+| Disk     | Seq Write      | 334.35 MiBs | 450.00 MiBs | ❌ Fail ( 74.3 %) |
+|----------+----------------+-------------+-------------+-------------------|
+| Disk     | Rnd Write      | 143.59 MiBs | 200.00 MiBs | ❌ Fail ( 71.8 %) |
++----------+----------------+-------------+-------------+-------------------+
+```
+
+## Benchmark
+
+```
+cargo run --release --bin polkadot-parachain \
+--features runtime-benchmarks \
+-- \
+benchmark pallet \
+--base-path /mnt/scratch/benchmark \
+--chain=bridge-hub-rococo-dev \
+--pallet=snowbridge_ethereum_beacon_client \
+--extrinsic="*" \
+--execution=wasm --wasm-execution=compiled \
+--steps 50 --repeat 20 \
+--output ./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_ethereum_beacon_client.rs
+```
+
+### [Weights](https://github.com/Snowfork/cumulus/blob/ron/benchmark-beacon-bridge/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_ethereum_beacon_client.rs)
+
+| extrinsic | minimum execution time benchmarked (us) |
+| ----------------------------- | ---------------------------------------- |
+| sync_committee_period_update | 123_126 |
+| bls_fast_aggregate_verify | 121_083 |
+| bls_aggregate_pubkey | 90_306 |
+| bls_verify_message | 28_000 |
+
+- [bls_fast_aggregate_verify](#bls_fast_aggregate_verify) consumes 98% of the execution time of [sync_committee_period_update](#sync_committee_period_update)
+
+- [bls_aggregate_pubkey](#bls_aggregate_pubkey) consumes 75% of the execution time of [bls_fast_aggregate_verify](#bls_fast_aggregate_verify)
+
+- [bls_verify_message](#bls_verify_message) consumes 23% of the execution time of [bls_fast_aggregate_verify](#bls_fast_aggregate_verify)
+
+# Conclusion
+
+A high-level host function specific to
+[bls_fast_aggregate_verify](https://github.com/Snowfork/snowbridge/blob/8891ca3cdcf2e04d8118c206588c956541ae4710/parachain/pallets/ethereum-beacon-client/src/lib.rs#L764)
+would be super helpful.
diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/fixtures.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/fixtures.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b50be81360a3cdad6a50c9a4058e4c20282eba70
--- /dev/null
+++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/fixtures.rs
@@ -0,0 +1,1215 @@
+// Generated, do not edit!
+// See README.md for instructions to generate +use crate::{CheckpointUpdate, ExecutionHeaderUpdate, Update}; +use hex_literal::hex; +use primitives::{ + updates::AncestryProof, BeaconHeader, ExecutionPayloadHeader, NextSyncCommitteeUpdate, + SyncAggregate, SyncCommittee, +}; +use sp_core::U256; +use sp_std::{boxed::Box, vec}; + +pub fn make_checkpoint() -> Box { + Box::new(CheckpointUpdate { + header: BeaconHeader { + slot: 5809344, + proposer_index: 101696, + parent_root: hex!("ea7ce4ad810829cf37a2235b1126c82aecfc5955a1647ec83640cf3f7db91bd2").into(), + state_root: hex!("56f6363d3604e61a907c774edf0bddf6477a8d410f026414bc420f751de1f092").into(), + body_root: hex!("8c799aeef815cbc4499e0b46723623105afb177a5c522ecda3415ad9fb259e6c").into(), + }, + current_sync_committee: SyncCommittee { + pubkeys: [ + hex!("adf5a4907639db7bdcbecbc295b57d8950b0abe34ab17798686643427023c4f3983550d1496f81a27e52b070e4f4e6ee").into(), + hex!("91b036b30405531cacebf5d4f7e939b44438bb9942123ee55b44453e32febfbf2c846e0e4fb08190b01a000d072dcaa7").into(), + hex!("a86e70f00161ec6c4b780b4fc631c8dbae979f1e6c9ed037dd0745833ed6e3e18831478eb4753861f339293c0508f4d4").into(), + hex!("84196b1f39fba1fb7570074e7dd2768ed5c28db7f91a6374e413c8fc82f97738af771f90496526088bfa1ee2c01ee299").into(), + hex!("b9a230fc12d85281cbfcc7f5e6b13ab17b3dcbf0adf256d031c01acb30734d061683d40fd62175da73a621440bb04367").into(), + hex!("80b8be5a3d6f39aa7362c5feee9f89b75d1e5c2b485ea9a776c60fd60dba611e9bf5ca8b2528f42651b3dad212acfe77").into(), + hex!("ac8fcfc40028d04bdbea87b4b335781e35e10f881bfeb07b94c538eb37a43b18b1a04aad3dbe80bbff6e128017251f2e").into(), + hex!("b0b2136cc729b7de8868c02de6247ff2a68694296c78f088ce967219f08cfb7be9e1830e2630b10ca650c715d85d89f3").into(), + hex!("a70627c99777970eb9bf3268bac06bdc41c2ead41b1b76d30e7fd2aefe83319461d03aaf7ab93150343be9fbd2e48e7d").into(), + hex!("9599aae109d31ddc9028c428a148ea9ebffdb5ab6a684895dbd3772c1baf947c8a255e4c7ebaedae2a9e046219d80d76").into(), + hex!("8652e099adb88b2a25ab64fb01314e24cf26dbc4ae110d7fd73d74a0e0a4fea2ce2ce87cb1ddb7c0b9fb50cc6afa2153").into(), + hex!("83ba74c6e31073865eaed3c38a8e885ee715f03cfd6e36929655b6aa790d8f676c4ac4ae27f963e311d00923357eb087").into(), + hex!("86b05425d880027fbde9be3ea526283c5b958ccb31eff997d9d7b5e3b70e2d011ed95f891248082e870d55704a471deb").into(), + hex!("a4eeb958121bc5be5b1a68b73faa83b19272ef2f2cb627431be08e9844ef9d4548b4208670754d0954e5b012e3933859").into(), + hex!("87b0119e4c54aa2b4f8260f9ace7722788578bc6d822361d011e751caf13be5ca94b7d5842df5c16e52bfb4d658a405a").into(), + hex!("b696ec7f5dc82655cf027f5827fff3ce39195c1ee4ea9fba1808880cdea16d6086fb583edfbf66608e4f33211ddf9f27").into(), + hex!("a69bafcb3af59786acf009cc31e245009156a7e4fd2af98cdf2b7e63c39aff2baba08a338cdac93e94f6132b2cfe7a7c").into(), + hex!("8aa0aebb24b8c62168b255b6041474e8abf0d589a7467bc4712d910a9c2d470432d251e336a80dded27c0e9eea43aebd").into(), + hex!("a08d6976d3579411080957dfa2ca9487b1c4d8dbcdf640c1fea0a46f2c0228c2ddadf0553781c3e0dc5c7503b1bf29a0").into(), + hex!("a4abccfcfce6754e4d6c5c8bbad6668a55dc555ba99189936385e4dafa0435b323b813ce69f76e24299b5842c244139a").into(), + hex!("b89e4fd2dfb46c2af6df73f7185693ac535b67ab31f2805b1c24f400e0068cb32aff164b53668512f5895883189f7c02").into(), + hex!("b7c221a5884d10048bf9dd8611fb5231ce444fb756e59dd60d18a2332e889c014b27d739ed6012ff86056c04f36a87f1").into(), + hex!("a82875d66a4da52d6eac9dcd9ba9c728332253ad4b83acf1601a34efdd0c398eaad4acd1a948203d1bbd4a17d01a7b56").into(), + 
hex!("8c0a8ec162b3d48ee6a0f39620432cd67a1eb33a6d6f7bf1aada4f24c7498cf2e2b2476898f14c875f5efcf439aa0bce").into(), + hex!("a4b5d6451ae5baba3984bfbd5eef59543bf7c923662ea182cec6bb29aded9afd4c89e618ff756b5290506e0bf5a7e690").into(), + hex!("80664b43e3bd2f6e8eeb81a46cdca4f571499f3f0c77eb007ceb33c5b3dc18348a68cedc19d9967117f21a6c1ea29060").into(), + hex!("9174e46939c6915c757e793c9a02e46ed49d869216b720d0924b9697d94098182cad2cc6d4caf4cf140445377f1b4c80").into(), + hex!("b6e25cc134e089d306c648228d84aeee9516c8a276a2d37b54b458314b8f8980d49614550e821aac31000a5e7d518fca").into(), + hex!("ab1ab623a70f1e33cdbee356530a6f8fd9d00b2f9d8f94c0854816e5dd2bb8b258b5fb7a89ebc0725d8ebb74395847b6").into(), + hex!("8e75ea6f0678abfe1a7a9201c9fde992451327d61290fd803b3a1cdf2f7537fd7c23e0e06af5bb886a28928d0baba1c5").into(), + hex!("b41f4d4421de7b94eb9bc61602d08d77dc3f5f5d025e04f44c1426f14e8f707031b9e1d3a6e92c200f54166b2040f0a0").into(), + hex!("a8fa0c61435b851f9bda4da8dae0d544984ba5c0fef338eb6896b9c08306c3e2aec0a6c2028da319f210387320df4f73").into(), + hex!("a884af449df4cbd39e161de10f9d1f645eedfae0d259ddd84347733836fded047ca079916d365ef3d93fb354c8c795a6").into(), + hex!("936970bdeffc92f32915d141ccd8334df7966a3cafecb6d33fab9f477c389179f612cd6e368b615a98112d71756756aa").into(), + hex!("88dfe631c1e16ec3634e06a83a857ce9c909cb5b05d40490e6d02e553dd3bf213f0e178d31b4d913a4796b7226ab3ba2").into(), + hex!("a0c8357b4fb9c4431bc88e6336c2218590a7cb351ef4405a80aa6d352912f0b3110f3a09eef337bfe98da6b0841c6214").into(), + hex!("a7def54e08e2cea7def767d1108bc5c24d64e2dbdea9d07f0c8c63e60eec2db4e095b4a84bf6a4822103560b0497d1e7").into(), + hex!("80991c4f933985d9662d2e047187f244dcbb79606410aeb66ab250b2fdb9bd9daa392339e9b16d0d07648e847b02f942").into(), + hex!("b63fe559e2b4580238a0b0ce52ab8258838c2b64c1922c56b64cd65be2f88c140cb4e6ce96932e92f1ae06b20ee9e613").into(), + hex!("926ece3480c5c1f24f03d8289dcac7d3b202fcda277dd4453294dffc2953d91c842653a9e272b76fe2cebe1de3aba63d").into(), + hex!("930d29990821d26a748018bb6998488d5de811c8ed506c213c0ba346c8011e2d7c2235aa427a320129c1d016948eabf1").into(), + hex!("ae4e3fcd6c99a8f320dd4d160704db6baaae9aaa885f5d792212ea03cf06a459eb4a2730da9161ddd72a5e6544744e81").into(), + hex!("a01ae9d4f0008efdc5203abfdd0807ee7cc58c49cc5946d0b991ea2e069a224f0d99ea714b4d5cc464deb57372790660").into(), + hex!("ab17902ab255c575a133ed47fbafd62f898748ef6fc33455a2adcb2d4ea72c31255a9cd0c545d2ad8007a5a694f13371").into(), + hex!("86e2384e0469ab8ea9c5628d619d10336b3c0f334969dbc3335dfd72ed9899639fccfb5d1d9d6667a0dba20651584100").into(), + hex!("88ededd6ef3502be7c3c1b018cd814d4a8f98a7cf9521fe9cce6faaa6056315e22828397435b57d15e996fd15d7d700a").into(), + hex!("affa898a30a0dd2ccbe8d46c5a69dc1a8696b311094ac00e8ef6d398f6f132edb2d823004bc7895ac53bc09b6fb1059c").into(), + hex!("ac9f5bc3e2045a1eb356d88f2a62781fdb5775349c9bbdd9417b7bb7a9132a1cd42bf4986fcdc706eabbce45ab9f1cc9").into(), + hex!("986e3813ade7b9533ccfdcd76ea17490bc3dc62c8a596c8d07b246c28d7dea465c4dadda05111d2793d72d317c7c2833").into(), + hex!("ad4fbc51581cd520bcd0b88379ec0482c94b2ac344ce48a5facc1cf3e026edb088ce5cba5113f2c31f630a1fb37c5214").into(), + hex!("b499bfe19a22900c46bc0af1925887bfdd62455a3b85144e44b9a0a3756548c72b4f5c61f21c15f5e70b627d9f53a9c4").into(), + hex!("a5f40fcbe99b494c6acdd65ec3516d5a4a784ffc501e686764bf655c1f2f4bf784bac6b85e961d6a5ae513555a638323").into(), + hex!("a44c695e5490bca1cdc6b69fa80d6540239e42ae1765bff39285d1798696574c409e27c288b3b57196cf7c1753366969").into(), + 
hex!("a1ec5d9b8e55803477fbdc454d9c5cc605ead24147e5b04a03f80558a14f1e08bd6557f3fd11dab06a2816d32138cd43").into(), + hex!("98957c2e0a82210e2329b60dac05dd8b219ca00b18206227b725d011bb2f8b0dc1a79149a7d49a9a5505301b1b3847c5").into(), + hex!("ac8df8b1b596039c24c185108c89ef4da384e92c957c84bdefdab923e4be7c0e9480d837e1ff2e6269828b86b0b9f6c2").into(), + hex!("9358e3a48d4938b189059c3696169d31339851a1f4205dfe4de423ad4b96e7015c0dccb8bcebb64c1c357b65d5da6914").into(), + hex!("af1ab721df52d8b106449aaed05761b637c38f4c5513062afa55a09b94c8b088fe98cf430be4633b4f2f818da545b37e").into(), + hex!("8b8c063568a13f6522fc66285ab116a06d5e226a72f5a00fb321dd9b7d0bdb53a6b46ac17a1c9faf468c19b128c84488").into(), + hex!("b537f79bf008cd16711390c188c52d8dcf23cc41990c82005de2b0ff1fa09de85d3c34328ad9124e108ec252ec667f70").into(), + hex!("b9e8cf06d584197093d4b4b1e1f745786bb1f9772c0a771b321ae2dccac167ea79632138d60d8ed6dbbba2accd0a3c11").into(), + hex!("a674106b2c965c708b98a9f28a3f511f5dd8dfe7b2b9ca64a29152c7fd8de428316638d5091412e22e50406c372d9950").into(), + hex!("900a4953af5a28ca5b6f789a0dab9a03e3e5b9a0e866465d371b871fc59ecba31b524b0fc414eb7467d4384ca1e4ab5c").into(), + hex!("8b12a6114f8e0947995d72fee19e9204a4b552a85743d5320c1942bd2294d52a2d6d346f5e930ed788fc929069733297").into(), + hex!("a118f4b631a0bfc00df74a94930d69075b83be0bfec2affdf5e1ca4762d40592132108a7d2088891e7fe078c84ea9d9c").into(), + hex!("b074d18f84787d245018a2dabe2f0a51bf2f3786a802317122982a598f7f953339468e0392665c3f3a6b8fb17cd8f72c").into(), + hex!("8e6230b8186009b765ae6b176eb7dbcf503139472c0c2c5574e3d608a49932f0b852e744d2e215c58512ed2b4e8178da").into(), + hex!("98cd13f8400ecc9db458cb81840ed6467795f54f25680931881808909cc661230716fcf8b3fc2b18f4c12d30c32d9e20").into(), + hex!("b861f89275776bd640bc7d0d347df31e784292fa14ec468a6b0e8d64a9b077b6bdae1bdbc56385bf8f5676ba62002606").into(), + hex!("80ac9088ce82fd7a90c91b24e5e91636ed76844303a3b1105bc284b4e9a860acca3207d1804200a55eeb3ef45ba97558").into(), + hex!("940bc07a53373f075128b6c12f59120bc7368289d5a960c55d52abb68e8444f6f830b07850d31a9ed137f5357b38406c").into(), + hex!("b8c5919657811270976eb45b9f3f09be798fbcf6b34ce7443fb36207816b6a84d2b945a6e6522480c2feed12ca980df9").into(), + hex!("8294e6ce7d7d3b77bf48252eb97cca47a5b9dff1b8c97f8b7af8034b938864f38bad932bfe44a9bf4df81e394aee7ef2").into(), + hex!("8d9075a2c42c1cf51e082ab1a57a66670f0e81af2a651aacac5aaaf3876711aabbc0c96e44d883e6ad8a91a81be22940").into(), + hex!("882aea71e4512d5c41b5fd6509ede0fa35a49fff9648d277b531364613342711b2c7520a5f8d58ff31b7fc9407f968fc").into(), + hex!("8a34a824cbac06cd99fc68f36ae09c6adf2d472fcbd378cda61b838c29ca9750a7f7d63d9b61d1f30deda1ce048ffd42").into(), + hex!("a4f027b5466de4fd633cba98d2c8c78db10f91c8bee76c32ca5a80c5e87ffaf19df84e675f31bb6c14ee733ac3d4a33b").into(), + hex!("b990db2d743b389132d4d8281701b348bfc52f707e6553f309e4f59706871142e6b8e3d081057a5c43243911148318bc").into(), + hex!("a4500c88a58971460c33a404a65260a9e21c4a345601d1686ecc352ab088b2bd93d30930383901fd2d042dd037286761").into(), + hex!("a4de9078286cde237b4ff2da52a0fc1b8fe7b931ac35b3033dca0b87b5c86e58bec951a66a7308fc951ebb70fd0d0bfd").into(), + hex!("a9adb6dbff69b115be1cb37d5ee3a95d8c9f466f059128b6ab197d4119f6c0f87d1cf4d14d4beb2a43f7913700c1d909").into(), + hex!("a8a33f167e473eb2ace3bedc1cac2281bc9f522a0fdf6a9eb365859b9116067e07b7c380e8ea4dd33a4fbe23e2412be5").into(), + hex!("88c3785f853a192c50c38040ed52c413084dd069729cb14d806223f8a51aef40c21648d1e03bbbca031ce811e1b708ce").into(), + 
hex!("a7db446f88ef0d018675c0e7be0e9655d098529ce4b4c92ad809ed9f588c3f6c6dd98267ae0efb1659dc16a29e1feac0").into(), + hex!("916fb4b864ef46a2f8f570364cdf02c93d8432d8155e7ad1a15777d7f5d5fab94797257d0943aed5f16033528a4290b9").into(), + hex!("b9b7ae2ec02b0694d7828a69bbd1fd6e9070cc217319dc4e8fc854e48f5ce6e133fb0d5492e2f87e7cb8d3d557f17037").into(), + hex!("8cc36ade2b8039ffd41459272091d9cc4c46a49a187e18e9e3b283136831724313ee6eb5954c34acf9ecf7d79c30dcfe").into(), + hex!("b89a28db91eb06f191731a927445ca64cb685a206f4d77f335f510eab3a4973eb1d199525aa12df17f97cb4e079bc35a").into(), + hex!("87fb2881b92d5d9a2555080afe033512fd93306bd25f4a841d034c0fd9685ae09ecd29d27523e8f18664cdf2127ae6bc").into(), + hex!("aaf63cd83256de5b8558bdfd7f5fa44b3b3cd767983484ee482241286f82c6f51c1de99e2c03c8c99e6d4a27b7379cc1").into(), + hex!("89b52eb17e1c2868a3da6727e6f122046739c99cbb6aa7e30f65d7e649d09540b7ba77ea3efad77be597e48f7abe7643").into(), + hex!("88d097674b763a770872f17266f38200a4034347756c5ece6f11fb841e82447a75f6f3279ba8ab54e668b2072c6596ef").into(), + hex!("8de58933f07ac60ae919adb4be0becfb5e6f7330708a39cbc4c988c79033f0ffb365ae2a6308c234c218f0e45ec1ce6b").into(), + hex!("a4ec080db7716a0eae593b0ca91a16273abf534b8d153232e4b7d613c9fd21081bc8442d6e57a6e8e026c66c93bf0289").into(), + hex!("b680857b566466d1fb92d78a09b75d93753c275249dd05a17702e48560e4df5c7f9ca68370847250fd001433972c8b0e").into(), + hex!("84ec67623fc58128f4504b071b53e3c0fc60ed07318febc8d450035567691e2e58468b6799e5f3d93536100bb4b5f3f0").into(), + hex!("b22ce364b11e164d3df52050d59085c29c398556617277349637290b193727e3942064693ba1d7bda313905908a071d5").into(), + hex!("876198b5734f1ab5f7ec0231110e9cbe59d116ccc410678d5e0108fae42bab5dcc7cd15df200bc71f471ecbeb0e80d68").into(), + hex!("b081fc87056c34a8a78cc34f308502c5b6509adb0b344e83227b997aade90d6f0f1b8e5302601ca3217ecf9dcfa24ddb").into(), + hex!("8fe71e517db52831a0c4adb83fc6524817b9a358fc6bf58c03c91d4125482921936391e54c767ad2f13071e0e5ea266d").into(), + hex!("91212d83807ca11b030e5ab72ec85a8d13d468c714fc97c20b5d0569c382ee270d2ca486c88354994ee54f02dacbea50").into(), + hex!("b022bf24d2ea893125f3a89186f444aa5f301333f0667eb5862586a5eea8233c90202064eaf9363f7b367a6dda15d45b").into(), + hex!("b1f45fa596a32d3fe267c830eb042432eec6b79efcd5c84ea835d7108bd4290fb64749ebdf7e7e51e6138fedb3cd2eb8").into(), + hex!("8567d2366c584d975bea34d4f9e2ce62a62765125326c5e56b5cda08fd2f2e7f769db47eb514d2d9324b645879c82a66").into(), + hex!("b4816fa77c76e1f75e6fa903a4c0c031ac7a5f5ccb5f553b4eed83bb34067480804c0f6f308f8d0fd723dbe2198b0608").into(), + hex!("a9ceeea8878d799fbb6d52dc4112ae712429b9213f72b284698d68ea6827432f34e51daae1727b5402916a836567f611").into(), + hex!("984c8ca226df18ec574e0fabd6dc9ab3a2e1b319c4b6ab5b19872239833dcf447ee5d720305b2385d65facd297704809").into(), + hex!("b02fc16b3530532a2373776e2512421d6d2e42f3fd3c3e71393706d74ef9324571c8c1ba7b9caf65dedfff2bad946d71").into(), + hex!("849faa2060a75d08850b54e06376f252d9cb4add3e740225ee37d23e29f80cb9f98188a7eaf6a381af4b4bcc9874b792").into(), + hex!("b5331ab6646a8be3bc37c5fec56a597b914683402828dd4098b189f245c638e063c933a542c0122f063f98a9468687b7").into(), + hex!("868d8214f26e14e71ae0bd514275ae9442760af72269790228e733005293019984d3d463c2bb82936193dfae8c267fa3").into(), + hex!("a88c6c4af4b9d285997c9b5cca7e22ab9b8e5873ff36455bab6a6dd4518e966079524f8c35391811f16eca421f588694").into(), + hex!("a6f508524a938fb49a7da70251fc8300192882a3fe784f0bc51027ef2193d90c75d0d0720f2f5be634d81f38f3d85b8b").into(), + 
hex!("80a923acf2aa0349b4852f47edec37cd47bd74447f2f91c110ed092d015887a6625d5f1fd1f5d00c994edbff1435956c").into(), + hex!("84ed9f1ee0db4c9ae55492fd07fdd44851a6d0209da5b521435229b1b45d57c3b835af627122756ad0e63e915c15909c").into(), + hex!("b4c4c1e6fdc418bd29cbe8082fd774f678dc28a51896a51349c888ec98124b7a522baf70b820ae0729696624f3695af0").into(), + hex!("a8cdde029503aa3e23776a67952b2bc954fc0dc06f07a78bc94e6408edf381bdadc29efe4e5340a9b7efe99a3f3b3687").into(), + hex!("ab02a92f5ee21035e6e3e40a026d8d5680f98afdbf82dc037dfa30a87a1c101387a0085da8474989b24196ff494aa618").into(), + hex!("986c88a5d0bc6ff4127e34e0a5fefc1290936ec88d1765e776e199601f9660a8425c1fc6defff07fb81576461d0ed7bc").into(), + hex!("abe2259e880aa8b587ac9a31f794895bb18ff1bbed378a70fa09388e5a6a7343c072401c856bdd92fc2f60ca11056aea").into(), + hex!("8c04c98815c0c1f281c8783ff8098b0d806039a39fb2f4642a08f821c02506bc7c80ba7a1f4b225ecda9971ee3a22121").into(), + hex!("87b7bb0cce6244aa1a540941f0ff5ae4126fd4c62bd98d34993380a35e1c9a9f43b561506e5eafc1ddb8aef131403590").into(), + hex!("93cdf5f956e8b40ca0e31cf559937b997897c137a411930ca28075899abb6c08dd6aad5b2bfd5c09f07613a6854b3be5").into(), + hex!("83ebb284e03b4414694522caedabb391062ba9ec373e94427feff071644df91635ef498dcd9351ae259c5b15d6edbb38").into(), + hex!("80747223ba06d6465e26a354b79c42ec0624d33cdf015da7bcfb31f7009d92aaf6321f4a921f6b38e0a394a412edcefa").into(), + hex!("967b4b10a341abc5c01ffd413103c6468e3efa32c8ddc7e8622fd3ab2a765a420e0be3c81e1da05679becc5bd03e59f6").into(), + hex!("8a01b90d551f26c265b9987c67b641d15bd3b8e5af5c25472037645da05d9d54c0e59bab32578eeb3c1c5889f4fd9aa1").into(), + hex!("b6b38e40e3fbb31b257ecc18dcfff2fd850a41c2cfa5a642b0c383cc1a86b2b9adbcb22130665f544f1d9fdf87e92dd2").into(), + hex!("b490529e0da56e7e4d4cb2f79b704576c8b6569e9960dafef059dac0144b29ec337f4beb515465a57414d8965268a3dd").into(), + hex!("8a3b14616bb721543bfe007f1a042e76ef068a6e4f8964d68dbb7a733ea92dfe4e51f4008aabaa01a8a3566b00d083ae").into(), + hex!("8b82dbd0d0592d45d6309c795beb42274b74d844cfc393b34cb6992e3b25ea8f62f777124584eedf482949bb999ca5c3").into(), + hex!("918226266d7f02b2081edba64ca4b70339b7a63ef9194cd77a214620bc25618495cb4335c0c3621c75f821e685af3f1e").into(), + hex!("a61f56310689b9f383b45e8c8c647bd7150fc6dc3be96afb464b0c67c6f8c73ac9941bc8a5b0e2093255c204646c94af").into(), + hex!("abe204f55b8dd101bbd554561c1a7b50c01b31c967f6cea18dc898a10021eadca3c314f6b7afcc2251682717540d2100").into(), + hex!("a0cc3fbc4a05e3b5f93f4df85b92c1bed221f21700d8faaf84e99954cf6994d0052c8ba8ab894503b5515bdd1460ac5b").into(), + hex!("a5e5e175726b31163e13447e360f835ca64c3883901fb1fdc275b487106b39fdf43b83ee8f3985dc85157b94aaf8c389").into(), + hex!("b34106e71862b290f7bb47e5492417b07b541fdba23ed474f29d666cfb50bb5a3ea137ab717a41ff769af53ab385a3d6").into(), + hex!("aab726a0317c365aa15ef9527e5101b1a90cdd60888b733cdbd61dffac3374f995206abd154d099b2dfa03dbe666d503").into(), + hex!("b3fe9956454604b2aa1d51480ae96182ad1a8af64e80adbba1034619090c23d0d7ddb4163f400399d5946babada2f5a1").into(), + hex!("831a0d4008865576d9d0200dbe80eeafa7e6e6d442a46ebf949f39e32ad311995535a261795a7e27f2b23a6d506b7a33").into(), + hex!("8d7fe284c9ea1a2dce6ac70e3f225994f65ba9791520fecf5359b80f7c32c1e45e75b8b787ccf24b83c79301e046bb4d").into(), + hex!("b00df7ea640dcdaec66317924090f49380edc5c669ef1249ec8a24a3436b4bb41be0edd4cb0d04bc6ffd540ac8efad18").into(), + hex!("aa55d72470c024627edff24f9a19ae958d6b382bab6a24581183f762d736ac10f189ec3f34a7a41516f81696352f16c8").into(), + 
hex!("99e172cdb14a23161b5e8aa80121d98e69506ae0ab956912eb2ed959b73ee901852f263cb65798554ec0ee35089b4c03").into(), + hex!("84ce8ec6a7debf3cb2e53afff7d1f68bf75b7b209938192c7675286b17489d7996ecd9514c5233af0a17390b9982d805").into(), + hex!("943cddf3f5a6dc04f425aaca25be44438aabc7a661476761de596fcef5746f9d83c361da3fe1f21227ee5b9bf9a3ac76").into(), + hex!("aa81e66cb01e77c5db882792e9d896c83aa418b12c3b5201e1937ba5b74bf5fda974c82f6f40e3ca48bae72ed93437db").into(), + hex!("83bee12657fb462a5988ee26e2d0ef8b11e5fcec108724f6135e95913d7f4ec338031b697b24fd7a650cbeb088b26733").into(), + hex!("ac0070705c447e635b8df509de9ae03ea9b0314fc58b3befe14c316cb7b70ceaed081aad115a2126ecaef630c6bbab0f").into(), + hex!("a1c9c3b6f28c14ba91cf063153c50253d82440b81dd8ef938e181ef4116ede0b0ec62844e1d7e8b387668ddb8644852c").into(), + hex!("b41811ee8e385836fd709081a9a65a33ae1571bf943937615e97d49aeec3289624882915a5f5b6ee98601e752bac1212").into(), + hex!("a59cfa6d60f5c3b62197d3058a9b42c66bb841ee5d67ae34ad452dc70974de0a56a770c4ee5905c0d214c3d81a5269fd").into(), + hex!("ac47ff714d42056df3962cb4494019c977fb6200cdeabfa3ba85ec7d7d70c7d3ff4aa05e26aeae6ec6a3afa460244ea7").into(), + hex!("83c0ff348f1d018485c18417037016ec592c249830fa649b27754dfa70b94a549a42eada20ab1c4de2a5a513d742186c").into(), + hex!("ac5ddeddc94d18cbbad0e1889a2b64e91b3f927d3eb666ba018807f1e4e1451a43498ebacf12fd370b62c5386e36fedc").into(), + hex!("91606f0315fadbc42b1b27ff35b5601196a7a8beec3d5c76643e38ef28f0aef0aba9123bac7ceff0b297ca53727edbd5").into(), + hex!("af9cd077736f17c89ab4fd21fa2cee63b16f67277e9c5d54663f6d5a7abc3141f3045558899da70419b1e92ce88eba86").into(), + hex!("ab0757213aa3fcdc326925e0dadc2206f43c53f7abaf34a077f1cb29427261b4bd9981dbd1e33fedbd77fe00bbdaf8bd").into(), + hex!("8c97d256f9d4e0f309522f3899c5f74fd7e8c4dab6adf4886e7b058b323e294229fafce28871fd39e5c43f28c670b8a0").into(), + hex!("86eb85ad6fb7a3d5cd9aa5b22fd648fe9db688fe663c835abec75a6bfb67af0df0421d24203083aeb8ccda06dfb230c9").into(), + hex!("afa2dd3712eb94c9097135a69573c1f373ee0d7916f4ccec5a62445726aa4c1548bca45038e1a44ab7c8b7e3ea22dd6a").into(), + hex!("91e82407c442937af665ff8952d8b7ce3d68ebf807166aaf0fd710b76c65b39283e511b3314297ab0f2a9c8a2d76ffbe").into(), + hex!("a52cdd05f6e254c6da7a00c9210e33a49658f035b78bc7f15b527fce20c3893d3f7dc27a616eb3e107da060df251b082").into(), + hex!("81e6ddbaad6a18404832d2923697ea8df8dc3b39e53390269f197b976b5edbad074639e1e7bb25ae87b00681973fa021").into(), + hex!("a776979b38184661cc36ae9bf99b98cfb64babe37b16ed7e16e33e2187af71d9f62af4fab2bf0671baf3172727741d79").into(), + hex!("9395a004323f4ae604518224292a1fdb359fe9d4ec2a2262f13fb33d90a9dc50040d03fa6315a5ab2db043e7e16fb971").into(), + hex!("81bfad9e94c00fc810c4e63042fed6dc54c7c48637064376d5a4df8c8d6be3c2eed335640bf45bb8df99327a7e070d06").into(), + hex!("b6b38236ea973f91eff175206c4328cb97335bc8e498d9c9a2040468885f7d8464a8a1168929cdcc4c59513885e1589c").into(), + hex!("aa14a7baeb7b6d0048bdb8c772de512001174f764b37396c6481bff5aad30abd6143e4654a3d80406f8d08948ff8145f").into(), + hex!("a7297d6c09873e481c04f2e9e9a07567d78da504d2929c8b9d8ecae1c4d919611e061caa632776a8716b20e031cfd203").into(), + hex!("a0942935f58ef26a111d77b1c4598207eb6e3414c106b286f1c4dd344b3e70d3a46595ebb657e43f0e71dfff3b532382").into(), + hex!("95dca5de041e96f8c64f945817ab1ad62414b3002073b18331e288878c7a889774468d3c24f04e0714958ebd79ebe71c").into(), + hex!("b64f58f4eea309ea03c60f6ee66107fbe45c5ba81b8ea397a515435a179ee86bd098cb9acab4f374d29c8a388152fc6b").into(), + 
hex!("a70fe2ce7cfbdc22183a1a81c779c6071199768ad9b39ad0727ced4fcea5fc79e9833279ce93e1ef16cfc6dc0ef4f15a").into(), + hex!("8169d05ef0406b661022af53dde8ccd7315b3e35065c568673bfe5e59828480312a8ad418ad431beee12e7882d11142e").into(), + hex!("8148070a20eba3b61bd168e00ae8262d698263a8f22ee01ed6d46d154a08708a85533f54935bf92ee6ab0c04569eb3ac").into(), + hex!("b5e4f8b8a011c9cde6a9338d7c751ce2828fcf41b40e140ecf543150a5b4859f87836461d0ea2ed7cd0e6bfb8febdfc7").into(), + hex!("aab7b0022a1791339fbf567e771c43e9a2a46fcfed394b7216b556aacdebc259e5fc599eca66b12c23467b2443fa9c76").into(), + hex!("a843e5929fa14bbdb5f370d28547a7b585443f4d2fdf8e7237fcbb93a5220d62c8033665996f36288127a2bb4822f357").into(), + hex!("92a5abe1a8d508193c88827f93156e84199b14731a68b0b434663e5b9ce8e6e3005ccefb3ad8330c56fc0898eb9334d2").into(), + hex!("948c5a4bce25157f5f779fcdd89bfb4747a6178d464d15148742920aa2ab7fc63d6989b586152e1e79eced93f8686206").into(), + hex!("8702f3fccf470a294946970f8ecfed499f5ab3df799601f872d7be3d9227ff78a764550fd1a97ab25b7be96d366c82e5").into(), + hex!("85c5f99e913f1cb67a30807386b5292c841b51e959a13912fef2e0f4ba84ab3b1c483dd5fd33e80774de19695b622888").into(), + hex!("8a903e39b9b46dcaaca4fc968b298430b982ed3916f8ad533ceef5131dd507f1188fbe856c80bedc7bf34799743fa86c").into(), + hex!("a91ecd938c2a399b97576c43c5d1621fe748732090e360fa1e3ddd145438f9569d39a7be9d032b435a5d14ca4c905d15").into(), + hex!("aba9def4db5bbf2ba185c134f7734feeef976573e20d76aee476bfaa2af389ba5576a1476aae2d42d5470a46ca3f58ba").into(), + hex!("91d3529480d066817c0111bbd92714a40472ed6c877df358de98f0258f79fb8ccd54a4fa8bab3b9cc15bfabeb620c196").into(), + hex!("8cbce4e674d90185c47225c587dac654428427cef8a563cb89aee6fdc2ae6f12a8b11be46c779ed9afe38ea97d7d71ec").into(), + hex!("ac684cedfa58b2adbb6a13a94aa8398ef4a14970f5a43a344986cab68fff7fd48f7bcdc0506026eb0a2867efe86f283b").into(), + hex!("a21a2f8df2b811550d6e115c095d4d6781a84ba25b7f4017adb318776ba7452f48ed8b83a6a94aa68d83f2226a4c0549").into(), + hex!("a0d96e01d937144627c695aa4256f1d1a16c708894ce854f5ac656585e6852a43c39080909e7029b6611ef519d9983b2").into(), + hex!("a2f32ffa61e370d087058cd3ffc534da6a917f75ed5de568938885cf5220d474c930ba9bfdce91e031aab3b3167ad362").into(), + hex!("aa6a7c0162520c6706ab0f6188b718c1909a4aa12e71afc1c2d40e51fe44f667db0e7f1f0cdd81594447e267720f2dae").into(), + hex!("b4f16474ec3f37765e8750729f3245167b82472ec454329c9183a5d5ec939041d85b83523d11f2b895e2d15586f81422").into(), + hex!("917af7d2995b6466baaea2b3eaee5f76508d0c117d0452bca6a07ecb87c0cf595161abd5bd31a904e05684e55475a4ce").into(), + hex!("93dc25ff6a8ed93fa40d198a97955d40a5f29e50fc6fa6dbad34582478d3d1bbac0ec5789f11a92a738e533939c281ba").into(), + hex!("a41c824ff14ff5ee486a6130dc6cb01043e59f71e234390d464c95ac49ceda8b5400079ed4edecbb59be2083d8f06da7").into(), + hex!("850da042d678ac0aa31dfe0eca861ce17cc306188f260195fd10f940c67d42c9431cb68a64d27232e989c9c23a6e3d1a").into(), + hex!("a3e223f30d9782fa1fae634497c64fc58bc8289e48a67c8517621918e2b921cba1e90b2b01f838ea36071ed89bf64ed3").into(), + hex!("809001b01c33bf49a97ab6fbdf708fb224879c71679a2b335cfbb3cb4aba3201a32113de289878606c9feca057d9faea").into(), + hex!("a10ee1706f4c49a9cb2fee4ec6a0dbdc883fe40d4e1cd7a0388f49edf1f5f23a38e6075fa5fb54fd8e77ac3742266a6c").into(), + hex!("8c96e17529d7051f09f93faca150f5313e4d7f32235a4af6d12270780d6c14418749489a2674c728095a56585e0ed924").into(), + hex!("a9a7bf56eb25c9bda003a70128117adca9c33c6bb24bb4c381ac405e014d9c75aed0d704d801b9feacdf81c3b7f0040f").into(), + 
hex!("ad53d11d31bb9ea53bd23d673fb26211ee39ff4442e9efa1259bddae866e97bf07b0f9ca44e166b3b85d19b5865b1612").into(), + hex!("95e86d1427c8abd87e7f966c2ff9468d0bc3f76175bda677acea5113b5bd0d7631972c4172220e3a72e0dad1496bc14a").into(), + hex!("b8894228542dfbbc6eb65c9adf6549eb4ade838701356e7d672c095b1f997be5bbf3ff19474ee99e81320efeb04cd529").into(), + hex!("88bc36d6d90424e86374499e330ce5afeb63164fe81fcb4d56c5c997e07093a37df33437389879895c8b2cccac28ed0e").into(), + hex!("8ec1d43488aebb9544ae0a12ac7311bf873bee05caafca5176e26d681b881ef6b5e3ae5d9853b33577cc21d3acfa1e82").into(), + hex!("aec567db9a542eacf68cf4b7b9682ffe0b385dfd192296f8d8cd9e1db9d7da0b4a4a0d0ec2419825177413faad458cfa").into(), + hex!("ae242e9e2c7ff2c0898f92e7e9742ee5e19376ab97195c4eea0487490068199d0fd7ea08b832c43e208336b5c77d1947").into(), + hex!("93447c215479b68442636384d29fd5b4815cff904668e909c67fdcef1cf5d594219371f62edf189d6e54f04872705947").into(), + hex!("87f993a564e69e132c6cc4874fefd83bb5032b98bda5eaa8fe9e1713baaf08486aea21eac3231028715e846e33a3fa23").into(), + hex!("a9c4eade07d3d51bf733d8357005e08a5f86cb44c3dc6b66dccbdbb67cb5727bb456d6092418275f34b59063b3fd64eb").into(), + hex!("8939e3cc9c1dd203d8079aa4ac0d40d2e1b85bd876616bc6b589b0bd187701fcc36c52d79ec7f14b5e54fff459c99028").into(), + hex!("978122dfec6fabe4f737a3c9326f2f721cc212455001ca7e09b65b70ec1ada1ae26d451632f31f648cbc65a3337250c2").into(), + hex!("8d1eeed7fb1902deaf7d6dec7c86807c4aa8ea1d7130d6caf01d65e36f6a30e3442a97ad6918e67d2e17fff4ebe7a97c").into(), + hex!("a16203ba484b5b02a1b210d487a54c3da41b3815c307a30fdbcda0c3f5f2205e16bc7232e1b8d57d5f58718ed4941ec1").into(), + hex!("89b4e7bdd90323c53aa502a9839f57133ab0cbae1cb133fd0beb54f4d7785988eab89eb0bcdf61bf62a29b341befb883").into(), + hex!("b75550a71a4144a4f23ee27d86c10c44e8e57c118ff4f9a2685762a98a3770a6c2d1fd9229f9792dd4e784e8b2eb675c").into(), + hex!("9171ea1599ae47b04ffc307dfe5e49da0f48835cda926355606ddff47b18ce3c224828ddb942e63dd8153c273105125a").into(), + hex!("8e0e78d069f4d51b9b0c370100a9d10e395b8f88d009e33ed7fc4959bf140176cc316843c76d2a741a3471d56ced5db4").into(), + hex!("aad6b97330e76b22781e78ebb2fb2e92148d74546cddc7348e7a7f0563b986a7553907c8946258cc343a15a8918f7491").into(), + hex!("a9e8e436356d44c945d8248d249e20f5c50bd147de94418d4f04e1f67be2319e4d2a7291981378a1e457874dc91a9948").into(), + hex!("800b092bbe1f56e78d766c510dfe42f0d6670335f5931b3f821c77689fc11a502d7c82d2d887ab21caf312f8e5a037f7").into(), + hex!("aa8b05f90da0056b7659c26171df70c748d7a8fb52bfda42b7d129df386de331c1fef9d5ad1b19f0452cafbb813c3ec6").into(), + hex!("a6bb8153637b097a905342895ec1c927faa92ef8d59af86e43864ffeb6b8caa3f5b025079ccfa83214332aa4f6b71a9d").into(), + hex!("8244cab3e6f39492c8fda490a363dcbd8af265dd3a158c2af0e66182b48fc2b49f473b402bbf0951b42da5bb669504e8").into(), + hex!("8696508e20c144ef2cd954fe420c60f8432529b97a865a52de5292215c448984dd591170d88c286d7bf5a1cf8b94ae53").into(), + hex!("abb5b057c6b91d51df313b4690b15a218dfac6a30a05041c5cf451f515062eb02c54ee6cf6ff2df7640d15ddc7a95dc9").into(), + hex!("ada583911773366a4ca0b5d407520a590e4f3c6628c6d050f2641655d1654809b886807c1efeee9e9ca187b79b7676c8").into(), + hex!("9730edb86b7161715296bd5267bba55d3bd956dc9f4c640df92cc8aaeed8ab2dc1ff74988ec2122f6c3ea57b6e30dd91").into(), + hex!("8da42bb48c6f17c0e96b5edb71ed5e937c9aa65af142234e3ce61403df7f6ef05a4309e92469eaf68d83afc5bd800373").into(), + hex!("b3cb17866a99dfd048c4ad6024823841eb6602c7e4728340f1167b8af3c810926f0eb3e1a0f51ed6fba4a80743660db0").into(), + 
hex!("af121178dfa05ac08a2acd56f895f444e56968b703ec6b6cfae1e836d78afe6f51021d4a415aed89913df49bffe27ec6").into(), + hex!("958d5dad1aabe840881f29617dcd2f759f220974515507b0a63b3487b4cbfec69be7d22f4f7f45d693101177ab205303").into(), + hex!("ae6160e53c2c9ce5495bd0f0476703684d854048f2a8bdfeb6cd1e93fda36e44d879531f213feb1dce706d35f9fbb04d").into(), + hex!("8dca41a5808d3c75f41919cbe65a226355df9ebc7c1a2d2263d654bc66d1f5786ffba84a1670a7369258bc92d6bd68e6").into(), + hex!("81585a607df11d0a5dd778adfa1eca440a49e37b21677fe88709142243f5ffb2205e703366de53fbdbe3d7ded093e834").into(), + hex!("a80b1f358d284d3d8b18ef9f101d4f0d84c2ff99342e7150a55bb2f54ee231e333ffa930487a86e97f460696348e897b").into(), + hex!("a4a1f79af8ca4a5c5b44b05828449002c92c313c8bdc33465c099ce8f74c3d575ffcf0ac1ec5d29e80ff96b07f08636d").into(), + hex!("8797e455d44ad2721ce7de2fb8125af1bc4c0757d9c2fae25394e44b8952dc5fa597e0cf5d2b1c2ce996e380597a1db6").into(), + hex!("acd840fe9ed7f38ceb48d65c9f9f02fba4df0fd871efb58b35547c9b526e6e2416195d2c131a04408df7298db50a76aa").into(), + hex!("a9ae67c2d2bfe04d64bd4def66509c108f9ce85394da48d97407535c1aec05df39e3f7c66203f72bc65fa72c18bfa77d").into(), + hex!("abb785c66a7aa06200bec1960c572d61a9cf2c283404601259ba720a506b3831391e998346ee73392a3f7f12915b6f6b").into(), + hex!("8a453eb657c2a85bf93193d47ec26102c4d3adb666a7e1f05f1991782319bbfe104cea57ae1f4379cd115ff711be67fc").into(), + hex!("96c7151e34ed488a06946059722dd9d1b5a2ad2fec96b545ed662a3d0fc23bce6973d93dd2932128d95b0686f9208fc5").into(), + hex!("b5f39ed29d3f85e56e21ec2bce47b04ba16d72a9fad492815b485a93065f2a83dd46f92d74274f815c84792278a67cf0").into(), + hex!("ac9449a216a875c2f288e34443a94a521f8e9de28f70b729f393a483359ffd3ef8537b8a798b8c9a259ca390f9fa9751").into(), + hex!("b15584e841de0e25a301bc3378e89baee55989d9610c513b79748e7c51c484f7bb1d9adb33f3b63d52d36918514aed2a").into(), + hex!("b4dcbabb43cf694b024d36734baf824830304257d959f6300ce17f892a23000e036c4d3d59d7d1198bf4f6ad5ff07e57").into(), + hex!("b8a1f0a8ae246442517606f34ca4029deb727cab005c9952ee9858dd99497ba8a0e3311bd43aeee35275db74c7bbc52d").into(), + hex!("b1c443db1b5a00a87a399880ccbff4481f5742423c47d38b175527e84b32fd66110791c117fdb70782d75c476683f9fd").into(), + hex!("8e018c4b2b4cf1f1a417d00b13fc51ccebcd09a502bb14795b8274585d2e30d71c2c7a9b9f56a717f0676e685e65e907").into(), + hex!("871ea4444c7080995472fc8bc08f9091f9f706e9cfc49eeea5357867badd837649f059163835a7ad7263cf03fd13b198").into(), + hex!("88b67b5819119372e0fd7f97ba1eef877cc32d4be465001c35096adfa18e1811bec1620849a608de8420126fee9c37e5").into(), + hex!("9060dc7f55fdfc237799a2814a6bfe2d2f539ab76c38a9b1206890323bb4eb7b1ce011ea4fa552b412bbf6c67a95f025").into(), + hex!("8bd156a3a54bffe373fea65ecb2ffb12c96f04e07eee582200a0ade24d543bd6523ae5eb8a710c1de1912b2b4712fa0e").into(), + hex!("a8c3fb552f1a8c6cc2714b97d0cb8b2b6028bc3aa4571a7e3e33f46eb4c150771556c7884d575ce8fb7b62a5770ed2aa").into(), + hex!("a808f5a34beb7d62d23405a64d27ee5d7bf83cd880caf7bd4a615b84f22e1dbf11eab129d9cc9ad90d4e1dcd68613f0a").into(), + hex!("ae56febedf59fe99e79e87d7fe7aea5989493833a52f2e6012fd3400c69a6dde951fba50e0c280779d530d74452d63f3").into(), + hex!("8ed5f6de4a3ba85c6c857068bad6432e96c6054ea38ef07391b914c052c2262856d19403a590e8df63c6dec99da35b68").into(), + hex!("89c01fd1f37d826b9ef3b73e2b1aa5f4b4f86a263b2822cff0153fd2b945bbcf16eb3868ce66910073bf86b222becfc1").into(), + hex!("97b3ef6e0bfd3c399ec959d22d29fa9a79fe8746eec49e1675afbf7a955d02db2e89190ebf43118b65a7dc2db0c4d72d").into(), + 
hex!("a95700745f0ecbb1e794f4db9788af60df4772b5ffc8f5f693f213ce6230810df31716382dccd5a832ced7f34945d144").into(), + hex!("87d0a6a8cdc36fbd788bf744a443b632369fa0cd983d2b60e20856533ed6451d8476b9b3cff39ed0f75de94ad5c7aa48").into(), + hex!("a14c6f6463aadc8d1d2985b601bae8e74de54954ee7e3aa918837064a98efa5ab736628446582ccd13bd458ec2d50b1b").into(), + hex!("8b68ec274484c910f1a73a8cf8a8a149ff2942ac9de6e73641619fdd9e778e9c0fe6198745f049fa9fed9e56287da0b6").into(), + hex!("b77e981a03493852e7ebd6efecaa647c69ce6d46b2190bb2d08f0eede4addda776fda92e9d943ba57331bc985cc8e112").into(), + hex!("9870ea49ed03991dc1a4f47fc978618d549b4f0ddea01d91e7c409db775c91cb2a58c0c0c57eb73e7b6d3418f850b0e6").into(), + hex!("a518fea50400ae263ab9cea0180079d0d353bdb7cd440cb4d2156b9628e487b704630d931bcab742e0f3d7230821ae91").into(), + hex!("861ba1b761d0ce92972f28b7a65cbf6026bdf7427774fe78ff1f45c67f9083fe94fd2c42f47082b6fba722abb648c61c").into(), + hex!("83d257a40e8418407c80851e2f14d0bc47c3b9ce9e2de53b5c6cd99f31dd25dc200fa90c822060d47c4225d61560706e").into(), + hex!("881f7f674959e4176731ecaf6e2c9b490e70c07abceef15707dba8c9aa3cfa2293a96bc9d5455f769642c9717a4fe949").into(), + hex!("835116735f8e21064c497fb0dcfc929004ae5eea1f3e6863ad0b227c820d36255230090812da0800e03af9fde4354a13").into(), + hex!("98d9c12ede55af8067af5b62b89002c66e3b6556ee201ecbaf585fe5026f997fda75105068d62fe5d2403c6c64c314d8").into(), + hex!("ab359e8e0ef4cbaa9830e2aab892db7cd7ddaaea54cf455c2ca24f10ca337f989641ea33fdb1772ed90a988083405cb6").into(), + hex!("ae9ded9c9fc4e812dcab3d8a1c74ea264eab2df0715c7107ec1ec336c0bb5f3761ac9580ca18109278be5cff837f754e").into(), + hex!("b373013674404122f39dd6fc29abef1b2634e2bf650b42c15d5a2f7d762eed98166be26372e8dc6bddeaff84cc2aaf4b").into(), + hex!("b34adf4c3acadd7e11a9d61f7df20cb2520cdbf2d16c217f39e3afbdf2180abe59d37115910b77a504b81a6000b982c4").into(), + hex!("8161d446296d39d0c27a3db1bcbf0619e0c49739c655af49f49ea8403374afa4f98aaf530413848b7d4b53eabc16864a").into(), + hex!("8256cd8e3c9354ffbb59818f0b24db969a7765d64c2fcedf591ce65f619237d6eabd110293bff42b388c9965ff6d51a5").into(), + hex!("a1c562787d2ba1cb64dba278080fccb1c6538ccb00b94db34b62ed1cd863792f8acc4df78d181badc38dd9bda544e395").into(), + hex!("94ad66b4066f53ff299dc4bde2bdc23a891959903174e8ec08dd79f163c6f4661b3eb3458a786bd8f3fa153c806e793a").into(), + hex!("ad1a50bcfaa5641422c6f10d31316035eaf061ad1fc0a36c8835e078d3fa6efbe6dde4bdb28158d9b7aa74fd9241523d").into(), + hex!("a5952780f78fd6afd9c31226a23d307be72aefe0bd99c32a139c3909b1ff1769e2441dd2c03f33cf98df25c76178e492").into(), + hex!("954100f83b800dfb89721ac06728c3d5e8a8edb7e1b56513a63c2b49dd44b9930edd897fecd262984301cb6df23a338d").into(), + hex!("85de42b8de3cc88181a50e9aae696d92e66cabcc7b86425f846dfac138d26eda7cbc420cfd10f5d2681b63bcf411afcc").into(), + hex!("abeda3b142d621f94f829ed4174d042461e95d978be206fd31f8661263bb7a87c648aeff8bf640ec173a77ab0970a93c").into(), + hex!("af99434f06a13d9c5ee7195ce58beea07940949b686b4ce06727bbbdfa1621c608c891227e2f026bcb58c60a6e925533").into(), + hex!("96e97ce1ed97b8afbbf282bfbdbdb4f863a6931cc781e9d7938617310ded35dcf043c2320507e91e94f470f0cbb98621").into(), + hex!("8693fe1860981505e4540b79ee7a7ef33b26535cde6e9aa019bd1e0d26f359a2d26f0341b7c1634eff1f5859ed3a8625").into(), + hex!("a94e940bc2d8f826c23bcce8fc4e49e29c5f918180f566a67395d33cd573e6dfb149490de1ae75068feaedfe6cce0e40").into(), + hex!("9697d6be370d808a49563e062c2b3a0b347281e00839dc3dc0ed888c623f346a42094fbe2489d0487049f2fe47887cb5").into(), + 
hex!("b06d4cc8e83acc4121bd278784061f6fd391b3ac378fc6ef46ca2158207f5c0d33a51d3dcc4a499aa48e5b27539c4a16").into(), + hex!("972aa57628ce57381aef9710ebb7cb7a8db28b1b64d7db8be38936479f39772c60d766b0c5dcb79676e9330d0406761e").into(), + hex!("ad2164467404544aabb70605a56e9b0f7887491a1691302b2bedf271b50cb6f1bea1b9637214aa3624f5d8f854359607").into(), + hex!("a9da2595107e9db07c56a59d2af529b036c50033ff43c282e2da551bed8faa96eca744881e600b6406569675643046cf").into(), + hex!("8680660eb867978df2474b25e225fd7536b88e9e73f0188c0dbe835677de701fd402916d8e3d17fe652c7ac6d2fa0330").into(), + hex!("b721e239f50bbc7af5578b75c8befa439474bc4e6ea8d35d1006ed54c6d81c718fa675901df591a69b4cc30899974362").into(), + hex!("a8af30765f1b00ad51a32d856a2b2f97831843878a1668a43e66b65b8d0bd4a2e2826fef5ca5bf140050dd81eaa6174d").into(), + hex!("82baad156e89d7b3da9ded4516603ec9aec36e3a0a9bdd0ded604e4fcc0ba10179f9517fc8372d3743cce5e676c8cd17").into(), + hex!("b3392745016dfafca36a7af4be273c5fb4170b71938d6b93691d7a3bc8791bda537ef001d19ffcf7b393a89c898b8b14").into(), + hex!("92f83c901eccb742618313ee2f5ca571406bbfb1d077ebffb92d52ca962403d34a24f9d333c3a155bb9a72a0fc2eda34").into(), + hex!("9792598e2f303896010e35bf670dc2f3799cbe6f0c66379030b0ea01b44ebf24b9257841ab80d2d6a401fc56bb722e68").into(), + hex!("ad1a3c9c5d699ebe4f1b2727dc94b290c84f44c9ffb38d5498b14fcfc5914f4ef4d1d57853e036ca11e42396808556cb").into(), + hex!("90729c7ec4250613062a4ffbcba5829743ba7fd03a4e3407c2ae00b4513c21f3ebd68d10759ac4dfda5544e77b2ac306").into(), + hex!("840add692580be7aea866045826baa4a07804f8e3f56593a2af6fe317046d7d0fae181632f4009ae0d64dfacd4600c4a").into(), + hex!("b1be58941fe077aa8721b020f34d3ad94d1d5083244c276b7f3e6f4c918517f8c5c8d5c1376178bf27cb35ed76699e6f").into(), + hex!("97ba3e3be55d17113fb63abdc808d89fe205d75fc1ac808ebb78ea1b7570f7a014fae099cbc4b2a4b2ec884977405f80").into(), + hex!("a2c57bf9373db5382465ce924bf7dc4e62f406c187a39ab456a7387ed9231ce059d197f8701af9ce2d6cc772367ecfde").into(), + hex!("a62ba8b81b9f8d40fad7fa1d7e8e49ee547f170889b5d6a2be9e2ad2ab0b265b4197fbbfd3b17803a6e727d41cba83a1").into(), + hex!("a94559a51d438b194fea96975a4571a118105479fbb7a37abc7d676fc8b8d2ca30c66b25b7727dcd297384773ddca074").into(), + hex!("957f9be0f15d8eecf621eb0978267c3fc85607f31c501179d9c83864ed9a9e5d526aae278af3767632bf56a20eca62d1").into(), + hex!("b89928c19d1101486d4299c493db5bb72f56f8bc24b71bb54eedf84284452250427b179dddd7fcfc0f521fbba09d0c5e").into(), + hex!("97dda9cbf61015b296307e510295be258045a1de9de52117f0aa28de48e27ffd24ff711f9187936293babe89da226fa1").into(), + hex!("a66b4064c5b1ee95a35a93209c89206b352e0666abd1b5c95eed3c382210334fedad7d531b9939cdb2fc649c4369236c").into(), + hex!("89d85e413030dc45194b2676a1f2a76801920535ccd909277af1ac87fd9b0d16d94d8c62a421c9d95e7a053cc4c3e0bb").into(), + hex!("b8f0350e1ff988bf23846f2d64e40b35a370d1c5d1f9dca4021508205611f788fe5485e966ff2bb6fe8acc1540a2e751").into(), + hex!("aef9ed7229aaadb1ac4344e5b4d0eaf9b89b20d50d8ae8dae24294ec3c0f2f68dee3186dce35d46020d8d1b2626a29e3").into(), + hex!("b9362f34ead1b0cef1fa9b35b76b644a323ff71c48f375c27e22c6878cdf778b1a3125445cc8cffb6c8b9a3ed046c3d5").into(), + hex!("b69c578e2223f3727b7b5e99f3926eb7424869b356541feecda54a0882e4a009b182c358052d788c2a7e776768ce2b7f").into(), + hex!("a9ab536cca003598d88f76cd0d666b1738802c7452201f5d99ba4fd82b6d7da3c9ad8c4707445d6cb4b2c43de7ba06b8").into(), + hex!("a02648e465634db73fd49bbcdb23cb6feed9688eaf4c5678799734d49f2d4cec9cdddf598114896de961ebdc07436884").into(), + 
hex!("8253283df9690e171af958f2a3cc37e0b2cd67768f2362bd604ba5c5db8d3500c0d7d6cdee982165eaece63bd016f2c5").into(), + hex!("a06d8256d22ef3891a751716449f97d374e27bedbb9dbd0f1c9528307787e4d9ab9450002bead2922c31fd4ccab9abc9").into(), + hex!("a3bcf7d7c1f1c5a2bdde1e0f4cd3cc6dfa061156534aacd6318d8192c27218b6e34e734a118a282b0d5fdf639e704c21").into(), + hex!("b5577e876cb3de8196b485ae828362419c2f7f8ca9c8f38b1254059873fbe53b57110be543c864bbcf8b485f63925169").into(), + hex!("8410e1b1ab99cf1868fa4494dc75129f42a5e633448e64321cb379175cf6eb704ad6863e3a6475f9cd3cd3f1fcd4b49e").into(), + hex!("b6b21b346d709d4897da4c535556d599486878f5c574bca2823ff9d382fea2f45e8d03aa0fc5a5d623098f1b67c77a60").into(), + hex!("92951386b734171accd57b66afad7ddd0ffdebbd9da835e273b11f96639aefa6259a1387fe3947f9f7eacdcfcbb54b65").into(), + hex!("837cca28b3bb00034d619a3b667b06b66d7cd351ee2c161014b4c33692c705e0fca1352c1e5a7fe8ee00515f4b9c9658").into(), + hex!("95589319552ee815c1e1b053cc4452f9ba600142c37bd700feb3c27468c769e45e6307d7c0a3f366440cdb4d3b997d1c").into(), + hex!("87ed23d8d5fd6aa0922565367ab405e666996e7b918795da299c204cb6d1e51a9c6ff1760dc3fde555b0900e677a9b9a").into(), + hex!("88564f50eb215320ad93b979c617cafcda9122ea02f113029702df1f39116e00b35e0beb0c70bb7d7f2d9b4bf68a1419").into(), + hex!("adce6ad54e5d24a2da5a04751151034245184bd7e61998ddfed673fddbe4e3d069b580e833b3bd4509c30a2e6a81f528").into(), + hex!("80800ca4fa6d0f3ea555ac7d37e54e7776f640f76fe89cd7f172c74723cdb3324da01314c6f66c4fc404c393aa8c7841").into(), + hex!("a16f473cbc9070881b9dd63be9f99670ca571822a67520cba885b2636137731b440561f83e199713ecdd51d4dd542997").into(), + hex!("a08c0625cbaaaba847a63ab4a96206bbcc7bfeb659505d0b6b0e58d22f00aadec41f8cd62ceb116258b78063f26796ea").into(), + hex!("a358200c83ff15fe3fef7a1610bfacdf86d55452258e7f4701082f993c8bd5d234b4d86f96d444e96c01367503663886").into(), + hex!("97278525e5e590ab6cc4fa4a1bac4ed7164a65887b29e658bdb2146008b02907488565dcea09e761f6922118d9933ed3").into(), + hex!("b4e76e4cf2c711a7f3be5b404f076b9d2272e15ea7c61fa4b7a14c5608c352a92fe6674a25dc493a6bc9e864ff4b4a85").into(), + hex!("a4612d268cffd31d092892268e6b4f9f564f0036bfee4f200d270a61a8e5e239996d288d28d6f281d8446a88da56cb55").into(), + hex!("81a70c88fd7d99165b41ee12883ae529458758650fd13001c86f8fc6ce5f8f9b69ce3a133e310619692faa1580dd5d67").into(), + hex!("a87124cbaafc8ee5418639ff27795003a43ac18d07a60fb8a1c155c5fece0f8b25525166ba697e07a5f2d47af6cf0bec").into(), + hex!("90807f573a322aac9d1cd546100e49cb8c771f3d32c89da1890c1c819d90b1dc668c2374249a093c0863d5988c358d4d").into(), + hex!("90cb26b40d10f193da22975b0507f04de6cd9d002e33226852cb40d948d1814009a39332c75f1067e8192b0c9230ce63").into(), + hex!("aefbe25ad8bef226d434b970bda8a47bce8269ad69cf91e02e04208a74055ea79a3a7dbc988981b79bcb9298af467e62").into(), + hex!("b4932ab425f4abad270b32b6f66066cd01fd6f18cf8c84dde99d21c9c2676d58be4f9ea5ebbc454c9d9f921c0333cff5").into(), + hex!("ad19368c70cd241b1a90e5b46f34c44351d298e2fc9ee5906596b20f5aa9e592e878afe8924aea112be60c07648fef8c").into(), + hex!("8b2d2953b4603b73bfb1edda8313cf07f6d9d16b0272d90bc46816677b602b4a7e6fcb36c4f69335918f1ba4ec95dec3").into(), + hex!("ac64d362f730c790ee3f9311990d9ffa3b99d4952ea74f63d141360d2edb7fd0b70d94e5865504af3caa94b63e34ff4f").into(), + hex!("b71ad17d1cdf6be4b7f0bf7d3fca689d5a69a7426dbedfc137bb162142d26e079f49c99797316e2a577225d881e31a04").into(), + hex!("8dd692a01ecd819981ea31f39a5950bac2af0deafb35323358736bc59f8fc69f58c865ed9cfe239ee34d6f80bceb562b").into(), + 
hex!("b2ae1f2d871d48b6157aa9d74c24e3fa3f09d6c40f421de9c545de5ecdc44d44d6c4a7caff315694d8173974b92c6119").into(), + hex!("8c934a58d5d2c06221c10c14f08f17265e918c6f7f158f6989acca4b1bdf3db58952e5500f930af02ac3e6e44133669e").into(), + hex!("9466e7c328d12ad5439ac01c994825a94665091aa00e212a75c4ffd39f4473a62c160d63e568b534dd7d5577ad266544").into(), + hex!("8e35a84ca6f167c75f98728000b5df8d9c5611080d2dda8c49f8d4afd2196da349cda481fdad8a0e7dad1cebf4b82446").into(), + hex!("8ec436a9690744a1c6a31fa796bfc8f054ea5efb1c8d6a70e9094dbdc32bc199a7c1b29216d706616029525883a9e342").into(), + hex!("87cadc50459a643648f5995ff7ac751c24454040f788e218c4894854ac658fc64c2ea0a8cae4973056ea11f7bb0e5a26").into(), + hex!("aa5a2d8278dffefc43d598186f1119bf1a6d2343143f4874aee24d3869fd4b58401e8bb220f2a228416c89c1f5344af4").into(), + hex!("a36f48c232cba1daae013418421fac6278bca09ee3816eb46cc4059254f1e7672dcecd6eebc9bc0896e96cfa0d8b485b").into(), + hex!("937f8e28abdb3859575cff574517975b96ad41dddd4efb23af86429b01ecbfea553be6cce336d170116752118368e05b").into(), + hex!("aaeb4e5c7f67ee23b1976b2e86f2897ced033e79012532599d130cdbe29d8cd551d9451794741d2ede8564af9070f07c").into(), + hex!("884d9608b7556dbddd0ed13bbe04a5bd9f2bda0bd090d47550f7362bd769a3b3dfa890191c64e44378792d97ee4df5f3").into(), + hex!("a8276ce24aabe42cab32ba7d77c2ef2ca84b3a3e3d750f8d0385f9027f0e009365c78196638dadde186bf44b780fff53").into(), + hex!("9148c5c797b4a6438360072c463008df8b17978335f36d4972b4f826861e8a175265a9eb00c56f47d3892783dcbd080d").into(), + hex!("a7e5100f51c611f010b2601d340ad7aa65bab89c8aeecb181e76185f3a739892f8b172e5cd2c108d5aed8f5cc91cb6e7").into(), + hex!("87c46e67d6643c07e0e318dcb22032753c624e646e4871be4098005100b306f3cfb25b6d81c718e83b42b92841c577d3").into(), + hex!("8bf429c735133cb05edd9b8943b5d4040e83191553452e69b3a1fd06edf2a9eefe8e280cdda811795e1da33e41e58345").into(), + hex!("800ce4d6c257a365e5d8e1bcde67a38c04ff723e38b0926af8b9fc352545317beb40621497aaac8d0e5da291c0208630").into(), + hex!("a5c795afa0ce78cbc11de13c8f58d0bd9ca5b8665d5d8d28513a0d8666f9336b0dc3295557801ba253983fc99f45ce3c").into(), + hex!("99e3e4a8e16ed2ea44aae56b537ca9b159d57e987b0511b6d34767744b04700ff287d431dcc6c67d7cba5748b3580899").into(), + hex!("ac88e78183973e9730ff0c88dd62eb23e2794067ac2574a1c8deed85a4aa6229ee620668dd16084c8f168b2555f04cb9").into(), + hex!("a9c4fa68984527577c6da60dfff110163f35ab7393b852c053d73485155fa1536d9701f660a08b160bf49a647ce3cf92").into(), + hex!("902c3138ff660230158ff69c33911cbe958d29178a54cbd13480addd948b19b0b97f6b235df2beeadb7f7e1803b8cbad").into(), + hex!("a6169eabbed09e08a0d290f9075c62ca4b14e1f7adc53545abae49858b3d5d7fd6cf8d8ba2ee1bf13f36f79ee4092935").into(), + hex!("85a0d7387b1db5635f17899bbadddc0fe6b11976385e12ea4272a8f61d81004406604c8f04f10ce928fb6b547d3bc654").into(), + hex!("a39e5f2112944a7ee31fdccaca927e4f4bbefed1274a134e8a038307820c1d14a6260e25ca5a3af0589f8faa8f516c5c").into(), + hex!("b5c86d0482a417bc94b42f8478de06918b36c5f45d0695275da2a3e773088268cc127ac1380f912307b6455dd0b16d07").into(), + hex!("b8b5e32afc4cf0fb92251b422ef9b757130455150c49ce51f6d6d95d895dafa25649363660608bbe7507d787db9d643c").into(), + hex!("b11f3856f691d84d49fdbeb4f33c45c37dd401a2bb71bbf946f3ddc53a57ce5c4da583f76cf4467d43034a61e1dc88fe").into(), + hex!("a56d1de276b2d0a4482e17cd358455aa19a47bd25c7fc97af8457ebc37781358cbf8a7eec9427881550a8db1bbf51771").into(), + hex!("9508c85488f15d2772522dd1e991f41da1f3af7e3527e098d5408ea96f11f4150415ac68ba0a3031e95528664b261de3").into(), + 
hex!("a28e129c656bc44d0b6892103b7d7c0d15ef1f1ce583e90e2c644c3016ab348ae26a662652c953a3c447272e52b007a5").into(), + hex!("a68dcb67d0cb585cf22f753602c46c7ccffff246b106db9c56248ecc5e94a036009bde23864879af62a4ede81d040c56").into(), + hex!("b9725383c63b2a522f4d976ad6be14a35b9e80145e058baa622238500f1a2ffd6869cde87fdb984654b2e57615bde3ee").into(), + hex!("90248e4cf47ec8b00ee874ac98227759a3f7ce4819e44176dd9b1acaa6270d144d3d707a35e0cbdd7ae23b15537a20e2").into(), + hex!("81327abb95401fc2fe0b1c2d27d7d9972811a63e12be0173fe8311678ff1fb097d73fa32cb85f13935e1a4b7fc59113a").into(), + hex!("a31bd2dfc9ddfa9ac748968b532a41a26007b23cda258fdacb3b0abb751cd7ce2eccecab2d5e3781fedfe0d8da027481").into(), + hex!("809b28c11c1abbf53f2dc005d30403937d9826960b24f4de857a9470067add08d49345586d7e58bb1107d232b3b47bbf").into(), + hex!("b0597958b75e64fe5c6e56ef803284b3b7420fd537e5625c75af3aef814a87a5ff01951261c2c7b27e374466658711d8").into(), + hex!("ad8fdb91216db4fa779774324162fd5bd7ff9a999030c42d9f90248bc328221aae315ef8617c9ba623091adaa0556074").into(), + hex!("8668991c8bffca4cbfc06f3429961c595d85803a262105907361758d677920796be70d2cf820f0b1caaef708d924e676").into(), + hex!("ab22ff4c2ec9e683d2d1ecf57f7af9b3aab1cd289e22b1b66f37dc3779e83e35211f7d4919dc3ef0babf876d491e0bff").into(), + hex!("b8696c811af5d3360951d7bcc9ba4e82e17a125501e91ff74b915de28a8cc217c6fb05d90985fea7ee431e519e494d9e").into(), + hex!("845c0bc4769c428fb30e63c8e4631f22e69f934b0e6089431ddda2c232172a4980cdb6f650563992667a0790f1a3870d").into(), + hex!("b4113cfe79b6b198b517ab5d14900a7189ba78b7ef85d04551e18fe1ce6a69564377fb86a0e11627cb794d1f416fbeb1").into(), + hex!("aa6848837b26df24b04198b0b09b77d7b59d26dd3b20f7843352f2436c046e9975af2985e64fbd6267d897e728a9e721").into(), + hex!("861456839cb76a9490dbe055559e3dfe3bcdc41646aa656d8aaabeb4c0e39a1b370a4f0107b78f900614e3fa09b46bb5").into(), + hex!("ad6a8ca9a21b7874f8b115024a7f079f5a1dac3b165267b33a59d1c8de2065bce4552369c930e9a949d0b07110a71452").into(), + hex!("aa83179de1682892257c57774844b040299789430a262c8b44dcbd81f8062deaa731ff4bebab1a815d3148ec719f4cb0").into(), + hex!("895549691861582abd102fc19a4ed269b335010aeef45ae9ae6b0d9c6d26f26c31371086ddeda626e76f7f07dc622fc6").into(), + hex!("a94e590989e81d269b5246f22a9c97b604af58352c70100f8a20454fecf36d19e601dc1201342841ab231dcefc461f2a").into(), + hex!("82541b5b7a392456d1936373396012b086c370e6dde41f6d4409d35373d1586d3c7119f6f0d1e38bce9cae67c97fcdd8").into(), + hex!("8b89fecfe83adce613e75a52e785ffc90847c09ed779ebef4d29048bbac04b58e27311461c25d4e68cc0e6778228b037").into(), + hex!("b3eeb09bc9ace8a62b71747711bbfa308b746c86ca87297cf2a8e768765a86ed1add2e1acd2925cf05537dffd4dbec50").into(), + hex!("a5364dc8221a37d73250f8498d9b6e163babfcb01e1fdef8ae570538e128b562a0ed8b353235159c3781ada8506ada33").into(), + hex!("ad0d54f0f67d0231ca0ab54ae881386b055c169fd7415c12e511e5ddc5d4b1beba0e1a157211996c7ab61f6e94cacd64").into(), + hex!("b4b978f7f9b084c089923e73a593a24d5aa22600c879eb03645a19ffe8b36cb8ae040378a378ebcb4a5b73331a2064e5").into(), + hex!("b3d79038de0c62c667ab4213524679934b33de22111626c258bf8fd8e16134425549dc7e3dee15239c32bcf122f5bbcc").into(), + hex!("af7a243eb665d9b2c37033363b252c42bc2c202c266ee125b5676b6f9f94ee5d46d3e2ca217f107719051de511625dfd").into(), + hex!("9726a0d607674fbde3fa5c346806c4083e092921c303ec86bf5a16e4b760d031a24585ab407b9b1b0692d12276912961").into(), + hex!("85a8eb7ff82937e2ccddf2f049af9d871c653de4d71ab36b198bbc7bfef2e32eb3f22462dd01affbce13813332193262").into(), + 
hex!("a8889fa7291017a3363a5e14cee8cb24c273be4aeb74c4b0e1e375f70060dbc9ba296b291e154eaa56703b8e3f7e85a3").into(), + hex!("a789cbc52c5468bf404fc1b19651ef6a805d96ab8a8991407e149a68d10d1f67d1d4b380c08f8be1faea8d1b32bb8c1a").into(), + hex!("9535392a86b73ac66ff8ac1ee0549d266a9e25e1db542b077d136d26710282529f34c43dd94ee77aa97c647e0e05356a").into(), + hex!("a1ae9d5fd0ca16021e0a33fa116eb5b94991aae02efc6f116e073def47253fe2d1f2438275f09f204cec0610ad523ff3").into(), + hex!("b5c6c5b37943e99bed4e63c9215bea95fc365a576a9f8f0b9da8d5ceb5f9da881a273f5692b0913a3bb922772923c07e").into(), + hex!("b3b0144fb027e0c7f1a0c4c703cc5e1c09422be38de4e10010c28bfac358a6c834df6794e5007ecc4c8d866ffb9a8725").into(), + hex!("aaea409068fd2cb94a7c6fe031ed47c7c5b366a33905d12a107799aea57a052b9bfbb1c4f88b1b3775d5bef7d6204b73").into(), + hex!("a10cea5af8405d807b66ad492a1aab8618324ec3d9d01181ce29512e38f03bfaf556251dc490b3d1e80576bbecb27dbb").into(), + hex!("b700d4046b0be98b3cdfb8a2eeee52df68bcc1c5550d1c17205664b0d896028bddaf5dd38482645e76f643ad9d2ea9ae").into(), + hex!("ac9163d5f57a2d1def901e74c1b07f4d14b1e9a5c362d3b082c45389ae8add929a1dfb0baf14f64790f86cddbdcaa32d").into(), + hex!("8e3b79b19d49d77844e3401c01984af7211268bdf6609919c9867a83cbbea21870ff108143795a85fbdb2c15a2d127f1").into(), + hex!("b9db73eeeafbeee4edc52c1fcac7de6d8acd22a8d3d1c4cf760a81dfb6cc91ee454385044301fb2253588f23f3a24079").into(), + hex!("b268631698668059d8eb44d50d532c9d8c49953ea2029d6a2d0eaf69713afc85c42d989cdd3bc0a479ad77cbad24fc0a").into(), + hex!("95f74950a24d82ba9a9df5d839d17d7ff830a5dff38663630efad5abe9c58724802d5bef891ca2b3b81923b55a94c6f4").into(), + hex!("969297c612c35347019f5bc80d2887a3c95c8ffbb011f5cefac63a1f51e48dc84d961aed56afc353791589a45c871cfe").into(), + hex!("98743e9521e5fb6a643c086a00423fea51b8ad2e55bbeadf791ede16eae64f9fa45c41101c6cbe4a8e96c692fc57c030").into(), + hex!("9374001ac3a8673e337b078da1b72090bf2450a5f53f6a600f4cd43ea4b5fe86a73d14bd0103b110f23e417dcb4c2e47").into(), + hex!("a91bd42a87f28a6fed7fac68b5306e5382a93fde2bc9aa5c48b747ab774f9c557343cafb46dcd4e93df5aa95ed832410").into(), + hex!("8fcdec0a825737ee2c61401014287079e729a8b8e49337e99b34c25dd9da570a1fefc532a0cf6ce1bec80be2d9ff46e8").into(), + hex!("8197cd84016cb41e4287d29b3a0fe8d221868e5993aef8c15c1578e038f9c43e93bc26dfc67fbef919322178223d0b9e").into(), + hex!("a4607e2b6ff802a4e497c53b206972d35520c78f14f7d4d78514333e90b2a8852603bf223f1c9eee3793008f87cd8fc3").into(), + hex!("a72b6185e451b3be2c140fb3f48225927e7c052805682e3de9b2c826997419084bf1e2034aef0c5d364b0004b3b7807c").into(), + hex!("aa99a2cd46884d2ecae4257c1db8fe1ef6b0cc1a0c25dcefb53540ae91ea7bc8955b8acfc6d96ef47fc3a5733f2f28e0").into(), + hex!("878ea42dbda59fb6f839f0b65eec295f2d543541f4fd576a60d104b94b49b1a1ac6e9a15ed3274e6305de3f35ce1e3a1").into(), + hex!("ab2f77f6036200b4ffd3192b8d06dfdae4eaed4e1105b27e64ae2e120c909095e59f4dbbc44e818afdffc7f9ea1f42be").into(), + hex!("a37e3fd9b1337734cdaf34111762403db11b1aa0324937a17e053242a9099b3db0d396b485ca996f91117c64623915bd").into(), + hex!("97c65c28a6a81690d4d6ff17d5cd3be0e15ab9cafe66e6f7b8da66ead8beec561c3abbc79af52a8986d576363f14cd27").into(), + hex!("ae138b3020373ca238d5dd780862fa28a2c3e05903366cdc0fd7a142db3d18eda63b8d049abed37f1fcb25f6cdedbd67").into(), + hex!("a800bf90b4e7aa7b5b00fbe6b5f45067e0d7ef2b1ed9a626211e07b66b12ddaf90ed05d369f8da13ecfd8cb499f192a1").into(), + hex!("96565fc4ef721f754b6f53db97d32ef5e5c3cc0f55928eba3eb341f4962b815381178763ef05c21c1247124d592b4449").into(), + 
hex!("b4896f9e2f88c990fab764a4e006f3f39ba6bdd0e1c75fc8dab2973544e052ce63f8c61a6ab213c85f6799d988a6fd61").into(), + hex!("87aa22b60a13edcede78e629d54714436a8d7d1e6e232d4df6047213b4a91e61e5feb38216e0ebb209f1dd8d7e4f5f9d").into(), + hex!("ac4d5e8bb39a2564f3bff053e2058f261209cf14e65f7dc540070d40dad7ec4f5fa81efe6274aeab8691b85a774307c5").into(), + hex!("8b13673c306988222f09ad896a75a6232ef3bfd2f6c37c2d751668466d45511542fe982ca5720c6518891830674e2cfd").into(), + hex!("afeaafa07eaf14f248d2a34e4f86064c5bccd92d3a6c0ace1ae827dd59111e9b3cf2722a270234eb5aa633c12e140354").into(), + hex!("aa31119422a52a7a5ba90f4e0b5676434b4f05289ea3143e8a2162e32b1e19b582a586da796ad9876d0991274f3363a8").into(), + hex!("82c78ac9f2018540eea744c003a76cd7bc8984103e941c680a4a833a7c81defdd28165256890d534aaca2991dfd856b7").into(), + hex!("b4a99846af0ffe14d0f820a574d42571e0186cd078840ebdf0684c806c08eac1d6afb2f7f9f9dbfee19c2ea12af5307a").into(), + hex!("93cd10366714618a7e8d4edf3c93a9bd30a280f765cb93071a279eb5bc4fe8dcff8d91a1efe8fe697d1cb5e760a07fd0").into(), + hex!("b17dd2a8817471efd91b60ee31bb5f7c2848bb40251dafc0e2250cdeec3202cbfc7a8af6d7f5c3300a53a73bb4a11b54").into(), + hex!("b396d11ed53f287ecab591707ec5ecd0c5d34a67854783dbf263fe2614c707a2226231fd8dbd6bb1ca0760f06f2fe7d9").into(), + hex!("9982a0fbdde6ad91f35e64de34183e4e7f7df6cf422912f3dde0cd16394f0f172dc32c4f68f2a09647fed32894471fb6").into(), + hex!("ac5889086fbfd2f2570191b5d92659fd17283509477f442dae81a8491b8641a5f63e275659e6592ebd0e62a8c7a9bdb2").into(), + hex!("8fd6151866cc75461b69b4685ac4efb5a21c10d5b3291617bee4ff300d90fa2290319967faa2b7189c090d3b60994fbf").into(), + hex!("adb4459e4e6410606a74742d6e48f7b84f30ecc8e849b6af9b4b617236dedf1707ec388a97523f18ec7e047743fa7151").into(), + hex!("a9fd9329ea3b6fb77dc577e2c891eb66c61a575ab75a66dcd897f1127e8bd9ad8d3eb9059c7f6a8b08199913b83e5ba7").into(), + hex!("b94cfafbd3ef7673023ea37996084acb3109ffbccd210184aaf8ce8d29bf36390bdbbf2870b0970e66831d28e90b248b").into(), + hex!("922404dd76801113ba23df87ba689e5cb609c94930370576d0d16e99a489de0fb079fb273159b8b07fc58bfe4f787c70").into(), + hex!("8c6005451c02b18458c3f069a521aafb44fab40f4260a60da6b9bb5f920e91990a868aafa4b6b071a899d3bc51fac72e").into(), + hex!("b0cf68badbb39413649b3171281ebfbabbcda1123549a4c6c09dbd4dc0427b51d555056c79f38840c52cf920dfa2c8d5").into(), + hex!("a08a09c8dac1f7bbbff2b7ea96899b64fed53e971569171224e675399467daea6870e48fddcf47179d3c7eab4cfde3ad").into(), + hex!("881c89cfce577898ad367abc2cf5989c647bf8904be5e061e632256b3750b0aedaf4d30c17f994358aecd069b62cff09").into(), + hex!("81674253d6664d414f667b10f6e8edc32af0a67b2b99d3e4657991f8a8b1dbf260447871c1c680d92f99abdf3da2d035").into(), + hex!("92e34adb15141ea58b2b481f9dd69e2584f512531bab13779fe99e18d48b6ab039bb28d9f444d517298e11464ebde4da").into(), + hex!("81ee1554da84a0a487e52c57528b69cd79f1b6530418354095ab976207e368379ae2fe0a4a340d209f13ac9783cd6d5f").into(), + hex!("839c0316ca07242ab52b76f55049f2da3e83f021591b0bba295677d80d4b407f88b0d207f3ecbe7eb85f19eba5d152c2").into(), + hex!("88b0ad748a61db5eb96016d9bf16bb05ffc4cf5ef56569397e9395f454fc1f731b48dcd8b3368163c36a3fb41577286f").into(), + hex!("b94fac438903b1e6cd11135b8fd35b91b61a0354addf8ead5cae4fc853ebb5be68bfb9901d8b4d3bdf58224675b6d675").into(), + hex!("a015eb1a7e1b814625b13c1b1bca7f738e80be5972f3a3a27cd9b21b033f16e4b5934bab69e38a6edb8d03e84e8725ef").into(), + hex!("83ef4eb739353f7679b27d3679551b2a0eb1bd4d372def5c0e8e5922a9ce7138dae4b62d5c147e6439a551d3ebb1e1cd").into(), + 
hex!("95907a3b288f3d4199434590340293881b94322e82f7fe9c186da3fdad7881b9a92cdcc5fc29d4124b1d05886bc9ec2c").into(), + hex!("868078f74e35b72a894d72f93d45333e423b1aec6d7e3cd7550254ed6a156957d4c5919489a84986f6134be8334bdf4e").into(), + hex!("8f089ffa10d8ff27470b8f6fcff49a69bd06ef3f88faee54a6bc8ea0dca6bb799199bdcd9a9e7686c43302cbadb584a0").into(), + hex!("980f4614de867eed7571cb3100f9566542b90d2b4110806dbad64249944cf3e4e484d03543107fdff0e91634d5193530").into(), + hex!("abab8578ecc6096bf063da248b376bd9e76a8b9364000a98c85813ada835017ffe693f908aa789cf09ca3020f3bbb9b8").into(), + hex!("a2beaf5eb12232e44bd251aaef3e007989794ad1df7a5f41ce1e6d862ff0607db47cacd4b04d684dc22d3640f6b8aa16").into(), + hex!("8f57e3d8d68264b5a07c9fc2399db0dfdd079abac46e1023373c22377612d3b005053e0df490d2435110c9fc2791b8ec").into(), + hex!("af7d2c45de945cb09adeaa1898fe0b026b5a5c5de2ac21f3b2c298d82fe4ae253d859c47f71def164a77b542905bfc73").into(), + hex!("93b026c1b083d82b3bd52d0004985f374acc81f754f7dd4e563a9ca5b20780f7872925a03e48d2154c526723b6d3fa88").into(), + hex!("b8a11c00250f9e148086818aad4dda9d69480b378ef5ca9e2fe85dfbb709d2a919067d9abd2eafdb5202cd334081b5a2").into(), + hex!("a7026b1a57d9d64f2526bc42c771cacf43a718725c2d0dd889b1af12481e3112bb5357e6434b3e83754b067bd5d533f8").into(), + hex!("84771640879d9628fea0ae1106a45bb8a383ca0a9a110093355395968b4d5af9e4a34201024a187d3a31b78a839af6ea").into(), + hex!("b99e5eda7144057e439ae752c2d879345ebba19e83c35785743d6fc1646069b0258ebb1ef62fdd43493498ed08a8de15").into(), + hex!("b2a17e5253f695a61697469df96f3182a62faf0b5d10c150f833543e7b3f5979506a068ddaebf310fe86efaf3adf13e5").into(), + hex!("8ce51f0a8ffbcfdddead1a94f45a42ec4f9e8770c0ca33b58207b8db06f3004c05e88f5b31896e1b7d3c79ff571426b7").into(), + hex!("a775ae2c3c59968d0add4c446c5f20e92f92eebb704016ea45a47c5496945b7d0935b4d8ea315990e6f58ec33d00aa9c").into(), + hex!("93bac2d2038c87f111c8000e721e4637b04ff8c3b7b1e8a9f02cae40e465a1b2928aa226af73ed643a4c21f3fec436f2").into(), + hex!("ae050b3f4784f2c12c902fd1881f6bb940806c0a9d7cceeeafd194dcd49cfa48acb10a09c55ea47b508d5d16702800db").into(), + hex!("a8c6680208271473433781cafde59a4aca2496d58c85f48ebaf447a0c175e784c4a1351d94b5d1a64c78bcf8c84f8f2e").into(), + hex!("8f9f71c20844682e17bfc30d745ee2848bd8782b05572ea64f9b9bdb2c24b3d1953bae317d79e267982c42f3f3aa60f9").into(), + hex!("abc4ec57e1625bcf3c10754a07a2047d359e503b05e2524175b48d5844756ea21f626a65a08f4f7b32cfb0646cc68fc8").into(), + hex!("abaccbb39d4fc4c252997503604764731889f5bd66382c90477df2f2d413f86892f7efdbee0e3a908b3cd69321d3db78").into(), + hex!("906a87855cc2b3765774d3e8c7daa62630fa7ba761ee2a0bfc5594380dc1d9b62740d1190fb882ffaf17c0d869356261").into(), + hex!("afd0ed291ba237f8b626698e3c54b8a84341628a524b897e53aad5231aa01977990c55215a677e9adf4319a98e81bef2").into(), + hex!("89512dacd42d13f5618f1979d9afd93c06247de2e1e9ab6625b5bc9efe24439189a564ea220c4ce2795f059064f5f5d5").into(), + hex!("90238fe2ab6b623ecc990ef8d2849d26229db5f3c8587cf06501e459c7742d81653c7dee45da82f5946e041f129a8df3").into(), + hex!("a420b197f4863782fbd676bdf1808ff1c6f49a506d525952c575c10a32cf63afef22f977d4b7d6258334c498f36e7f95").into(), + hex!("b7d8fe926876d1d01529eb6e30a78db47b6693af3171b2014bd18180066609cca9036e35953ebad7355362fa671d903d").into(), + hex!("a4b7dc0ae7e7d8c8eb65081427f5d41a122bcad56456dbcf4a4e83e188af1e3d80dc7f450766b8e87ed8b28ca6c5c479").into(), + hex!("8d3e1060196b4e8e827052c2ed782aa554540f21690b67d39db46fcf088d10f67f2b6e0d98689e7497e8029bc8f355bc").into(), + 
hex!("861657c4e2a48891939b49706791a03f63d092643cb491adfefb3cfa7d86ecacccaf86e4a1382444e466a61e29a12bf4").into(), + hex!("92f1c5b122943341854ab7d5c4603e083019344e2252da5664c676ffdd564345d7e28e2a5692dcaadc4371a95ea2a142").into(), + hex!("86100ccc2ee10dedee28a3ffa0bbaccc1ecb19d36a99e6deef1f2373c1c1c3dcb8b8fbec5809304d883c5d60f617d875").into(), + hex!("97c06ca658fe3ec45a6573bde07b3a95d80cff213c9f4eb8933c8dbaf147262291a5ebd55b0e58a4686c4971cdc45671").into(), + hex!("b13e9055709868f723014736b1fde59c05899bbf2aa6d4591d724c35c15ebe6d215e37fcb1e7b8585abab0b2ba309c00").into(), + hex!("8ce0532b968d8d770eb611131978a253e7940ceb627b7364b3a4dd517f26c31bee51c134a5b1297362f6f9b6d714ef33").into(), + hex!("92edce89bea01fdb25da5d0f903de2fee626681cec3db418a9161286a2e95bbb90ef2ccd8dad434b51776f85d982700a").into(), + hex!("aefc7dea295547984ab42a64f2f59ba2ae8220712778a71530623351a44372ddd1018cd8d4951934ae0fa39653ad6aae").into(), + hex!("ab297f28266bd5ed104c1b55088f114592e80aa098a0865e5543e12c6392b0f94b5cd4e0b6f375d1a0d0809d80c1fca0").into(), + ], + aggregate_pubkey: hex!("81a5778df2e724c98b4ef79ff33b9c5fa3ea265de81d49de5c4ab3be2165d32fe15c59c982f758c3b9c522ca5e659fce").into(), + }, + current_sync_committee_branch: vec![ + hex!("1da42c54eb912009030c084b700d2e0031c0a0e5759b0cc593601b99764a725c").into(), + hex!("0c960bd59f4a61104153da676eb38ebae603e9cbb55b0f6677cc1df0d535d60e").into(), + hex!("1682c67e0936255e351f8be6ccbdf048db06a80749aa900bd4265af1c366bd52").into(), + hex!("d95bb6af7d6be07e5d7d27337ab9b54d5bf725ac37671b9483434d22d724bb92").into(), + hex!("3abb1af4e9c3acb052119a42c2d4222d99e8b5b958c520a03526a8177b921cf5").into(), + ], + validators_root: hex!("043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb").into(), + block_roots_root: hex!("ed6ca045637c1c7dd54fbef547b8b1aa3f5b9fa8f0bfa5df26142a0c4237e617").into(), + block_roots_branch: vec![ + hex!("620abd1a8757614facfd9d2fa43795281bccd4055bc9b12e5cb3742a16a9f9cb").into(), + hex!("14c793be544d5fe1993a1b25d39f4b69e832e914c3d470745276a25d982df4f5").into(), + hex!("46aea5f0a7d66cffbd55e676b915be97cbf3dc6281146cdf4952047214ff74bd").into(), + hex!("0ccefa47e43d03e26def9fa07bacd91a5a2a20c6c5dec2ea090f71f91ac99282").into(), + hex!("f03f3d7a52241ab959560beb9b748a8ab93e2b7221c8070561a12a5fba8d4434").into(), + ], + }) +} + +pub fn make_sync_committee_update() -> Box { + Box::new(Update { + attested_header: BeaconHeader { + slot: 5808573, + proposer_index: 430716, + parent_root: hex!("0be3932fbc9ebdf3220e2195d87653f283d9f999946e53f6a9f6172b6f532779").into(), + state_root: hex!("cdfacee5c92a351843fdc4591ccf16c4f040d0276add8421f08dbd5c71035a1f").into(), + body_root: hex!("3d248ca71ec98250b8dcdeab1207806406f1434c11874655af56925da6bd88da").into(), + }, + sync_aggregate: SyncAggregate{ + sync_committee_bits: hex!("ffabbff6fcdefbebaefffff9e37dfffebff57f7bffe3efbdfef1f7f987751dd176f3b3ff7bfa3fedff5fdf7f7afff7ff777bef5f9f7fe75f97fffe7dfdfffbdf"), + sync_committee_signature: hex!("b405701a0227b7c40805504a66069fb5ef99cdd84f1e295c9b4a4eccbe4d93718740efa9f8eca62f563dbc73021c00e914a69b00a9ebaa906e78f26c1cb8088af916096801c787f18f493b1479fd43f1f5b28d15af827a1e580713fa82bfa1d7").into(), + }, + signature_slot: 5808575, + next_sync_committee_update: Some(NextSyncCommitteeUpdate { + next_sync_committee: SyncCommittee { + pubkeys: [ + hex!("8e9fbd36b3cbaaefc176cf46336592e2b59a51e3035d095da9e1df9d2fb5aac5e47ad05d27784ca675442abb875a6559").into(), + hex!("b4c6164c5ea19f3da5a76a2435db598bb012ea34cc8fb6d749f1588463e5c39d29cb3d45ceae0543372246549b17deaa").into(), + 
hex!("a89c780da1a713e86b149d63312aa840e865dd926565f0ee9d9627d363eadadf5a4bd5f79d8039f2e2927ed7fa60209f").into(), + hex!("aeff2bd9faa0201abd7dd681ff97888c0ae71d84e71590f424facb2e37b5759f07d338dcbb695ad6ffd08d903c0f92ec").into(), + hex!("b89cfb61a59cdcb61e9f3ed76cb5cf13c907bdf6b2622e16d140743c5021d45cb6d91ee94331130b876efd984575948e").into(), + hex!("a2c889cc5195532bcb5c83d035cd6881b889ffb9d0536843d3fb6f7b1c093a927162add5ab6ca5f06e7c3ec4ca4522e5").into(), + hex!("b5966a6d047ef679a9613114149530facbfc7b4bee6ab23a60853f45de034435b624ad0126ac6c7d6a12b1be93177e0d").into(), + hex!("99249360fc064fc2778b37b107d834eecd5eae29e8f10a45d946f11fe358db065242482935224226e83f518fa6916962").into(), + hex!("8c3548aa879d974c5542e59ea43bb34db91f92c7d21eca5e3e4fb9d01364c21e8e2341eeaba1d22da67f1f455644afe6").into(), + hex!("828b95590a46cdc4756fc1a7b7d7c4031637494938521b74a3740a970ff532b88ffcb5333197088f6700925dcab5c42a").into(), + hex!("ae2e6ae80c16831c02170dd273ff6808e4379a8baf00e707d497eec6cb50b5a1f132eddf053f243765a54695ca35c443").into(), + hex!("a7bfac686f7b307d794fb1740a05cb1a6ef14b06150e64353a0b6544e7b0c5e3a7c8985d257c5bd74e411c0cc8424479").into(), + hex!("b3671a59e2d425ef0ed109932402ff7dfeec72cee39c1840cade48a13f3ff36bd0f9b3931d0651fddd214a2dcaf7bf89").into(), + hex!("b56c962ab20fae058c256e37ba4091d7a9e5d3c602e3eaa2d90df65fd5a11ab68f245a5a6e53262335c6dd4f3e0e51f6").into(), + hex!("a8be83de4b06ebd8c14bce332a1175a4c651fdddd4a58ec85bf4c68cbce83a50dfff8c26070d104556883af678693076").into(), + hex!("b4b33b7013c6af21797478b14b1dc81fb7c5661fa2471d8cb4eeaa62a62f795aa9be2cfb65ff6b957cb7f89487a587af").into(), + hex!("991a42351791da02bf6c1a9ca8248901657d6f2a95225e4827ac3171b5247cd31f9465c9ab1c2b78e268c82b61db1f36").into(), + hex!("88a38e70998cbed82ae7f9c192e06df8abdd35278efb25a1112246d46a3d3f0bddee41f5c492949f15e651ed7fdd6a15").into(), + hex!("87dcb7f13c6af7f7d102c643db0406ac7fb06fbe1fc647f436ea839e75561b27beabdd6133da332383bf22ed4f83fb9f").into(), + hex!("a441e2c51448b6b2ddb38dab213d9ae3d1fe70e91e1feb0f98590b5fb6f3c18ec0adccc221fc44ba027511c52e5fa626").into(), + hex!("ae5f4dc4266016943cbe1db6538619c430639a1179d246cb820adf8edcbe55e9f79471134d06365b0d459b280aa2282c").into(), + hex!("8165bbc59ef3b15f29379a7ef90d8b3610590c662207ea7c49267f36b5b62af3d48008d182ec3384ca7c1063bd25b284").into(), + hex!("8cdc4e6a238afc55406920620fa90f696403afc1797562b424c26e679096950e7f42b8d8327ab0d7573608056364fa4c").into(), + hex!("a1f4c958f7bd1182cd4ef88561eb534c9ea3563d149a276fc256645be0b2e86a3d642ac17261696ada39a04a866973fc").into(), + hex!("b19e5ca1ef1d4fbac5633cd29e9510116bafb3229749e0e4444caf9819fabf9c4c805b5966c02446c1eb0029b3c1293a").into(), + hex!("a4682af7e19328a145a1a5c43ad3e14648b90f664c6139eabe1a13da9b763ef23947dc3ea2054af7d0b7018f7498df51").into(), + hex!("999ae1a8f2e0cad6a0378e7e0a67c8a6ef4a824043b34e67074d05ceee93cb7b49d3c3acc961f1aab69b45f89d12180f").into(), + hex!("94a9f1686e91ec733799b569e3b0313db64f3a219b48482e2a56c21016e800d4373c2f8b876a923e0753a464e5fe4684").into(), + hex!("b4936942d807ad09cbaead9f56ca124617fd1fda2ff5cd94fffbbdf5ff2b295867acd1e41599928ae455d597ea45cfb0").into(), + hex!("a74166db86410c9722e657cdd0f4d1da86a4f83168e2bd9ac71850bbfd9471e1ed88a6476b75ae5ddb42afc62a9ac121").into(), + hex!("a97909c10241e046dc707ff9d822c385dd68be297d6b54c84fbdc18f5a1dbb3350e93496698d6304ad1d6bfd34b4a041").into(), + hex!("b6d9b775129b048a6c577656ac2de15135c2bf1a3c7c8140ce20a990274e42d7b602ebe932855c1d03373797ea0bec63").into(), + 
hex!("b59f975937cfc8eb510c1da0a7fff1960c46b9235550cd6decb514805439f08b8f18d88ae0373bbf50b028a08612d552").into(), + hex!("8af01facbaefb24cc4c11e13c64445600b1d716be66908964ef79e12c0eded04e1d23295444818f024e55df2aa911034").into(), + hex!("b79607bbe31f159b208a0d1b2f95cc5373631908292126b8b75fc44b22a8bcc9550de7b51ada33e5596d0f17d5f4e48a").into(), + hex!("a1a474a66940bbf6e601b6c6e63103de2d5eb76d7ad3d39dbd74149658a14e31143a9723327a73bf72eaa75dea42c3c8").into(), + hex!("ada18b62cf80098f36921cb0c2f85200fa362721c4673546f8554e2f5fc8639f2ffb2cac68e888af7ead8c660b0db13a").into(), + hex!("9198582e8aebc174dd168c6cf20836a21cbb6baeffacf9f933850d8e0fe0619ac1ebb99fa6fe902c75927531c108ee5f").into(), + hex!("b8a44b23d29cc5ae1f00d5384fd06f31b73ef1a7ffe334b59db668c924aef2cdf60c3070a44a12b52a14ba185198035f").into(), + hex!("a688064e0b3fb3baca87d711b29419a02c06e6a1dd764af31574dd84fe870c8ef614d4c2d42fc9508711dc05fe373776").into(), + hex!("b86c167a1c6738bfef1feb7eef8f553898f69a933876acf675596fc2e39f0a8c83ac37df69dffb669fcba4e3f1caab92").into(), + hex!("a32d52f3e9acea45dfe9ce6c577dea8200e68d6ca39eab5d6fd24c508d2028f533b8b04f1a4fca7965315ee5dc5e2809").into(), + hex!("8ec96bf235d5e9bf36382d79b4bd1be8a8e2b23a9f7f9e02ab6d708e96a1c12fa81eb236f02b0180a0cb9f3c1bc28cc4").into(), + hex!("a0261a76664fe2fcebe1501e18eac7bce32b947db7bccb7b746757ba51cabbc8bb385600a99b248887edeb84f82a6f49").into(), + hex!("a44313f945a1d462376e03fafe6d7a9659dd81046460f45ff8914732ed268b2430ff632aa0d368828c2076144bdc8595").into(), + hex!("a55fbee79559e1fa7b85718306185e3769a92052cceb600283d0236accc6ba2343799c1856609faeb7c685dd504384e5").into(), + hex!("8a3d4ea2eea81742fcbde7a1bea5ffda55c58b5e4618ace17773057932b7216b96ad4a117d9054de18f71b3345a0076d").into(), + hex!("aeaa0984232b1fd5607a1a67051d42df3ffe71363639e5130de243cea84c87554e6597f3f07952b7d40d19b6e18957ad").into(), + hex!("986c868f8f25db957f44a39bc209f1ff8e98e9bff52236b2473b8ba977d0b7e90d146ec86a518a581c5de796290d505d").into(), + hex!("b1cb0755c54e0619c8306636e926930605f15901c01e36822131dde1538b063d0dc485a97534ef1e12f2f0febd1092c5").into(), + hex!("93cf415d4d7ea309b85bfba7ebefc0d1ea91b8e93fa351262d9eb34b728c7a516ef0904cb2b3549db2b3b3f788b147af").into(), + hex!("97359ca81e9fa330f4c0a3b4de96ff45391c2f83247d1f73a6884bd123d34edc66a4d3f29718f5543350204488ee51f3").into(), + hex!("8348c9b229787630ce26a41e7c016adeef5dd3ec1f124081baf9db4ebc1a3f3a37b40d94183ea9eedf9a458a2e65fb41").into(), + hex!("a4c6b13c7dd27497917bc9a4c4a91b953b88c819e147087b125c93657ad082971152d384e8c512f48cfe07a69f54fd95").into(), + hex!("88008b395718646492ab944a9139b95251214c42e90720c703b19b99afc971824bb87c2a4d40202cfbb62bd2ee30c15c").into(), + hex!("946969ae721cdb08dab293a638387dca6045e230cd7c7b7237c75e123355db9b8e444089633d0977dbb6e42a729ab4ea").into(), + hex!("83435817ddeeb242c37d31877a55194f208f4cab406b10a4a0605a54a19745f3517a880dcab8a5a4422c0e19e2ea8a2f").into(), + hex!("94fd0a0f870a6ed2e6a4f53f5dff5b5adc1a6943203da6a34c73694702733c991e146f8c7108ff35d563fb67f55a106b").into(), + hex!("89bc609d5223c73afbca46a8c3cc271990a8bac5191f1ef6a2c88d7984adff00d67bcfcdb3958c259e17d5cba62beb28").into(), + hex!("b39077093900919b51e68f647d11e0f78359be69c405fde5735ce6839f739081437b899f33c6c9e6c86d4bcfed059186").into(), + hex!("a0a6f9f588e336c14b91a9c0c56085830611df85ce6e99d759c72a4dbae500b47dbe736287f6b2d65b448a2a0e6ca237").into(), + hex!("8222a17ad961ad325b819bd0625e079a471e597adb89f2170cb490c40f6b8b1a08b2e23a1abec02011452d589b183702").into(), + 
hex!("8b77ca7fa195450ab3399f88341a9d323e8b9b7b9b2ca30985d97ebb287e1f9b7e0279f22ab3a2dad682d7906f6c8d59").into(), + hex!("b6b8389382d3336bc5ffdc752bc699a6bb0057dda7879901c7633787a2b412fb7852fc896ce95cd09a9b98c76bef1b1b").into(), + hex!("970a20613047ad84b61ede90efc41a91ef7259a5fa79ba23964ea907fba1fd88d2710b69fe5bcb0d75ed9fb68d02e557").into(), + hex!("8843dcb71117b6044b1c7eccb5010d0a2f93775a98909bf23c1773ac9eb1c0f43aba26dad08ea7823da593c38a30598a").into(), + hex!("b5b147bf651ef9696ed3ebcfc3ce226b2748a4c4e7cbfbf12b3ff5f1f0b2ee1372477e1d7d8aef8d9ce3ec602a63d01d").into(), + hex!("95433edd328aa9223f521daf6d78ab272fb83150bac78ef6639cbc032de8834049d4992af0828946eca69f359987584d").into(), + hex!("b2f4d2154ac750245e966f62b92022a136ed0313964ddf534ff3e9b4456cf58bfe429ac83b718bb38db5a4fbafab23ab").into(), + hex!("b6b1f2a3a99496dab156c6159b8c98990501894b5b0cf200c792bc462263cb0aaac570f5a785aecf367a0531ac2a87e2").into(), + hex!("9985e3ae265653082f068b8ac4c09d10b2543f920a19911cddf18ac53a7f921da86f11836f51f2adfb26c7bf4ad51efe").into(), + hex!("90c31f4985c7481e5939766dd080f6ee01ac7a4fedb9954b9d1fa8fa1cb0e6e7185a1e31d8503542f1d409ed2f550e88").into(), + hex!("830736923cdcbf7de3ae650768482845ed9b45c4dc9928d66481c76ead9b27427a96989389c8583a153851ab957d67a9").into(), + hex!("82312631f5b301fd3ecd8b0a6e83b130b6e997a5a1e6255e883c590efa00b0ac3bee45c15308efe824aa665c8d7a365b").into(), + hex!("b8ed7b3c092f7bb05aca8cc4c2041161426908e8db349cdd2064e95044f9e7649cd569039e0ef0a94e006094113d0e22").into(), + hex!("b1d6e5344b74a67699cda807ad4883369a77d79335cb8eded6e0ad9b64c8661b7ddb47ce4308ff69f947fc173f496ac0").into(), + hex!("922f0a2f84e476e6fc00c196eb913ebfaa6b205fa8ce8c8453330a58956872eb2e4ec0087b398eb51819ea2d0aae6b21").into(), + hex!("8c729483a3d2ea34337d9c6260944da7e2ea8646de66d39617924681189c79672b0ccdb63525cb4635e3cce1d8f72f13").into(), + hex!("a00af936fe87caf17f5b365f59d019f8438a62b8f174510d863da59097986011a9e76319d4125ceb64f1d83defd822c4").into(), + hex!("96d33c3832bf0af5900a20c067bd45dbf3f0ecdc086eb065afea6c44f117eaf9ae8841848578d2915452e61bad014803").into(), + hex!("8a16c15a161a1e898bf06a23f62a9ec042c5b9e875cbd54d62e11be181647cf09e6a0bc65fd62017ae150525c16ee746").into(), + hex!("b4a0443c452085bb77b5790be42005178dec8f9085e2f1b963d55de6978cb608b7ebb42e4a84f24350c768c2e78d22c4").into(), + hex!("9168aa07b4e29c67723f4b87a025fbe6876f13c69505520b4dd6b387f16530a886bfae5e5304539564debabc059589fc").into(), + hex!("9504db9c5ece4ac0b703ccf751503665746bd580f11106df3c8a903ae7a5c9b0520dd16c89671967e2aa12775af4f67a").into(), + hex!("a776127d4e2e46c7dee8559ac56b266e7eaaa26eb8db0a7f4df0c66fa1564a349f414c9091d1e4c3e7ba96938916c769").into(), + hex!("a85785ed5832dfa8c9a5dd9d20523591f04536712c19a38c2c1496ce9c8787cf37964d739f83d9979db5574ed524d557").into(), + hex!("a4ba9a3312c2c253394891714719d2cb369eea993353b07f9a6efe3ecbd245f08d69f3e1302d6ee312e743c05ae85cf0").into(), + hex!("8329604134885c08173b14b7c68b74ceceb3694a0a3f7997f566ba94bb3fb2ad3f78ab3d02c496858bfc95655f072e7f").into(), + hex!("8e3f5485c98cc317653375ceb44636054a3202045bea6e9f6faac128e115de7a658a49a6432858db7b4b14fccbb93f7c").into(), + hex!("a706c82514d19152bc4097f8602f792a4917f5cb409c42dd42a5e4f2ebd1bec8318019934ed6d19cb43123293bf4ec98").into(), + hex!("ae14d5f32cb99bf3eb0d844157f12b836963be0d6f91b776f973a66701924c1ad9c3496540db4292580e6be871486486").into(), + hex!("a45ee325452d4bb2c60ef5be60b7d601158ca1cfeef0734727562b94ef8f72190005567e2007e8940f8cc538838f1147").into(), + 
hex!("b8f1fcdabf33ba011c86487a082b19fb146de932a469b19518cda2ac046c319059382cb8ab3715f8025573ab53c5cdd6").into(), + hex!("a613f3dd6c8361893be08f816c640cdea4d57d3207704774eeea8818edf102cba7ff7b06c4c5d0fcf0873b09f72d1ef4").into(), + hex!("aaeab877b1d16a4db6e47a8a864e073c4742e0a84e46ae8dea1a0eed0d2cc9f23adce9e0c8d88464ec0c059df99a9583").into(), + hex!("8fed26ca2cc519a44ae38398d856c3f75d1ea6cd02dd36dab004188f3ef2167cd67d279580f37176dd70c1a0ab08d72c").into(), + hex!("ae18905c02f96e110f40d3bd99ab26bf28e0af939c6945966fd5e3ff440e54bcee56d667a0a21d8326f88e5c22e42506").into(), + hex!("97d803614adb6571f4ea11833d0d9ca8221e7fc99a960c637d4990a72727ed2713da874bf156dbaa70bd4c2f668681fa").into(), + hex!("aa86c3bf79ebf46e1cee54f517b7bdced4c7a96d3ab27405e7d68dba92ee6fc7fc91a107f3cca85096f0d2581cb4039e").into(), + hex!("b75cdcde1702b5bd6be180dc8ea26e5534da77b1c7bf711c8447a565a63d073474f0270d78dcec78ecf5baaea1f75d1b").into(), + hex!("a36e810e50d283e8ef625cf684c1fd333a0373e5b0a9d81ba40cabb76299af93c536285c5d7239e86ec56905245ed2b8").into(), + hex!("a16b6b41e5c31901f3c0fc2a7dd8c084fb508947314c4bf4b6fb338d95ff2cf49fdc5de1d6b9acddb1b096b835df6ad0").into(), + hex!("8b580da99256b1d0d7a90dc46a98ce5132fb3928d416f2df5ed1769544692482ec8f2ad5f57871041d8c78d00c949a0f").into(), + hex!("945dc91cffe575f06b4b01fdcd580da57403469a21db6ffaa77ae06d31b8a2aa9957e26db1bf89554611f51f10c8f73e").into(), + hex!("8c55c4000195fd1155ea608f586a327cccd1221036ffd29eb9903f8f28009083203f18480b35cf82e0390a5ffef4bfb9").into(), + hex!("87cec982094c85f6c1e402b74b52f7c0495ab4a2d3f2309734aa0bd2bfdbc88b8bdd9556664015c7d9fe2f138dd7c807").into(), + hex!("8fc576e4f9057d82e2fd2270a787c596bce5fedbdb9f6d612c2caeb1a778450d8c1f6e86dd011a45f3fe7f201e520438").into(), + hex!("a43eb1acf0de695d478a661a71128ce9c58923e3adfb62728a2e9f185c9f46877db645398546a300b75f2c849f5ab14c").into(), + hex!("b57be020fd23d3fcd6057997099fdd648dae32cb750e8d058b62a5e902ee5ca27771d762020cba2985884ffcfded3500").into(), + hex!("ad3ad1089c8232280e9fa2f6c314ae57758cfbc3a0663ad9517e35b74b19e49345e03d1d33d0d7b69d736501ec5b3f4e").into(), + hex!("aed7faac2e65c10b52d7b3009eef010a624c7f57a5c76c55afd310345707bc8959ad619101b9c1ee4bde44152697c537").into(), + hex!("a6b177c7f945cde42c5389f7258689aefe1b6ee0b243f9901c6e60ef1bebbea9bc297689cda0c93aac9b28c7d70d0022").into(), + hex!("b079f925c29da333461adc949ff4daf19d0500f516d95e3a4c3dc2e2f5ce26ba0f08b2473c03b6974146b239532deade").into(), + hex!("8c573c73d603c8ed73ac3eedacd8ffef4c18425699e30d46be2dbbeb3590380b0fb713daf3ff3cf7544da502dcf35cfe").into(), + hex!("a39331c8acd40377f020611ac9f3a758832e0a644a5cca318c01e654696fc607e299b744c0cc2ecee2bca755c9aa3581").into(), + hex!("8e71e261664d5a6094ee912fa7e3e866ebb5c4c610062fb5fd733359d0e5a5d806a3370155ec3b04e83cf7a2d7c4a0d0").into(), + hex!("ab048af1200dfb67b4fb6bc8bbcd8344547e57942f7397c06988c9c42cce53784a0282fc13bc878635a3317b8f306a81").into(), + hex!("b32fc2da89d3a3541a61338e6b0c5a7f477a23bbb9a7c63b1087f36c49b6d9a42d4720708af496d82c56e1e6836f5cb4").into(), + hex!("97d0e8f961033e4aaf96a75f585d16eca691dd05f4a5477e8d3a0fd97d02555d67b29d314b5d150dc0de3b72810338fa").into(), + hex!("8223bb67c99eda58237a765c8fb426871a1a9e02e6e91d956b16e57b8dbc30c0edeb76abb30ecb2f4139a19922a4c62a").into(), + hex!("8d9ec5c0575500e433a4bc66d196d404b8619ea38b0dcaa036e1c1453eb23c6949509243531ce59318c22db6e33ee1ef").into(), + hex!("933ac0e3e6acc7a238fb5495835a591db77c39e27f4034dfaea20bce7c072ff6bb6f59a9823a07a76a431905afa2dbbc").into(), + 
hex!("99f3cab4e8005fdb6bb44900a4f166ef0c2c48dad85c0a127c4d854bca4ad32a2954a586734ee0e57f3317e5b81923cc").into(), + hex!("8f01dc4011ea394a9f7a73b7f246bb00472632fe715314525f1db2cc6158b22dad22d1371e7d0b2d5e72cc408f07cb25").into(), + hex!("96e702adba7420e819338f6f8740946289bca6f24a5f14a5bdc727d1cd66bb7d2a573cdec8ef1333ca39685c33f6e7b0").into(), + hex!("890ab24865a2652a8fb96ced381530192d072cad275c19539cd74e03c001321216a0999ba83c8f3a162bed003dcbaae1").into(), + hex!("96e2f1ed5f78c0b018cb388447bb85b33da331a5a306ce4e216d1070beb7c3900f979ef128e85180c56958c0d729ecdc").into(), + hex!("8ef642d5a1fa4b32fd69f7f57886d1d9447ddd9a8425a03f15633cd688e41054d5243ad6f352a5a3fea2be2f3cb7bede").into(), + hex!("84a3177be656623fe280f91e2acddba52c068cf8a37bd79b9b4186ef199bad65f52cf3e47b581f1964c9987f088fabd2").into(), + hex!("8b913725eb48feaaed46b2e3ddc0cc414aeb433dfa584155e2eaf29020f6f1fa0e801b85bee4bd28831b5cc66944f411").into(), + hex!("88d802c75d422a713c19a600cfc9cd843ca41e35722e21a0614c3195ea84752337ee30991d860fa75a57ce3f614e0a50").into(), + hex!("b43cf4b09d02b20073903bf152f569a43864095622a472656d8a96efebfe3a20dca86871268ffc528a194bd951662d71").into(), + hex!("ac1c65ef79ad0e56184bcdf0680dde5547bd01b95d7e9c3c71671c71683709cdf7fb988440c3bfcb847c26f198b94f81").into(), + hex!("abebe453b3f2430a9287d0d5fc043f7ee434b33feac6b7dab58d5deed7568e0730d59f94b1883e3d43f3c2934d3f40c8").into(), + hex!("84e0acddddd0c202eaabfca7cbf88774ae374e841899942a2353064f132c6205ee378277b2703744a8bea9bc16449537").into(), + hex!("a69865f8a3f66ff4e548ce29a212041bcacdc85410b8467f0515842062b3204fb1b7616e45fb5f46a5619808fb390dbb").into(), + hex!("a55b426b402e9b27fadf87b27cabe5375c6941b22597dea75586eb9dcb699d925db77250b1d755512aefbd4eab0a2e4d").into(), + hex!("8047da13f072c9e848d33a0f397ecf3e783e7dd507ded7a4de25327fe89c183c8dd1da3d419b48f53d93537bc2c1a8e3").into(), + hex!("94a9345e464b9b28798c608115438f1eaaa60a56abad028729dddea3c856f7f871031b4f100626f8bb7a06d88f7cc6c8").into(), + hex!("aac8cc93a4bf5b383080738021fc56cf732988622fe0d493540545b19a6a54cdfe9f8cd2d2dcbd572bdde0d1f8cbb101").into(), + hex!("80f24fae3c8d202e8072092342f8b046dc9edfa1234c86e9f06cdd7fd2a1dc0f81ad69886a8c219f53a94b9a75cf6b78").into(), + hex!("88ac9c1d5c036f14566203d8e18421cdd21b2305cbd20f9857e4edd09e002ba0bb5c89b039cba417b353c6f2f63c50de").into(), + hex!("a45c8ac231d0ddca06f1bf03eeed331e9b524ecafa74642e4c4591cead603d4228cbb0701af58770100964fc880ff85f").into(), + hex!("88767dcda5fc82e5ee515639992868790ad56d2a4fdf1bd1ba1c5be51b381c149fc9db23b93488b54adc89fd4c48dcf9").into(), + hex!("b23e34136c22ac73157c7c5cc8a9491b0b5bc968c95a9c104b402cad9de598e323ada4cc527555157cbecadf48faf87e").into(), + hex!("99910638bfe8b9974a1bb7efed279de750deb046bc21a9655da4ea81a1aa807f2b76aa2a64d773b1b23af283ba3878f0").into(), + hex!("a05869387ea3b4c8f7403d85ec788499a993482538e0e2078d016f00d67571d1342187ae088c788dea518bdf295da88d").into(), + hex!("a7dd1735f178d53908e29db85ba6166640da8c8bc6f717e0da9bf74c547bb98a512266cf737937201cbf6d14bd9420ca").into(), + hex!("87f5b096a1263b51df28417fb423604879b18c4d0a8a48630f70e0f95226bd51a252d8be362df801680344330857fb5c").into(), + hex!("b1a5a549e27b8256c388465be3017dd123a7d257fdb49b2bb409c6430b6056cb8125bf88b5f196bc9e02567a6728c7e8").into(), + hex!("b70862d190351d6bec9c618057e407b43864a0dcf860b31ab6617f75e1ea02de49ff338a45af53783cbf10400c878a32").into(), + hex!("b27a654ece8541b9bf9c6ae0047969ebb69c4687a43030b1c412991dfaf349e2d3caeb6b7ae3d72ff0e2d758a04510fc").into(), + 
hex!("81368aaa4489c992a6ef3b55df26ece993958df2e40f04a95ca514fda56c2fb98f11d61faedd31860b89e89eab965f0d").into(), + hex!("94e14e03de977732b7c7faa60ec8180e77233a43d513a37c443be4fa0bac64308d6a1929de075b5d51efaa9bbd6855f7").into(), + hex!("b2e26d7b979f93e8dd55eea5a0f4985bb254128963a939ca07fbc33bf83ad7796e9426660b2f35088d7aa5fa0cda2ec2").into(), + hex!("850aee846e93c5204c1906a2782da71c0ff9e2d1962a778dac77561846e6f9290ab10daf72f189df0a57c1548bd4e6cb").into(), + hex!("aabcd7f870c299cabe4dad1857b3b6cc3b9fde2b525e9d8ec0fc1f497cd199108971173e61cdd5937c45758cbf7b9403").into(), + hex!("ac069d7ff2633fc73bb0b7607d9c27305a4e15c189c8da396d6685798c12ef179bb44cffeeb7435667fb03a799eee5cc").into(), + hex!("99e3eb82b955b2411d1b81d946e5ef6b9c6957ae0e368f4a9c279a0541c3a46e289fbff526a1f9db4aa21b92d13bc9e9").into(), + hex!("b5bc3dd1e05a66a1d775ae0ad159df19c7188f2c73a8553525855ab34617c7f080e217732003e09b29a5b36b12ba564e").into(), + hex!("aa631a69aa4a9c14de2c49fde83453633d17bb258a2b7ada723bc8e71ef22c617ebdf8ba64c72675440b35d419d0f836").into(), + hex!("8f42bb48587cafbb3936adc495e82981d7fd81d8c0233a4e4d44f9df72f8439a9a0228d6cf9d156ea608caffab8d9eaa").into(), + hex!("94e079215b8d187d546f33d5384673215ee65c70d3bd0778f67c11665af5fb025b4302518a0db6266996c136ee90d4e8").into(), + hex!("b3dcb504b50dc58ee7f2e2f78ca884d5fc081b570d1177b884c92bd34272ececf2a9319cb1cfb9df011d4db3ad266e42").into(), + hex!("a5e5b55940e379e6c0fe7c6ac9ab86f3836f261942e3933087f1e1deecd280af9afd95d1bfb384976d5947d5069531e9").into(), + hex!("b1a36c3a0a79836817a2890ac53c6768ed3965bf5d1663e2df69b1bba60910e84dcd4f917991812b305367786edfa288").into(), + hex!("a8d07cbfbbf31d113b80d3a1f82ec7c29c4d78007efb66b5592255acebbd8e1b0c8b927a866c79211d5d4994648153ca").into(), + hex!("acad1228fd1ffbc118ada45a27f33ea02a09455d0c295510da693d741ca3b5725af41b99967ea6d429f604736a4fac81").into(), + hex!("b201ad414928e315aa00dc60b89c7a15464d5e97c30b551a462d02c35e327d2ef3244a98a402f9e055a2f9af6e970733").into(), + hex!("861689f35fb72780dc0be92c140dec07857290495baf3137bd2e83ace2f268f205ffc58edfb0e09f323ea5f14d0ce10d").into(), + hex!("89138730c80c30dea01abfebbed79bbe6016b4924193d9c2e8bfaeef30616bcd92f0eb24d5345bfb005bcfea989fd8d3").into(), + hex!("b78092afc3f16397d2eaeb5bdd7fc6c01ef516a71102124febc0cb443f4446c18037ae75c7c1d0c8177454b092922ace").into(), + hex!("a739cb664cdefe7a2f38333fff13bacaca129d718a043fae1a1b7c4251a77319b44589429dcb9ad113f24e11d3b75024").into(), + hex!("879996d4bed3d3235c0f73ac8f3f612eecb6aef6756896920e0229f5deb1d91feff95734e6b4143ba89badb5cc1f0cf2").into(), + hex!("a213d854a0496d74526b3c37a48d6f610452b44202424a419acf206df1cf76f7357ff5c0899e45adb565535bb09c29c1").into(), + hex!("8afa04d66a3e8a2759ff088395cd98597883b3ca6d8811703f5fc74b822ce4e56e1dddeea2c099fb3e0f6648f990f1e9").into(), + hex!("8a2e7ca192972af2b77660b07aa612811fff94c951532e3fb6829e8031355363f4aeed0f9e02b845f00cc9ad4b744c4c").into(), + hex!("8f0757fa7ab1eabf429802c3811caad65833e763029c3aaaa43ce921abeb277d7dfd06e0e58d36e494871ab9bb090668").into(), + hex!("91c0c0b0564fd95db51c73637fad622e6769206bfa03e41474a4e68369d10de7da5d1bd2b5d226f0564cc1ee8c3e9074").into(), + hex!("82b35466d835a6f13080628ce407cfe495cbeee26a5168de9e595a122ae3757f2eb0a64a71ff1ef6ef26c8cc97ec1f52").into(), + hex!("80d11c7a711fd2dbfedc76fe018fca09295d5a3146df92496ba01063e5e098198cd9c52d3802e6cd033f64b3c651b67e").into(), + hex!("8607de2cda6838c70f262dadb21409649900c27a5bf3505ce2166ef6f616f4b7119aa3e3f3c62c1a508662d7d68e8f0e").into(), + 
hex!("a1c14cbb653115b6225f53e3e6ef8e25f87cf47315b25dea5e658493121ca22733d3fd2781920dfa3a04271d58970749").into(), + hex!("aa876cfb3d572bc1f84a5579dfe8df82a9177b441492382e8ef6528e28e46ce59fce9a82d42c1c2000b28cff06596d18").into(), + hex!("8742ee128452ee98f21360f903c0a57e600d622d4ac793f32d6732f5fc315f757bac89b0f39a4ddcf4b8668cd02f3e78").into(), + hex!("a2e04418db55c0d9163a1bc242e6d43230a943ead121bf8a5f50c109e4ecf0fd99e5b126a4fe2ae9b0a248e613b54f7e").into(), + hex!("841b7c0ab57c2cfb1a180a9b0a2875a7675624f0e5c779f01f3f92a1ea547cd1164485f54bc433d71c7b054a6fdfff15").into(), + hex!("9855f3506dabfea5a133ad49557c3c9e1c7b6965215cd940bce4bfa90e98d9c62999feb29da0af8768b99f5f82c64489").into(), + hex!("a65939fb29f1d913e36be1f877c8b9a3549ec17313c4354b1834cf7ca9ae220af26a72cdfdbc59567bcd7e4152d90930").into(), + hex!("b64edc36e0bcd48cb350350fce955609ae51f5bd197cb7d42b04a2ec7f8dbf236b2a3b23a6e0778d57796433f0e6e9df").into(), + hex!("9379a72a722c1a5c8399acf72ebadb7ab1c5a2e18137cd3850b211dfef907850399b6151ae7bdb590a6eb04387ce0c31").into(), + hex!("b621023f0d3c731f49a48378c3709a0c051fa1e3f8788d27169a76dc35d46cd6095b32d7e91794c35f4af8d75f950411").into(), + hex!("a82421a53687a444a065ff1e11c439cb7342a3edf496f2ccc04f56fc6630bcd79ccce1437479a6e7d6dce918d3d45181").into(), + hex!("857fb59242e6687e940fc114df3c06af5a89d85c762140b1e4b0f8cbcae9d604f435377d7a2d153a65e0dec099e3e8f3").into(), + hex!("95e6a571cbd7c7a58c1c599cb4c837c9f31757a6ee4ed6740e9d55c350baa847ce6d081023b43b397a3798c6843baf13").into(), + hex!("adc1e1f8523fc6e3d683dc0ca15ebdbf471de635f25fa7be6fde9907bd3fad130baafd7d21b43fa04738d4c19448d788").into(), + hex!("85f8ff5b661f9e529421f7e5f831db1919ba3170a59673546db695c3af8a82cb1ca352e07e6c801ef9fe6f501d5896ad").into(), + hex!("937b374871e35266c2815e4d0ad72dc2b6c756e840ec36fdb90a71ecdd4afe13f6064ef36b9d1590a39a7be2156fd728").into(), + hex!("ad4aa9b451187905652222dedfe6135111ce4eeebcca74ecc74f3464a07831754eb0072abfb96adc23d0c5c33a1d9f16").into(), + hex!("815bc7c9c7c84396bd0de5c71b78f2be5fddbbca3f600f341a21533ae6dcfef8bb94f4340ec2a90f40ab091efe4cc6e7").into(), + hex!("90bef1fc273005610cd79161686b25d88ed2ff2abe18f16a4054fa05dbcbaa339825616c117f55ab26d12bdf2e414f70").into(), + hex!("b96e51c2d2bd0fd78c4d3b9873d217eb76642c329a9ef293010fcadb45ef2f9ee3a9c34b0365e344d33c464c08a0f51c").into(), + hex!("94c4048c3fe7dfe736458ee16566027290f93b1f052c3cfaf28f5c33c32af6b9cc960d86181d54361dcc10aca9f81a58").into(), + hex!("aeaad402961126722aa5033c6bc7735d4cddf35ededaa08073ab1a8412e5d1d06e95c58c7a95409edf1566ae904d795d").into(), + hex!("93d019b814d00d5eb6e7545e6480da089fa48ba34f0a961c704db12e34a144818c306cdc4f31320e542c75eb0f1ca96a").into(), + hex!("a3c21f7864512c38a58f02c0c83993ce6294329b074b801404e4f941d21e4e7e5eaeccd41da8ac423c967b7230b2a505").into(), + hex!("80c26a2aff26da9d8d739496b5a63da6d8d35544c71b7b05b41ce4cbda89e6d32e85fb1e38a215aa01dc64cb43e089e0").into(), + hex!("8c5e66d7668ab7e0de06ebab4a4ffd13f24e4458610e64a972a1e1f15356f6e745cf36b8cde658d03817f2749616fe84").into(), + hex!("a1260c9a6727d6d4a4c147e0c7ba91c5e2a47b5a08a07a3ba0eaf9b50360b6919495b4aca5f85e8fb2e4ef1c307286c2").into(), + hex!("9150fbf49242afc6ab7f865d4a92e7013eabab432b341710232e0fd971eb2b214a3da5b82617a8b7defacbe060538ea6").into(), + hex!("ab44ddafebcba5a0fc1002f3bedf595f3245ae07c9184d640154968e4993d85087efa8a173a670d07eba1c00d3ed1c5f").into(), + hex!("91580bd78343a09b62e31bdef63dfa9e0c874d7b39eb8a4300388ab053f262e118f790392f73d4fcf7714b521690d94a").into(), + 
hex!("ad4b8eb477ff8e573a911a1a4c1ba027088828cde7907e193ccc4b853aea74c66d19ea99c3779f6ce4d505ad83f2174e").into(), + hex!("a5791ba6dd8534607100f405b2f104c987336d5c47a544ed571d0babc6dd88a634296773faacfc3fdc13a5f7ab0f0cc6").into(), + hex!("ada0d1c948bd8f66442cb4b9cdc3a5368ab6c585cd8be766b468864a8fbd60535e454943731d4121ca134743d05221d8").into(), + hex!("a83556f376d8cc4a26b53223b11426da96bdad5351c2fd451ea053346b334eafef9773e3486928b9d407d3e13d5dcdd0").into(), + hex!("83be01b65302a31c5ba09c8329f683904943cc8017cc8975272d7d284b6a15a3313e27887e4de9110f64445581747ec9").into(), + hex!("8acd831b27588a99743c5d4f61e6f0610faa530d6259f187aeade29f1c9d5956f1c01a5faed8be8924fe1e8d9de03571").into(), + hex!("b17e88d1fc760f3d6633f5b48411b7237e276e2fca621d3db4619da62993bdeb3c12cfe7d130a92e4d3a14693d1b87eb").into(), + hex!("b769850bf9b77a1958d5eb932f99807cb695eedafd476d99131e3d7340cb845a33cb2cc7b640a0f3b14901b506802ff8").into(), + hex!("89e7eb7cc852326b6e18cf5c720c4e44c474d254de9e912d22510aa4cc1952b5e5c40b46a4907be375b89bd57d9f1152").into(), + hex!("af7ec9a4b836709701fb497f69dbcd0d94bf986fb6894f48c67014bc8b0ca947da71722d87de0371923f5bf2ec82ec64").into(), + hex!("8d3ff45719a7fc5254e91c710d956a16b8e8435fc4c8f68d1a47672335246bba9627d7058510d25417b7eed5ece5c110").into(), + hex!("a99a7b987f7050c230ab1adfa50a30b4f3782cd31467ff9c2a749182a1974a36a6a375ed5b0909d1e627b32ce0245ef5").into(), + hex!("a0bea35f339b54d82e345204fd4b75d41af3bd08d33b223211e496ef7fcdd8e327dc5a9ecb6fcb7de134b3eaa43d30f5").into(), + hex!("9419df6b2bd022fb6c79566f932c37828ca7a5a1a9efb64f470d7ec06e0d6b0b0147cba88814581a0773c80cf3d21033").into(), + hex!("948b0cd553cafa57de03279b83fa4f28cdbd0ec4e2219e25fad53c9d3d28c619ede568ad6e095a155d117caadfa87551").into(), + hex!("b21383b264f67c8c66011a79e20a4d739d1f8fc258562e6351eb1e1c5b83e42090f1525886ff4b65875868ae17a8faa1").into(), + hex!("aca93939c30eeac8fc83c82ff6ba3549ff38121115a60b7fc94b7d64e1f36f65e932bd8f3bbf2bcba986c9309861fcfa").into(), + hex!("a728507043b7e86c0bf19cbd81a45e1ac98d2edcea4c7faa3381df13f6352232b711b03829e3eddb7770213866dde7ff").into(), + hex!("ae0bafa42eece82975171e94b14e7063a09bbcd44cea6b7da4b820cd5d984be4c00a2e9e5137b0d34603e1ac914f889b").into(), + hex!("b63cf4b55c4a62c50c356cc2721ae5a89244ba9aef2c9f5c93762837fb14197479435f593947c8943ac77e6a2ade0208").into(), + hex!("acb5cdbe2cc7c44ad3980f9ba74b0a97f36add3fbb4e9b513c62157c14812aa73fac68be8c30170c39d1aee626f5a1b8").into(), + hex!("b14ddfe1c42321feb8ffd76ac041814f3d690fc16ff47b23ffcc247e8722d50ae001b6df4e1af6cd7c67c8799c8a1907").into(), + hex!("abe60d6024a9d6874df7e59b4bbd7e1e55da22adba1d16320fbfa2b68e8db995997ce6f81f8809e96c40f548ba005787").into(), + hex!("a23307e2ee6d96561452294a9265cf0eb1d6f86b30c7ec48066cbbe889eb7f0d64819225293496db709a1fd60dde7e5f").into(), + hex!("8bbd00e149a9fd5eaca24581821c3dca114e008c3e92a36db536944f6b5e5e983628f155c2319cba9a8a2a26d3885add").into(), + hex!("954fcbe0655b82bfc15679237d98c3759a49ffd0eb7f5da1827711814b92e0a4be2c0b7a96fc16ee3e31099c993ce6ef").into(), + hex!("a9bb0a14061ab4de136605e94899a41c3585ed190b2a97f529e911f02ff389652049616b408aaaea81d38f08a8f6c533").into(), + hex!("b58fa12cd0b69ac2e5c50b543bb15abcc3a0c96cc9cebfff34c4f7dc83bb5ece69d881348860385456eb6198ecc640ad").into(), + hex!("b73597c5ffd3a812e8a553bd3ad2216282fe7c1203120624b86cacb8a7421ea6807e29fac3383cfd61d632db8e3af5d8").into(), + hex!("ad153b873be0eaa71ad3b0191067874e085164f8428b89c7d2e01af0802169a9afa1775ca0f9491350db9e6c7c6581e9").into(), + 
hex!("8a7fcbfc564fd1af76df52ac5802a7342aff25d745307d2b9cf29c4470273686d9877b4588754af0e1bbbbe0310c3fdb").into(), + hex!("84553c8b77c7e5c81bfb1413cfcda7f8fd95c78c011c19189784be6a5e7352248b3b30cf5c80d9262de6c35ae6d4f1a5").into(), + hex!("8866da76cc8ca6522c3d41c950ea7fd67d448e1d567ecfd0cb916912d597b754807f7489f5e3bea7b4110cc8088ded24").into(), + hex!("84adaf9a79d5c3bc8dc7e669ccc5d4964254d0fc32bc535e54c5e4a4f45aa3c409c11eadd4fb21f4c831329087adef06").into(), + hex!("a77ce1ee4f8feed6ffa0c5cb8fb7fe0f95a03117e746b56b5e8178d27a1582804e84a86aac1cbab53aadffd9f84c0bd4").into(), + hex!("863321bf40995482cff032854ad5017bd885baaa6ec4ef47ab6bc713640b1e258eb40797ba049fe677937e3ff7a2ba2b").into(), + hex!("a46fb6fad471bb923cc38748253f887b53153ccad475240bf7244c1f9f568ade931b0522911348d64460021639bd831a").into(), + hex!("86604e383195be9c40ab728db426af87698d0e34157edaecc357544037d66d40e558cbfef7b005f8db3c9faf541f2c6d").into(), + hex!("abb7d323687c1d0ecfe89d411d9a81d05d009b84e652af437cee40e89bd2657641cbacf28120fe93deb0a1d3b410fbd4").into(), + hex!("a8d2488d99e04b79057739e6e0522b38a0f68a21bc190696c38d96f0e58a9395e3b9011e54d2cf7e8fd0b380e753f2ea").into(), + hex!("a36fd6a64f64f40ece0babcca8926dfc005cb1d90e4adcaa9c01ff3bff8d73cc0f95bdcb4f09d7e3b5d761ad5c3c065a").into(), + hex!("9699c0c5b1695416470c302f3097e93b94004f42369be26afdf04aad49bed67851d50f14c44efc0a90e311ecb27b3387").into(), + hex!("929e6ba1338579c1cbe76f1b075c0fd9725adfa97ab8b821aeee75133a874426414fbfb5cde7f7f8b74fbd8b27bbf7db").into(), + hex!("ae874a087a61c3ba4bdc2a582cdeace6d321f81683636440943dd860c783344d4133196197a108f6f473bb1e75c597ae").into(), + hex!("95bb2da076a9fc25e96affc7e4adf71496dd5802d7443cb5a77e3d52ef544aaf939c0884169df547000b3afc55cc208d").into(), + hex!("b0b63d1993f601c8aa96448183ff560f291903e649192e2e34e796bb66a31f9d0edd0f03ba4f1d299fcfb1e931abbf39").into(), + hex!("84cc92f8897d0bc0efee72d62ec3a8c07b7c72e00913860623982bba412307c2c42069ed90dd996bc56ffd0573b607f4").into(), + hex!("8ae9804b99addebafd3672785d4402a583c97821087589b7a129961b6131fb18c2fa60d606cef4f636b6cbe46b5d6415").into(), + hex!("8f4c85506e99d383b103217077c70571ad8b9046d039174df6d9f1902f8b85143754969bcec37519a1340c79046f1c32").into(), + hex!("81a5a0214a381d72657e1142a781fa8db0d849e1e012babe4c912a1edebc5dbfc265bc7fbfdf8b6ccbefb55eb0fcbf86").into(), + hex!("82c0e11c9016d95501a97e551b8b926fa317f02fb6764cdf0981796e6e23cbf13e48d46bacc875681900fe8c1741cd27").into(), + hex!("a00912f7bf9abb33f1624dbeb5a960ed32addb4d6bbf9770b6d82d514eabdc339f751e41c8f4461e560141b53f086f8c").into(), + hex!("8d905bcf245556e52587f92957459a41b9974b5d8b8d2baf2d8a98edcac2a77fc8b1eb70024e1e28d5c7a190d9f2a77c").into(), + hex!("b8e19d883289d97b0174cebb92d12a8c6ab16e4a8f0db9d7b67ccb9bdf97f070352e6b24c2decd89e7894099445d8b96").into(), + hex!("89cee93ceec6e742aff71ff60085f04e9550ef5568012b4ef0aceec9928c677f9711ea553499db812bf80eb5df021396").into(), + hex!("89d58564c0215295070329974e51e528ee4d9cb197b089755a86451098bc2f347be8be5b0cc06240315f75222ba2e9a7").into(), + hex!("a312cf33dd49ba6488ee13200193f06a5801407a15fc79956f977586a27a4b2d4cebf0da22f6c1100ce2a0d08730a383").into(), + hex!("acb541c487eb8fb4034ca6208f542c5adec863f1346dfd50fcc0ba1c6866e43f0071c8cbdd62ce6a2498e16e80855fff").into(), + hex!("98022eb774377f49b90f41439cc6703fa152d1d38c0e0c78eed49cbc54670369cb2b7acbbc37dac6617e57e527e41b83").into(), + hex!("8f00d44e73473d7b96a686d1d3b0848095d0514b70128c22ccc4141219dc3f5d2ddc3345cc506ce0e747ba358289bcc6").into(), + 
hex!("911e37896367a3e8603eaf995480bbb62229a3758a608c4822410e46de45a1048ee6f67c2039aba9fc95281ba5476623").into(), + hex!("80a349a605d2968fbe362e40672b33eface969c975ef75a8fe82ee7ace1d0b5034b7af8667650e813876a8a7484414c8").into(), + hex!("8b00779d873c6976d8b01afc94734fcf943c1819ab1c46e512e0c43469cd08b93158caea1cde84d13a48f27407048748").into(), + hex!("99030a66c4afac7e3753abb669cdf576cf96e21b1d698135148ba133e2d8fe97b4875d770e6246597461958224f653c7").into(), + hex!("8766f592d757c09f617090eb8f226016073a992990e16fd64a705c0c3104b202d36de18ccadcdb3dac5d68afc2495b4c").into(), + hex!("961cce69a7a39c20500c96332d2ce4cdeb3d844082edd527fd1694cf499b30bd33f06da66047d3849b49f4c2ccc8bcf3").into(), + hex!("8bd5dee639c3ef32712931295cc5bd0a8820192aafd35d1f1f9a24130bf208b7cc3f2ae99d6fce02dcac4c8225564d5f").into(), + hex!("a89ba310a62330e7396ae361da7a74a596e4a8be02496c8f4b3c860ac5c3cacdfcf4790d00b2ffa75fa900db2bdb15fe").into(), + hex!("89bebf6f59151404989f282a567c378f2f5a04d85225e23e22e0963da27673f3c7e8990dfb526a1133a988811cd03f45").into(), + hex!("81e9ec6ed189c12ca8d4fe32e21c60834d7938f739545c7dd76303ce347b69beba9eb14ab780c00cfe3804c5756417ba").into(), + hex!("8bbbd7b584948e34852a26d18b9dbb46f2974fb68bc8317ba5a168094a74bbe2304e1dc438777ccf92117831f7986c84").into(), + hex!("8ffbf94991bddefde2bf0ccb115b00d7b19a6a448f093816b9db0c65a43a27519a52ac7b88e6e37a7f33d384d42b05e7").into(), + hex!("b62267be83a54451ace1b8e2b54994990d2e1d619e040c2075cf1906c25089dcbc08ed8c2f2f8f62953b822f163324fd").into(), + hex!("af6d55c342e0e7f0bc3ec547ebb4688a884b59410aca90ad6d8730b4fd3543952fe2476e2db871618d12901fbbd2b91b").into(), + hex!("9027b69f6e5d550acb459f8b4b9e3f05cf291c104594cd244b224eeb8cac419800ebfd5255d87e0dde31bba662e20134").into(), + hex!("8bd160918bfd8049878826f443fa416fe32bd018262b1b4802e015ffb0049197c34d730c5ecba951a93986cca1e23825").into(), + hex!("b82f2bf2bea66697c4b5ed6d340ba74bbe0dce84b2d23904270f3500507318ccca0dbc967a69c4379bd12766708dcbd8").into(), + hex!("b4d00a38be54fe5ba5984af648c9092b133f21b22e56ea106442421c03c26d282b81d31ef8d22ebf92c0c26f86d27512").into(), + hex!("857d95d8aab25f91e7cbe0ea70a3159723566192c1d6dc0e68c2b19565a865a0daeceb4b1c733f75b0dc9cbfe246d870").into(), + hex!("b222a1f3f2d05912987902a861796f43cde8124f7dc398170beb76b6434e7095a8e2d5de54b2692490cb0c325cef8956").into(), + hex!("b016f69dc65c3e72e77d43334863db2c364f3697c552d4bc0730f45cc32fab5f60a5dc0f9f2f6df409fe3e2ba3f2f3a2").into(), + hex!("931b966d70e048570c463bfe7ce7decaea3ad80d0540ca079ddb10958398ea27df85de2fb2b7c238d0763d6293d34b4c").into(), + hex!("aaa3cacaf65a90a6a8e8fcbb98b673160e5f410b28b08a8733444cd71de9e807e00b146ae35fff05160a786eba6793c7").into(), + hex!("8effa24ae2c3cc12aef32e737faf7985c03e2ccd984cdf740f31aac7a93ac295be7fafa3a4d47812d9e0fa5bc2b3472c").into(), + hex!("ae8f1044330885e22c376ba50926ca10177799628b1c3f6b731113126ace5faa7756ca80fda0c535ddc77d051632266e").into(), + hex!("8a6613706dce5417736438d9bc779e29646a0b12fb1c5b5e118aeffbe72d37ce71ef78d3da4f2cc8c1f3bb47f8721cd8").into(), + hex!("8a435b4d265a01f7de575ee8893105de8be608a370c2f1870b7b097bf3635abbdfaf164ac1c704a6c9b31d7baf48028a").into(), + hex!("ab842c0851dc81e247a42806ff83a2e23b86147d884cfc828cbd1f3abc7fee929657bb49a2910975be746f97d0bb7c7b").into(), + hex!("81966af3ed4bba12f6895bcc1e2d4af8a0b313f45446f4f2e966494460f77015d2c9e65eaf396653f8f55e50413e7986").into(), + hex!("b61357419b1d65649e79ccef51d68d1e7c746d77c7f32692b6ff315d9dacefdee2a527816ac3118e5c80e00212725c87").into(), + 
hex!("b3a0c1e36006ab666e4d4e98c78df5630abcc76e86b3c13c342efbb64c2f669d12a98e797429871c12a7171f7a751422").into(), + hex!("93601527015bc30178505d37cea121f19145e366be178e42c9ea7380ae34053c45938a3b4d8ef852ff8701764bf74a52").into(), + hex!("998a446e7b4dbd7a7a2055f437859dd3ddb44c52d3dad9250f085d797c821ed91a17dd13d00f532c5f0f2321c5b3eb9e").into(), + hex!("af35c5f4bb11a87ee3f626007cacfee4ca892851459cb9cb2e127e92c9c274f9082c905165758976f9c7bbaeb984acf6").into(), + hex!("a0df73b065667fa0f6a4894aba39b3e4aac620fb1a8a3be96c94423231917c3a7d75f04383b40cd802909d9cf018b0d1").into(), + hex!("85c9c5a5302b706af5af436c07d1a1a952ee1cf4a0cccf002f514473fcf85d05bc4c23b5da2d6d0b5d0aa503f7e41a65").into(), + hex!("abd1ffa853de61d8b26e6eb6c7eba5636967c155233a6d73fdddd361379ec51a74c242715b6ad0033a6343157aee7ebb").into(), + hex!("ae45b130af61f3e76012da75b19d46a786e0c21ca7c6b5bde193b2203d6d8b7b8afad25a198e8c920c69954d3d6bdc14").into(), + hex!("93729120899ef573f6f276a1ba861a400a35efff7a2074400bb6b5df818e3fc1f353cd5a8e4ce122a2bbd8f5b30126dc").into(), + hex!("b34615b2cf8912c51c02264008e0cd78b79c87b87d56db810d899490bc438d446f734ca958c7c291aff68e3211ec8c5c").into(), + hex!("a1ca372d158fd7eee15091582c6c1b9ac9854959677ee25e5786a94cf8c1d15b64f0019aef20331d9675e1f1ce41fd6c").into(), + hex!("a4bd66cf90f38233b579b8698f5655f077bdb1d626e1d36ceebc67cf7ab8ee8e129cbdc307895bf0fb6e34a4aeeffc68").into(), + hex!("ae06f6db3a3ea3a21193f6c6231db42d18fa3aa06a8295741bab35589dcb1d51256838dca01356d580e8c423c45ddbe9").into(), + hex!("b70b5f0cf21cb98c70996a9eb6e4b3562732505299149bbebef821477ff406dba3979a2526b9969213b9ba75e35de3f8").into(), + hex!("95cf5980f21a58f4604ffb99c8651752f724faacaa216f8c7cbe400774deda53f26aaa15fc6415e936f52ce13cec6ecb").into(), + hex!("933c0e5bbb358f5f83cf9e60c67ea8fdcc0b7a203fe5b07131e3bd69295c507880a1e542ab2a6a8d182866f7c6b14a8e").into(), + hex!("a8baa60ce583afcf85e4756dbb0a4871b330ee70f7872c3e36ac4d43f2587fbbdfa2a13162ceb7efdf897bb96fd2d97f").into(), + hex!("a127b3828c422ff51a067d482fb67074d45fd0a86bea5066c7f6dfa83f4b82b4584646518e898ff725cf5de055c6b236").into(), + hex!("871ef5a7f50e5ae528eb16bc30ceb64b97d111896d34fb4a65c93c8d0498eb7032033eb663f7b169a8af4b96e7acbe21").into(), + hex!("80d0bb10037029f0d8f8c9b9b46f0d0ff32b2198af44a4f84c8c0ace60f2b39f8f8d284308769c6075e9425d6229905b").into(), + hex!("b882ecbb78c758c951fe53b434af25b594e602dd783787f09ed077b79f7dd7851fed769a1593f5a5b938ea2171987d3c").into(), + hex!("ad41cb47b16077f73cbbc157527a17c936efa78a59d1f36e3c0dad67cd19fdc60cb772556018029611aa46643084f024").into(), + hex!("82ad3ad7a706ec19b39b0c8cf75d061ea3a1966dab04643f5b9d711e6651b45f0cd22ce5048cb51e4e118b305bdf231b").into(), + hex!("8286a4970b8db361abd04e5d197849dd335b7074f9c3fb91dfd19b7f43d2a3ae9e114b0cb6342463986d32d262c34d79").into(), + hex!("a844f14ffc4c99989a6c666dcdcc135c2fda96914220b1c565215d5c2c3102f5413b7edf9b25882a02a19aa78c2bc545").into(), + hex!("8e758f3b03fab7f5d0993e78674efe3f9cc211e268c12d23911fc01ae7a4c8f879a393e3fcde0c05c10106a59b59ab72").into(), + hex!("a973caa021aca6f0470460b84df9324ed894a441435a53c4f0c48fed4359242ee71fb3a0e4cf438839ed838f37e5c02e").into(), + hex!("ae17c713f10747282798487f02d25d2d8e7459ed436d90a895617afb9299ee81994ed68ef87ecdc0660b7565c323f0e8").into(), + hex!("978e68aa5f44daf9cfb9220147ff509ffde89d121d08d982a0fabae9f07cb3145c2312ad200f2f0dc051820fc54d07c0").into(), + hex!("844e58a9e35ce1005fd5785f56fdc9b3f7e8e073f48fa40da19a5e9e84aca00b8743c5407920cb554e926873092015c8").into(), + 
hex!("b296da231b6ca9d5535432c81f7d0c20a71cbd32d357740d1543e1e3910ea5d32b005938f9273af96e401637180f4606").into(), + hex!("8aee25d881e7ba99fa6aa2fc65ebd44aa498d31ecffc595ac8cab010f6cfdaca308f56e616ae51d7e2c2e15864eda0bc").into(), + hex!("a04298e32052d7f91096285c73d67cfb3f3f5463abb3d7caf3108d8d77aedf9896c359986ae598bf9602dc90e6eb3178").into(), + hex!("8c336e463dd98ecccefc55fa366ac70a2fcaf60acba2f2171490642a6f616a1c6b72601bfc3533a5f49ba02dd1e39fa1").into(), + hex!("b00b383ef5d68f0939c0538c7564614401283c6923dab4db4c72a05a88e05bb576ac374eda61c024345227bf45161e05").into(), + hex!("9273a1a6c9cbbc8d5a46498b7658f6125e955cbf19f0461d1372ea9de200688e4f7376b23b132b41cfd672fb42ec48b1").into(), + hex!("a7f04c0377207a6bb5d96e2e6cb9f7696d5dba2acc3dcd6021ecdb3d121a558999b2b3d92497f72f28f39a551b2fbfcb").into(), + hex!("8d530cf98af85dbd0bb7b1f2fdc24d499b19a941ef431bc7f37ca9328a4f6ceb0660eb87edbb1a5d868d3141fe6c51f5").into(), + hex!("ae88acd7fddf35c72c3ad1c507f8dc185546d8acdd92d70f00991f50afd67809167ad3171c7e45976456fca033f0a95b").into(), + hex!("8df71ffbc265a4bf475ac7ebdd5eba137f4c3b585075ea8957757661b3f11e7a92888094e85c8863ed53d91df45e37a7").into(), + hex!("917737cb24287f30c899dd88853ee3b9be54ea707ef38f1545ef9e436865be1399fd2de4d2c04e3fa5b4a3205f4305ac").into(), + hex!("837e57594b1b71fd9f25f27967b721df500e3d7c72f22e90f4315e10fabbef027ec1db0dd9863b817071fe3c9413a5af").into(), + hex!("94b9c2155509b2189883d2237cd37c9ed19c3a22203e9e2b045184aed072e406e93eb7b5c3fbfb85eeca4c5e630e4ed7").into(), + hex!("b022c75923080450ffe5ecb8e01972785628ec2027b6bf3dc2b09c92ba8ba55222965767c21a240705ed5af6e9d92695").into(), + hex!("b993e15339e28a472b3c98bec723ddeb7728822571ef1fb1c2a1607a4023f37d663b615c7855426170d9ad8f6a971617").into(), + hex!("b34db4df6a97056021b088c53cfa7cedc8e585f907a67d1ee8412a50d84e5e3d347011fb99d5d71b111c88f5efd44610").into(), + hex!("b582bbdd7e0d2ccabe94e6d193d1b8dcad1932d1e96ae8e1a295cc05b381646f682f1f66cb90ddcfcd7735b335ea0242").into(), + hex!("b289c068f7c988a69173d347361047211c302abfafbac1d87259388b5197274f5ee90d56228093a42eec32039e490868").into(), + hex!("88e84114e8e536051ef5197ad181f96ce13fcd5627f18964bf4bb2f461c6638033ba363800326e494e43aeee94c62125").into(), + hex!("80d16c3e8717274533ff3b764984479b3bc709f11ef5129644dfcbb5d8bdedc7a8cb2e539a4524bb0fd4d977ffd25fb0").into(), + hex!("9405d059e30017152eca6d6d86366a7a5570501d78c3869d638a2b8a0bdc8c5bce9f0b46764e78680e2fd41697af9d52").into(), + hex!("ae23483a1d25f8b9a5adea9527560cce5552994b3964ffde2fdda0ce7b6156e1d76e698d7314b0820976776baee37b63").into(), + hex!("8d8395fa4ff7ad0ae3be3d3c446cab058890cf7a07d0a2825e22396cc938cf2d7a986745be5e3c1758c5ca0dc29c0ea3").into(), + hex!("8c5899cfb437a72d99085d8abf54eeb345d7da59ef93978b0cd9207853dc491451939f4b1a7bf317c87504ce949713b2").into(), + hex!("816ca0740bce43365bb20e41c3d0c88cad587e4c743b2c0cac9dc966aa8de220da347d65392a9b750a2001499027e3c5").into(), + hex!("b3d23c55cec1d18bedd276d1454f93ad28c72d921dd6600d8102710770f52b79ee8cf445f6781c2ca095c9a25d41489f").into(), + hex!("95d541d6196c1221dfa5ff213dc3e658649a3cd4afc8e738631fc7b6914bf0dca74f41cf382fab364dcb0d2d6ed489ae").into(), + hex!("a1ab2fd361b973027f6ee6a9f8f2c081cb5d7199d69e36c280c7f9c3e99b1cfc994ad7f68ac0cd78c61bb419251fa14e").into(), + hex!("ad36ebc0cf82369457b665dfb2f8444fd2add0b49658cef2c800ab0297bade2c0249bb124f3d321536933128c1149c92").into(), + hex!("8f917e000d7688a0f508777b8db7c0ace39677c4458d7e50d8c9dc59d32faba4abdedfce3af26cf3d04c020e526ab597").into(), + 
hex!("aa994238e432c51896efdfa240f75fe40f1b1a7b624ff0aba66a35f827b9bf1197de3cdc0bbd9d0147304061ced0325f").into(), + hex!("943da065ec673dd41351270747e40c1f5a8dbd7ba259c501349ed754ffb91c56747a78c392cb4b78a796d748044798d7").into(), + hex!("b0c45ed1457710daa48edf2e61ba59988a8257ddf902318c6bb00be7a4ad8235b46180f7353d9f0c0f747a4cf219d1fc").into(), + hex!("81b56da0943d2940fc8041a51c74dd03f6dcd8a705ef2ec3b685395e313224861f29de31205d45e944e437179f19398d").into(), + hex!("aadd5eb1be98f3fc7e93925e46062353576ef2ee81421fe3f6850701728b8f74637d66cbcd344364565b0893d8bdcc9f").into(), + hex!("a5dd3dc1e172c899f4ed17fbdb842bea7d7f3f0d6b284af9749e25acbfbb2f9c1afb1848490bb22da9ddfeef30232323").into(), + hex!("84ba149e940db1663271eb16e920442bfeb035fc601a7389f85c78ce7bf27b13b5c1a5625d8b45dbbf199caf2d753bf9").into(), + hex!("b5634eb31a68f45a2aa17e8eae7752d8a58673fcc9efad34118b0f3db7415edfdc27166e6d809406da0bd26a0ae1371c").into(), + hex!("a2ad4aa94a40f1c159c7588ebdd77b80edbcfd95e867ec6991f711d39b4dc911cfb6da6075db23bc090218976e9ffa38").into(), + hex!("8de6002f3e789b014254b32161b5595257eb01ace67a8cd9657235e2d04f7ffce6ae0d059488bd9dc070c32b5b7b3fb3").into(), + hex!("a5f659b41f35fe6a9f43d1f72c803da876ee4aa5b879fbe5d5e93be38dcd5f10716122d83afd003b79b8120d83358884").into(), + hex!("a64e6b71dc6ab9eeb17d50e1d2516c5ae63680b50a6077fd870780aebffca70a8b7f8627e23731b79b3866813a20af0d").into(), + hex!("b0003260e70f86eeba286d3d9f6d73bf15f084e7240d9aeee38a1173ea5c47fd9a9637384204001873732e6a407edadf").into(), + hex!("8d4df6703cc9f1d0760c67ae6b20928ffeb6b13d67bc406b8a534ecb07d6ef415a106ed992b50267677f7d114f9d69c3").into(), + hex!("8cb982f382ac327918387c16c73de1ec5ff979923f1110b9660836ddc2f5f742aaf970700f7b27fb2fdacb126d341353").into(), + hex!("b6988aa5e4043e278c01c83a9774175b1188bc2d78b96817a7fb406f86aa4395b7eb666267e819a5a0615803f840171c").into(), + hex!("b69c70007de643e3fbaf7b557bcaaacb67288ef6ff616c9c89dee0cedd33a76396a90cb44207225568870c7c5601438c").into(), + hex!("971102768c0dba73925cfff2053b1cd8fa88f5c21aeba2cf3a78ac853818bb4a85cff714134879e5c7d7c7994cff20a7").into(), + hex!("b00b8d49b1f1fc0e792e257b0c3c33ab554fe231aaa6366f5aac11ff35051ae23cd2a5c6b9eecb3ad00e840c78d6a587").into(), + hex!("a34bf3b6d9659f1a89e40e3b35afe741a670a3a9305278a52d479535f4b5973b23b10ccdfa194cb2937f150beca3535f").into(), + hex!("a84c4fd757d86613a4bfe72cc9d7864c2c236b9463e365441360686e19f7f772ebb6c07a24c680462ffb1f3939c870d9").into(), + hex!("b3c34a2470e32395bd9c8789905166ba77b7b7d27cb504b8647dfb4fa6d65884afb2f120d83afd876b4a00cb104347a5").into(), + hex!("b9d18f57a669686eb8ec08555972e54505b7d487dfea7105afc76333138d5f934b9b5f9a3a7896481782fad5328bdbda").into(), + hex!("a23f65babfef6a2442833441200291028ebf56031d7154a4d7d0fa18acd4b7bd78dc34b85924c3073eb5be7733ac10f5").into(), + hex!("80948f3e12ffccd88605cd4d67abb83014c35d8d1a3254f6c546aa7197f810cdd06eb49b99f424187df218c8e8d7254e").into(), + hex!("b24ac23a86e23a16f4f04ed683ebaa51ccc6d2d038674e36d00a14fb71d46c433373a8a0bb75a0afd3c2f9605dd4867f").into(), + hex!("b002aef96f1702f4e3c92b00aa2976b57b77ed97a4d64619c6db676b286c6e633eca63fb232cec0d533a803660c20147").into(), + hex!("9867492f550c1f12276a201717bb4c420bffe904d55008655f929368b664e0293c1dabe9b5a5a71ad884a12686c1d9de").into(), + hex!("96b702733a7ef38b23e45999a04bacebe414a01bb41792cd9aff566fc23610d02e069c85c6fd4173846a40657a958e78").into(), + hex!("8e588db21f84245d034d7427055996f109133b9b3aa095472b879bd180052657da20a48513c1f625c339be90a64878e8").into(), + 
hex!("b6821bb1a130460ed1936d34cc189980d8fae8c5debc5149d47e90aad69ed3b143a3a00de5959e21ada73a06b3e8c9d1").into(), + hex!("a751ae06c6d699b5593f5928910e1ef3634dedd460ccd3f23d74f5f61a3950392aa66149ae366c048c6c7ece968c2d9c").into(), + hex!("86835cbe686b81fe7e096af5ac8c24f7cfffe2ce2b993626a606e42f6356c0d7c3d4ea19d110aa1a7b7f7ec5e68808f1").into(), + hex!("93719cea4911ce7e436f7b3ea77b9cfb83a1db903cb38f1de3b04d0a69a0f06bbc4f9acd3b313d46113009a917dd5996").into(), + hex!("b0d240c09c137a742d77edf11a7257030ef8b1a785b810de104fb24b22535cf0d62bc54f544b027d532f16b43a2df7e9").into(), + hex!("9376f46ab931f5c58b1be49c529ace24fade089af1af43b339721321a273169c5bb668250b2c2b0aa16aa522e6675bdd").into(), + hex!("82636e68d0d59d20dafd7486176b62ca2d5dd0275c8fff552fbb974565ef2406ff56cff5c43a5b9e383e0b09737de446").into(), + hex!("8f300fd7f29640aeb759d09735f9ac36d1035248c35ff38a165d2d931058268a055971b4fd4dd9d467960180cd255dba").into(), + hex!("a7d5f4e2b01dccd9cd7cbf566b5ab604efdaf9b682bb4ecae1b7801c2f93425350620e91ec807b0a110971c316e68cc1").into(), + hex!("8952700221cb45e3ab933ec20978d9c9c7b873784299b86d0c2d9998bb6b1d1efc1ee3bcd00c5148b2fd9473838ce067").into(), + hex!("b787e77d194e4dfb89968f4e289e97195c2674adba0a3e7d5582cefacdedf93a0e5f27d9d144aab68aaba878fd640414").into(), + hex!("8466c67bed3fbc30e46639de92c422f06bc80df658340a47299ad7798cae412c976fa6ae6bc0a32ce655b93b08cbeaf5").into(), + hex!("ab5d78cf76e16fcfc0491886fc1c95a492ccc67fd31060eb183b89bf59ed6e2d349324f9734c504e74c360272a22f369").into(), + hex!("a4f9f8539f9f89dc5a2c8296af591277c2d308275234729dee23e35e3d541919d1aa9a260780899615e2a895ad8fe703").into(), + hex!("94ed2cf23c5d28497b506515597ab71120f4ca42f7ebc5a4e87b798353eb70590923eaaf163bc550bf312bd6bb2c0b05").into(), + hex!("a78f064aa69402af33f1c9c1bcf04634384ad8528aaf8d28ce1d1a04804bdf93a8b49baddd6870eedf642e566d091af7").into(), + hex!("86796909d2dd3010e8d46c3d99a3c0efcbd4e986e581ef5be4c7810ed8b92268bbdac1dbf1b24d6805df6642f53f0b58").into(), + hex!("a6b555b50ec3b60e9768f407b84c5fd8a055150c17f78f2d5a3b0daa13f2c692e5041184bfc8919280c230c57adc9ddd").into(), + hex!("b4f9b0a0242ded4dc0a4903f16d270f21f2e15668b3abd45f79e1b465bf50074d232f905c6de6f2727a0a9ef039f7681").into(), + hex!("930b7dd8666a35358c6a0a42c600dbe8ad5d9682dfc641474fbd8fba90ddaac7d3ed5f5395f297dd571051ac2d603333").into(), + hex!("abba3bdcee368688a8e53d56627b915148fcab59717174fe8136c85bc24e8d15046da09b31c0c7c9a5bac1016bbfafb5").into(), + hex!("ac199e71110e15ebfb3e58b8ac14f1de8ebd3e0894f273f84783ef3fa4fd16ce6bfa5d41421e884e258fe8e2dce68075").into(), + hex!("a53eaed94fe550c07375ff5ed9706d69563bdba724a4022cf7c639737c97683f036400dcb87268184a9877eb116bd479").into(), + hex!("86e8269c1b368228438de5aa4065ab9d2888de7599b00d5382ebba8fb0600cf357de27e20edaa50127e213c7f6be1f6f").into(), + hex!("8f8d9b9b03b178d370a9e918dd54264f83c4ed20824be79427cbc973a9acf3d74637e5c35080192bb67e64390299c19a").into(), + hex!("a230cbc17aa669c2a9b02e736d20519d93a6fa1b6ed452e065fa78a0fb4b3a0f55fcbc5e719db6417353ef0798f70b43").into(), + hex!("b4e28c30e57cd33fdeb257cee66389365f4a1b9847f94e8b0552c65adff5719e51d50ec8bba36a71ec24641ce4fc6338").into(), + hex!("91799f066e16f9a07a419d3e425c959052f8ad1aee0e2f613d3b023829fb1946c81b16e8b733ed03ca924d03481bafcb").into(), + hex!("8ad935420f026b166233ad387c58857eafdd2fcd4efe55ce1bce9ffd668b8997555927fcec88b04de795129a10263d1e").into(), + hex!("a098f20bf1ae2510f1955c586c7115a29c64bd22a086a2f2a7ff5e08349bf24086504a1e5e1fa82b3aa73a097bcd948f").into(), + 
hex!("909a3d2e7bb5538bd89c446aa53f1f05a1ec17c88d793a310866cdda6e5d836d53fbb80afbf8baa8aa49db3836c912e8").into(), + hex!("89e64879667f34f1127cfacbd9a3337fa28c0227bac5c5ac907d6ad9c3a853472f4f7cf093e3b735968229398bc7c94b").into(), + hex!("b6ea32c4d640f9b7de841575a00003c1b25d0a845eb54b065129c271790bf602cc39761316c4e9dfc1644ae3ff4b05e1").into(), + hex!("80ef23a1cf6f50f96d5b4645bc79ed4c958f1394da9e5cda0cdaca3815ac8435a8d3a690ed19665b7cd1bef8bf7b0366").into(), + hex!("8b7346d1f30de7e50e3d3b32b441ba5681335d25ae6025623e1469466addc5515415a29ecfed987e07bd6a85ec1eabbc").into(), + hex!("99277ae52f7f2f193549739a704aa2f756c2ddff68b9848040ccacc675fa12d62c9fc1318daecba514089537a4e7b83a").into(), + hex!("94d392e29a3a1b8ce63c52288fdcd3f95f80bdd2a600626e2ce3517162e69b9c0eb36bae6786701ba12e23a33b8f90b4").into(), + hex!("8f270be40047911a4cd5997668bd6d62c90780882451c544ea4bdebe061a9e61bafa1d8bb8af62e0ce4fd73611a7f34d").into(), + hex!("a841f7229fd0490a853523edbd12a5dc6772bd607afd0516c582a813a10fd74d19f480d02b3e7b7b6256896620905976").into(), + hex!("aaf65a2e4e6a3d903ccb51a063d64688590a3ae54c7df3c5e8820d88533a6f10b81b130ab476c9f16c660a81f66dd3bf").into(), + hex!("b3683ebb70696339844ccac03925ec85c8dc06959608527a1744c23a67a805e71b5a91610fc6fbe0f667d054b4087e37").into(), + hex!("b042a5614245184e5a4620cd3a67c51811fcc0a0dff63ac06dc8030f01d8343ff0bd39ab3cdc3c09e4f3c1503ed35e62").into(), + hex!("8a343ff96dfae2c2e62558db3cc424b3c055e2cf84b53f63bf49c95d98ad0b372517df7afdb189007b7db725a3fbd567").into(), + hex!("a3a81a2b37160a371ceee3d07194a70e58041a3e6f75f47c4d8e2c619cae3f44b2de5703e3b80fbae9778e284927170a").into(), + hex!("b9554f94b7c611c2b3f63df5ac0f920d1eaff7b424bc8e857b94c354aa1d3c02a4348510099750e0af3e16dcd0f9a245").into(), + hex!("8678aae32f5bfb9dc249a39eb5d0638bd45d48d2b5b7be32b5e1c2be7b8d29d7198a15e2c24fa7813f060309f2493843").into(), + hex!("85f020a228f8952986c979028dbf2c2e8e59191970ea5e4cff6e6bb46251138d9edd90a211ccd53fd395db8addfb6d71").into(), + hex!("8b0801e8ea30467063d68caa5d3315809b3f21c7429f256fc2a10f7e173f0e1d1bbcf59e025b9e44238a53ef1b8318d9").into(), + hex!("a4d3fbad305853d8057c09bb10fdea9234ff90d51ad0c21342248895d77ead5a8c104c554e6008677e396b55d4efdea5").into(), + hex!("a7f12f870c5a3e2332f10cc2db7dc26ce58a94ff60ebc28f3ae06e820e6514d80c5133563225011213e53f51f748413e").into(), + hex!("a94ed25fba32b4302fff40e2c55e06b6fa4b9820635beb0b43df61010ab5cdaf944199bc73c4827214ae1f57bd75e70a").into(), + hex!("8607e973bc67d217ea67104eb538fcaabdcefcaa7da981ae0322916f7a74229b47f18820458823e7ef160f69f5363dbe").into(), + hex!("a83206fcc995ca57583c5952bef2027503308472bd712c536050217a391bf0fa9617d956ff6c913c2566cbc515cb291a").into(), + hex!("b9110bd697c294a0905503490d17d5536ac1782514bcbdd6f67e8fbb75c0922b39cb7f742d28cbf5356e4f1885b060c2").into(), + hex!("96e1d2b723a458f6b8cd4a8b2a83f33fc7931901cac1fe2169ffbf7c1ac8b4d8547165af3dc61c2b37ab88d3e81f940b").into(), + hex!("92cddef13af28962b7e281d5c0552ee5135b7a401944c9ab31d617b072cf00365e24dd87f86f6618b202d51c25f63fd6").into(), + hex!("a751e27a646ac1f3c828cb88585aeae6899d0940252973329b0589b05714bdc1cd271bf745d482f670f4ccbcd9a60d98").into(), + hex!("861fc00a2edf468353c6012a89ab7cddeaf964ee387e5eac48037eddc536df3d097c69a689f09bdad189384719d50e0e").into(), + hex!("a7dda298ac153aaa6a59785ed7b6900362b1220588a29b44764cc45834859bba0f5b9f8f17bba97fb49fa2c7ed4eb65f").into(), + hex!("a6b679d47e1ea1469e5dc14e1eba97ba2a0f2cec0a9a0983ca086f917298c93af45de60765aa2cb3759ed62c9eb5e4dc").into(), + 
hex!("b95b1f4472f8f69ee010d36db46c268c59cbd13864c63ce9e6a4755ad00c2db04c951e312b88060e8411018cf655e76a").into(), + hex!("935883b9eca730ef868329a67fe99ed5363b0384e7e6f97147d4c80a76d9b2d8ae6783e80d103149a8a3fbfd51f9f6be").into(), + hex!("ace980d1e3c76dcf78bbb87f3ca9bd0bba7897fcf9e24b27e00fa22855b1b4ac224137361ef6817c94fcc81fc3d3a3de").into(), + hex!("a614b6f113d74d4dd6dea66125b11195212031fd7d3da825b24739e5107cae653fb89c34527b399c43340064a9744a6e").into(), + hex!("83c27783856f9af7491ec9fd34be730600afa59484cae9d3981685cadb869dbd05555a07b93db7d0f361c9ae8e0bfe73").into(), + hex!("850f1389e21bec1c8785d17316a09af9355d32b75d02d9ad72791cfc5a411595a367ba9eb641c5b7acd6be1ee21579ea").into(), + hex!("b810405c7415c49bce0f7893aecde90da33b71684668877c5d6cdbe82161f9cd7eaa2d68597140e39fff8b9cd67424e5").into(), + hex!("a5923930d34526d70e083f4633de4766f04df901ca3adba4a462d03423b609d9813b78a2fcaa2d770a5abc27c260c39e").into(), + hex!("a6ec439bff50a4bef8d0cd47c52e92cf00846ca3fe97cf88e5b6d7800ea22d0ebcac49d9ecd123d4c156642f8bb4389f").into(), + hex!("97b7d3fbd11886976291a24e9e7d6f4974345e06024121eaf57097057c103b6f548d1f523416cd6b465e8109aa0be911").into(), + hex!("b45f04e0d4c17df2815a6fea2b04fd7cc2cbb8e6789084e224db3c1d00db1c6f1d325a63df25ee4a9a992553dab420b2").into(), + hex!("a91dc563b48b4cc210119ff55bec2957e8be50aa25928147d0434a9ea4088e98ba1f17c2050e713d2891a3c741ea6c6c").into(), + hex!("a12256c39f3b17c0540d2d3442b732b2485ae9da240a1ef47782549dc7e84f7a9c9240ff59a73fad228a3c01fe953169").into(), + hex!("aebc98b50533d844fe3149735750c10eab861e765aa7820e3d54fb66089ce15409206bb58d3aae5ef22a29ac5207c702").into(), + hex!("b023d0e4fc2eba4c60cedae9d2ffa79cdcd5c79279fa41baa94f536c17d746a6aa76e8fe203fb1678da126c4343eed8b").into(), + hex!("a673262367bde5d1775961c7d9aa26d9859c59600536130f9adc8f99f81f0106d2eab2c5ac3912476affcab3821fecdd").into(), + hex!("8008298954370c0c3026fd71fb48fc619caa394c9ea17273284a903b07de1d385336fad69c8ab6fe6692774d5fabcfc1").into(), + hex!("a98a824454bc0fba41a6543ba11ec6879c979e97c87e3f7f3a228ba995e33bcae9378740f925800b81d3627f2af36c51").into(), + hex!("a29c667540db41eb7ce06d74ad91c7ddeb3e1f019a028b8ed4ee705d8d079d6f7d36f1b64fd4b9b807f0dc1ab3d65d2d").into(), + hex!("86c32be8a7155f26696c1e541096ab43b3836315490a4bdad867b973ebfa8ac414e4074819b9639348a048bd6fc4bdee").into(), + hex!("a0de748319bb0c53c03f172c9aebc4f7538dfce6ffdd362d24ffdd111448277a8a705832e94f65c61aa635a5f40b6f0b").into(), + hex!("970de7621ef90cb3921f747ce1cf9d389f322cebc286c339395714a6d40167fe26e04e1fc3116d3b7bea1c99bfedf0fb").into(), + hex!("841ae6b0bdd22718fd917ca4a871d39cf6811b9a460b99422fb324235b2c51b460e48159d7e1bc1778de3513b7ab1f29").into(), + hex!("86764a78587892407b311892c4c4e70890bd757d3f72a832257f411646e9298eb4f042ec1c929cc6ffcf539dda90fe7e").into(), + hex!("b33008c5025d35243e91b6749e7d7934bd8334e5f58e88db907715435a28f02d018b09127fb0302d1ee68d7f97391040").into(), + hex!("b5ea29ebc8107525894d9872ac88b4c9662fd18d87316565a8b013529b478f17f6c1c0ddf6482db6ede54d27c0e80782").into(), + hex!("9576def9e5dd8673d3dec2090536672d44368b78b2c2f70dd9e36a9d0e5889cd9e2a47b77ebda94388ac75679257b829").into(), + hex!("823dd44b4635bf095786e47f836c4af02d4dea848bc1cd823876266341d56496e09bec8612413c302fb34c3383a55e0e").into(), + hex!("b6a32b13f8b1339b0591b9db343696aab77a0c2ff181d2069ff39581da8904499d619c26e6e44209b7791c3aa2fb7aa2").into(), + hex!("b9b3262a97049e236e29afc4a7c6d2e2253f437e013b90bd0882d35d46d7a67e1344b3dc822fcae27717a9887d842a81").into(), + 
hex!("8a94e08cda133175049eb2cddae936b16c477db54a749b8d3378233034f56fdea99520755ec8eb355b738af61138d9a2").into(), + hex!("a55933f63c4cce4d99d8df4fe6dc9d9a3054379ed2560bb2a9147f9e456925ab29f7b1f14321ab2501d67dd759f5ef36").into(), + hex!("8b5a713fb50a2aa0671f1405876b0a829a5c1f7c9915906bcad26de7e2011f8eb2e2b7a1cbf94c19685edf8b364f242d").into(), + hex!("97cd2419d4aaabe457b1efa6d2e557a0c4d8725e57e33018626fb31395ed78555b79cf90da49975360a497049004b20c").into(), + hex!("90d640d0c949a73543b476e0b634d442c0bbe0f4d3f1f4f99d19fd8d37d3b99c4bb3c5c08ee77c24f0af6dc7c29ce658").into(), + hex!("8f65263c9ac0026d9a536360c8b01b40243cc27a7896fada367372ba82ffcf2c758bba9f92c96fdd66956b25d7162903").into(), + hex!("ac50c7c4b3201f2c98e812e83452ded2e2dfbeb4c1910b873978c4b5b443b34dde042b13cb4943759cefe1a546bd8197").into(), + hex!("a07a157ebb964282729ab09205f5a2df132b5a0103233dca67dd55215b84e66423c576c6e0b055bf85c6a27025fe1aba").into(), + hex!("b89462ff76b35af9a1f23bbeb7f3b6e2f72f4d96aaa30fcce820573654895c76d813f27dea9251c96ea9e3f1726da99e").into(), + hex!("a81866a83434f91d464a93a50c4906224f3108777f731a1d363d16f769795be8ffd23c25f0e051d8c43a55b665c15bda").into(), + hex!("a974e15a6315d56d612f5e8d70171b01baba976ae1602265cdb74e1f5cf3a48c94e8b90bf1d343f3f56f8f9ece604ecb").into(), + hex!("97ad199e4ba05c97c5aa90cc6e63ed86f78fbea35b9f56f5107df643efe40647d1ac7e50cf6e1de8b4ede8e2225e090c").into(), + hex!("a67c3822c0e5902355fe053c3c41765146018dd90de0df89c6ae230826056e91cad2a28f06859b8ea899486ddbdbe6fa").into(), + hex!("a5dd8329edde8fdb8f82fb71637c7a4d1e13c51ab2b24cc98b80952575e821fe31c3e7de5a566f8a98e0243cebc6d1d5").into(), + hex!("b7070e8349dbd4359414c0d909de10a472c4a7fc4827804b3e1980b1cd8f468d21d4c6163c234c05f05534e423f6199b").into(), + hex!("a1acc31ccbb89ba6ba5be3ffc26ef83a28c3b0f76be71f87ef618f9f8171c8afdbc5f05c521c67171ea9fde1bad7a9dc").into(), + hex!("a86d620245d119b34f55c6145a65937e5ed5281803675e120d8c2de05eabc08f85eb8d8c877aebf535663518c4fe2f88").into(), + hex!("885317798c5b49430d50d5c5eb527540b0b704794d88d510b2511e9fea2de299d8f2cf2ae2e96196a9c0925ab1f30da1").into(), + hex!("8a5c37ad34f1867fc9fdd3303ffb86fb0d98ce27fd6739078949c070b44081349368302d7c910fd3d886165b5fbd3304").into(), + hex!("a1935e801664a3f51c22e616d25fbecb3f3be76b936e4d2dc28a4bbe79d002ddf01b94cdf9e18d07f2e2c4bccdfd76a9").into(), + hex!("b3d13a611ada38fb6c802e6f7e09d03703e9e7bf82424ded2774d6493dc0c7d8965e7839ff4d3f9fe00a1e8fb20bf13c").into(), + hex!("84f6edb320cf92e147ba3ba8b2470c57dd107b80ea8083219d3c91b77fba7b20b6774ae7ace26924c02c9a2247842bcc").into(), + hex!("86546f16627c3b875790b604aeb19c925f6d32db27c2ebf405df0324095fab1f8b748fd94b6c2b5a2dfeaf748a565b3f").into(), + hex!("b95563cebc351d8237e5f9b8ab984976e84ebf7e16c62102629cbda06c86b013c6a973d007d72a870883408b8343fd1f").into(), + hex!("8d5eff4d3238446be0f805b8f387fd57f298dfcadba88a0d87234bbede46d0ef833beb0f2ede7a51fa84a808393757b4").into(), + hex!("862179dd50e7a0fa7906248c0f3671d8d3b25504e30276da74a36edfebc8c92a04abc9dc3cba8c1b34005beb06cceca3").into(), + hex!("b12bf5775e3f9ae29da6b127ffe2892d5dec12f9c1bc21426c225220cb0d5fca5c6376afb5bf5f112f79c6563694007d").into(), + hex!("957cd40aa0864b86bc64420a184988be4489c0f0a3363f39571e71a7443ac6815a1b8ce4862c736be8441108dd78101b").into(), + hex!("8f31cdb655b66e1f8ad877639f71524aa78c09acc24aa493bd6f6be383c295f51a6e70f2573081cf87cd41ef55f5428d").into(), + hex!("a8f304c1f2faa78683d409dbb0c11e26583fdfe845f2557e378c56acc9baaa88cce25442733edcdd0da70f3e5557e53b").into(), + ], + aggregate_pubkey: 
hex!("870e9dfe2c909b7116a9a4180da4fb6ac4865f9304adc4c36dde6f82338c43352b58dfb494e6095bfade1dbf86e7f939").into(), + }, + next_sync_committee_branch: vec![ + hex!("d138fae7ec85d4d5ebd8d7375b3f39f4bf0d05439e6920a44bcc977e62ee0dfa").into(), + hex!("a6baa91932e6f9d9fec678e9fd75a140c8e74bd87f11d37093839826b95ceeec").into(), + hex!("926c0348ccc4c44119ca84e50911ac22078ab704b0784ebc593155da5c5adb53").into(), + hex!("c4a04575645ebf0cf5b3317a092e595adf49dd93669424c2a5efef700ed082a1").into(), + hex!("81a062566009887529ffc6350f713cd2aa30460c13173fe9ffcdbde71fd69f8b").into(), + ], + }), + finalized_header: BeaconHeader{ + slot: 5808479, + proposer_index: 218610, + parent_root: hex!("ac0c3b35e7e21d11d0563f98fb16bbfb0460aef2ee5fe39ea209aed66694601e").into(), + state_root: hex!("c66e3a4f1718ce82f35c898e8df8080c540aca493a535a2f6170a13b550faef3").into(), + body_root: hex!("207806f82ac8c5bdb6793dc61f31ce91dd06a7fe3a143d29b6579975c64d1d9c").into(), + }, + finality_branch: vec![ + hex!("0bc5020000000000000000000000000000000000000000000000000000000000").into(), + hex!("8c04962a994aadff4d3042da73e167e666323757db5b0234a497c7ddba058ded").into(), + hex!("95901d6dae3edaab0f29f2c6155edbc4eb3980b6816339a464fb51b91fafdb7a").into(), + hex!("926c0348ccc4c44119ca84e50911ac22078ab704b0784ebc593155da5c5adb53").into(), + hex!("c4a04575645ebf0cf5b3317a092e595adf49dd93669424c2a5efef700ed082a1").into(), + hex!("81a062566009887529ffc6350f713cd2aa30460c13173fe9ffcdbde71fd69f8b").into(), + ], + block_roots_root: hex!("93a5736680a9dfe23df1f8a6098c0671c583dae469847e25da3532b3649ae11b").into(), + block_roots_branch: vec![ + hex!("31a647639bd26edd8e3976b4475933d18d7d238210881f57570b7b4030133da0").into(), + hex!("0a3392c5febec2f099f93c5465c68f4f1630927d0326ad84c8d0b318364dcd82").into(), + hex!("986071ec073d43597d67a6595f7f6fc807ef1042c6821fda41ff80aa2717536f").into(), + hex!("732f545955de627e65c46201f053569dceab609948147690136bc64e060f38b4").into(), + hex!("2e7c74db495877af1e95da27113e89757ea475e8d672d319e655810ec64d4ba2").into(), + ], + }) +} + +pub fn make_finalized_header_update() -> Box { + Box::new(Update { + attested_header: BeaconHeader { + slot: 5809441, + proposer_index: 169069, + parent_root: hex!("a4d2fbf3ee62f32589738f386559a1e2358f4f54aff5f7eaea61144d3d9c00d1").into(), + state_root: hex!("4aad4183bc21fc96c90f8e043049f8c1d5ed205c6880c89cd99f2e080ef85138").into(), + body_root: hex!("406c96c6adad01df901df3625cbd622f1d541249b05c768ccc4db5643d973141").into(), + }, + sync_aggregate: SyncAggregate{ + sync_committee_bits: hex!("7fabbff6fcdefbebaefffff9e37dfffebff57f7bf3e3efbcfef1f7f987551dd176f3b3ff7bfa3fedff5fdf7f7afff5ff777bef5f9f7fe65f97fffe7dfdfffbdd"), + sync_committee_signature: hex!("84dc756c452ec9a3ba01cc98d03cf5471b871e9f3f77ddfe72ddf6d5d318ec3de9e5c1508e47ed362300cd45a144655a076d50073c24a67591b0454d2a4632bc01e97eab80f937a8288131a31ab76f400ba9c26a19df176c7e67b724f70407c3").into(), + }, + signature_slot: 5809445, + next_sync_committee_update: None, + finalized_header: BeaconHeader { + slot: 5809375, + proposer_index: 170923, + parent_root: hex!("87fed31787712fa6e802b9f296c1eb0b0ac5bc77f6945d4478c4d25bd7160d1a").into(), + state_root: hex!("246ac89e1854bada03be1da64081954e008238de219088609ddf45efc8000346").into(), + body_root: hex!("ffe0fdbdc2bf57bebdd084fce3820a13801095d236a2fb8f3a64c9d7cf94f8b9").into(), + }, + finality_branch: vec![ + hex!("27c5020000000000000000000000000000000000000000000000000000000000").into(), + hex!("8c04962a994aadff4d3042da73e167e666323757db5b0234a497c7ddba058ded").into(), + 
hex!("95901d6dae3edaab0f29f2c6155edbc4eb3980b6816339a464fb51b91fafdb7a").into(), + hex!("34e68ed57efdf18c5d2f455e77fa8b2a5be95bb827bdf7f7f6648103688d84b7").into(), + hex!("fc1d45f882aa66020a92c55da663ab9758581a020eb7336173fe84ef861bbdf9").into(), + hex!("7d1745c42ec44d4b2493a55dafdb770f6d38eb4a7ad68ae0264949cb7432e4a7").into(), + ], + block_roots_root: hex!("9e5aeee5467301f3a44d1ab664cebd198519423e73e2118ad046d9bae217f497").into(), + block_roots_branch: vec![ + hex!("ef671e41918c36e23a3673407050b420366022886dcce1b707622de97a695121").into(), + hex!("707cb79caeaf310c10ce1c177312e48b2331164c8327d2635203148c4d974f09").into(), + hex!("1fd802c27384482fdaacfa7406072f6f96ff5428f003af748068d1965cc36981").into(), + hex!("8a31cc13bddabda4f79d948e5e3d70806f638b61d89c87b40aa7131af43c18a8").into(), + hex!("70eb43218a3a6f619f1d0dc7f173fc9c3323fa7e3824ae6cd79af2f7d19634ad").into(), + ] + }) +} + +pub fn make_execution_header_update() -> Box { + Box::new(ExecutionHeaderUpdate { + header: BeaconHeader { + slot: 5809374, + proposer_index: 130336, + parent_root: hex!("2bb54c61560a80d1cfb0528e8ea207dfb9d55ab49238523e21609a9ee3b8a9b5").into(), + state_root: hex!("0116ea76f5c36373dfc8e039811eba86c8e8e16cfe9f0614376559b6585741a7").into(), + body_root: hex!("9a10b47e30bc11fc2ee1e21943a8382d727444f646f09664192236458b555ffe").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("eca009f3262f75b055e6c919e2c0a2c017f017e581a825a2618a2a76926a264e").into(), + hex!("a647371a5590630186dd47b9b8571f27e39a77b4aac1f763fabefe104bf94985").into(), + hex!("9a414690540e4c87ff5171b619b3ab6ff1115c21f247196989f5a0a9085b59a1").into(), + hex!("1bf7ae16fcde0833c6e97a83b72aef31a0b5ca055b87f86602b9b4aa193f557c").into(), + hex!("588d993d05b59bf3352f0f5ebb4cd3ec97ca3e41800da675996741e8fca374c0").into(), + hex!("fdfc6280944bb0a18c9cd0afa9f4a255719a4650233f19de478399276f198c92").into(), + hex!("1d96235b47c604f029b9ab7eb913b13b3c0c2df7f79e3301341b1ec38ea44e4c").into(), + hex!("3ee3af17ce8f5a4946d30b6bce7d6e7580b3981cd2af92246401e2326224f6d1").into(), + hex!("b53b5450e070adf02f4bb9d7c65dd131d07ae2218340eec95ac8aa5e5cdd82aa").into(), + hex!("4d7c09715a1f25574afa1dc3dd7bb44e4c1a723b9360c893b8510f675f85227a").into(), + hex!("38c159fc38dedc1e4f399a3f773ab4376fc40b126634b40d172d5daa6602cf94").into(), + hex!("9faac6fa44ed19fcf530f77b7090dd50dd17aeedabe763931ab7567276025a75").into(), + hex!("c6549c1b0f0027ac373164437e7010b955fbae1a0e78485408ec33ca906beb2d").into(), + ], + finalized_block_root: hex!("f6e721e4e65d9565091a557705285ec6db0a3a3072317317719ec8ad563859a3").into(), + }), + execution_header: ExecutionPayloadHeader { + parent_hash: hex!("6d51d7c94763813ffefa234097a51c6fd7009424d2991695f7bd6203157c86f9").into(), + fee_recipient: hex!("000095e79eac4d76aab57cb2c1f091d553b36ca0").into(), + state_root: hex!("fe9f753520a7b5c0263bbf4fdba728f69e9cf861ce1883aa13de5da30ff75d74").into(), + receipts_root: hex!("cf6ab47d8fc336155b18abfa2d965aae57d9d35a2fcf5cfc992b8dcd136958cb").into(), + logs_bloom: hex!("8427414fce71480d7e70cdbac68dd6f77608c05cf349c34c87ad3256e8dde9e3f9c52131945876c03b6e83ea5970536428283a180eb40efcc5fd834ce424f0dbf622dbba6cfda7945cc1f93a1b6e7ae448c598b4f45f7cfa933fe9808d835cb86e8a38261a031448e262f8e4f2dc4c3254c460e5faae4b518438c1330012154a1ba33ab7d85c8acaa9c47dc582fd003a771c9b09aa16c34d4f0c01fbb3f8c0a28e11d2eafb4e73b75a18e182eac7c021706832a9a785836d31f651efacf88a329334e5b3def3bf1871573dc3553f415f298a9457f7837a31302937a4178be1339cdbb83af329ae7e88d8ab6cba62f018be139896ecbc7ac11ef24b0b4ae343e9").into(), + 
prev_randao: hex!("5a76eff974d26bf74dc3003fac473ab4abc541be26bd61f124a1818a70ea0b3e").into(), + block_number: 9143323, + gas_limit: 30000000, + gas_used: 28165724, + timestamp: 1686220488, + extra_data: hex!("").into(), + base_fee_per_gas: U256::from(2267_u64), + block_hash: hex!("e4a67cdb1512f29ad9b331e7a37cf8e376222eafa58e72cee7771ad582cc0610").into(), + transactions_root: hex!("bd7eaeb676c14c37bbf0b6f3db2ce021a04a41dbf002f6c7df3bb61639ac7287").into(), + withdrawals_root: hex!("8647d3ecaaf62e1d087c5ab54a23f1d64f477b7ddd16fff458847181d89fc432").into(), + }, + execution_branch: vec![ + hex!("795608ac1294bcc663127b8428513ba4a5ffe952ff72f8322dca23628f13d716").into(), + hex!("336488033fe5f3ef4ccc12af07b9370b92e553e35ecb4a337a1b1c0e4afe1e0e").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("2a8f5c65655edeb2800f248f2e14044fc651061d0c00c8e8b627cb21ba421fb4").into(), + ], + }) +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/mod.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..cba22fc86c99f2c4e6c10d6cf0aacece175b55db --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/mod.rs @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +mod fixtures; +mod util; + +use crate::Pallet as EthereumBeaconClient; +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; + +use fixtures::{ + make_checkpoint, make_execution_header_update, make_finalized_header_update, + make_sync_committee_update, +}; + +use primitives::{ + fast_aggregate_verify, prepare_aggregate_pubkey, prepare_aggregate_signature, + verify_merkle_branch, +}; +use util::*; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn force_checkpoint() -> Result<(), BenchmarkError> { + let checkpoint_update = make_checkpoint(); + let block_root: H256 = checkpoint_update.header.hash_tree_root().unwrap(); + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(*checkpoint_update)); + + assert!(>::get() == block_root); + assert!(>::get(block_root).is_some()); + + Ok(()) + } + + #[benchmark] + fn submit() -> Result<(), BenchmarkError> { + let caller: T::AccountId = whitelisted_caller(); + let checkpoint_update = make_checkpoint(); + let finalized_header_update = make_finalized_header_update(); + let block_root: H256 = finalized_header_update.finalized_header.hash_tree_root().unwrap(); + EthereumBeaconClient::::process_checkpoint_update(&checkpoint_update)?; + + #[extrinsic_call] + submit(RawOrigin::Signed(caller.clone()), Box::new(*finalized_header_update)); + + assert!(>::get() == block_root); + assert!(>::get(block_root).is_some()); + + Ok(()) + } + + #[benchmark] + fn submit_with_sync_committee() -> Result<(), BenchmarkError> { + let caller: T::AccountId = whitelisted_caller(); + let checkpoint_update = make_checkpoint(); + let sync_committee_update = make_sync_committee_update(); + EthereumBeaconClient::::process_checkpoint_update(&checkpoint_update)?; + + #[extrinsic_call] + submit(RawOrigin::Signed(caller.clone()), Box::new(*sync_committee_update)); + + assert!(>::exists()); + + Ok(()) + } + + #[benchmark] + fn submit_execution_header() -> Result<(), BenchmarkError> { + let caller: T::AccountId = whitelisted_caller(); + let checkpoint_update = make_checkpoint(); + let finalized_header_update = make_finalized_header_update(); + 
let execution_header_update = make_execution_header_update(); + let execution_header_hash = execution_header_update.execution_header.block_hash; + EthereumBeaconClient::::process_checkpoint_update(&checkpoint_update)?; + EthereumBeaconClient::::process_update(&finalized_header_update)?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), Box::new(*execution_header_update)); + + assert!(>::contains_key(execution_header_hash)); + + Ok(()) + } + + #[benchmark(extra)] + fn bls_fast_aggregate_verify_pre_aggregated() -> Result<(), BenchmarkError> { + EthereumBeaconClient::::process_checkpoint_update(&make_checkpoint())?; + let update = make_sync_committee_update(); + let participant_pubkeys = participant_pubkeys::(&update)?; + let signing_root = signing_root::(&update)?; + let agg_sig = + prepare_aggregate_signature(&update.sync_aggregate.sync_committee_signature).unwrap(); + let agg_pub_key = prepare_aggregate_pubkey(&participant_pubkeys).unwrap(); + + #[block] + { + agg_sig.fast_aggregate_verify_pre_aggregated(signing_root.as_bytes(), &agg_pub_key); + } + + Ok(()) + } + + #[benchmark(extra)] + fn bls_fast_aggregate_verify() -> Result<(), BenchmarkError> { + EthereumBeaconClient::::process_checkpoint_update(&make_checkpoint())?; + let update = make_sync_committee_update(); + let current_sync_committee = >::get(); + let absent_pubkeys = absent_pubkeys::(&update)?; + let signing_root = signing_root::(&update)?; + + #[block] + { + fast_aggregate_verify( + ¤t_sync_committee.aggregate_pubkey, + &absent_pubkeys, + signing_root, + &update.sync_aggregate.sync_committee_signature, + ) + .unwrap(); + } + + Ok(()) + } + + #[benchmark(extra)] + fn verify_merkle_proof() -> Result<(), BenchmarkError> { + EthereumBeaconClient::::process_checkpoint_update(&make_checkpoint())?; + let update = make_sync_committee_update(); + let block_root: H256 = update.finalized_header.hash_tree_root().unwrap(); + + #[block] + { + verify_merkle_branch( + block_root, + &update.finality_branch, + config::FINALIZED_ROOT_SUBTREE_INDEX, + config::FINALIZED_ROOT_DEPTH, + update.attested_header.state_root, + ); + } + + Ok(()) + } + + impl_benchmark_test_suite!( + EthereumBeaconClient, + crate::mock::mainnet::new_tester(), + crate::mock::mainnet::Test + ); +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/util.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/util.rs new file mode 100644 index 0000000000000000000000000000000000000000..7e5ded6e1f0d26cad99b5ee84f97eeb277605954 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/benchmarking/util.rs @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate::{ + decompress_sync_committee_bits, Config, CurrentSyncCommittee, Pallet as EthereumBeaconClient, + Update, ValidatorsRoot, Vec, +}; +use primitives::PublicKeyPrepared; +use sp_core::H256; + +pub fn participant_pubkeys( + update: &Update, +) -> Result, &'static str> { + let sync_committee_bits = + decompress_sync_committee_bits(update.sync_aggregate.sync_committee_bits); + let current_sync_committee = >::get(); + let pubkeys = EthereumBeaconClient::::find_pubkeys( + &sync_committee_bits, + (*current_sync_committee.pubkeys).as_ref(), + true, + ); + Ok(pubkeys) +} + +pub fn absent_pubkeys(update: &Update) -> Result, &'static str> { + let sync_committee_bits = + decompress_sync_committee_bits(update.sync_aggregate.sync_committee_bits); + let current_sync_committee = 
>::get(); + let pubkeys = EthereumBeaconClient::::find_pubkeys( + &sync_committee_bits, + (*current_sync_committee.pubkeys).as_ref(), + false, + ); + Ok(pubkeys) +} + +pub fn signing_root(update: &Update) -> Result { + let validators_root = >::get(); + let signing_root = EthereumBeaconClient::::signing_root( + &update.attested_header, + validators_root, + update.signature_slot, + )?; + Ok(signing_root) +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/mainnet.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/mainnet.rs new file mode 100644 index 0000000000000000000000000000000000000000..3d22ad82cec0a88aa4cd0c6ff9a1d6d7c6141165 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/mainnet.rs @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +pub const SLOTS_PER_EPOCH: usize = 32; +pub const SECONDS_PER_SLOT: usize = 12; +pub const EPOCHS_PER_SYNC_COMMITTEE_PERIOD: usize = 256; +pub const SYNC_COMMITTEE_SIZE: usize = 512; +pub const SYNC_COMMITTEE_BITS_SIZE: usize = SYNC_COMMITTEE_SIZE / 8; +pub const SLOTS_PER_HISTORICAL_ROOT: usize = 8192; +pub const IS_MINIMAL: bool = false; +pub const BLOCK_ROOT_AT_INDEX_DEPTH: usize = 13; diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/minimal.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/minimal.rs new file mode 100644 index 0000000000000000000000000000000000000000..affa86db976143c70773e017ee4fc14337aa322e --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/minimal.rs @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +pub const SLOTS_PER_EPOCH: usize = 8; +pub const SECONDS_PER_SLOT: usize = 6; +pub const EPOCHS_PER_SYNC_COMMITTEE_PERIOD: usize = 8; +pub const SYNC_COMMITTEE_SIZE: usize = 32; +pub const SYNC_COMMITTEE_BITS_SIZE: usize = SYNC_COMMITTEE_SIZE / 8; +pub const SLOTS_PER_HISTORICAL_ROOT: usize = 64; +pub const IS_MINIMAL: bool = true; +pub const BLOCK_ROOT_AT_INDEX_DEPTH: usize = 6; diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/mod.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..6b959ebfec9422f52c986614a4fb4c356b4273c9 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/config/mod.rs @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use primitives::merkle_proof::{generalized_index_length, subtree_index}; +use static_assertions::const_assert; + +pub mod mainnet; +pub mod minimal; + +#[cfg(not(feature = "beacon-spec-mainnet"))] +pub use minimal::*; + +#[cfg(feature = "beacon-spec-mainnet")] +pub use mainnet::*; + +// Generalized Indices + +// get_generalized_index(BeaconState, 'block_roots') +pub const BLOCK_ROOTS_INDEX: usize = 37; +pub const BLOCK_ROOTS_SUBTREE_INDEX: usize = subtree_index(BLOCK_ROOTS_INDEX); +pub const BLOCK_ROOTS_DEPTH: usize = generalized_index_length(BLOCK_ROOTS_INDEX); + +// get_generalized_index(BeaconState, 'finalized_checkpoint', 'root') +pub const FINALIZED_ROOT_INDEX: usize = 105; +pub const FINALIZED_ROOT_SUBTREE_INDEX: usize = subtree_index(FINALIZED_ROOT_INDEX); +pub const FINALIZED_ROOT_DEPTH: usize = generalized_index_length(FINALIZED_ROOT_INDEX); + +// get_generalized_index(BeaconState, 
'current_sync_committee') +pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; +pub const CURRENT_SYNC_COMMITTEE_SUBTREE_INDEX: usize = subtree_index(CURRENT_SYNC_COMMITTEE_INDEX); +pub const CURRENT_SYNC_COMMITTEE_DEPTH: usize = + generalized_index_length(CURRENT_SYNC_COMMITTEE_INDEX); + +// get_generalized_index(BeaconState, 'next_sync_committee') +pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; +pub const NEXT_SYNC_COMMITTEE_SUBTREE_INDEX: usize = subtree_index(NEXT_SYNC_COMMITTEE_INDEX); +pub const NEXT_SYNC_COMMITTEE_DEPTH: usize = generalized_index_length(NEXT_SYNC_COMMITTEE_INDEX); + +// get_generalized_index(BeaconBlockBody, 'execution_payload') +pub const EXECUTION_HEADER_INDEX: usize = 25; +pub const EXECUTION_HEADER_SUBTREE_INDEX: usize = subtree_index(EXECUTION_HEADER_INDEX); +pub const EXECUTION_HEADER_DEPTH: usize = generalized_index_length(EXECUTION_HEADER_INDEX); + +pub const MAX_EXTRA_DATA_BYTES: usize = 32; +pub const MAX_LOGS_BLOOM_SIZE: usize = 256; +pub const MAX_FEE_RECIPIENT_SIZE: usize = 20; + +pub const MAX_BRANCH_PROOF_SIZE: usize = 20; + +/// DomainType('0x07000000') +/// +pub const DOMAIN_SYNC_COMMITTEE: [u8; 4] = [7, 0, 0, 0]; + +pub const PUBKEY_SIZE: usize = 48; +pub const SIGNATURE_SIZE: usize = 96; + +const_assert!(SYNC_COMMITTEE_BITS_SIZE == SYNC_COMMITTEE_SIZE / 8); diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/functions.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/functions.rs new file mode 100644 index 0000000000000000000000000000000000000000..751e63c7f86afb6ae4161e9ec4b9ebe750d67436 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/functions.rs @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate::config::{ + EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SYNC_COMMITTEE_BITS_SIZE, + SYNC_COMMITTEE_SIZE, +}; + +/// Decompress packed bitvector into byte vector according to SSZ deserialization rules. Each byte +/// in the decompressed vector is either 0 or 1. +pub fn decompress_sync_committee_bits( + input: [u8; SYNC_COMMITTEE_BITS_SIZE], +) -> [u8; SYNC_COMMITTEE_SIZE] { + primitives::decompress_sync_committee_bits::( + input, + ) +} + +/// Compute the sync committee period in which a slot is contained. +pub fn compute_period(slot: u64) -> u64 { + slot / SLOTS_PER_EPOCH as u64 / EPOCHS_PER_SYNC_COMMITTEE_PERIOD as u64 +} + +/// Compute epoch in which a slot is contained. +pub fn compute_epoch(slot: u64, slots_per_epoch: u64) -> u64 { + slot / slots_per_epoch +} + +/// Sums the bit vector of sync committee participation. +pub fn sync_committee_sum(sync_committee_bits: &[u8]) -> u32 { + sync_committee_bits.iter().fold(0, |acc: u32, x| acc + *x as u32) +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/impls.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..7e72b12631cc482a712129befa7806352a82f449 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/impls.rs @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use snowbridge_core::inbound::{ + VerificationError::{self, *}, + *, +}; +use snowbridge_ethereum::Receipt; + +impl Verifier for Pallet { + /// Verify a message by verifying the existence of the corresponding + /// Ethereum log in a block. Returns the log if successful. 
The execution header containing + /// the log should be in the beacon client storage, meaning it has been verified and is an + /// ancestor of a finalized beacon block. + fn verify(event_log: &Log, proof: &Proof) -> Result<(), VerificationError> { + log::info!( + target: "ethereum-beacon-client", + "💫 Verifying message with block hash {}", + proof.block_hash, + ); + + let header = >::get(proof.block_hash).ok_or(HeaderNotFound)?; + + let receipt = match Self::verify_receipt_inclusion(header.receipts_root, proof) { + Ok(receipt) => receipt, + Err(err) => { + log::error!( + target: "ethereum-beacon-client", + "💫 Verification of receipt inclusion failed for block {}: {:?}", + proof.block_hash, + err + ); + return Err(err) + }, + }; + + log::trace!( + target: "ethereum-beacon-client", + "💫 Verified receipt inclusion for transaction at index {} in block {}", + proof.tx_index, proof.block_hash, + ); + + event_log.validate().map_err(|_| InvalidLog)?; + + // Convert snowbridge_core::inbound::Log to snowbridge_ethereum::Log. + let event_log = snowbridge_ethereum::Log { + address: event_log.address, + topics: event_log.topics.clone(), + data: event_log.data.clone(), + }; + + if !receipt.contains_log(&event_log) { + log::error!( + target: "ethereum-beacon-client", + "💫 Event log not found in receipt for transaction at index {} in block {}", + proof.tx_index, proof.block_hash, + ); + return Err(LogNotFound) + } + + log::info!( + target: "ethereum-beacon-client", + "💫 Receipt verification successful for {}", + proof.block_hash, + ); + + Ok(()) + } +} + +impl Pallet { + /// Verifies that the receipt encoded in `proof.data` is included in the block given by + /// `proof.block_hash`. + pub fn verify_receipt_inclusion( + receipts_root: H256, + proof: &Proof, + ) -> Result { + let result = verify_receipt_proof(receipts_root, &proof.data.1).ok_or(InvalidProof)?; + + match result { + Ok(receipt) => Ok(receipt), + Err(err) => { + log::trace!( + target: "ethereum-beacon-client", + "💫 Failed to decode transaction receipt: {}", + err + ); + Err(InvalidProof) + }, + } + } +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/lib.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..fdda200251ac791d67cbe778b4e8d4363a615357 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/lib.rs @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Ethereum Beacon Client +//! +//! A light client that verifies consensus updates signed by the sync committee of the beacon chain. +//! +//! # Extrinsics +//! +//! ## Governance +//! +//! * [`Call::force_checkpoint`]: Set the initial trusted consensus checkpoint. +//! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable +//! processing of consensus updates. +//! +//! ## Consensus Updates +//! +//! * [`Call::submit`]: Submit a finalized beacon header with an optional sync committee update +//! * [`Call::submit_execution_header`]: Submit an execution header together with an ancestry proof +//! that can be verified against an already imported finalized beacon header.
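Editorial aside (not part of the patch): the `config` module above turns each hard-coded generalized index into a Merkle proof depth and subtree index, and `functions.rs` reduces slots to sync-committee periods. The standalone sketch below recomputes a few of those derived values with the mainnet constants, assuming `subtree_index`/`generalized_index_length` in `primitives::merkle_proof` follow the standard SSZ generalized-index formulas (depth = floor(log2(g)), subtree index = g - 2^depth); the `depth`/`subtree` helper names here are hypothetical and exist only for illustration.

// Illustrative sketch only. Recomputes the derived proof constants and period math.
fn depth(gindex: usize) -> usize {
    // floor(log2(gindex)) for gindex > 0
    usize::BITS as usize - 1 - gindex.leading_zeros() as usize
}

fn subtree(gindex: usize) -> usize {
    // Position of the leaf within the subtree rooted at depth `depth(gindex)`.
    gindex - (1 << depth(gindex))
}

fn main() {
    // get_generalized_index(BeaconState, 'finalized_checkpoint', 'root') == 105
    assert_eq!(depth(105), 6);   // expected FINALIZED_ROOT_DEPTH
    assert_eq!(subtree(105), 41); // expected FINALIZED_ROOT_SUBTREE_INDEX

    // get_generalized_index(BeaconState, 'block_roots') == 37
    assert_eq!(depth(37), 5);
    assert_eq!(subtree(37), 5);

    // Period arithmetic with the mainnet constants: SLOTS_PER_EPOCH = 32 and
    // EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256, so one period spans 8192 slots.
    let slots_per_period = 32u64 * 256;
    let compute_period = |slot: u64| slot / 32 / 256; // mirrors functions::compute_period
    assert_eq!(compute_period(5_809_441), 5_809_441 / slots_per_period); // period 709
    assert_eq!(compute_period(8_191), 0);
    assert_eq!(compute_period(8_192), 1);
}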
+#![cfg_attr(not(feature = "std"), no_std)] + +pub mod config; +pub mod functions; +pub mod impls; +pub mod types; +pub mod weights; + +#[cfg(any(test, feature = "fuzzing"))] +pub mod mock; + +#[cfg(all(test, not(feature = "beacon-spec-mainnet")))] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +use frame_support::{ + dispatch::DispatchResult, pallet_prelude::OptionQuery, traits::Get, transactional, +}; +use frame_system::ensure_signed; +use primitives::{ + fast_aggregate_verify, verify_merkle_branch, verify_receipt_proof, BeaconHeader, BlsError, + CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, ForkData, ForkVersion, + ForkVersions, PublicKeyPrepared, SigningData, +}; +use snowbridge_core::{BasicOperatingMode, RingBufferMap}; +use sp_core::H256; +use sp_std::prelude::*; +pub use weights::WeightInfo; + +use functions::{ + compute_epoch, compute_period, decompress_sync_committee_bits, sync_committee_sum, +}; +pub use types::ExecutionHeaderBuffer; +use types::{ + CheckpointUpdate, ExecutionHeaderUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, + Update, +}; + +pub use pallet::*; + +pub use config::SLOTS_PER_HISTORICAL_ROOT; + +pub const LOG_TARGET: &str = "ethereum-beacon-client"; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] + #[codec(mel_bound(T: Config))] + #[scale_info(skip_type_params(T))] + pub struct MaxFinalizedHeadersToKeep(PhantomData); + impl Get for MaxFinalizedHeadersToKeep { + fn get() -> u32 { + // Consider max latency allowed between LatestFinalizedState and LatestExecutionState is + // the total slots in one sync_committee_period so 1 should be fine we keep 2 periods + // here for redundancy. + const MAX_REDUNDANCY: u32 = 2; + config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD as u32 * MAX_REDUNDANCY + } + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + #[pallet::constant] + type ForkVersions: Get; + /// Maximum number of execution headers to keep + #[pallet::constant] + type MaxExecutionHeadersToKeep: Get; + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + BeaconHeaderImported { + block_hash: H256, + slot: u64, + }, + ExecutionHeaderImported { + block_hash: H256, + block_number: u64, + }, + SyncCommitteeUpdated { + period: u64, + }, + /// Set OperatingMode + OperatingModeChanged { + mode: BasicOperatingMode, + }, + } + + #[pallet::error] + pub enum Error { + SkippedSyncCommitteePeriod, + /// Attested header is older than latest finalized header. + IrrelevantUpdate, + NotBootstrapped, + SyncCommitteeParticipantsNotSupermajority, + InvalidHeaderMerkleProof, + InvalidSyncCommitteeMerkleProof, + InvalidExecutionHeaderProof, + InvalidAncestryMerkleProof, + InvalidBlockRootsRootMerkleProof, + HeaderNotFinalized, + BlockBodyHashTreeRootFailed, + HeaderHashTreeRootFailed, + SyncCommitteeHashTreeRootFailed, + SigningRootHashTreeRootFailed, + ForkDataHashTreeRootFailed, + ExpectedFinalizedHeaderNotStored, + BLSPreparePublicKeysFailed, + BLSVerificationFailed(BlsError), + InvalidUpdateSlot, + /// The given update is not in the expected period, or the given next sync committee does + /// not match the next sync committee in storage. 
+ InvalidSyncCommitteeUpdate, + ExecutionHeaderTooFarBehind, + ExecutionHeaderSkippedBlock, + Halted, + } + + /// Latest imported checkpoint root + #[pallet::storage] + #[pallet::getter(fn initial_checkpoint_root)] + pub(super) type InitialCheckpointRoot = StorageValue<_, H256, ValueQuery>; + + /// Latest imported finalized block root + #[pallet::storage] + #[pallet::getter(fn latest_finalized_block_root)] + pub(super) type LatestFinalizedBlockRoot = StorageValue<_, H256, ValueQuery>; + + /// Beacon state by finalized block root + #[pallet::storage] + #[pallet::getter(fn finalized_beacon_state)] + pub(super) type FinalizedBeaconState = + StorageMap<_, Identity, H256, CompactBeaconState, OptionQuery>; + + /// Finalized Headers: Current position in ring buffer + #[pallet::storage] + pub(crate) type FinalizedBeaconStateIndex = StorageValue<_, u32, ValueQuery>; + + /// Finalized Headers: Mapping of ring buffer index to a pruning candidate + #[pallet::storage] + pub(crate) type FinalizedBeaconStateMapping = + StorageMap<_, Identity, u32, H256, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn validators_root)] + pub(super) type ValidatorsRoot = StorageValue<_, H256, ValueQuery>; + + /// Sync committee for current period + #[pallet::storage] + pub(super) type CurrentSyncCommittee = + StorageValue<_, SyncCommitteePrepared, ValueQuery>; + + /// Sync committee for next period + #[pallet::storage] + pub(super) type NextSyncCommittee = + StorageValue<_, SyncCommitteePrepared, ValueQuery>; + + /// Latest imported execution header + #[pallet::storage] + #[pallet::getter(fn latest_execution_state)] + pub(super) type LatestExecutionState = + StorageValue<_, ExecutionHeaderState, ValueQuery>; + + /// Execution Headers + #[pallet::storage] + pub type ExecutionHeaders = + StorageMap<_, Identity, H256, CompactExecutionHeader, OptionQuery>; + + /// Execution Headers: Current position in ring buffer + #[pallet::storage] + pub type ExecutionHeaderIndex = StorageValue<_, u32, ValueQuery>; + + /// Execution Headers: Mapping of ring buffer index to a pruning candidate + #[pallet::storage] + pub type ExecutionHeaderMapping = StorageMap<_, Identity, u32, H256, ValueQuery>; + + /// The current operating mode of the pallet. + #[pallet::storage] + #[pallet::getter(fn operating_mode)] + pub type OperatingMode = StorageValue<_, BasicOperatingMode, ValueQuery>; + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::force_checkpoint())] + #[transactional] + /// Used for pallet initialization and light client resetting. Needs to be called by + /// the root origin. + pub fn force_checkpoint( + origin: OriginFor, + update: Box, + ) -> DispatchResult { + ensure_root(origin)?; + Self::process_checkpoint_update(&update)?; + Ok(()) + } + + #[pallet::call_index(1)] + #[pallet::weight({ + match update.next_sync_committee_update { + None => T::WeightInfo::submit(), + Some(_) => T::WeightInfo::submit_with_sync_committee(), + } + })] + #[transactional] + /// Submits a new finalized beacon header update. The update may contain the next + /// sync committee. + pub fn submit(origin: OriginFor, update: Box) -> DispatchResult { + ensure_signed(origin)?; + ensure!(!Self::operating_mode().is_halted(), Error::::Halted); + Self::process_update(&update)?; + Ok(()) + } + + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::submit_execution_header())] + #[transactional] + /// Submits a new execution header update. 
The relevant related beacon header + /// is also included to prove the execution header, as well as ancestry proof data. + pub fn submit_execution_header( + origin: OriginFor, + update: Box, + ) -> DispatchResult { + ensure_signed(origin)?; + ensure!(!Self::operating_mode().is_halted(), Error::::Halted); + Self::process_execution_header_update(&update)?; + Ok(()) + } + + /// Halt or resume all pallet operations. May only be called by root. + #[pallet::call_index(3)] + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operating_mode( + origin: OriginFor, + mode: BasicOperatingMode, + ) -> DispatchResult { + ensure_root(origin)?; + OperatingMode::::set(mode); + Self::deposit_event(Event::OperatingModeChanged { mode }); + Ok(()) + } + } + + impl Pallet { + /// Forces a finalized beacon header checkpoint update. The current sync committee, + /// with a header attesting to the current sync committee, should be provided. + /// An `block_roots` proof should also be provided. This is used for ancestry proofs + /// for execution header updates. + pub(crate) fn process_checkpoint_update(update: &CheckpointUpdate) -> DispatchResult { + let sync_committee_root = update + .current_sync_committee + .hash_tree_root() + .map_err(|_| Error::::SyncCommitteeHashTreeRootFailed)?; + + // Verifies the sync committee in the Beacon state. + ensure!( + verify_merkle_branch( + sync_committee_root, + &update.current_sync_committee_branch, + config::CURRENT_SYNC_COMMITTEE_SUBTREE_INDEX, + config::CURRENT_SYNC_COMMITTEE_DEPTH, + update.header.state_root + ), + Error::::InvalidSyncCommitteeMerkleProof + ); + + let header_root: H256 = update + .header + .hash_tree_root() + .map_err(|_| Error::::HeaderHashTreeRootFailed)?; + + // This is used for ancestry proofs in ExecutionHeader updates. This verifies the + // BeaconState: the beacon state root is the tree root; the `block_roots` hash is the + // tree leaf. + ensure!( + verify_merkle_branch( + update.block_roots_root, + &update.block_roots_branch, + config::BLOCK_ROOTS_SUBTREE_INDEX, + config::BLOCK_ROOTS_DEPTH, + update.header.state_root + ), + Error::::InvalidBlockRootsRootMerkleProof + ); + + let sync_committee_prepared: SyncCommitteePrepared = (&update.current_sync_committee) + .try_into() + .map_err(|_| >::BLSPreparePublicKeysFailed)?; + >::set(sync_committee_prepared); + >::kill(); + InitialCheckpointRoot::::set(header_root); + >::kill(); + + Self::store_validators_root(update.validators_root); + Self::store_finalized_header(header_root, update.header, update.block_roots_root)?; + + Ok(()) + } + + pub(crate) fn process_update(update: &Update) -> DispatchResult { + Self::cross_check_execution_state()?; + Self::verify_update(update)?; + Self::apply_update(update)?; + Ok(()) + } + + /// Cross check to make sure that execution header import does not fall too far behind + /// finalised beacon header import. If that happens just return an error and pause + /// processing until execution header processing has caught up. + pub(crate) fn cross_check_execution_state() -> DispatchResult { + let latest_finalized_state = + FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()) + .ok_or(Error::::NotBootstrapped)?; + let latest_execution_state = Self::latest_execution_state(); + // The execution header import should be at least within the slot range of a sync + // committee period. 
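+			// For example, under the Ethereum mainnet preset (EPOCHS_PER_SYNC_COMMITTEE_PERIOD =
+			// 256, SLOTS_PER_EPOCH = 32) the tolerated lag works out to 256 * 32 = 8192 slots,
+			// roughly 27 hours at 12 seconds per slot; the exact values are taken from this
+			// pallet's `config` module and are much smaller under the minimal spec.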
+			let max_latency = config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD * config::SLOTS_PER_EPOCH;
+			ensure!(
+				latest_execution_state.beacon_slot == 0 ||
+					latest_finalized_state.slot <
+						latest_execution_state.beacon_slot + max_latency as u64,
+				Error::<T>::ExecutionHeaderTooFarBehind
+			);
+			Ok(())
+		}
+
+		/// References and strictly follows the Altair light client specification.
+		/// Verifies that the provided next sync committee is valid through a series of checks
+		/// (including checking that a sync committee period isn't skipped and that the header
+		/// is signed by the current sync committee).
+		fn verify_update(update: &Update) -> DispatchResult {
+			// Verify sync committee has sufficient participants.
+			let participation =
+				decompress_sync_committee_bits(update.sync_aggregate.sync_committee_bits);
+			Self::sync_committee_participation_is_supermajority(&participation)?;
+
+			// Verify update does not skip a sync committee period.
+			ensure!(
+				update.signature_slot > update.attested_header.slot &&
+					update.attested_header.slot >= update.finalized_header.slot,
+				Error::<T>::InvalidUpdateSlot
+			);
+			// Retrieve latest finalized state.
+			let latest_finalized_state =
+				FinalizedBeaconState::<T>::get(LatestFinalizedBlockRoot::<T>::get())
+					.ok_or(Error::<T>::NotBootstrapped)?;
+			let store_period = compute_period(latest_finalized_state.slot);
+			let signature_period = compute_period(update.signature_slot);
+			if <NextSyncCommittee<T>>::exists() {
+				ensure!(
+					(store_period..=store_period + 1).contains(&signature_period),
+					Error::<T>::SkippedSyncCommitteePeriod
+				)
+			} else {
+				ensure!(signature_period == store_period, Error::<T>::SkippedSyncCommitteePeriod)
+			}
+
+			// Verify update is relevant.
+			let update_attested_period = compute_period(update.attested_header.slot);
+			let update_has_next_sync_committee = !<NextSyncCommittee<T>>::exists() &&
+				(update.next_sync_committee_update.is_some() &&
+					update_attested_period == store_period);
+			ensure!(
+				update.attested_header.slot > latest_finalized_state.slot ||
+					update_has_next_sync_committee,
+				Error::<T>::IrrelevantUpdate
+			);
+
+			// Verify that the `finality_branch`, if present, confirms `finalized_header` to match
+			// the finalized checkpoint root saved in the state of `attested_header`.
+			let finalized_block_root: H256 = update
+				.finalized_header
+				.hash_tree_root()
+				.map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?;
+			ensure!(
+				verify_merkle_branch(
+					finalized_block_root,
+					&update.finality_branch,
+					config::FINALIZED_ROOT_SUBTREE_INDEX,
+					config::FINALIZED_ROOT_DEPTH,
+					update.attested_header.state_root
+				),
+				Error::<T>::InvalidHeaderMerkleProof
+			);
+
+			// Although the following check is not part of the ALC spec, we verify that
+			// `block_roots_root` is rooted in the state of `finalized_header` so that it can be
+			// cached for later use in `verify_ancestry_proof`.
+			ensure!(
+				verify_merkle_branch(
+					update.block_roots_root,
+					&update.block_roots_branch,
+					config::BLOCK_ROOTS_SUBTREE_INDEX,
+					config::BLOCK_ROOTS_DEPTH,
+					update.finalized_header.state_root
+				),
+				Error::<T>::InvalidBlockRootsRootMerkleProof
+			);
+
+			// Verify that the `next_sync_committee`, if present, actually is the next sync
+			// committee saved in the state of the `attested_header`.
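+			// Two checks follow: if the attested header is in the stored period and the next
+			// sync committee is already known, the supplied committee must hash to the stored
+			// root; in all cases the supplied committee must be proven against the state root
+			// of `attested_header` via `next_sync_committee_branch`.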
+			if let Some(next_sync_committee_update) = &update.next_sync_committee_update {
+				let sync_committee_root = next_sync_committee_update
+					.next_sync_committee
+					.hash_tree_root()
+					.map_err(|_| Error::<T>::SyncCommitteeHashTreeRootFailed)?;
+				if update_attested_period == store_period && <NextSyncCommittee<T>>::exists() {
+					let next_committee_root = <NextSyncCommittee<T>>::get().root;
+					ensure!(
+						sync_committee_root == next_committee_root,
+						Error::<T>::InvalidSyncCommitteeUpdate
+					);
+				}
+				ensure!(
+					verify_merkle_branch(
+						sync_committee_root,
+						&next_sync_committee_update.next_sync_committee_branch,
+						config::NEXT_SYNC_COMMITTEE_SUBTREE_INDEX,
+						config::NEXT_SYNC_COMMITTEE_DEPTH,
+						update.attested_header.state_root
+					),
+					Error::<T>::InvalidSyncCommitteeMerkleProof
+				);
+			}
+
+			// Verify sync committee aggregate signature.
+			let sync_committee = if signature_period == store_period {
+				<CurrentSyncCommittee<T>>::get()
+			} else {
+				<NextSyncCommittee<T>>::get()
+			};
+			let absent_pubkeys =
+				Self::find_pubkeys(&participation, (*sync_committee.pubkeys).as_ref(), false);
+			let signing_root = Self::signing_root(
+				&update.attested_header,
+				Self::validators_root(),
+				update.signature_slot,
+			)?;
+			// As an optimisation, start from the aggregate public key of the full sync committee
+			// and subtract the (absolute minority of) members that did not participate, rather
+			// than aggregating the participating keys from scratch.
+			fast_aggregate_verify(
+				&sync_committee.aggregate_pubkey,
+				&absent_pubkeys,
+				signing_root,
+				&update.sync_aggregate.sync_committee_signature,
+			)
+			.map_err(|e| Error::<T>::BLSVerificationFailed(e))?;
+
+			Ok(())
+		}
+
+		/// Reference and strictly follows the Altair light client specification's rules for
+		/// applying a validated update to the light client store.
+		fn apply_update(update: &Update) -> DispatchResult {
+			let latest_finalized_state =
+				FinalizedBeaconState::<T>::get(LatestFinalizedBlockRoot::<T>::get())
+					.ok_or(Error::<T>::NotBootstrapped)?;
+			if let Some(next_sync_committee_update) = &update.next_sync_committee_update {
+				let store_period = compute_period(latest_finalized_state.slot);
+				let update_finalized_period = compute_period(update.finalized_header.slot);
+				let sync_committee_prepared: SyncCommitteePrepared = (&next_sync_committee_update
+					.next_sync_committee)
+					.try_into()
+					.map_err(|_| <Error<T>>::BLSPreparePublicKeysFailed)?;
+
+				if !<NextSyncCommittee<T>>::exists() {
+					ensure!(
+						update_finalized_period == store_period,
+						<Error<T>>::InvalidSyncCommitteeUpdate
+					);
+					<NextSyncCommittee<T>>::set(sync_committee_prepared);
+				} else if update_finalized_period == store_period + 1 {
+					<CurrentSyncCommittee<T>>::set(<NextSyncCommittee<T>>::get());
+					<NextSyncCommittee<T>>::set(sync_committee_prepared);
+				}
+				log::info!(
+					target: LOG_TARGET,
+					"💫 SyncCommitteeUpdated at period {}.",
+					update_finalized_period
+				);
+				Self::deposit_event(Event::SyncCommitteeUpdated {
+					period: update_finalized_period,
+				});
+			};
+
+			if update.finalized_header.slot > latest_finalized_state.slot {
+				let finalized_block_root: H256 = update
+					.finalized_header
+					.hash_tree_root()
+					.map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?;
+				Self::store_finalized_header(
+					finalized_block_root,
+					update.finalized_header,
+					update.block_roots_root,
+				)?;
+			}
+
+			Ok(())
+		}
+
+		/// Validates an execution header for import. The beacon header containing the execution
+		/// header is sent, plus the execution header, along with a proof that the execution header
+		/// is rooted in the beacon header body.
+		pub(crate) fn process_execution_header_update(
+			update: &ExecutionHeaderUpdate,
+		) -> DispatchResult {
+			let latest_finalized_state =
+				FinalizedBeaconState::<T>::get(LatestFinalizedBlockRoot::<T>::get())
+					.ok_or(Error::<T>::NotBootstrapped)?;
+			// Checks that the header is an ancestor of a finalized header, using slot number.
+ ensure!( + update.header.slot <= latest_finalized_state.slot, + Error::::HeaderNotFinalized + ); + + // Checks that we don't skip execution headers, they need to be imported sequentially. + let latest_execution_state: ExecutionHeaderState = Self::latest_execution_state(); + ensure!( + latest_execution_state.block_number == 0 || + update.execution_header.block_number == + latest_execution_state.block_number + 1, + Error::::ExecutionHeaderSkippedBlock + ); + + // Gets the hash tree root of the execution header, in preparation for the execution + // header proof (used to check that the execution header is rooted in the beacon + // header body. + let execution_header_root: H256 = update + .execution_header + .hash_tree_root() + .map_err(|_| Error::::BlockBodyHashTreeRootFailed)?; + + ensure!( + verify_merkle_branch( + execution_header_root, + &update.execution_branch, + config::EXECUTION_HEADER_SUBTREE_INDEX, + config::EXECUTION_HEADER_DEPTH, + update.header.body_root + ), + Error::::InvalidExecutionHeaderProof + ); + + let block_root: H256 = update + .header + .hash_tree_root() + .map_err(|_| Error::::HeaderHashTreeRootFailed)?; + + match &update.ancestry_proof { + Some(proof) => { + Self::verify_ancestry_proof( + block_root, + update.header.slot, + &proof.header_branch, + proof.finalized_block_root, + )?; + }, + None => { + // If the ancestry proof is not provided, we expect this header to be a + // finalized header. We need to check that the header hash matches the finalized + // header root at the expected slot. + let state = >::get(block_root) + .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; + if update.header.slot != state.slot { + return Err(Error::::ExpectedFinalizedHeaderNotStored.into()) + } + }, + } + + Self::store_execution_header( + update.execution_header.block_hash, + update.execution_header.clone().into(), + update.header.slot, + block_root, + ); + + Ok(()) + } + + /// Verify that `block_root` is an ancestor of `finalized_block_root` Used to prove that + /// an execution header is an ancestor of a finalized header (i.e. the blocks are + /// on the same chain). + fn verify_ancestry_proof( + block_root: H256, + block_slot: u64, + block_root_proof: &[H256], + finalized_block_root: H256, + ) -> DispatchResult { + let state = >::get(finalized_block_root) + .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; + + ensure!(block_slot < state.slot, Error::::HeaderNotFinalized); + + let index_in_array = block_slot % (SLOTS_PER_HISTORICAL_ROOT as u64); + let leaf_index = (SLOTS_PER_HISTORICAL_ROOT as u64) + index_in_array; + + ensure!( + verify_merkle_branch( + block_root, + block_root_proof, + leaf_index as usize, + config::BLOCK_ROOT_AT_INDEX_DEPTH, + state.block_roots_root + ), + Error::::InvalidAncestryMerkleProof + ); + + Ok(()) + } + + /// Computes the signing root for a given beacon header and domain. The hash tree root + /// of the beacon header is computed, and then the combination of the beacon header hash + /// and the domain makes up the signing root. 
+		pub(super) fn compute_signing_root(
+			beacon_header: &BeaconHeader,
+			domain: H256,
+		) -> Result<H256, DispatchError> {
+			let beacon_header_root = beacon_header
+				.hash_tree_root()
+				.map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?;
+
+			let hash_root = SigningData { object_root: beacon_header_root, domain }
+				.hash_tree_root()
+				.map_err(|_| Error::<T>::SigningRootHashTreeRootFailed)?;
+
+			Ok(hash_root)
+		}
+
+		/// Stores a compacted beacon state (the slot and the root of the `block_roots` beacon
+		/// state field, used for ancestry proofs) in a ring buffer map, with the header root
+		/// as map key.
+		fn store_finalized_header(
+			header_root: H256,
+			header: BeaconHeader,
+			block_roots_root: H256,
+		) -> DispatchResult {
+			let slot = header.slot;
+
+			<FinalizedBeaconStateBuffer<T>>::insert(
+				header_root,
+				CompactBeaconState { slot: header.slot, block_roots_root },
+			);
+			<LatestFinalizedBlockRoot<T>>::set(header_root);
+
+			log::info!(
+				target: LOG_TARGET,
+				"💫 Updated latest finalized block root {} at slot {}.",
+				header_root,
+				slot
+			);
+
+			Self::deposit_event(Event::BeaconHeaderImported { block_hash: header_root, slot });
+
+			Ok(())
+		}
+
+		/// Stores the provided execution header in pallet storage. The header is stored
+		/// in a ring buffer map, with the block hash as map key. The last imported execution
+		/// header is also kept in storage, for the relayer to check import progress.
+		pub(crate) fn store_execution_header(
+			block_hash: H256,
+			header: CompactExecutionHeader,
+			beacon_slot: u64,
+			beacon_block_root: H256,
+		) {
+			let block_number = header.block_number;
+
+			<ExecutionHeaderBuffer<T>>::insert(block_hash, header);
+
+			log::trace!(
+				target: LOG_TARGET,
+				"💫 Updated latest execution block at {} to number {}.",
+				block_hash,
+				block_number
+			);
+
+			LatestExecutionState::<T>::mutate(|s| {
+				s.beacon_block_root = beacon_block_root;
+				s.beacon_slot = beacon_slot;
+				s.block_hash = block_hash;
+				s.block_number = block_number;
+			});
+
+			Self::deposit_event(Event::ExecutionHeaderImported { block_hash, block_number });
+		}
+
+		/// Stores the validators root in storage. The validators root is the hash tree root of
+		/// all the validators at genesis and is used to identify the chain that we are on
+		/// (used in conjunction with the fork version).
+		fn store_validators_root(validators_root: H256) {
+			<ValidatorsRoot<T>>::set(validators_root);
+		}
+
+		/// Returns the domain for the domain_type and fork_version. The domain is used to
+		/// distinguish between the different players in the chain (see DomainTypes) and to
+		/// ensure we are addressing the correct chain.
+		pub(super) fn compute_domain(
+			domain_type: Vec<u8>,
+			fork_version: ForkVersion,
+			genesis_validators_root: H256,
+		) -> Result<H256, DispatchError> {
+			let fork_data_root =
+				Self::compute_fork_data_root(fork_version, genesis_validators_root)?;
+
+			let mut domain = [0u8; 32];
+			domain[0..4].copy_from_slice(&(domain_type));
+			domain[4..32].copy_from_slice(&(fork_data_root.0[..28]));
+
+			Ok(domain.into())
+		}
+
+		/// Computes the fork data root. The fork data root is a merkleization of the current
+		/// fork version and the genesis validators root.
+		fn compute_fork_data_root(
+			current_version: ForkVersion,
+			genesis_validators_root: H256,
+		) -> Result<H256, DispatchError> {
+			let hash_root = ForkData {
+				current_version,
+				genesis_validators_root: genesis_validators_root.into(),
+			}
+			.hash_tree_root()
+			.map_err(|_| Error::<T>::ForkDataHashTreeRootFailed)?;
+
+			Ok(hash_root)
+		}
+
+		/// Checks that the sync committee bits (the votes of the sync committee members,
+		/// represented by 0 and 1 bits) amount to a supermajority (more than 2/3 of the votes
+		/// are positive).
+		pub(super) fn sync_committee_participation_is_supermajority(
+			sync_committee_bits: &[u8],
+		) -> DispatchResult {
+			let sync_committee_sum = sync_committee_sum(sync_committee_bits);
+			ensure!(
+				((sync_committee_sum * 3) as usize) >= sync_committee_bits.len() * 2,
+				Error::<T>::SyncCommitteeParticipantsNotSupermajority
+			);
+
+			Ok(())
+		}
+
+		/// Returns the fork version based on the current epoch. The hard fork versions
+		/// are defined in pallet config.
+		pub(super) fn compute_fork_version(epoch: u64) -> ForkVersion {
+			Self::select_fork_version(&T::ForkVersions::get(), epoch)
+		}
+
+		/// Returns the fork version based on the current epoch.
+		pub(super) fn select_fork_version(fork_versions: &ForkVersions, epoch: u64) -> ForkVersion {
+			if epoch >= fork_versions.capella.epoch {
+				return fork_versions.capella.version
+			}
+			if epoch >= fork_versions.bellatrix.epoch {
+				return fork_versions.bellatrix.version
+			}
+			if epoch >= fork_versions.altair.epoch {
+				return fork_versions.altair.version
+			}
+
+			fork_versions.genesis.version
+		}
+
+		/// Returns a vector of public keys that participated in signing the sync committee block.
+		/// Sync committee bits is an array of 0s and 1s, 0 meaning the corresponding sync committee
+		/// member did not participate in the vote, 1 meaning they participated.
+		/// This method can find the absent or participating members, based on the `participant`
+		/// parameter: `participant = false` returns absent members, `participant = true` returns
+		/// participating members.
+		pub fn find_pubkeys(
+			sync_committee_bits: &[u8],
+			sync_committee_pubkeys: &[PublicKeyPrepared],
+			participant: bool,
+		) -> Vec<PublicKeyPrepared> {
+			let mut pubkeys: Vec<PublicKeyPrepared> = Vec::new();
+			for (bit, pubkey) in sync_committee_bits.iter().zip(sync_committee_pubkeys.iter()) {
+				if *bit == u8::from(participant) {
+					pubkeys.push(*pubkey);
+				}
+			}
+			pubkeys
+		}
+
+		/// Calculates the signing root for a BeaconHeader. The signing root is used as the
+		/// message value in BLS signature verification.
+		pub fn signing_root(
+			header: &BeaconHeader,
+			validators_root: H256,
+			signature_slot: u64,
+		) -> Result<H256, DispatchError> {
+			let fork_version = Self::compute_fork_version(compute_epoch(
+				signature_slot,
+				config::SLOTS_PER_EPOCH as u64,
+			));
+			let domain_type = config::DOMAIN_SYNC_COMMITTEE.to_vec();
+			// Domains are used for seeds, for signatures, and for selecting aggregators.
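+			// The resulting 32-byte domain is the 4-byte domain type followed by the first 28
+			// bytes of the fork data root (see `compute_domain` above).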
+ let domain = Self::compute_domain(domain_type, fork_version, validators_root)?; + // Hash tree root of SigningData - object root + domain + let signing_root = Self::compute_signing_root(header, domain)?; + Ok(signing_root) + } + } +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..4d1d14a10158e777603baf250246c84c6831c9fc --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate as ethereum_beacon_client; +use frame_support::parameter_types; +use pallet_timestamp; +use primitives::{Fork, ForkVersions}; +use sp_core::H256; +use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; + +#[cfg(not(feature = "beacon-spec-mainnet"))] +pub mod minimal { + use super::*; + + use crate::config; + use hex_literal::hex; + use primitives::CompactExecutionHeader; + use snowbridge_core::inbound::{Log, Proof}; + use sp_runtime::BuildStorage; + use std::{fs::File, path::PathBuf}; + + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type OnSetCode = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type MaxConsumers = frame_support::traits::ConstU32<16>; + type Nonce = u64; + type Block = Block; + } + + impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); + } + + parameter_types! { + pub const ExecutionHeadersPruneThreshold: u32 = 10; + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 0, 1], // 0x00000001 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 0, 1], // 0x01000001 + epoch: 0, + }, + bellatrix: Fork { + version: [2, 0, 0, 1], // 0x02000001 + epoch: 0, + }, + capella: Fork { + version: [3, 0, 0, 1], // 0x03000001 + epoch: 0, + }, + }; + } + + impl ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); + } + + // Build genesis storage according to the mock runtime. 
+ pub fn new_tester() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + ext + } + + fn load_fixture(basename: &str) -> Result + where + T: for<'de> serde::Deserialize<'de>, + { + let filepath: PathBuf = + [env!("CARGO_MANIFEST_DIR"), "tests", "fixtures", basename].iter().collect(); + serde_json::from_reader(File::open(filepath).unwrap()) + } + + pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { + load_fixture("execution-header-update.minimal.json").unwrap() + } + + pub fn load_checkpoint_update_fixture( + ) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { + load_fixture("initial-checkpoint.minimal.json").unwrap() + } + + pub fn load_sync_committee_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("sync-committee-update.minimal.json").unwrap() + } + + pub fn load_finalized_header_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("finalized-header-update.minimal.json").unwrap() + } + + pub fn load_next_sync_committee_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("next-sync-committee-update.minimal.json").unwrap() + } + + pub fn load_next_finalized_header_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("next-finalized-header-update.minimal.json").unwrap() + } + + pub fn get_message_verification_payload() -> (Log, Proof) { + ( + Log { + address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), + topics: vec![ + hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), + hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), + hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), + ], + data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), + }, + Proof { + block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), + tx_index: 0, + data: (vec![ + hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), + hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), + hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), + ], vec![ + hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), + 
hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), + hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), + ]), + } + ) + } + + pub fn get_message_verification_header() -> CompactExecutionHeader { + CompactExecutionHeader { + parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") + .into(), + block_number: 55, + state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3") + .into(), + receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb") + .into(), + } + } +} + +#[cfg(feature = "beacon-spec-mainnet")] +pub mod mainnet { + use super::*; + + type Block = frame_system::mocking::MockBlock; + use sp_runtime::BuildStorage; + + frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type OnSetCode = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type MaxConsumers = frame_support::traits::ConstU32<16>; + type Nonce = u64; + type Block = Block; + } + + impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); + } + + parameter_types! { + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 16, 32], // 0x00001020 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 16, 32], // 0x01001020 + epoch: 36660, + }, + bellatrix: Fork { + version: [2, 0, 16, 32], // 0x02001020 + epoch: 112260, + }, + capella: Fork { + version: [3, 0, 16, 32], // 0x03001020 + epoch: 162304, + }, + }; + pub const ExecutionHeadersPruneThreshold: u32 = 10; + } + + impl ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); + } + + // Build genesis storage according to the mock runtime. + pub fn new_tester() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + ext + } +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/tests.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..92a93720ae9392a488bd4006545b2fc848533976 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/tests.rs @@ -0,0 +1,1032 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate::{ + config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH}, + functions::compute_period, + mock::minimal::*, + pallet::ExecutionHeaders, + sync_committee_sum, verify_merkle_branch, BeaconHeader, CompactBeaconState, Error, + ExecutionHeaderBuffer, FinalizedBeaconState, LatestExecutionState, LatestFinalizedBlockRoot, + NextSyncCommittee, SyncCommitteePrepared, +}; + +use frame_support::{assert_err, assert_noop, assert_ok}; +use hex_literal::hex; +use primitives::{ + CompactExecutionHeader, ExecutionHeaderState, Fork, ForkVersions, NextSyncCommitteeUpdate, +}; +use rand::{thread_rng, Rng}; +use snowbridge_core::{ + inbound::{VerificationError, Verifier}, + RingBufferMap, +}; +use sp_core::H256; +use sp_runtime::DispatchError; + +/// Arbitrary hash used for tests and invalid hashes. 
+const TEST_HASH: [u8; 32] = + hex!["5f6f02af29218292d21a69b64a794a7c0873b3e0f54611972863706e8cbdf371"]; + +/* UNIT TESTS */ + +#[test] +pub fn sum_sync_committee_participation() { + new_tester().execute_with(|| { + assert_eq!(sync_committee_sum(&[0, 1, 0, 1, 1, 0, 1, 0, 1]), 5); + }); +} + +#[test] +pub fn compute_domain() { + new_tester().execute_with(|| { + let domain = EthereumBeaconClient::compute_domain( + hex!("07000000").into(), + hex!("00000001"), + hex!("5dec7ae03261fde20d5b024dfabce8bac3276c9a4908e23d50ba8c9b50b0adff").into(), + ); + + assert_ok!(&domain); + assert_eq!( + domain.unwrap(), + hex!("0700000046324489ceb6ada6d118eacdbe94f49b1fcb49d5481a685979670c7c").into() + ); + }); +} + +#[test] +pub fn compute_signing_root_bls() { + new_tester().execute_with(|| { + let signing_root = EthereumBeaconClient::compute_signing_root( + &BeaconHeader { + slot: 3529537, + proposer_index: 192549, + parent_root: hex!( + "1f8dc05ea427f78e84e2e2666e13c3befb7106fd1d40ef8a3f67cf615f3f2a4c" + ) + .into(), + state_root: hex!( + "0dfb492a83da711996d2d76b64604f9bca9dc08b6c13cf63b3be91742afe724b" + ) + .into(), + body_root: hex!("66fba38f7c8c2526f7ddfe09c1a54dd12ff93bdd4d0df6a0950e88e802228bfa") + .into(), + }, + hex!("07000000afcaaba0efab1ca832a15152469bb09bb84641c405171dfa2d3fb45f").into(), + ); + + assert_ok!(&signing_root); + assert_eq!( + signing_root.unwrap(), + hex!("3ff6e9807da70b2f65cdd58ea1b25ed441a1d589025d2c4091182026d7af08fb").into() + ); + }); +} + +#[test] +pub fn compute_signing_root() { + new_tester().execute_with(|| { + let signing_root = EthereumBeaconClient::compute_signing_root( + &BeaconHeader { + slot: 222472, + proposer_index: 10726, + parent_root: hex!( + "5d481a9721f0ecce9610eab51d400d223683d599b7fcebca7e4c4d10cdef6ebb" + ) + .into(), + state_root: hex!( + "14eb4575895f996a84528b789ff2e4d5148242e2983f03068353b2c37015507a" + ) + .into(), + body_root: hex!("7bb669c75b12e0781d6fa85d7fc2f32d64eafba89f39678815b084c156e46cac") + .into(), + }, + hex!("07000000e7acb21061790987fa1c1e745cccfb358370b33e8af2b2c18938e6c2").into(), + ); + + assert_ok!(&signing_root); + assert_eq!( + signing_root.unwrap(), + hex!("da12b6a6d3516bc891e8a49f82fc1925cec40b9327e06457f695035303f55cd8").into() + ); + }); +} + +#[test] +pub fn compute_domain_bls() { + new_tester().execute_with(|| { + let domain = EthereumBeaconClient::compute_domain( + hex!("07000000").into(), + hex!("01000000"), + hex!("4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95").into(), + ); + + assert_ok!(&domain); + assert_eq!( + domain.unwrap(), + hex!("07000000afcaaba0efab1ca832a15152469bb09bb84641c405171dfa2d3fb45f").into() + ); + }); +} + +#[test] +pub fn verify_merkle_branch_for_finalized_root() { + new_tester().execute_with(|| { + assert!(verify_merkle_branch( + hex!("0000000000000000000000000000000000000000000000000000000000000000").into(), + &[ + hex!("0000000000000000000000000000000000000000000000000000000000000000").into(), + hex!("5f6f02af29218292d21a69b64a794a7c0873b3e0f54611972863706e8cbdf371").into(), + hex!("e7125ff9ab5a840c44bedb4731f440a405b44e15f2d1a89e27341b432fabe13d").into(), + hex!("002c1fe5bc0bd62db6f299a582f2a80a6d5748ccc82e7ed843eaf0ae0739f74a").into(), + hex!("d2dc4ba9fd4edff6716984136831e70a6b2e74fca27b8097a820cbbaa5a6e3c3").into(), + hex!("91f77a19d8afa4a08e81164bb2e570ecd10477b3b65c305566a6d2be88510584").into(), + ], + crate::config::FINALIZED_ROOT_INDEX, + crate::config::FINALIZED_ROOT_DEPTH, + hex!("e46559327592741956f6beaa0f52e49625eb85dce037a0bd2eff333c743b287f").into() + )); + }); +} + 
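+// A minimal sketch (not exercised by these tests) of the standard SSZ `is_valid_merkle_branch`
+// algorithm that `verify_merkle_branch` is expected to mirror: hash the leaf up the branch,
+// choosing the left/right position at each level from the bits of the leaf index, and compare
+// the result with the expected root. `sp_core::hashing::sha2_256` is used here purely for
+// illustration; the pallet's own hashing lives in `primitives`.
+#[allow(dead_code)]
+fn is_valid_merkle_branch_sketch(
+	leaf: H256,
+	branch: &[H256],
+	index: usize,
+	depth: usize,
+	root: H256,
+) -> bool {
+	use sp_core::hashing::sha2_256;
+	if branch.len() != depth {
+		return false
+	}
+	let mut value = leaf;
+	for (i, node) in branch.iter().enumerate() {
+		let mut data = [0u8; 64];
+		if (index >> i) & 1 == 1 {
+			// The sibling is on the left at this level.
+			data[0..32].copy_from_slice(node.as_bytes());
+			data[32..64].copy_from_slice(value.as_bytes());
+		} else {
+			// The sibling is on the right at this level.
+			data[0..32].copy_from_slice(value.as_bytes());
+			data[32..64].copy_from_slice(node.as_bytes());
+		}
+		value = sha2_256(&data).into();
+	}
+	value == root
+}
+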
+#[test] +pub fn verify_merkle_branch_fails_if_depth_and_branch_dont_match() { + new_tester().execute_with(|| { + assert!(!verify_merkle_branch( + hex!("0000000000000000000000000000000000000000000000000000000000000000").into(), + &[ + hex!("0000000000000000000000000000000000000000000000000000000000000000").into(), + hex!("5f6f02af29218292d21a69b64a794a7c0873b3e0f54611972863706e8cbdf371").into(), + hex!("e7125ff9ab5a840c44bedb4731f440a405b44e15f2d1a89e27341b432fabe13d").into(), + ], + crate::config::FINALIZED_ROOT_INDEX, + crate::config::FINALIZED_ROOT_DEPTH, + hex!("e46559327592741956f6beaa0f52e49625eb85dce037a0bd2eff333c743b287f").into() + )); + }); +} + +#[test] +pub fn sync_committee_participation_is_supermajority() { + let bits = + hex!("bffffffff7f1ffdfcfeffeffbfdffffbfffffdffffefefffdffff7f7ffff77fffdf7bff77ffdf7fffafffffff77fefffeff7effffffff5f7fedfffdfb6ddff7b" + ); + let participation = primitives::decompress_sync_committee_bits::<512, 64>(bits); + assert_ok!(EthereumBeaconClient::sync_committee_participation_is_supermajority(&participation)); +} + +#[test] +pub fn sync_committee_participation_is_supermajority_errors_when_not_supermajority() { + new_tester().execute_with(|| { + let participation: [u8; 512] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, + 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, + ]; + + assert_err!( + EthereumBeaconClient::sync_committee_participation_is_supermajority(&participation), + Error::::SyncCommitteeParticipantsNotSupermajority + ); + }); +} + +#[test] +pub fn execution_header_pruning() { + new_tester().execute_with(|| { + let execution_header_prune_threshold = ExecutionHeadersPruneThreshold::get(); + let to_be_deleted = execution_header_prune_threshold / 2; + + let mut stored_hashes = vec![]; + + for i in 0..execution_header_prune_threshold { + let mut hash = H256::default(); + thread_rng().try_fill(&mut hash.0[..]).unwrap(); + EthereumBeaconClient::store_execution_header( + hash, + CompactExecutionHeader::default(), + i as u64, + hash, + ); + stored_hashes.push(hash); + } + + // We should have 
stored everything until now + assert_eq!({ ExecutionHeaders::::iter().count() }, stored_hashes.len()); + + // Let's push extra entries so that some of the previous entries are deleted. + for i in 0..to_be_deleted { + let mut hash = H256::default(); + thread_rng().try_fill(&mut hash.0[..]).unwrap(); + EthereumBeaconClient::store_execution_header( + hash, + CompactExecutionHeader::default(), + (i + execution_header_prune_threshold) as u64, + hash, + ); + + stored_hashes.push(hash); + } + + // We should have only stored upto `execution_header_prune_threshold` + assert_eq!( + ExecutionHeaders::::iter().count() as u32, + execution_header_prune_threshold + ); + + // First `to_be_deleted` items must be deleted + for i in 0..to_be_deleted { + assert!(!ExecutionHeaders::::contains_key(stored_hashes[i as usize])); + } + + // Other entries should be part of data + for i in to_be_deleted..(to_be_deleted + execution_header_prune_threshold) { + assert!(ExecutionHeaders::::contains_key(stored_hashes[i as usize])); + } + }); +} + +#[test] +fn compute_fork_version() { + let mock_fork_versions = ForkVersions { + genesis: Fork { version: [0, 0, 0, 0], epoch: 0 }, + altair: Fork { version: [0, 0, 0, 1], epoch: 10 }, + bellatrix: Fork { version: [0, 0, 0, 2], epoch: 20 }, + capella: Fork { version: [0, 0, 0, 3], epoch: 30 }, + }; + new_tester().execute_with(|| { + assert_eq!(EthereumBeaconClient::select_fork_version(&mock_fork_versions, 0), [0, 0, 0, 0]); + assert_eq!(EthereumBeaconClient::select_fork_version(&mock_fork_versions, 1), [0, 0, 0, 0]); + assert_eq!( + EthereumBeaconClient::select_fork_version(&mock_fork_versions, 10), + [0, 0, 0, 1] + ); + assert_eq!( + EthereumBeaconClient::select_fork_version(&mock_fork_versions, 21), + [0, 0, 0, 2] + ); + assert_eq!( + EthereumBeaconClient::select_fork_version(&mock_fork_versions, 20), + [0, 0, 0, 2] + ); + assert_eq!( + EthereumBeaconClient::select_fork_version(&mock_fork_versions, 32), + [0, 0, 0, 3] + ); + }); +} + +#[test] +fn find_absent_keys() { + let participation: [u8; 32] = [ + 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, + ]; + let update = load_sync_committee_update_fixture(); + let sync_committee_prepared: SyncCommitteePrepared = + (&update.next_sync_committee_update.unwrap().next_sync_committee) + .try_into() + .unwrap(); + + new_tester().execute_with(|| { + let pubkeys = EthereumBeaconClient::find_pubkeys( + &participation, + (*sync_committee_prepared.pubkeys).as_ref(), + false, + ); + assert_eq!(pubkeys.len(), 2); + assert_eq!(pubkeys[0], sync_committee_prepared.pubkeys[0]); + assert_eq!(pubkeys[1], sync_committee_prepared.pubkeys[7]); + }); +} + +#[test] +fn find_present_keys() { + let participation: [u8; 32] = [ + 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, + 1, 0, + ]; + let update = load_sync_committee_update_fixture(); + let sync_committee_prepared: SyncCommitteePrepared = + (&update.next_sync_committee_update.unwrap().next_sync_committee) + .try_into() + .unwrap(); + + new_tester().execute_with(|| { + let pubkeys = EthereumBeaconClient::find_pubkeys( + &participation, + (*sync_committee_prepared.pubkeys).as_ref(), + true, + ); + assert_eq!(pubkeys.len(), 4); + assert_eq!(pubkeys[0], sync_committee_prepared.pubkeys[1]); + assert_eq!(pubkeys[1], sync_committee_prepared.pubkeys[8]); + assert_eq!(pubkeys[2], sync_committee_prepared.pubkeys[26]); + assert_eq!(pubkeys[3], sync_committee_prepared.pubkeys[30]); + }); +} + +#[test] +fn 
cross_check_execution_state() { + new_tester().execute_with(|| { + let header_root: H256 = TEST_HASH.into(); + >::insert( + header_root, + CompactBeaconState { + // set slot to period 5 + slot: ((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) * 5) as u64, + block_roots_root: Default::default(), + }, + ); + LatestFinalizedBlockRoot::::set(header_root); + >::set(ExecutionHeaderState { + beacon_block_root: Default::default(), + // set slot to period 2 + beacon_slot: ((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) * 2) as u64, + block_hash: Default::default(), + block_number: 0, + }); + + assert_err!( + EthereumBeaconClient::cross_check_execution_state(), + Error::::ExecutionHeaderTooFarBehind + ); + }); +} + +/* SYNC PROCESS TESTS */ + +#[test] +fn process_initial_checkpoint() { + let checkpoint = load_checkpoint_update_fixture(); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::force_checkpoint( + RuntimeOrigin::root(), + Box::new(checkpoint.clone()) + )); + let block_root: H256 = checkpoint.header.hash_tree_root().unwrap(); + assert!(>::contains_key(block_root)); + }); +} + +#[test] +fn process_initial_checkpoint_with_invalid_sync_committee_proof() { + let mut checkpoint = load_checkpoint_update_fixture(); + checkpoint.current_sync_committee_branch[0] = TEST_HASH.into(); + + new_tester().execute_with(|| { + assert_err!( + EthereumBeaconClient::force_checkpoint(RuntimeOrigin::root(), Box::new(checkpoint)), + Error::::InvalidSyncCommitteeMerkleProof + ); + }); +} + +#[test] +fn process_initial_checkpoint_with_invalid_blocks_root_proof() { + let mut checkpoint = load_checkpoint_update_fixture(); + checkpoint.block_roots_branch[0] = TEST_HASH.into(); + + new_tester().execute_with(|| { + assert_err!( + EthereumBeaconClient::force_checkpoint(RuntimeOrigin::root(), Box::new(checkpoint)), + Error::::InvalidBlockRootsRootMerkleProof + ); + }); +} + +#[test] +fn submit_update_in_current_period() { + let checkpoint = load_checkpoint_update_fixture(); + let update = load_finalized_header_update_fixture(); + let initial_period = compute_period(checkpoint.header.slot); + let update_period = compute_period(update.finalized_header.slot); + assert_eq!(initial_period, update_period); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit( + RuntimeOrigin::signed(1), + Box::new(update.clone()) + )); + let block_root: H256 = update.finalized_header.hash_tree_root().unwrap(); + assert!(>::contains_key(block_root)); + }); +} + +#[test] +fn submit_update_with_sync_committee_in_current_period() { + let checkpoint = load_checkpoint_update_fixture(); + let update = load_sync_committee_update_fixture(); + let init_period = compute_period(checkpoint.header.slot); + let update_period = compute_period(update.finalized_header.slot); + assert_eq!(init_period, update_period); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert!(!>::exists()); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update))); + assert!(>::exists()); + }); +} + +#[test] +fn submit_update_in_next_period() { + let checkpoint = load_checkpoint_update_fixture(); + let sync_committee_update = load_sync_committee_update_fixture(); + let update = load_next_finalized_header_update_fixture(); + let sync_committee_period = compute_period(sync_committee_update.finalized_header.slot); + let next_sync_committee_period = 
compute_period(update.finalized_header.slot); + assert_eq!(sync_committee_period + 1, next_sync_committee_period); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit( + RuntimeOrigin::signed(1), + Box::new(sync_committee_update.clone()) + )); + assert_ok!(EthereumBeaconClient::submit( + RuntimeOrigin::signed(1), + Box::new(update.clone()) + )); + let block_root: H256 = update.finalized_header.clone().hash_tree_root().unwrap(); + assert!(>::contains_key(block_root)); + }); +} + +#[test] +fn submit_update_with_invalid_header_proof() { + let checkpoint = load_checkpoint_update_fixture(); + let mut update = load_sync_committee_update_fixture(); + let init_period = compute_period(checkpoint.header.slot); + let update_period = compute_period(update.finalized_header.slot); + assert_eq!(init_period, update_period); + update.finality_branch[0] = TEST_HASH.into(); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert!(!>::exists()); + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)), + Error::::InvalidHeaderMerkleProof + ); + }); +} + +#[test] +fn submit_update_with_invalid_block_roots_proof() { + let checkpoint = load_checkpoint_update_fixture(); + let mut update = load_sync_committee_update_fixture(); + let init_period = compute_period(checkpoint.header.slot); + let update_period = compute_period(update.finalized_header.slot); + assert_eq!(init_period, update_period); + update.block_roots_branch[0] = TEST_HASH.into(); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert!(!>::exists()); + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)), + Error::::InvalidBlockRootsRootMerkleProof + ); + }); +} + +#[test] +fn submit_update_with_invalid_next_sync_committee_proof() { + let checkpoint = load_checkpoint_update_fixture(); + let mut update = load_sync_committee_update_fixture(); + let init_period = compute_period(checkpoint.header.slot); + let update_period = compute_period(update.finalized_header.slot); + assert_eq!(init_period, update_period); + if let Some(ref mut next_sync_committee_update) = update.next_sync_committee_update { + next_sync_committee_update.next_sync_committee_branch[0] = TEST_HASH.into(); + } + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert!(!>::exists()); + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)), + Error::::InvalidSyncCommitteeMerkleProof + ); + }); +} + +#[test] +fn submit_update_with_skipped_period() { + let checkpoint = load_checkpoint_update_fixture(); + let sync_committee_update = load_sync_committee_update_fixture(); + let mut update = load_next_finalized_header_update_fixture(); + update.signature_slot += (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) as u64; + update.attested_header.slot = update.signature_slot - 1; + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit( + RuntimeOrigin::signed(1), + Box::new(sync_committee_update.clone()) + )); + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)), + Error::::SkippedSyncCommitteePeriod + ); + }); +} + +#[test] +fn 
submit_update_with_sync_committee_in_next_period() { + let checkpoint = load_checkpoint_update_fixture(); + let update = load_sync_committee_update_fixture(); + let next_update = load_next_sync_committee_update_fixture(); + let update_period = compute_period(update.finalized_header.slot); + let next_update_period = compute_period(next_update.finalized_header.slot); + assert_eq!(update_period + 1, next_update_period); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert!(!>::exists()); + assert_ok!(EthereumBeaconClient::submit( + RuntimeOrigin::signed(1), + Box::new(update.clone()) + )); + assert!(>::exists()); + assert_ok!(EthereumBeaconClient::submit( + RuntimeOrigin::signed(1), + Box::new(next_update.clone()) + )); + let last_finalized_state = + FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()).unwrap(); + let last_synced_period = compute_period(last_finalized_state.slot); + assert_eq!(last_synced_period, next_update_period); + }); +} + +#[test] +fn submit_update_with_sync_committee_invalid_signature_slot() { + let checkpoint = load_checkpoint_update_fixture(); + let mut update = load_sync_committee_update_fixture(); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + + // makes a invalid update with signature_slot should be more than attested_slot + update.signature_slot = update.attested_header.slot; + + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)), + Error::::InvalidUpdateSlot + ); + }); +} + +#[test] +fn submit_update_with_skipped_sync_committee_period() { + let checkpoint = load_checkpoint_update_fixture(); + let finalized_update = load_next_finalized_header_update_fixture(); + let checkpoint_period = compute_period(checkpoint.header.slot); + let next_sync_committee_period = compute_period(finalized_update.finalized_header.slot); + assert_eq!(checkpoint_period + 1, next_sync_committee_period); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(finalized_update)), + Error::::SkippedSyncCommitteePeriod + ); + }); +} + +#[test] +fn submit_update_execution_headers_too_far_behind() { + let checkpoint = load_checkpoint_update_fixture(); + let finalized_header_update = load_finalized_header_update_fixture(); + let execution_header_update = load_execution_header_update_fixture(); + let next_update = load_next_sync_committee_update_fixture(); + + new_tester().execute_with(|| { + let far_ahead_finalized_header_slot = finalized_header_update.finalized_header.slot + + (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * 2) as u64; + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit( + RuntimeOrigin::signed(1), + Box::new(finalized_header_update) + )); + assert_ok!(EthereumBeaconClient::submit_execution_header( + RuntimeOrigin::signed(1), + Box::new(execution_header_update) + )); + + let header_root: H256 = TEST_HASH.into(); + >::insert( + header_root, + CompactBeaconState { + slot: far_ahead_finalized_header_slot, + block_roots_root: Default::default(), + }, + ); + LatestFinalizedBlockRoot::::set(header_root); + + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(next_update)), + Error::::ExecutionHeaderTooFarBehind + ); + }); +} + +#[test] +fn submit_irrelevant_update() { + let 
checkpoint = load_checkpoint_update_fixture();
+	let mut update = load_next_finalized_header_update_fixture();
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+
+		// makes an invalid update where the attested_header slot value should be greater than the
+		// checkpoint slot value
+		update.finalized_header.slot = checkpoint.header.slot;
+		update.attested_header.slot = checkpoint.header.slot;
+		update.signature_slot = checkpoint.header.slot + 1;
+
+		assert_err!(
+			EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)),
+			Error::<Test>::IrrelevantUpdate
+		);
+	});
+}
+
+#[test]
+fn submit_update_with_missing_bootstrap() {
+	let update = load_next_finalized_header_update_fixture();
+
+	new_tester().execute_with(|| {
+		assert_err!(
+			EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)),
+			Error::<Test>::NotBootstrapped
+		);
+	});
+}
+
+#[test]
+fn submit_update_with_invalid_sync_committee_update() {
+	let checkpoint = load_checkpoint_update_fixture();
+	let update = load_sync_committee_update_fixture();
+	let mut next_update = load_next_sync_committee_update_fixture();
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+
+		assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)));
+
+		// makes update with invalid next_sync_committee
+		<FinalizedBeaconState<Test>>::mutate(<LatestFinalizedBlockRoot<Test>>::get(), |x| {
+			let prev = x.unwrap();
+			*x = Some(CompactBeaconState { slot: next_update.attested_header.slot, ..prev });
+		});
+		next_update.attested_header.slot += 1;
+		next_update.signature_slot = next_update.attested_header.slot + 1;
+		let next_sync_committee = NextSyncCommitteeUpdate::default();
+		next_update.next_sync_committee_update = Some(next_sync_committee);
+
+		assert_err!(
+			EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(next_update)),
+			Error::<Test>::InvalidSyncCommitteeUpdate
+		);
+	});
+}
+
+#[test]
+fn submit_execution_header_update() {
+	let checkpoint = load_checkpoint_update_fixture();
+	let finalized_header_update = load_finalized_header_update_fixture();
+	let execution_header_update = load_execution_header_update_fixture();
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(
+			RuntimeOrigin::signed(1),
+			Box::new(finalized_header_update)
+		));
+		assert_ok!(EthereumBeaconClient::submit_execution_header(
+			RuntimeOrigin::signed(1),
+			Box::new(execution_header_update.clone())
+		));
+		assert!(<ExecutionHeaders<Test>>::contains_key(
+			execution_header_update.execution_header.block_hash
+		));
+	});
+}
+
+#[test]
+fn submit_execution_header_update_invalid_ancestry_proof() {
+	let checkpoint = load_checkpoint_update_fixture();
+	let finalized_header_update = load_finalized_header_update_fixture();
+	let mut execution_header_update = load_execution_header_update_fixture();
+	if let Some(ref mut ancestry_proof) = execution_header_update.ancestry_proof {
+		ancestry_proof.header_branch[0] = TEST_HASH.into()
+	}
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(
+			RuntimeOrigin::signed(1),
+			Box::new(finalized_header_update)
+		));
+		assert_err!(
+			EthereumBeaconClient::submit_execution_header(
+				RuntimeOrigin::signed(1),
+				Box::new(execution_header_update)
+			),
+			Error::<Test>::InvalidAncestryMerkleProof
+		);
+	});
+}
+
+#[test]
+fn submit_execution_header_update_invalid_execution_header_proof() {
+	let checkpoint = load_checkpoint_update_fixture();
+	let finalized_header_update = load_finalized_header_update_fixture();
+	let mut execution_header_update = load_execution_header_update_fixture();
+	execution_header_update.execution_branch[0] = TEST_HASH.into();
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(
+			RuntimeOrigin::signed(1),
+			Box::new(finalized_header_update)
+		));
+		assert_err!(
+			EthereumBeaconClient::submit_execution_header(
+				RuntimeOrigin::signed(1),
+				Box::new(execution_header_update)
+			),
+			Error::<Test>::InvalidExecutionHeaderProof
+		);
+	});
+}
+
+#[test]
+fn submit_execution_header_update_that_skips_block() {
+	let checkpoint = load_checkpoint_update_fixture();
+	let finalized_header_update = load_finalized_header_update_fixture();
+	let execution_header_update = load_execution_header_update_fixture();
+	let mut skipped_block_execution_header_update = load_execution_header_update_fixture();
+	skipped_block_execution_header_update.execution_header.block_number =
+		execution_header_update.execution_header.block_number + 2;
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(
+			RuntimeOrigin::signed(1),
+			Box::new(finalized_header_update)
+		));
+		assert_ok!(EthereumBeaconClient::submit_execution_header(
+			RuntimeOrigin::signed(1),
+			Box::new(execution_header_update.clone())
+		));
+		assert!(<ExecutionHeaders<Test>>::contains_key(
+			execution_header_update.execution_header.block_hash
+		));
+		assert_err!(
+			EthereumBeaconClient::submit_execution_header(
+				RuntimeOrigin::signed(1),
+				Box::new(skipped_block_execution_header_update)
+			),
+			Error::<Test>::ExecutionHeaderSkippedBlock
+		);
+	});
+}
+
+#[test]
+fn submit_execution_header_update_that_is_also_finalized_header_which_is_not_stored() {
+	let checkpoint = load_checkpoint_update_fixture();
+	let finalized_header_update = load_finalized_header_update_fixture();
+	let mut execution_header_update = load_execution_header_update_fixture();
+	execution_header_update.ancestry_proof = None;
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(
+			RuntimeOrigin::signed(1),
+			Box::new(finalized_header_update)
+		));
+		assert_err!(
+			EthereumBeaconClient::submit_execution_header(
+				RuntimeOrigin::signed(1),
+				Box::new(execution_header_update)
+			),
+			Error::<Test>::ExpectedFinalizedHeaderNotStored
+		);
+	});
+}
+
+#[test]
+fn submit_execution_header_update_that_is_also_finalized_header_which_is_stored_but_slots_dont_match(
+) {
+	let checkpoint = load_checkpoint_update_fixture();
+	let finalized_header_update = load_finalized_header_update_fixture();
+	let mut execution_header_update = load_execution_header_update_fixture();
+	execution_header_update.ancestry_proof = None;
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(
+			RuntimeOrigin::signed(1),
+			Box::new(finalized_header_update)
+		));
+
+		let block_root: H256 = execution_header_update.header.hash_tree_root().unwrap();
+
+		<FinalizedBeaconState<Test>>::insert(
+			block_root,
+			CompactBeaconState {
+				slot: execution_header_update.header.slot + 1,
+				block_roots_root: Default::default(),
+			},
+		);
+		LatestFinalizedBlockRoot::<Test>::set(block_root);
+
+		assert_err!(
+			EthereumBeaconClient::submit_execution_header(
+				RuntimeOrigin::signed(1),
+				Box::new(execution_header_update)
+			),
+			Error::<Test>::ExpectedFinalizedHeaderNotStored
+		);
+	});
+}
+
+#[test]
+fn submit_execution_header_not_finalized() {
+	let checkpoint = load_checkpoint_update_fixture();
+	let finalized_header_update = load_finalized_header_update_fixture();
+	let update = load_execution_header_update_fixture();
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(
+			RuntimeOrigin::signed(1),
+			Box::new(finalized_header_update)
+		));
+
+		<FinalizedBeaconState<Test>>::mutate(<LatestFinalizedBlockRoot<Test>>::get(), |x| {
+			let prev = x.unwrap();
+			*x = Some(CompactBeaconState { slot: update.header.slot - 1, ..prev });
+		});
+
+		assert_err!(
+			EthereumBeaconClient::submit_execution_header(
+				RuntimeOrigin::signed(1),
+				Box::new(update)
+			),
+			Error::<Test>::HeaderNotFinalized
+		);
+	});
+}
+
+/* IMPLS */
+
+#[test]
+fn verify_message() {
+	let header = get_message_verification_header();
+	let (event_log, proof) = get_message_verification_payload();
+	let block_hash = proof.block_hash;
+
+	new_tester().execute_with(|| {
+		<ExecutionHeaders<Test>>::insert(block_hash, header);
+		assert_ok!(EthereumBeaconClient::verify(&event_log, &proof));
+	});
+}
+
+#[test]
+fn verify_message_missing_header() {
+	let (event_log, proof) = get_message_verification_payload();
+
+	new_tester().execute_with(|| {
+		assert_err!(
+			EthereumBeaconClient::verify(&event_log, &proof),
+			VerificationError::HeaderNotFound
+		);
+	});
+}
+
+#[test]
+fn verify_message_invalid_proof() {
+	let header = get_message_verification_header();
+	let (event_log, mut proof) = get_message_verification_payload();
+	proof.data.1[0] = TEST_HASH.into();
+	let block_hash = proof.block_hash;
+
+	new_tester().execute_with(|| {
+		<ExecutionHeaders<Test>>::insert(block_hash, header);
+		assert_err!(
+			EthereumBeaconClient::verify(&event_log, &proof),
+			VerificationError::InvalidProof
+		);
+	});
+}
+
+#[test]
+fn verify_message_invalid_receipts_root() {
+	let mut header = get_message_verification_header();
+	let (event_log, proof) = get_message_verification_payload();
+	let block_hash = proof.block_hash;
+	header.receipts_root = TEST_HASH.into();
+
+	new_tester().execute_with(|| {
+		<ExecutionHeaders<Test>>::insert(block_hash, header);
+		assert_err!(
+			EthereumBeaconClient::verify(&event_log, &proof),
+			VerificationError::InvalidProof
+		);
+	});
+}
+
+#[test]
+fn verify_message_invalid_log() {
+	let header = get_message_verification_header();
+	let (mut event_log, proof) = get_message_verification_payload();
+	let block_hash = proof.block_hash;
+	event_log.topics = vec![H256::zero(); 10];
+
+	new_tester().execute_with(|| {
+		<ExecutionHeaders<Test>>::insert(block_hash, header);
+		assert_err!(
+			EthereumBeaconClient::verify(&event_log, &proof),
+			VerificationError::InvalidLog
+		);
+	});
+}
+
+#[test]
+fn verify_message_receipt_does_not_contain_log() {
+	let header = get_message_verification_header();
+	let (mut event_log, proof) = get_message_verification_payload();
+	let block_hash = proof.block_hash;
+	event_log.data =
hex!("f9013c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000002b8c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000068000f000000000000000101d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec70100000101001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c0000e8890423c78a0000000000000000000000000000000000000000000000000000000000000000").to_vec(); + + new_tester().execute_with(|| { + >::insert(block_hash, header); + assert_err!( + EthereumBeaconClient::verify(&event_log, &proof), + VerificationError::LogNotFound + ); + }); +} + +#[test] +fn set_operating_mode() { + let checkpoint = load_checkpoint_update_fixture(); + let update = load_finalized_header_update_fixture(); + let execution_header_update = load_execution_header_update_fixture(); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + + assert_ok!(EthereumBeaconClient::set_operating_mode( + RuntimeOrigin::root(), + snowbridge_core::BasicOperatingMode::Halted + )); + + assert_noop!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)), + Error::::Halted + ); + + assert_noop!( + EthereumBeaconClient::submit_execution_header( + RuntimeOrigin::signed(1), + Box::new(execution_header_update) + ), + Error::::Halted + ); + }); +} + +#[test] +fn set_operating_mode_root_only() { + new_tester().execute_with(|| { + assert_noop!( + EthereumBeaconClient::set_operating_mode( + RuntimeOrigin::signed(1), + snowbridge_core::BasicOperatingMode::Halted + ), + DispatchError::BadOrigin + ); + }); +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/types.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..5dcefea9f80f4e201d8de633a7a323f530220a45 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/types.rs @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +pub use crate::config::{ + SLOTS_PER_HISTORICAL_ROOT, SYNC_COMMITTEE_BITS_SIZE as SC_BITS_SIZE, + SYNC_COMMITTEE_SIZE as SC_SIZE, +}; +use frame_support::storage::types::OptionQuery; +use snowbridge_core::RingBufferMapImpl; + +// Specialize types based on configured sync committee size +pub type SyncCommittee = primitives::SyncCommittee; +pub type SyncCommitteePrepared = primitives::SyncCommitteePrepared; +pub type SyncAggregate = primitives::SyncAggregate; +pub type CheckpointUpdate = primitives::CheckpointUpdate; +pub type Update = primitives::Update; +pub type NextSyncCommitteeUpdate = primitives::NextSyncCommitteeUpdate; + +pub use primitives::ExecutionHeaderUpdate; + +/// ExecutionHeader ring buffer implementation +pub type ExecutionHeaderBuffer = RingBufferMapImpl< + u32, + ::MaxExecutionHeadersToKeep, + crate::ExecutionHeaderIndex, + crate::ExecutionHeaderMapping, + crate::ExecutionHeaders, + OptionQuery, +>; + +/// FinalizedState ring buffer implementation +pub(crate) type FinalizedBeaconStateBuffer = RingBufferMapImpl< + u32, + crate::MaxFinalizedHeadersToKeep, + crate::FinalizedBeaconStateIndex, + crate::FinalizedBeaconStateMapping, + crate::FinalizedBeaconState, + OptionQuery, +>; diff --git 
a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/weights.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..69d3e809986b61bb54b5d98dedfd2d0b41053b14 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/weights.rs @@ -0,0 +1,68 @@ +//! Autogenerated weights for ethereum_beacon_client +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2022-09-27, STEPS: `10`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("/tmp/snowbridge/spec.json"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/snowbridge +// benchmark +// pallet +// --chain +// /tmp/snowbridge/spec.json +// --execution=wasm +// --pallet +// ethereum_beacon_client +// --extrinsic +// * +// --steps +// 10 +// --repeat +// 10 +// --output +// pallets/ethereum-beacon-client/src/weights.rs +// --template +// templates/module-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for ethereum_beacon_client. +pub trait WeightInfo { + fn force_checkpoint() -> Weight; + fn submit() -> Weight; + fn submit_with_sync_committee() -> Weight; + fn submit_execution_header() -> Weight; +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn force_checkpoint() -> Weight { + Weight::from_parts(97_263_571_000_u64, 0) + .saturating_add(Weight::from_parts(0, 3501)) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(9)) + } + fn submit() -> Weight { + Weight::from_parts(26_051_019_000_u64, 0) + .saturating_add(Weight::from_parts(0, 93857)) + .saturating_add(RocksDbWeight::get().reads(8)) + .saturating_add(RocksDbWeight::get().writes(4)) + } + fn submit_with_sync_committee() -> Weight { + Weight::from_parts(122_461_312_000_u64, 0) + .saturating_add(Weight::from_parts(0, 93857)) + .saturating_add(RocksDbWeight::get().reads(6)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + fn submit_execution_header() -> Weight { + Weight::from_parts(113_158_000_u64, 0) + .saturating_add(Weight::from_parts(0, 3537)) + .saturating_add(RocksDbWeight::get().reads(5)) + .saturating_add(RocksDbWeight::get().writes(4)) + } +} diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/execution-header-update.minimal.json b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/execution-header-update.minimal.json new file mode 100644 index 0000000000000000000000000000000000000000..3e17c14f4adbf38d4c57919bb91f9574bf515cd3 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/execution-header-update.minimal.json @@ -0,0 +1,43 @@ +{ + "header": { + "slot": 3622, + "proposer_index": 7, + "parent_root": "0x254c9215f6cce83e21b9776afb482181639602d3cb58cf99452a6a4a4f603930", + "state_root": "0xea98df6d30817d63f3e54ea118e2b1ba8675753c72dec1661c503d4eb43f9bdd", + "body_root": "0x765a0616a31d38e0ca2d10f6e8b234dd3d07e16aa929bcbc4de775c93f1972fd" + }, + "ancestry_proof": { + "header_branch": [ + "0x7690506882ac8c5f01d00f3ade06439259a3a0261ef5d61ec44920678b4104e6", + "0xf01aa0fdd7c9ef7b1affb7854fe8cbcc5c70643ee5b83e032faa702a0675a8cb", + 
"0x273a7b300b75ffa2c765af50680aa836299264f2107f38010278822313181801", + "0x30fe73a3bae6a31af32656ab759a4b67d27a213e01012b96cc4fedd0f2e77c75", + "0x7246cb3a35f13a1f0bbf907887985bb5382c45f2aa1699dbca48a0a82d5330af", + "0x5e7270e88a22dd4a905b2e76da2c8c358baeddd34de6c64a71bb1c80070ab717" + ], + "finalized_block_root": "0xa6fdc5df11c1759d11c9f0353a666715e5677e9ffd7d414e44cff0970553f1c9" + }, + "execution_header": { + "parent_hash": "0x6c9657f1267ad6040ea017ff6d02b55c4ba25cb092b8326d321dd98d01d1ee64", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x01f975f7cdff9b0a8844304aa59062fe18af0fef4636539312dfe20d238600ba", + "receipts_root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prev_randao": "0xcdfcab74bc26b3f4311afdc72d2d21d33a4b045187a01fa208a9d687a6d1d25c", + "block_number": 3622, + "gas_limit": 30000000, + "gas_used": 0, + "timestamp": 1685722543, + "extra_data": "0xd983010b02846765746888676f312e31392e358664617277696e", + "base_fee_per_gas": 7, + "block_hash": "0x38c80e0e26cb80730df627d32f50266bd0fe32fb12b7606300ad81aa2b4033db", + "transactions_root": "0x7ffe241ea60187fdb0187bfa22de35d1f9bed7ab061d9401fd47e34a54fbede1", + "withdrawals_root": "0x28ba1834a3a7b657460ce79fa3a1d909ab8828fd557659d4d0554a9bdbc0ec30" + }, + "execution_branch": [ + "0x005b8d55b34b4323bfd4773c28b09eb53bc87959e65411ccd23728c7e42d5ff2", + "0x336488033fe5f3ef4ccc12af07b9370b92e553e35ecb4a337a1b1c0e4afe1e0e", + "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0x7061330dada1ba1c602ba98f647a441885460ed0db00483fea1282385dfab84b" + ] +} \ No newline at end of file diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/finalized-header-update.minimal.json b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/finalized-header-update.minimal.json new file mode 100644 index 0000000000000000000000000000000000000000..c6473529b10c6d32398e55e11a2f71cbdc50b279 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/finalized-header-update.minimal.json @@ -0,0 +1,38 @@ +{ + "attested_header": { + "slot": 3640, + "proposer_index": 5, + "parent_root": "0xf062fcec9c3379a08e6add37a834b1e39af395fc343973e44957ecebbf2ecddd", + "state_root": "0xb1581cb62fe376e305e02f26463153f5dfb804d8df97ef40fc315c1bc30731ba", + "body_root": "0x98461abcc6d130b7bcb9430292c8a269ea9f01082685347e2968d892f716067c" + }, + "sync_aggregate": { + "sync_committee_bits": "0xffffffff", + "sync_committee_signature": "0x925c6e4b67890a7e28a7ca19853f88247e92014b9d233ac9058efd4f3827f0055db308debe17596e635b93727b5a851e1366ca801f30b03fdec722f45011504702a27646488b5ab5e3428fe7b4d4a50132f374612f66e45d68db27c568f96f08" + }, + "signature_slot": 3641, + "next_sync_committee_update": null, + "finalized_header": { + "slot": 3624, + "proposer_index": 7, + "parent_root": "0x7690506882ac8c5f01d00f3ade06439259a3a0261ef5d61ec44920678b4104e6", + "state_root": 
"0x3726ebb8d9973977a71a8389caf5fc5830eeb8cd4fdfbbc7b0c4e6ca3e6a4090", + "body_root": "0x0f9a3f0fa5a4ffaf7c10504c86f23e7d554366ffd069fe958a160b253c3fd409" + }, + "finality_branch": [ + "0xc501000000000000000000000000000000000000000000000000000000000000", + "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", + "0x83c3d5360d254f4a44be712c1f433e88e810b6d1e0e789e90bada9e36126b857", + "0x97245fa01a89a6d7b4542cd731fef699f58b2bbaabdd6f641334c9e9eeae3a20", + "0xc3d19c773f66ab94bc2106d5e75a3205398dd6e94b6f8a5716f347741eb9fc5a", + "0x9e5040e56d765c1add56779a716be7497be27cba37f866cd8d34418d55e48715" + ], + "block_roots_root": "0x29a54625749fa25f9e36df14a3baa335c58246bba2f8c7eb8b1ec2e4908e2fd0", + "block_roots_branch": [ + "0x53616f9298818a8423c98adc47c92aaf82f0c5c911dc4ee5f88ba6d3022341c1", + "0x5d2f1c4bce6f63f26cbe3fbf480281c04a6b14bea74350a88ee945354ecbd79d", + "0x8333eefc7eaa4d10091e2014b3aae2bf6bd2d10c22c67100e189f8ab6caab261", + "0x3edfa69130bc193dec47c27a5903f03d5262b75899b69c0e95ac1816a664a3e7", + "0x5e046000f85aede8d4c28140b27778488d4ad21b1e16e345055d07ee53f2711b" + ] +} \ No newline at end of file diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/initial-checkpoint.minimal.json b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/initial-checkpoint.minimal.json new file mode 100644 index 0000000000000000000000000000000000000000..a7e48f459019e39af7e6eb016b3547840b7c028d --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/initial-checkpoint.minimal.json @@ -0,0 +1,62 @@ +{ + "header": { + "slot": 3616, + "proposer_index": 7, + "parent_root": "0x6c5e8c7b32b7bfbb250fa8fd7bc348d7325fb2bfc869e4c506af6802fcad87f4", + "state_root": "0x3e467e3429a1ae36572fe3fe1c953381242e950254cf97c7527a8cea8aa6c9de", + "body_root": "0x7da749680d2b0b4f779047fcfe7d0c13d247f6d23478817fe9c6fbe07993adb2" + }, + "current_sync_committee": { + "pubkeys": [ + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + 
"0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c" + ], + "aggregate_pubkey": "0x8fe11476a05750c52618deb79918e2e674f56dfbf12dbce55ae4386d108e8a1e83c6326f5957e2ef19137582ce270dc6" + }, + "current_sync_committee_branch": [ + "0x46af3f54acbea439b63aa5bb699c8f25ff584b23912366788f7c8e95011ce324", + "0x41dcb71ec3b3940399118d28e09fdc58a8e33b818b8c5cbb933c59929504ca08", + "0xfa53febb29348e3493a50c0e7c6d35796bf69c54dfc6f42f7600612789d0ed6d", + "0x5e7ea1693066b604fc60d4657b43e7a4aafd3f4f54d9a740d2abe765e92d8385", + "0x16c9bca64a82e80c23817bfec345d088e0adc3865e392965c1244f97979f816a" + ], + "validators_root": "0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69", + "block_roots_root": "0x00f6bbdeac1e1a922a9bf0e78720c0bffe558d8195e8ede8cb72bbd295f242f2", + "block_roots_branch": [ + "0x7a61086fb9e53ab4dd87243d6288c51793696168a73773277630da5b20bf6091", + "0x60733905cdc5dd65d05161bb3138eecc47d6d6057ab36b0d36cf5a3200484143", + "0x86d7de634ae45de5b3cbbc562dd976de7d06a3d96f83147413536e6b108c7a39", + "0x0ada571c9e0da6fce8dd13e6d9ce173768521ac32e0af456634556176789fa6e", + "0x2341538fd0aafbc1ff0f513545e5dcd4b8905dc9e00d6173480c18a4e8086ebc" + ] +} \ No newline at end of file diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/next-finalized-header-update.minimal.json b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/next-finalized-header-update.minimal.json new file mode 100755 index 0000000000000000000000000000000000000000..8f1ddc827c1f45ade879bfd611bdeb1cb223d17a --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/next-finalized-header-update.minimal.json @@ -0,0 +1,38 @@ +{ + "attested_header": { + "slot": 3696, + "proposer_index": 1, + "parent_root": 
"0x04a63c5dfb726c31a32a72c1c426ff89e21363223d7096486b629f1d58abe5d8", + "state_root": "0xbe20e69420cbf9400224ec5edeb0843776a2ccf945e9a3ba9311ae812cad1e30", + "body_root": "0x1d2acd1748f1c58096d1edc8badd3a1d7e1dc3c33bcb9229e4c03f3a84efeadb" + }, + "sync_aggregate": { + "sync_committee_bits": "0xffffffff", + "sync_committee_signature": "0xafa79bc0f3c731ab1eb6aeafc582a7dd1c100ea471df3af6ff485b58661b3ef8077264dea0b60df9aec2d3ca8ddab6770fc9d061462e5a6dc718146085425f863d00921c42413805cb5b4c5175f36f2087cfed740bb7d57e8d5b48352643cd5b" + }, + "signature_slot": 3697, + "next_sync_committee_update": null, + "finalized_header": { + "slot": 3680, + "proposer_index": 7, + "parent_root": "0x4d8f4fc47ad3eb045bd20cae13af6df02f96a3f8d7c8a285190ba10cfe2b84cf", + "state_root": "0xd498766d77277fe16a6a4609ab3ac3a6e9887d162d8dfffdfc9cc4ae833e4127", + "body_root": "0x9ba73bc9a4907cac0b887550e2b01a63dcc70473753ffcc243d33394cc64b4c0" + }, + "finality_branch": [ + "0xcc01000000000000000000000000000000000000000000000000000000000000", + "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", + "0x142061c4bc3673bf774cb8c7b6085057bd0ca85672b43afa2d9581b0b6a44e54", + "0x48b8cd8ca9d9563e30c1cca2a854cd7f75eb4cb013d10809b3138a72d94ea0c5", + "0x9b39523d05013ac7cbb9f43e5d6f9dc033b12aa1d6d6edd994ddc4f5efe7be9d", + "0x066c9aa26107bc8cb28bc73e518da6cc865ec1d67516b6ca24663b6b7ae3cb21" + ], + "block_roots_root": "0xb15aa2483811d8c5616cb93710f4fcb809d97443caac9de163f943a30f385db6", + "block_roots_branch": [ + "0xf7a43ad317417daa4c2a1e93c54895895a824ef1e43320eb44eab16673da5a61", + "0xe4b8d640660f765c2ef4dc886025dc8e54c6e70b66192582f42837ed5e9d8d41", + "0x841f113dc81e76419b6cdec8b0cf2fc20f9381492ed3c79e9b49179b4d3eacbc", + "0xeb5fdc4d8b5282b653ecbc9caa93bcfe482f6d6a32cbb0d9eb011bef947579bb", + "0x1f328cc5640efb191ae6aa86223b1aa9d083b26ac3e1fa3c071327bb09dc5727" + ] +} \ No newline at end of file diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/next-sync-committee-update.minimal.json b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/next-sync-committee-update.minimal.json new file mode 100755 index 0000000000000000000000000000000000000000..8f1c8b9ce21cda6d7a10fb7973c4e628cd40b122 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/next-sync-committee-update.minimal.json @@ -0,0 +1,83 @@ +{ + "attested_header": { + "slot": 3664, + "proposer_index": 4, + "parent_root": "0x15ac23a0c16bfa81e8595621118040c3e6cbddd4b09bae6fb39ba5fefd0258e8", + "state_root": "0x6fb81aa3827e7d580bb05b4df2686c9a49508bde2f8342fd75be609a23dd8362", + "body_root": "0x9906a1ae8065d268f8acb7f1b3119408d2f7f8e6e0764370c16ea3d15134981f" + }, + "sync_aggregate": { + "sync_committee_bits": "0xffffffff", + "sync_committee_signature": "0xa9b5584ec9290a4ac6c5616639d031f9ab1064d63b4889f1da52f6f4d66b645fca48bbe2fe8484adb0c05c647edd694d0340cf684b8ccf8e34c6d8cf447cfcfdcb856f5abdcfd85ada5a4a04d4c8f6f40c6e99308893c3941485a436d6c8e5f7" + }, + "signature_slot": 3665, + "next_sync_committee_update": { + "next_sync_committee": { + "pubkeys": [ + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373" + ], + "aggregate_pubkey": "0x8fe11476a05750c52618deb79918e2e674f56dfbf12dbce55ae4386d108e8a1e83c6326f5957e2ef19137582ce270dc6" + }, + "next_sync_committee_branch": [ + "0x46af3f54acbea439b63aa5bb699c8f25ff584b23912366788f7c8e95011ce324", + "0x5b118fe110ee4a1b0cf9823bc189fb38eb55a7b49adbdafcf466ec7cd4b7fd68", + "0xc2f12fb91a61abedb47f62a98258960edca21f31494cdf59b47a1c721e3e98f8", + "0x16fdfd5e6b591b3140a76efa4593a9c4d105b9e5c62d6f44edbd24790657be50", + "0xc8175ab66690cc94c0a24452754addd62a06948de5db9814e813437a130de452" + ] + }, + "finalized_header": { + "slot": 3648, + "proposer_index": 1, + "parent_root": 
"0x991ee98a70e8f90bdd61d0f5554e53d37473e75e16af171f6d88f27d20223dae", + "state_root": "0x59b04d660ac772005a13a7dc1d5f99bb0d0292f3c422f04f7365198d70dd30de", + "body_root": "0x5151f035e146258e7327ad9cf1df13f8ddec7a7842c19993cf739358717b5565" + }, + "finality_branch": [ + "0xc801000000000000000000000000000000000000000000000000000000000000", + "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", + "0x142061c4bc3673bf774cb8c7b6085057bd0ca85672b43afa2d9581b0b6a44e54", + "0xc2f12fb91a61abedb47f62a98258960edca21f31494cdf59b47a1c721e3e98f8", + "0x16fdfd5e6b591b3140a76efa4593a9c4d105b9e5c62d6f44edbd24790657be50", + "0xc8175ab66690cc94c0a24452754addd62a06948de5db9814e813437a130de452" + ], + "block_roots_root": "0xe6e2adaaad45363d7112945ef670e21c66bcb3276dc450962ade1e8950230380", + "block_roots_branch": [ + "0x386ede102258966d4c23031c5a02de2af8180d475c4c1716b07fb5b9f142a817", + "0x35e6c89bc38d993a1957f8a9fb1fbeab7420688091ba2cd7ee7b19b7e187f7d6", + "0x99249309825cafef7e694c09c4fdf95eb4b1e8743d3b23f6959d9980ad2d69b0", + "0x5e028d1d905db6430f0ce4aafbc78f442047ec3a132b4e69557fdf804a4cfbf3", + "0xd34afeab37851937920243683a1c926c41c626aacb145718fce755782d4996dd" + ] +} \ No newline at end of file diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/sync-committee-update.minimal.json b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/sync-committee-update.minimal.json new file mode 100644 index 0000000000000000000000000000000000000000..a962a0c87c4c5eedee944bd85f54a754576935f1 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/tests/fixtures/sync-committee-update.minimal.json @@ -0,0 +1,83 @@ +{ + "attested_header": { + "slot": 3600, + "proposer_index": 7, + "parent_root": "0xdf60c2d58beccd89678b9267c689e9ba1cf1d58ce5114ad5c16e8341459cfd75", + "state_root": "0x023f14c7a38ef4d6ec19b522edfb427c6b70c6ffbd8610ca802dd1491c92c852", + "body_root": "0x0f78a1c45e42711efc5fb7b7f6238be1bee9273f7c44ff6892d815858bb77e25" + }, + "sync_aggregate": { + "sync_committee_bits": "0xffffffff", + "sync_committee_signature": "0xa4dd8f0991de88ca6f81476f72f48cdb67b9414ad7bf6bba37f627c5ec84dd2c2ebc12cddd5d2e7c927276cee2d3d144158b4c067db3e9911fe52fe1875b14c93f90e4eb57bf5e8f0e6e6effe22f9ba076f30207e0ec683354961ae8e9779556" + }, + "signature_slot": 3601, + "next_sync_committee_update": { + "next_sync_committee": { + "pubkeys": [ + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c" + ], + "aggregate_pubkey": "0x8fe11476a05750c52618deb79918e2e674f56dfbf12dbce55ae4386d108e8a1e83c6326f5957e2ef19137582ce270dc6" + }, + "next_sync_committee_branch": [ + "0x1446606d0129c324a4ea374bd29a625175e0659512cd8650097e0a9c38ce6379", + "0xd92466c7e9a53b7b55f4fdb151746a3058931d7559b7e84e7b15384ddc903ca0", + "0x9fd10c3f68b75cfd3ebd2af0d4e2cbbfbe120e0b5423dde89ff0f743c7a4f937", + "0x1ed6aac0ab29a883de2bb2e3579ad4d6807ddcf3db8afcaf0ae25a076ac9a5f4", + "0xf17a840df410a15f0e4e48abf521c29ad0d296d3fb4e8b847ea37f2cc8236f1f" + ] + }, + "finalized_header": { + "slot": 3584, + "proposer_index": 1, + "parent_root": "0x91c285af2ec25d485310391afe667108b787ec570cdbb0e3fd87b1e0e2c47bd7", + "state_root": "0xccc4baf90024e035f1252520d2f2ef1e50f840ff0ecc8e6e365721e083871a32", + "body_root": "0x91df5e0077434aad609aaa7e030005cee77cca83868ffc2724e5befe9a3f6a02" + }, + "finality_branch": [ + "0xc001000000000000000000000000000000000000000000000000000000000000", + "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", + "0x83c3d5360d254f4a44be712c1f433e88e810b6d1e0e789e90bada9e36126b857", + "0x9fd10c3f68b75cfd3ebd2af0d4e2cbbfbe120e0b5423dde89ff0f743c7a4f937", + "0x1ed6aac0ab29a883de2bb2e3579ad4d6807ddcf3db8afcaf0ae25a076ac9a5f4", + 
"0xf17a840df410a15f0e4e48abf521c29ad0d296d3fb4e8b847ea37f2cc8236f1f" + ], + "block_roots_root": "0x9eab8a05c396a29c32f4f8ac9654fc0fb7cd97ec659236392ede48951a794505", + "block_roots_branch": [ + "0x5c175efdbafacdfdab21c93a318b0e8e2291a5a86c40b1fc564f91ad33c106d4", + "0x5c1e0b76176ab033858b2835f90d5e25d708b563f77efd7d9938f0faa1c20878", + "0x7aea32464adee801e2a05c3af227f24231d3c088e3b7265a5fada9ac850549fe", + "0x9d9fca29e23c5d4ae433adf17e7fd9a0e4d1b09b68f5c45e7ca1b13ebe4a9e98", + "0x6b35238f188021c859d6b317457ebb6fe4cf362cab35c988010cb1343eabbfc5" + ] +} \ No newline at end of file diff --git a/bridges/snowbridge/parachain/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/parachain/pallets/inbound-queue/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f9e4d20be0fdbde414c24569f9e629de9611b051 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/inbound-queue/Cargo.toml @@ -0,0 +1,93 @@ +[package] +name = "snowbridge-inbound-queue" +description = "Snowbridge Inbound Queue" +version = "0.1.1" +edition = "2021" +authors = ["Snowfork "] +repository = "https://github.com/Snowfork/snowbridge" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.188", optional = true } +codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +hex-literal = { version = "0.4.1", optional = true } +log = { version = "0.4.20", default-features = false } +alloy-primitives = { version = "0.4.2", default-features = false, features = ["rlp"] } +alloy-sol-types = { version = "0.4.2", default-features = false } +alloy-rlp = { version = "0.3.3", default-features = false, features = ["derive"] } +num-traits = { version = "0.2.16", default-features = false } + +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } + +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } + +snowbridge-core = { path = "../../primitives/core", default-features = false } +snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } +snowbridge-router-primitives = { path = "../../primitives/router", default-features = false } +snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false, optional = true } + +[dev-dependencies] +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking" } +sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +snowbridge-beacon-primitives = { path = "../../primitives/beacon" } 
+snowbridge-ethereum-beacon-client = { path = "../../pallets/ethereum-beacon-client" } +hex-literal = { version = "0.4.1" } + +[features] +default = ["std"] +std = [ + "alloy-primitives/std", + "alloy-rlp/std", + "alloy-sol-types/std", + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "num-traits/std", + "pallet-balances/std", + "scale-info/std", + "serde", + "snowbridge-core/std", + "snowbridge-ethereum/std", + "snowbridge-router-primitives/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm-builder/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "hex-literal", + "pallet-balances/runtime-benchmarks", + "snowbridge-beacon-primitives", + "snowbridge-core/runtime-benchmarks", + "snowbridge-ethereum-beacon-client/runtime-benchmarks", + "snowbridge-router-primitives/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "snowbridge-ethereum-beacon-client/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/bridges/snowbridge/parachain/pallets/inbound-queue/src/benchmarking/fixtures.rs b/bridges/snowbridge/parachain/pallets/inbound-queue/src/benchmarking/fixtures.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f2382d072abd5c145b800f0126ea076f3549cce --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/inbound-queue/src/benchmarking/fixtures.rs @@ -0,0 +1,40 @@ +use hex_literal::hex; +use snowbridge_beacon_primitives::CompactExecutionHeader; +use snowbridge_core::inbound::{Log, Message, Proof}; +use sp_std::vec; + +pub struct InboundQueueTest { + pub execution_header: CompactExecutionHeader, + pub message: Message, +} + +pub fn make_create_message() -> InboundQueueTest { + InboundQueueTest{ + execution_header: CompactExecutionHeader{ + parent_hash: hex!("b5608f0af7c3b6fe5c593772fc25436b8d6549eb236adb0855c6ad33e0004e04").into(), + block_number: 115, + state_root: hex!("47ed174789836c622499d9659a4ac32c3b91a7b15642d39b0a11b82ff23995c1").into(), + receipts_root: hex!("42c08b5303fcdf9e49c833fe5f1182cdbc8206bf8aec581125fc34aba11e1f1a").into(), + }, + message: Message { + event_log: Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), + }, + proof: Proof { + block_hash: hex!("add15f439c8a57fe375d0a679870b1359921d70cb0e3e44f0dd3e272849f4097").into(), + tx_index: 0, + data: (vec![ + hex!("42c08b5303fcdf9e49c833fe5f1182cdbc8206bf8aec581125fc34aba11e1f1a").to_vec(), + ], vec![ + 
hex!("f9028e822080b9028802f90284018301ed20b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), + ]), + }, + }, + } +} diff --git a/bridges/snowbridge/parachain/pallets/inbound-queue/src/benchmarking/mod.rs b/bridges/snowbridge/parachain/pallets/inbound-queue/src/benchmarking/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..c10de9dff2ff0e288068274c1d8b4075a471cf25 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/inbound-queue/src/benchmarking/mod.rs @@ -0,0 +1,55 @@ +mod fixtures; + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use crate::Pallet as InboundQueue; +use frame_benchmarking::v2::*; +use frame_support::assert_ok; +use frame_system::RawOrigin; + +#[benchmarks] +mod benchmarks { + use super::*; + use crate::benchmarking::fixtures::make_create_message; + + #[benchmark] + fn submit() -> Result<(), BenchmarkError> { + let caller: T::AccountId = whitelisted_caller(); + + let create_message = make_create_message(); + + T::Helper::initialize_storage( + create_message.message.proof.block_hash, + create_message.execution_header, + ); + + let sovereign_account = sibling_sovereign_account::(1000u32.into()); + + let minimum_balance = T::Token::minimum_balance(); + + // So that the receiving account exists + assert_ok!(T::Token::mint_into(&caller, minimum_balance)); + // Fund the sovereign account (parachain sovereign account) so it can transfer a reward + // fee to the caller account + assert_ok!(T::Token::mint_into( + &sovereign_account, + 3_000_000_000_000u128 + .try_into() + .unwrap_or_else(|_| panic!("unable to cast sovereign account balance")), + )); + + #[block] + { + assert_ok!(InboundQueue::::submit( + RawOrigin::Signed(caller.clone()).into(), + create_message.message, + )); + } + + Ok(()) + } + + impl_benchmark_test_suite!(InboundQueue, crate::mock::new_tester(), crate::mock::Test); +} diff --git a/bridges/snowbridge/parachain/pallets/inbound-queue/src/envelope.rs b/bridges/snowbridge/parachain/pallets/inbound-queue/src/envelope.rs new file mode 100644 index 0000000000000000000000000000000000000000..826d535c2cb922610ba4811d607a9024de8d33ab --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/inbound-queue/src/envelope.rs @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: Apache-2.0 
+// SPDX-FileCopyrightText: 2023 Snowfork
+use snowbridge_core::{inbound::Log, ChannelId};
+
+use sp_core::{RuntimeDebug, H160, H256};
+use sp_std::{convert::TryFrom, prelude::*};
+
+use alloy_primitives::B256;
+use alloy_sol_types::{sol, SolEvent};
+
+sol! {
+	event OutboundMessageAccepted(bytes32 indexed channel_id, uint64 nonce, bytes32 indexed message_id, bytes payload);
+}
+
+/// An inbound message that has had its outer envelope decoded.
+#[derive(Clone, RuntimeDebug)]
+pub struct Envelope {
+	/// The address of the outbound queue on Ethereum that emitted this message as an event log
+	pub gateway: H160,
+	/// The message Channel
+	pub channel_id: ChannelId,
+	/// A nonce for enforcing replay protection and ordering.
+	pub nonce: u64,
+	/// An id for tracing the message on its route (has no role in bridge consensus)
+	pub message_id: H256,
+	/// The inner payload generated from the source application.
+	pub payload: Vec<u8>,
+}
+
+#[derive(Copy, Clone, RuntimeDebug)]
+pub struct EnvelopeDecodeError;
+
+impl TryFrom<&Log> for Envelope {
+	type Error = EnvelopeDecodeError;
+
+	fn try_from(log: &Log) -> Result<Self, Self::Error> {
+		let topics: Vec<B256> = log.topics.iter().map(|x| B256::from_slice(x.as_ref())).collect();
+
+		let event = OutboundMessageAccepted::decode_log(topics, &log.data, true)
+			.map_err(|_| EnvelopeDecodeError)?;
+
+		Ok(Self {
+			gateway: log.address,
+			channel_id: ChannelId::from(event.channel_id.as_ref()),
+			nonce: event.nonce,
+			message_id: H256::from(event.message_id.as_ref()),
+			payload: event.payload,
+		})
+	}
+}
diff --git a/bridges/snowbridge/parachain/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/parachain/pallets/inbound-queue/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..834e805fbef5ab95314376f6650bb8dbd4955ada
--- /dev/null
+++ b/bridges/snowbridge/parachain/pallets/inbound-queue/src/lib.rs
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork
+//! Inbound Queue
+//!
+//! # Overview
+//!
+//! Receives messages emitted by the Gateway contract on Ethereum, whereupon they are verified,
+//! translated to XCM, and finally sent to their final destination parachain.
+//!
+//! The message relayers are rewarded using native currency from the sovereign account of the
+//! destination parachain.
+//!
+//! # Extrinsics
+//!
+//! ## Governance
+//!
+//! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable
+//!   processing of inbound messages.
+//!
+//! ## Message Submission
+//!
+//! * [`Call::submit`]: Submit a message for verification and dispatch to the final destination
+//!   parachain.
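+//!
+//! # Message Processing (informal sketch)
+//!
+//! The snippet below is an editorial sketch of the order of operations inside [`Call::submit`],
+//! distilled from the implementation later in this file; it is not additional API surface.
+//! Error handling, nonce bookkeeping, relayer payment and event emission are elided, and
+//! `versioned_message` stands for the envelope payload decoded as `inbound::VersionedMessage`.
+//!
+//! ```ignore
+//! // 1. Verify the Ethereum event log against the beacon light-client proof.
+//! T::Verifier::verify(&message.event_log, &message.proof)
+//!     .map_err(|e| Error::<T>::Verification(e))?;
+//! // 2. Decode the outer envelope emitted by the Gateway contract and check its origin.
+//! let envelope =
+//!     Envelope::try_from(&message.event_log).map_err(|_| Error::<T>::InvalidEnvelope)?;
+//! ensure!(T::GatewayAddress::get() == envelope.gateway, Error::<T>::InvalidGateway);
+//! // 3. Look up the registered channel for the message (nonce check and relayer reward
+//! //    from the destination parachain's sovereign account happen here).
+//! let channel =
+//!     T::ChannelLookup::lookup(envelope.channel_id).ok_or(Error::<T>::InvalidChannel)?;
+//! // 4. Convert the payload to XCM and forward it to the destination parachain.
+//! let (xcm, fee) = Self::do_convert(envelope.message_id, versioned_message)?;
+//! let message_id = Self::send_xcm(xcm, channel.para_id)?;
+//! ```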
+#![cfg_attr(not(feature = "std"), no_std)] + +mod envelope; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +#[cfg(feature = "runtime-benchmarks")] +use snowbridge_beacon_primitives::CompactExecutionHeader; + +pub mod weights; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod test; + +use codec::{Decode, DecodeAll, Encode}; +use envelope::Envelope; +use frame_support::{ + traits::{ + fungible::{Inspect, Mutate}, + tokens::{Fortitude, Precision, Preservation}, + }, + weights::WeightToFee, + PalletError, +}; +use frame_system::ensure_signed; +use scale_info::TypeInfo; +use sp_core::{H160, H256}; +use sp_std::{convert::TryFrom, vec}; +use xcm::prelude::{ + send_xcm, Instruction::SetTopic, Junction::*, Junctions::*, MultiLocation, + SendError as XcmpSendError, SendXcm, Xcm, XcmHash, +}; + +use snowbridge_core::{ + inbound::{Message, VerificationError, Verifier}, + sibling_sovereign_account, BasicOperatingMode, Channel, ChannelId, ParaId, StaticLookup, +}; +use snowbridge_router_primitives::{ + inbound, + inbound::{ConvertMessage, ConvertMessageError}, +}; +use sp_runtime::traits::Saturating; + +pub use weights::WeightInfo; + +type BalanceOf = + <::Token as Inspect<::AccountId>>::Balance; + +pub use pallet::*; + +pub const LOG_TARGET: &str = "snowbridge-inbound-queue"; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use snowbridge_core::PricingParameters; + + #[pallet::pallet] + pub struct Pallet(_); + + #[cfg(feature = "runtime-benchmarks")] + pub trait BenchmarkHelper { + fn initialize_storage(block_hash: H256, header: CompactExecutionHeader); + } + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The verifier for inbound messages from Ethereum + type Verifier: Verifier; + + /// Message relayers are rewarded with this asset + type Token: Mutate + Inspect; + + /// XCM message sender + type XcmSender: SendXcm; + + // Address of the Gateway contract + #[pallet::constant] + type GatewayAddress: Get; + + /// Convert inbound message to XCM + type MessageConverter: ConvertMessage< + AccountId = Self::AccountId, + Balance = BalanceOf, + >; + + /// Lookup a channel descriptor + type ChannelLookup: StaticLookup; + + /// Lookup pricing parameters + type PricingParameters: Get>>; + + type WeightInfo: WeightInfo; + + #[cfg(feature = "runtime-benchmarks")] + type Helper: BenchmarkHelper; + + /// Convert a weight value into deductible balance type. + type WeightToFee: WeightToFee>; + + /// Convert a length value into deductible balance type + type LengthToFee: WeightToFee>; + + /// The upper limit here only used to estimate delivery cost + type MaxMessageSize: Get; + } + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A message was received from Ethereum + MessageReceived { + /// The message channel + channel_id: ChannelId, + /// The message nonce + nonce: u64, + /// ID of the XCM message which was forwarded to the final destination parachain + message_id: [u8; 32], + }, + /// Set OperatingMode + OperatingModeChanged { mode: BasicOperatingMode }, + } + + #[pallet::error] + pub enum Error { + /// Message came from an invalid outbound channel on the Ethereum side. + InvalidGateway, + /// Message has an invalid envelope. + InvalidEnvelope, + /// Message has an unexpected nonce. 
+ InvalidNonce, + /// Message has an invalid payload. + InvalidPayload, + /// Message channel is invalid + InvalidChannel, + /// The max nonce for the type has been reached + MaxNonceReached, + /// Cannot convert location + InvalidAccountConversion, + /// Pallet is halted + Halted, + /// Message verification error, + Verification(VerificationError), + /// XCMP send failure + Send(SendError), + /// Message conversion error + ConvertMessage(ConvertMessageError), + } + + #[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo, PalletError)] + pub enum SendError { + NotApplicable, + NotRoutable, + Transport, + DestinationUnsupported, + ExceedsMaxMessageSize, + MissingArgument, + Fees, + } + + impl From for Error { + fn from(e: XcmpSendError) -> Self { + match e { + XcmpSendError::NotApplicable => Error::::Send(SendError::NotApplicable), + XcmpSendError::Unroutable => Error::::Send(SendError::NotRoutable), + XcmpSendError::Transport(_) => Error::::Send(SendError::Transport), + XcmpSendError::DestinationUnsupported => + Error::::Send(SendError::DestinationUnsupported), + XcmpSendError::ExceedsMaxMessageSize => + Error::::Send(SendError::ExceedsMaxMessageSize), + XcmpSendError::MissingArgument => Error::::Send(SendError::MissingArgument), + XcmpSendError::Fees => Error::::Send(SendError::Fees), + } + } + } + + /// The current nonce for each channel + #[pallet::storage] + pub type Nonce = StorageMap<_, Twox64Concat, ChannelId, u64, ValueQuery>; + + /// The current operating mode of the pallet. + #[pallet::storage] + #[pallet::getter(fn operating_mode)] + pub type OperatingMode = StorageValue<_, BasicOperatingMode, ValueQuery>; + + #[pallet::call] + impl Pallet { + /// Submit an inbound message originating from the Gateway contract on Ethereum + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::submit())] + pub fn submit(origin: OriginFor, message: Message) -> DispatchResult { + let who = ensure_signed(origin)?; + ensure!(!Self::operating_mode().is_halted(), Error::::Halted); + + // submit message to verifier for verification + T::Verifier::verify(&message.event_log, &message.proof) + .map_err(|e| Error::::Verification(e))?; + + // Decode event log into an Envelope + let envelope = + Envelope::try_from(&message.event_log).map_err(|_| Error::::InvalidEnvelope)?; + + // Verify that the message was submitted from the known Gateway contract + ensure!(T::GatewayAddress::get() == envelope.gateway, Error::::InvalidGateway); + + // Retrieve the registered channel for this message + let channel = + T::ChannelLookup::lookup(envelope.channel_id).ok_or(Error::::InvalidChannel)?; + + // Verify message nonce + >::try_mutate(envelope.channel_id, |nonce| -> DispatchResult { + if *nonce == u64::MAX { + return Err(Error::::MaxNonceReached.into()) + } + if envelope.nonce != nonce.saturating_add(1) { + Err(Error::::InvalidNonce.into()) + } else { + *nonce = nonce.saturating_add(1); + Ok(()) + } + })?; + + // Reward relayer from the sovereign account of the destination parachain + // Expected to fail if sovereign account has no funds + let sovereign_account = sibling_sovereign_account::(channel.para_id); + let delivery_cost = Self::calculate_delivery_cost(message.encode().len() as u32); + T::Token::transfer(&sovereign_account, &who, delivery_cost, Preservation::Preserve)?; + + // Decode message into XCM + let (xcm, fee) = + match inbound::VersionedMessage::decode_all(&mut envelope.payload.as_ref()) { + Ok(message) => Self::do_convert(envelope.message_id, message)?, + Err(_) => return 
Err(Error::::InvalidPayload.into()), + }; + + // We embed fees for xcm execution inside the xcm program using teleports + // so we must burn the amount of the fee embedded into the XCM script. + T::Token::burn_from(&sovereign_account, fee, Precision::Exact, Fortitude::Polite)?; + + log::info!( + target: LOG_TARGET, + "💫 xcm {:?} sent with fee {:?}", + xcm, + fee + ); + + // Attempt to send XCM to a dest parachain + let message_id = Self::send_xcm(xcm, channel.para_id)?; + + Self::deposit_event(Event::MessageReceived { + channel_id: envelope.channel_id, + nonce: envelope.nonce, + message_id, + }); + + Ok(()) + } + + /// Halt or resume all pallet operations. May only be called by root. + #[pallet::call_index(1)] + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operating_mode( + origin: OriginFor, + mode: BasicOperatingMode, + ) -> DispatchResult { + ensure_root(origin)?; + OperatingMode::::set(mode); + Self::deposit_event(Event::OperatingModeChanged { mode }); + Ok(()) + } + } + + impl Pallet { + pub fn do_convert( + message_id: H256, + message: inbound::VersionedMessage, + ) -> Result<(Xcm<()>, BalanceOf), Error> { + let (mut xcm, fee) = + T::MessageConverter::convert(message).map_err(|e| Error::::ConvertMessage(e))?; + // Append the message id as an XCM topic + xcm.inner_mut().extend(vec![SetTopic(message_id.into())]); + Ok((xcm, fee)) + } + + pub fn send_xcm(xcm: Xcm<()>, dest: ParaId) -> Result> { + let dest = MultiLocation { parents: 1, interior: X1(Parachain(dest.into())) }; + let (xcm_hash, _) = send_xcm::(dest, xcm).map_err(Error::::from)?; + Ok(xcm_hash) + } + + pub fn calculate_delivery_cost(length: u32) -> BalanceOf { + let weight_fee = T::WeightToFee::weight_to_fee(&T::WeightInfo::submit()); + let len_fee = T::LengthToFee::weight_to_fee(&Weight::from_parts(length as u64, 0)); + weight_fee + .saturating_add(len_fee) + .saturating_add(T::PricingParameters::get().rewards.local) + } + } + + /// API for accessing the delivery cost of a message + impl Get> for Pallet { + fn get() -> BalanceOf { + // Cost here based on MaxMessagePayloadSize(the worst case) + Self::calculate_delivery_cost(T::MaxMessageSize::get()) + } + } +} diff --git a/bridges/snowbridge/parachain/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/parachain/pallets/inbound-queue/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..6b79a55e3c933304b720a42a97960df7de801dc9 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/inbound-queue/src/mock.rs @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use frame_support::{ + parameter_types, + traits::{ConstU128, ConstU32, Everything}, + weights::IdentityFee, +}; +use hex_literal::hex; +use snowbridge_beacon_primitives::{Fork, ForkVersions}; +use snowbridge_core::{ + gwei, + inbound::{Log, Proof, VerificationError}, + meth, Channel, ChannelId, PricingParameters, Rewards, StaticLookup, +}; +use snowbridge_router_primitives::inbound::MessageToXcm; +use sp_core::{H160, H256}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, + BuildStorage, FixedU128, MultiSignature, +}; +use sp_std::convert::From; +use xcm::v3::{prelude::*, MultiAssets, SendXcm}; + +use crate::{self as inbound_queue}; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system::{Pallet, Call, Storage, Event}, + Balances: pallet_balances::{Pallet, 
Call, Storage, Config, Event}, + EthereumBeaconClient: snowbridge_ethereum_beacon_client::{Pallet, Call, Storage, Event}, + InboundQueue: inbound_queue::{Pallet, Call, Storage, Event}, + } +); + +pub type Signature = MultiSignature; +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} + +type Balance = u128; + +impl frame_system::Config for Test { + type BaseCallFilter = Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + type Nonce = u64; + type Block = Block; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU128<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); + type MaxHolds = (); +} + +parameter_types! { + pub const ExecutionHeadersPruneThreshold: u32 = 10; + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 0, 1], // 0x00000001 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 0, 1], // 0x01000001 + epoch: 0, + }, + bellatrix: Fork { + version: [2, 0, 0, 1], // 0x02000001 + epoch: 0, + }, + capella: Fork { + version: [3, 0, 0, 1], // 0x03000001 + epoch: 0, + }, + }; +} + +impl snowbridge_ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); +} + +// Mock verifier +pub struct MockVerifier; + +impl Verifier for MockVerifier { + fn verify(_: &Log, _: &Proof) -> Result<(), VerificationError> { + Ok(()) + } +} + +const GATEWAY_ADDRESS: [u8; 20] = hex!["eda338e4dc46038493b885327842fd3e301cab39"]; + +parameter_types! { + pub const EthereumNetwork: xcm::v3::NetworkId = xcm::v3::NetworkId::Ethereum { chain_id: 11155111 }; + pub const GatewayAddress: H160 = H160(GATEWAY_ADDRESS); + pub const CreateAssetCall: [u8;2] = [53, 0]; + pub const CreateAssetExecutionFee: u128 = 2_000_000_000; + pub const CreateAssetDeposit: u128 = 100_000_000_000; + pub const SendTokenExecutionFee: u128 = 1_000_000_000; + pub const InitialFund: u128 = 1_000_000_000_000; + pub const InboundQueuePalletInstance: u8 = 80; +} + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelper for Test { + // not implemented since the MockVerifier is used for tests + fn initialize_storage(_: H256, _: CompactExecutionHeader) {} +} + +// Mock XCM sender that always succeeds +pub struct MockXcmSender; + +impl SendXcm for MockXcmSender { + type Ticket = Xcm<()>; + + fn validate( + dest: &mut Option, + xcm: &mut Option>, + ) -> SendResult { + match dest { + Some(MultiLocation { interior, .. 
}) => { + if let X1(Parachain(1001)) = interior { + return Err(XcmpSendError::NotApplicable) + } + Ok((xcm.clone().unwrap(), MultiAssets::default())) + }, + _ => Ok((xcm.clone().unwrap(), MultiAssets::default())), + } + } + + fn deliver(xcm: Self::Ticket) -> core::result::Result { + let hash = xcm.using_encoded(sp_io::hashing::blake2_256); + Ok(hash) + } +} + +parameter_types! { + pub const OwnParaId: ParaId = ParaId::new(1013); + pub Parameters: PricingParameters = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: gwei(20), + rewards: Rewards { local: DOT, remote: meth(1) } + }; +} + +pub const DOT: u128 = 10_000_000_000; + +pub struct MockChannelLookup; +impl StaticLookup for MockChannelLookup { + type Source = ChannelId; + type Target = Channel; + + fn lookup(channel_id: Self::Source) -> Option { + if channel_id != + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into() + { + return None + } + Some(Channel { agent_id: H256::zero(), para_id: ASSET_HUB_PARAID.into() }) + } +} + +impl inbound_queue::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Verifier = MockVerifier; + type Token = Balances; + type XcmSender = MockXcmSender; + type WeightInfo = (); + type GatewayAddress = GatewayAddress; + type MessageConverter = MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + >; + type PricingParameters = Parameters; + type ChannelLookup = MockChannelLookup; + #[cfg(feature = "runtime-benchmarks")] + type Helper = Test; + type WeightToFee = IdentityFee; + type LengthToFee = IdentityFee; + type MaxMessageSize = ConstU32<1024>; +} + +pub fn last_events(n: usize) -> Vec { + frame_system::Pallet::::events() + .into_iter() + .rev() + .take(n) + .rev() + .map(|e| e.event) + .collect() +} + +pub fn expect_events(e: Vec) { + assert_eq!(last_events(e.len()), e); +} + +pub fn setup() { + System::set_block_number(1); + Balances::mint_into( + &sibling_sovereign_account::(ASSET_HUB_PARAID.into()), + InitialFund::get(), + ) + .unwrap(); + Balances::mint_into( + &sibling_sovereign_account::(TEMPLATE_PARAID.into()), + InitialFund::get(), + ) + .unwrap(); +} + +pub fn new_tester() -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext: sp_io::TestExternalities = storage.into(); + ext.execute_with(setup); + ext +} + +// Generated from smoketests: +// cd smoketests +// ./make-bindings +// cargo test --test register_token -- --nocapture +pub fn mock_event_log() -> Log { + Log { + // gateway address + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + // channel id + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + // message id + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + // Nonce + Payload + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), + } +} + +pub fn mock_event_log_invalid_channel() -> Log { + Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + 
hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + // invalid channel id + hex!("0000000000000000000000000000000000000000000000000000000000000000").into(), + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000001e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d0000").into(), + } +} + +pub fn mock_event_log_invalid_gateway() -> Log { + Log { + // gateway address + address: H160::zero(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + // channel id + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + // message id + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + // Nonce + Payload + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000001e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d0000").into(), + } +} + +pub const ASSET_HUB_PARAID: u32 = 1000u32; +pub const TEMPLATE_PARAID: u32 = 1001u32; diff --git a/bridges/snowbridge/parachain/pallets/inbound-queue/src/test.rs b/bridges/snowbridge/parachain/pallets/inbound-queue/src/test.rs new file mode 100644 index 0000000000000000000000000000000000000000..6dc3ac4537450fc0a930620e173ee3dab59e51f0 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/inbound-queue/src/test.rs @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use frame_support::{assert_noop, assert_ok}; +use hex_literal::hex; +use snowbridge_core::{inbound::Proof, ChannelId}; +use sp_keyring::AccountKeyring as Keyring; +use sp_runtime::{DispatchError, TokenError}; +use sp_std::convert::From; + +use crate::{Error, Event as InboundQueueEvent}; + +use crate::mock::*; + +#[test] +fn test_submit_happy_path() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let channel_sovereign = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + + let origin = RuntimeOrigin::signed(relayer.clone()); + + // Submit message + let message = Message { + event_log: mock_event_log(), + proof: Proof { + block_hash: Default::default(), + tx_index: Default::default(), + data: Default::default(), + }, + }; + + let initial_fund = InitialFund::get(); + assert_eq!(Balances::balance(&relayer), 0); + assert_eq!(Balances::balance(&channel_sovereign), initial_fund); + + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + expect_events(vec![InboundQueueEvent::MessageReceived { + channel_id: hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539") + .into(), + nonce: 1, + message_id: [ + 27, 217, 88, 127, 46, 143, 199, 70, 236, 66, 212, 244, 85, 221, 153, 104, 175, 37, + 224, 20, 140, 95, 140, 7, 27, 74, 182, 199, 77, 12, 194, 236, + ], + } + .into()]); + + let delivery_cost = InboundQueue::calculate_delivery_cost(message.encode().len() as u32); + assert!( + Parameters::get().rewards.local < delivery_cost, + "delivery cost exceeds pure reward" + ); + + assert_eq!(Balances::balance(&relayer), delivery_cost, "relayer was rewarded"); + assert!( + Balances::balance(&channel_sovereign) <= initial_fund - delivery_cost, + "sovereign 
account paid reward" + ); + }); +} + +#[test] +fn test_submit_xcm_invalid_channel() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Deposit funds into sovereign account of parachain 1001 + let sovereign_account = sibling_sovereign_account::(TEMPLATE_PARAID.into()); + println!("account: {}", sovereign_account); + let _ = Balances::mint_into(&sovereign_account, 10000); + + // Submit message + let message = Message { + event_log: mock_event_log_invalid_channel(), + proof: Proof { + block_hash: Default::default(), + tx_index: Default::default(), + data: Default::default(), + }, + }; + assert_noop!( + InboundQueue::submit(origin.clone(), message.clone()), + Error::::InvalidChannel, + ); + }); +} + +#[test] +fn test_submit_with_invalid_gateway() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Deposit funds into sovereign account of Asset Hub (Statemint) + let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + let _ = Balances::mint_into(&sovereign_account, 10000); + + // Submit message + let message = Message { + event_log: mock_event_log_invalid_gateway(), + proof: Proof { + block_hash: Default::default(), + tx_index: Default::default(), + data: Default::default(), + }, + }; + assert_noop!( + InboundQueue::submit(origin.clone(), message.clone()), + Error::::InvalidGateway + ); + }); +} + +#[test] +fn test_submit_with_invalid_nonce() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Deposit funds into sovereign account of Asset Hub (Statemint) + let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + let _ = Balances::mint_into(&sovereign_account, 10000); + + // Submit message + let message = Message { + event_log: mock_event_log(), + proof: Proof { + block_hash: Default::default(), + tx_index: Default::default(), + data: Default::default(), + }, + }; + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + + let nonce: u64 = >::get(ChannelId::from(hex!( + "c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539" + ))); + assert_eq!(nonce, 1); + + // Submit the same again + assert_noop!( + InboundQueue::submit(origin.clone(), message.clone()), + Error::::InvalidNonce + ); + }); +} + +#[test] +fn test_submit_no_funds_to_reward_relayers() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Reset balance of sovereign_account to zero so to trigger the FundsUnavailable error + let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + Balances::set_balance(&sovereign_account, 0); + + // Submit message + let message = Message { + event_log: mock_event_log(), + proof: Proof { + block_hash: Default::default(), + tx_index: Default::default(), + data: Default::default(), + }, + }; + assert_noop!( + InboundQueue::submit(origin.clone(), message.clone()), + TokenError::FundsUnavailable + ); + }); +} + +#[test] +fn test_set_operating_mode() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + let message = Message { + event_log: mock_event_log(), + proof: Proof { + block_hash: Default::default(), + tx_index: Default::default(), + data: Default::default(), + }, + }; + + 
assert_ok!(InboundQueue::set_operating_mode( + RuntimeOrigin::root(), + snowbridge_core::BasicOperatingMode::Halted + )); + + assert_noop!(InboundQueue::submit(origin, message), Error::::Halted); + }); +} + +#[test] +fn test_set_operating_mode_root_only() { + new_tester().execute_with(|| { + assert_noop!( + InboundQueue::set_operating_mode( + RuntimeOrigin::signed(Keyring::Bob.into()), + snowbridge_core::BasicOperatingMode::Halted + ), + DispatchError::BadOrigin + ); + }); +} diff --git a/bridges/snowbridge/parachain/pallets/inbound-queue/src/weights.rs b/bridges/snowbridge/parachain/pallets/inbound-queue/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..c2c665f40d9e5e00ff452ad3e1151152b428bf07 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/inbound-queue/src/weights.rs @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Autogenerated weights for `snowbridge_inbound_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `macbook pro 14 m2`, CPU: `m2-arm64` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for ethereum_beacon_client. +pub trait WeightInfo { + fn submit() -> Weight; +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn submit() -> Weight { + Weight::from_parts(70_000_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(2)) + } +} diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/parachain/pallets/outbound-queue/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..66dd1d838e7dc291b997bb24ff5d395308c194e3 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/Cargo.toml @@ -0,0 +1,78 @@ +[package] +name = "snowbridge-outbound-queue" +description = "Snowbridge Outbound Queue" +version = "0.1.1" +edition = "2021" +authors = ["Snowfork "] +repository = "https://github.com/Snowfork/snowbridge" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.188", features = ["alloc", "derive"], default-features = false } +codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +hex-literal = { version = "0.4.1", optional = true } + +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false 
} +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } + +bridge-hub-common = { path = "../../../../../cumulus/parachains/runtimes/bridge-hubs/common", default-features = false } + +snowbridge-core = { path = "../../primitives/core", features = ["serde"], default-features = false } +snowbridge-outbound-queue-merkle-tree = { path = "merkle-tree", default-features = false } +ethabi = { git = "https://github.com/snowfork/ethabi-decode.git", package = "ethabi-decode", branch = "master", default-features = false } + +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } + +[dev-dependencies] +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +hex-literal = { version = "0.4.1" } + +[features] +default = ["std"] +std = [ + "bridge-hub-common/std", + "codec/std", + "ethabi/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "pallet-message-queue/std", + "scale-info/std", + "serde/std", + "snowbridge-core/std", + "snowbridge-outbound-queue-merkle-tree/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm/std", +] +runtime-benchmarks = [ + "bridge-hub-common/runtime-benchmarks", + "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "hex-literal", + "pallet-message-queue/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-message-queue/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/parachain/pallets/outbound-queue/merkle-tree/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a3432163622d4809838a6a1e678201ce8f5e8747 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "snowbridge-outbound-queue-merkle-tree" +description = "Snowbridge Outbound Queue Merkle Tree" +version = "0.1.1" +edition = "2021" +authors = ["Snowfork "] +repository = "https://github.com/Snowfork/snowbridge" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { version = "3.1.5", package = "parity-scale-codec", default-features = false, features = ["derive"] } +scale-info = { version = "2.7.0", default-features = false, features = ["derive"] } + +sp-core = { path = "../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../substrate/primitives/runtime", default-features = false } + +[dev-dependencies] +hex-literal = { version = "0.4.1" } +env_logger = "0.9" +hex = "0.4" +array-bytes = "4.1" + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", +] diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/merkle-tree/src/lib.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/merkle-tree/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d03eb578ef4d51f9505e63aa98e0a42b107a9958 --- /dev/null +++ 
b/bridges/snowbridge/parachain/pallets/outbound-queue/merkle-tree/src/lib.rs @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +// SPDX-FileCopyrightText: 2021-2022 Parity Technologies (UK) Ltd. +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +//! This crate implements a simple binary Merkle Tree utilities required for inter-op with Ethereum +//! bridge & Solidity contract. +//! +//! The implementation is optimised for usage within Substrate Runtime and supports no-std +//! compilation targets. +//! +//! Merkle Tree is constructed from arbitrary-length leaves, that are initially hashed using the +//! same `\[`Hasher`\]` as the inner nodes. +//! Inner nodes are created by concatenating child hashes and hashing again. The implementation +//! does not perform any sorting of the input data (leaves) nor when inner nodes are created. +//! +//! If the number of leaves is not even, last leaf (hash of) is promoted to the upper layer. + +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(not(feature = "std"))] +use alloc::vec; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_core::{RuntimeDebug, H256}; +use sp_runtime::traits::Hash; + +/// Construct a root hash of a Binary Merkle Tree created from given leaves. +/// +/// See crate-level docs for details about Merkle Tree construction. +/// +/// In case an empty list of leaves is passed the function returns a 0-filled hash. +pub fn merkle_root(leaves: I) -> H256 +where + H: Hash, + I: Iterator, +{ + merkelize::(leaves, &mut ()) +} + +fn merkelize(leaves: I, visitor: &mut V) -> H256 +where + H: Hash, + V: Visitor, + I: Iterator, +{ + let upper = Vec::with_capacity(leaves.size_hint().0); + let mut next = match merkelize_row::(leaves, upper, visitor) { + Ok(root) => return root, + Err(next) if next.is_empty() => return H256::default(), + Err(next) => next, + }; + + let mut upper = Vec::with_capacity((next.len() + 1) / 2); + loop { + visitor.move_up(); + + match merkelize_row::(next.drain(..), upper, visitor) { + Ok(root) => return root, + Err(t) => { + // swap collections to avoid allocations + upper = next; + next = t; + }, + }; + } +} + +/// A generated merkle proof. +/// +/// The structure contains all necessary data to later on verify the proof and the leaf itself. +#[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] +pub struct MerkleProof { + /// Root hash of generated merkle tree. + pub root: H256, + /// Proof items (does not contain the leaf hash, nor the root obviously). + /// + /// This vec contains all inner node hashes necessary to reconstruct the root hash given the + /// leaf hash. + pub proof: Vec, + /// Number of leaves in the original tree. + /// + /// This is needed to detect a case where we have an odd number of leaves that "get promoted" + /// to upper layers. + pub number_of_leaves: u64, + /// Index of the leaf the proof is for (0-based). + pub leaf_index: u64, + /// Leaf content (hashed). + pub leaf: H256, +} + +/// A trait of object inspecting merkle root creation. +/// +/// It can be passed to [`merkelize_row`] or [`merkelize`] functions and will be notified +/// about tree traversal. +trait Visitor { + /// We are moving one level up in the tree. + fn move_up(&mut self); + + /// We are creating an inner node from given `left` and `right` nodes. + /// + /// Note that in case of last odd node in the row `right` might be empty. 
+ /// The method will also visit the `root` hash (level 0). + /// + /// The `index` is an index of `left` item. + fn visit(&mut self, index: u64, left: &Option, right: &Option); +} + +/// No-op implementation of the visitor. +impl Visitor for () { + fn move_up(&mut self) {} + fn visit(&mut self, _index: u64, _left: &Option, _right: &Option) {} +} + +/// Construct a Merkle Proof for leaves given by indices. +/// +/// The function constructs a (partial) Merkle Tree first and stores all elements required +/// to prove the requested item (leaf) given the root hash. +/// +/// Both the Proof and the Root Hash are returned. +/// +/// # Panic +/// +/// The function will panic if given `leaf_index` is greater than the number of leaves. +pub fn merkle_proof(leaves: I, leaf_index: u64) -> MerkleProof +where + H: Hash, + I: Iterator, +{ + let mut leaf = None; + let mut hashes = vec![]; + let mut number_of_leaves = 0; + for (idx, l) in (0u64..).zip(leaves) { + // count the leaves + number_of_leaves = idx + 1; + hashes.push(l); + // find the leaf for the proof + if idx == leaf_index { + leaf = Some(l); + } + } + + /// The struct collects a proof for single leaf. + struct ProofCollection { + proof: Vec, + position: u64, + } + + impl ProofCollection { + fn new(position: u64) -> Self { + ProofCollection { proof: Default::default(), position } + } + } + + impl Visitor for ProofCollection { + fn move_up(&mut self) { + self.position /= 2; + } + + fn visit(&mut self, index: u64, left: &Option, right: &Option) { + // we are at left branch - right goes to the proof. + if self.position == index { + if let Some(right) = right { + self.proof.push(*right); + } + } + // we are at right branch - left goes to the proof. + if self.position == index + 1 { + if let Some(left) = left { + self.proof.push(*left); + } + } + } + } + + let mut collect_proof = ProofCollection::new(leaf_index); + + let root = merkelize::(hashes.into_iter(), &mut collect_proof); + let leaf = leaf.expect("Requested `leaf_index` is greater than number of leaves."); + + #[cfg(feature = "debug")] + log::debug!( + "[merkle_proof] Proof: {:?}", + collect_proof.proof.iter().map(hex::encode).collect::>() + ); + + MerkleProof { root, proof: collect_proof.proof, number_of_leaves, leaf_index, leaf } +} + +/// Leaf node for proof verification. +/// +/// Can be either a value that needs to be hashed first, +/// or the hash itself. +#[derive(Debug, PartialEq, Eq)] +pub enum Leaf<'a> { + /// Leaf content. + Value(&'a [u8]), + /// Hash of the leaf content. + Hash(H256), +} + +impl<'a, T: AsRef<[u8]>> From<&'a T> for Leaf<'a> { + fn from(v: &'a T) -> Self { + Leaf::Value(v.as_ref()) + } +} + +impl<'a> From for Leaf<'a> { + fn from(v: H256) -> Self { + Leaf::Hash(v) + } +} + +/// Verify Merkle Proof correctness versus given root hash. +/// +/// The proof is NOT expected to contain leaf hash as the first +/// element, but only all adjacent nodes required to eventually by process of +/// concatenating and hashing end up with given root hash. +/// +/// The proof must not contain the root hash. 
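A short usage sketch of the public API declared above (`merkle_root`, `merkle_proof`, `verify_proof`), in the same spirit as the unit tests further down this file. It assumes `Keccak256` as the hasher and uses arbitrary leaf values; treat it as an illustration, not part of the patch.

```rust
use snowbridge_outbound_queue_merkle_tree::{merkle_proof, merkle_root, verify_proof};
use sp_core::{keccak_256, H256};
use sp_runtime::traits::Keccak256;

fn roundtrip_sketch() {
    // Three arbitrary leaves; with an odd count the last hash is promoted to
    // the next layer, as described in the crate-level docs.
    let leaves: Vec<H256> = (0u64..3).map(|i| keccak_256(&i.to_le_bytes()).into()).collect();

    // Root over all leaves.
    let root = merkle_root::<Keccak256, _>(leaves.clone().into_iter());

    // Proof for leaf 1: the sibling hashes needed to recompute the root.
    let proof = merkle_proof::<Keccak256, _>(leaves.into_iter(), 1);
    assert_eq!(proof.root, root);

    // Verification needs only the root, the proof items, the leaf count,
    // the leaf index and the leaf (or its hash).
    assert!(verify_proof::<Keccak256, _, _>(
        &root,
        proof.proof,
        proof.number_of_leaves,
        proof.leaf_index,
        proof.leaf,
    ));
}
```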
+pub fn verify_proof<'a, H, P, L>(
+	root: &'a H256,
+	proof: P,
+	number_of_leaves: u64,
+	leaf_index: u64,
+	leaf: L,
+) -> bool
+where
+	H: Hash<Output = H256>,
+	P: IntoIterator<Item = H256>,
+	L: Into<Leaf<'a>>,
+{
+	if leaf_index >= number_of_leaves {
+		return false
+	}
+
+	let leaf_hash = match leaf.into() {
+		Leaf::Value(content) => <H as Hash>::hash(content),
+		Leaf::Hash(hash) => hash,
+	};
+
+	let hash_len = <H as sp_core::Hasher>::LENGTH;
+	let mut combined = [0_u8; 64];
+	let computed = proof.into_iter().fold(leaf_hash, |a, b| {
+		if a < b {
+			combined[..hash_len].copy_from_slice(a.as_ref());
+			combined[hash_len..].copy_from_slice(b.as_ref());
+		} else {
+			combined[..hash_len].copy_from_slice(b.as_ref());
+			combined[hash_len..].copy_from_slice(a.as_ref());
+		}
+		<H as Hash>::hash(&combined)
+	});
+
+	root == &computed
+}
+
+/// Processes a single row (layer) of a tree by taking pairs of elements,
+/// concatenating them, hashing and placing into resulting vector.
+///
+/// In case only one element is provided it is returned via `Ok` result, in any other case (also an
+/// empty iterator) an `Err` with the inner nodes of upper layer is returned.
+fn merkelize_row<H, V, I>(
+	mut iter: I,
+	mut next: Vec<H256>,
+	visitor: &mut V,
+) -> Result<H256, Vec<H256>>
+where
+	H: Hash<Output = H256>,
+	V: Visitor,
+	I: Iterator<Item = H256>,
+{
+	#[cfg(feature = "debug")]
+	log::debug!("[merkelize_row]");
+	next.clear();
+
+	let hash_len = <H as sp_core::Hasher>::LENGTH;
+	let mut index = 0;
+	let mut combined = vec![0_u8; hash_len * 2];
+	loop {
+		let a = iter.next();
+		let b = iter.next();
+		visitor.visit(index, &a, &b);
+
+		#[cfg(feature = "debug")]
+		log::debug!("  {:?}\n  {:?}", a.as_ref().map(hex::encode), b.as_ref().map(hex::encode));
+
+		index += 2;
+		match (a, b) {
+			(Some(a), Some(b)) => {
+				if a < b {
+					combined[..hash_len].copy_from_slice(a.as_ref());
+					combined[hash_len..].copy_from_slice(b.as_ref());
+				} else {
+					combined[..hash_len].copy_from_slice(b.as_ref());
+					combined[hash_len..].copy_from_slice(a.as_ref());
+				}
+
+				next.push(<H as Hash>::hash(&combined));
+			},
+			// Odd number of items. Promote the item to the upper layer.
+			(Some(a), None) if !next.is_empty() => {
+				next.push(a);
+			},
+			// Last item = root.
+			(Some(a), None) => return Ok(a),
+			// Finish up, no more items.
+ _ => { + #[cfg(feature = "debug")] + log::debug!( + "[merkelize_row] Next: {:?}", + next.iter().map(hex::encode).collect::>() + ); + return Err(next) + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + use sp_core::keccak_256; + use sp_runtime::traits::Keccak256; + + fn make_leaves(count: u64) -> Vec { + (0..count).map(|i| keccak_256(&i.to_le_bytes()).into()).collect() + } + + #[test] + fn should_generate_empty_root() { + // given + let _ = env_logger::try_init(); + let data = vec![]; + + // when + let out = merkle_root::(data.into_iter()); + + // then + assert_eq!( + hex::encode(out), + "0000000000000000000000000000000000000000000000000000000000000000" + ); + } + + #[test] + fn should_generate_single_root() { + // given + let _ = env_logger::try_init(); + let data = make_leaves(1); + + // when + let out = merkle_root::(data.into_iter()); + + // then + assert_eq!( + hex::encode(out), + "011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce" + ); + } + + #[test] + fn should_generate_root_pow_2() { + // given + let _ = env_logger::try_init(); + let data = make_leaves(2); + + // when + let out = merkle_root::(data.into_iter()); + + // then + assert_eq!( + hex::encode(out), + "e497bd1c13b13a60af56fa0d2703517c232fde213ad20d2c3dd60735c6604512" + ); + } + + #[test] + fn should_generate_root_complex() { + let _ = env_logger::try_init(); + let test = |root, data: Vec| { + assert_eq!( + array_bytes::bytes2hex("", merkle_root::(data.into_iter()).as_ref()), + root + ); + }; + + test("816cc37bd8d39f7b0851838ebc875faf2afe58a03e95aca3b1333b3693f39dd3", make_leaves(3)); + + test("7501ea976cb92f305cca65ab11254589ea28bb8b59d3161506350adaa237d22f", make_leaves(4)); + + test("d26ba4eb398747bdd39255b1fadb99b803ce39696021b3b0bff7301ac146ee4e", make_leaves(10)); + } + + #[test] + #[ignore] + fn should_generate_and_verify_proof() { + // given + let _ = env_logger::try_init(); + let data: Vec = make_leaves(3); + + // when + let proof0 = merkle_proof::(data.clone().into_iter(), 0); + assert!(verify_proof::( + &proof0.root, + proof0.proof.clone(), + data.len() as u64, + proof0.leaf_index, + &data[0], + )); + + let proof1 = merkle_proof::(data.clone().into_iter(), 1); + assert!(verify_proof::( + &proof1.root, + proof1.proof, + data.len() as u64, + proof1.leaf_index, + &proof1.leaf, + )); + + let proof2 = merkle_proof::(data.clone().into_iter(), 2); + assert!(verify_proof::( + &proof2.root, + proof2.proof, + data.len() as u64, + proof2.leaf_index, + &proof2.leaf + )); + + // then + assert_eq!(hex::encode(proof0.root), hex::encode(proof1.root)); + assert_eq!(hex::encode(proof2.root), hex::encode(proof1.root)); + + assert!(!verify_proof::( + &H256::from_slice(&hex!( + "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239" + )), + proof0.proof, + data.len() as u64, + proof0.leaf_index, + &proof0.leaf + )); + + assert!(!verify_proof::( + &proof0.root, + vec![], + data.len() as u64, + proof0.leaf_index, + &proof0.leaf + )); + } + + #[test] + #[should_panic] + fn should_panic_on_invalid_leaf_index() { + let _ = env_logger::try_init(); + merkle_proof::(make_leaves(1).into_iter(), 5); + } +} diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/parachain/pallets/outbound-queue/runtime-api/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..c92e725c60d5acd38dea2a88326862fed5863c13 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/runtime-api/Cargo.toml @@ -0,0 
+1,34 @@ +[package] +name = "snowbridge-outbound-queue-runtime-api" +description = "Snowbridge Outbound Queue Runtime API" +version = "0.1.0" +edition = "2021" +authors = ["Snowfork "] +repository = "https://github.com/Snowfork/snowbridge" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { version = "3.1.5", package = "parity-scale-codec", features = ["derive"], default-features = false } +sp-core = { path = "../../../../../../substrate/primitives/core", default-features = false } +sp-std = { path = "../../../../../../substrate/primitives/std", default-features = false } +sp-api = { path = "../../../../../../substrate/primitives/api", default-features = false } +frame-support = { path = "../../../../../../substrate/frame/support", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false } +snowbridge-outbound-queue-merkle-tree = { path = "../merkle-tree", default-features = false } +snowbridge-core = { path = "../../../primitives/core", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "snowbridge-core/std", + "snowbridge-outbound-queue-merkle-tree/std", + "sp-api/std", + "sp-core/std", + "sp-std/std", + "xcm/std", +] diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/runtime-api/src/lib.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/runtime-api/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..51f46a7b49c8838eddf44d9d3ba18f07b57c5dcd --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/runtime-api/src/lib.rs @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::traits::tokens::Balance as BalanceT; +use snowbridge_core::outbound::Message; +use snowbridge_outbound_queue_merkle_tree::MerkleProof; + +sp_api::decl_runtime_apis! { + pub trait OutboundQueueApi where Balance: BalanceT + { + /// Generate a merkle proof for a committed message identified by `leaf_index`. + /// The merkle root is stored in the block header as a + /// `\[`sp_runtime::generic::DigestItem::Other`\]` + fn prove_message(leaf_index: u64) -> Option; + + /// Calculate the delivery fee for `message` + fn calculate_fee(message: Message) -> Option; + } +} diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/api.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/api.rs new file mode 100644 index 0000000000000000000000000000000000000000..44d63f1e2d23f48f3d13d7834de5cde8d2c78dfc --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/api.rs @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Helpers for implementing runtime api + +use crate::{Config, MessageLeaves}; +use frame_support::storage::StorageStreamIter; +use snowbridge_core::outbound::{Message, SendMessage}; +use snowbridge_outbound_queue_merkle_tree::{merkle_proof, MerkleProof}; + +pub fn prove_message(leaf_index: u64) -> Option +where + T: Config, +{ + if !MessageLeaves::::exists() { + return None + } + let proof = + merkle_proof::<::Hashing, _>(MessageLeaves::::stream_iter(), leaf_index); + Some(proof) +} + +pub fn calculate_fee(message: Message) -> Option +where + T: Config, +{ + match crate::Pallet::::validate(&message) { + Ok((_, fees)) => Some(fees.total()), + _ => None, + } +} diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/benchmarking.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..ee5754e86962f807b9bd68d0a18560ad34c08cb2 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/benchmarking.rs @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use bridge_hub_common::AggregateMessageOrigin; +use codec::Encode; +use frame_benchmarking::v2::*; +use snowbridge_core::{ + outbound::{Command, Initializer}, + ChannelId, +}; +use sp_core::{H160, H256}; + +#[allow(unused_imports)] +use crate::Pallet as OutboundQueue; + +#[benchmarks( + where + ::MaxMessagePayloadSize: Get, +)] +mod benchmarks { + use super::*; + + /// Benchmark for processing a message. + #[benchmark] + fn do_process_message() -> Result<(), BenchmarkError> { + let enqueued_message = QueuedMessage { + id: H256::zero(), + channel_id: ChannelId::from([1; 32]), + command: Command::Upgrade { + impl_address: H160::zero(), + impl_code_hash: H256::zero(), + initializer: Some(Initializer { + params: [7u8; 256].into_iter().collect(), + maximum_required_gas: 200_000, + }), + }, + }; + let origin = AggregateMessageOrigin::Snowbridge([1; 32].into()); + let encoded_enqueued_message = enqueued_message.encode(); + + #[block] + { + let _ = OutboundQueue::::do_process_message(origin, &encoded_enqueued_message); + } + + assert_eq!(MessageLeaves::::decode_len().unwrap(), 1); + + Ok(()) + } + + /// Benchmark for producing final messages commitment + #[benchmark] + fn commit() -> Result<(), BenchmarkError> { + // Assume worst case, where `MaxMessagesPerBlock` messages need to be committed. + for i in 0..T::MaxMessagesPerBlock::get() { + let leaf_data: [u8; 1] = [i as u8]; + let leaf = ::Hashing::hash(&leaf_data); + MessageLeaves::::append(leaf); + } + + #[block] + { + OutboundQueue::::commit(); + } + + Ok(()) + } + + /// Benchmark for producing commitment for a single message + #[benchmark] + fn commit_single() -> Result<(), BenchmarkError> { + let leaf = ::Hashing::hash(&[100; 1]); + MessageLeaves::::append(leaf); + + #[block] + { + OutboundQueue::::commit(); + } + + Ok(()) + } + + impl_benchmark_test_suite!(OutboundQueue, crate::mock::new_tester(), crate::mock::Test,); +} diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/lib.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..201e524fb9120849ee3bce514deeabf4ae304a03 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/lib.rs @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Pallet for committing outbound messages for delivery to Ethereum
+//!
+//! # Overview
+//!
+//! Messages come either from sibling parachains via XCM, or BridgeHub itself
+//! via the `snowbridge-system` pallet:
+//!
+//! 1. `snowbridge_router_primitives::outbound::EthereumBlobExporter::deliver`
+//! 2. `snowbridge_system::Pallet::send`
+//!
+//! The message submission pipeline works like this:
+//! 1. The message is first validated via the implementation for
+//!    [`snowbridge_core::outbound::SendMessage::validate`]
+//! 2. The message is then enqueued for later processing via the implementation for
+//!    [`snowbridge_core::outbound::SendMessage::deliver`]
+//! 3. The underlying message queue is implemented by [`Config::MessageQueue`]
+//! 4. The message queue delivers messages back to this pallet via the implementation for
+//!    [`frame_support::traits::ProcessMessage::process_message`]
+//! 5. The message is processed in `Pallet::do_process_message`: a. Assigned a nonce b. ABI-encoded,
+//!    hashed, and stored in the `MessageLeaves` vector
+//! 6. At the end of the block, a merkle root is constructed from all the leaves in `MessageLeaves`.
+//! 7. This merkle root is inserted into the parachain header as a digest item
+//! 8. Offchain relayers are able to relay the message to Ethereum after: a. Generating a merkle
+//!    proof for the committed message using the `prove_message` runtime API b. Reading the actual
+//!    message content from the `Messages` vector in storage
+//!
+//! On the Ethereum side, the message root is ultimately the thing being
+//! verified by the Polkadot light client.
+//!
+//! # Message Priorities
+//!
+//! The processing of governance commands can never be halted. This effectively
+//! allows us to pause processing of normal user messages while still allowing
+//! governance commands to be sent to Ethereum.
+//!
+//! # Fees
+//!
+//! An upfront fee must be paid for delivering a message. This fee covers several
+//! components:
+//! 1. The weight of processing the message locally
+//! 2. The gas refund paid out to relayers for message submission
+//! 3. An additional reward paid out to relayers for message submission
+//!
+//! Messages are weighed to determine the maximum amount of gas they could
+//! consume on Ethereum. Using this upper bound, a final fee can be calculated.
+//!
+//! The fee calculation also requires the following parameters:
+//! * ETH/DOT exchange rate
+//! * Ether fee per unit of gas
+//!
+//! By design, it is expected that governance should manually update these
+//! parameters every few weeks using the `set_pricing_parameters` extrinsic in the
+//! system pallet.
+//!
+//! ## Fee Computation Function
+//!
+//! ```text
+//! LocalFee(Message) = WeightToFee(ProcessMessageWeight(Message))
+//! RemoteFee(Message) = MaxGasRequired(Message) * FeePerGas + Reward
+//! Fee(Message) = LocalFee(Message) + (RemoteFee(Message) / Ratio("ETH/DOT"))
+//! ```
+//!
+//! By design, the computed fee is always going to be conservative, to cover worst-case
+//! costs of dispatch on Ethereum. In future iterations of the design, we will optimize
+//! this, or provide a mechanism to asynchronously refund a portion of collected fees.
+//!
+//! # Extrinsics
+//!
+//! * [`Call::set_operating_mode`]: Set the operating mode
+//!
+//! # Runtime API
+//!
+//! * `prove_message`: Generate a merkle proof for a committed message
+//!
* `calculate_fee`: Calculate the delivery fee for a message +#![cfg_attr(not(feature = "std"), no_std)] +pub mod api; +pub mod process_message_impl; +pub mod send_message_impl; +pub mod types; +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod test; + +use bridge_hub_common::{AggregateMessageOrigin, CustomDigestItem}; +use codec::Decode; +use frame_support::{ + storage::StorageStreamIter, + traits::{tokens::Balance, Contains, Defensive, EnqueueMessage, Get, ProcessMessageError}, + weights::{Weight, WeightToFee}, +}; +use snowbridge_core::{ + outbound::{Fee, GasMeter, QueuedMessage, VersionedQueuedMessage, ETHER_DECIMALS}, + BasicOperatingMode, ChannelId, +}; +use snowbridge_outbound_queue_merkle_tree::merkle_root; +pub use snowbridge_outbound_queue_merkle_tree::MerkleProof; +use sp_core::{H256, U256}; +use sp_runtime::{ + traits::{CheckedDiv, Hash}, + DigestItem, +}; +use sp_std::prelude::*; +pub use types::{CommittedMessage, FeeConfigRecord, ProcessMessageOriginOf}; +pub use weights::WeightInfo; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use snowbridge_core::PricingParameters; + use sp_arithmetic::FixedU128; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + type Hashing: Hash; + + type MessageQueue: EnqueueMessage; + + /// Measures the maximum gas used to execute a command on Ethereum + type GasMeter: GasMeter; + + type Balance: Balance + From; + + /// Number of decimal places in native currency + #[pallet::constant] + type Decimals: Get; + + /// Max bytes in a message payload + #[pallet::constant] + type MaxMessagePayloadSize: Get; + + /// Max number of messages processed per block + #[pallet::constant] + type MaxMessagesPerBlock: Get; + + /// Check whether a channel exists + type Channels: Contains; + + type PricingParameters: Get>; + + /// Convert a weight value into a deductible fee based. + type WeightToFee: WeightToFee; + + /// Weight information for extrinsics in this pallet + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Message has been queued and will be processed in the future + MessageQueued { + /// ID of the message. Usually the XCM message hash or a SetTopic. + id: H256, + }, + /// Message will be committed at the end of current block. From now on, to track the + /// progress the message, use the `nonce` of `id`. + MessageAccepted { + /// ID of the message + id: H256, + /// The nonce assigned to this message + nonce: u64, + }, + /// Some messages have been committed + MessagesCommitted { + /// Merkle root of the committed messages + root: H256, + /// number of committed messages + count: u64, + }, + /// Set OperatingMode + OperatingModeChanged { + mode: BasicOperatingMode, + }, + FeeConfigChanged { + fee_config: FeeConfigRecord, + }, + } + + #[pallet::error] + pub enum Error { + /// The message is too large + MessageTooLarge, + /// The pallet is halted + Halted, + // Invalid fee config + InvalidFeeConfig, + /// Invalid Channel + InvalidChannel, + } + + /// Messages to be committed in the current block. This storage value is killed in + /// `on_initialize`, so should never go into block PoV. + /// + /// Is never read in the runtime, only by offchain message relayers. 
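The fee model from the module docs above can be illustrated with a standalone sketch. It deliberately uses plain `u128` arithmetic instead of the pallet's `U256`/`FixedU128` types, and every numeric input in the example is made up; it is not the pallet's `calculate_fee`, which appears further below.

```rust
/// Sketch of: RemoteFee = MaxGas * FeePerGas + Reward (in wei), then
/// Fee = LocalFee + RemoteFee / Ratio("ETH/DOT"), re-scaled to native decimals.
fn total_fee(
    local_fee: u128,           // WeightToFee(ProcessMessageWeight), native units
    max_gas: u128,             // upper bound reported by the GasMeter
    fee_per_gas_wei: u128,     // PricingParameters::fee_per_gas
    reward_wei: u128,          // PricingParameters::rewards.remote
    eth_per_dot: (u128, u128), // exchange rate as a fraction, e.g. (1, 400)
    native_decimals: u32,      // 10 for DOT, 12 for KSM (ether has 18)
) -> u128 {
    // Remote cost in wei.
    let remote_wei = max_gas * fee_per_gas_wei + reward_wei;
    // Divide by the ETH/DOT ratio: multiply by the denominator, divide by the numerator.
    let remote_scaled = remote_wei * eth_per_dot.1 / eth_per_dot.0;
    // Re-scale from 18 (wei) decimals down to the native token's decimals.
    let remote_native = remote_scaled / 10u128.pow(18 - native_decimals);
    local_fee + remote_native
}

fn main() {
    // Example inputs: 100_000 gas at 20 gwei, a 0.001 ether reward and
    // 1 DOT = 1/400 ether. The remote component works out to 1.2 DOT.
    let fee = total_fee(0, 100_000, 20_000_000_000, 1_000_000_000_000_000, (1, 400), 10);
    assert_eq!(fee, 12_000_000_000);
}
```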
+ /// + /// Inspired by the `frame_system::Pallet::Events` storage value + #[pallet::storage] + #[pallet::unbounded] + pub(super) type Messages = StorageValue<_, Vec, ValueQuery>; + + /// Hashes of the ABI-encoded messages in the [`Messages`] storage value. Used to generate a + /// merkle root during `on_finalize`. This storage value is killed in + /// `on_initialize`, so should never go into block PoV. + #[pallet::storage] + #[pallet::unbounded] + #[pallet::getter(fn message_leaves)] + pub(super) type MessageLeaves = StorageValue<_, Vec, ValueQuery>; + + /// The current nonce for each message origin + #[pallet::storage] + pub type Nonce = StorageMap<_, Twox64Concat, ChannelId, u64, ValueQuery>; + + /// The current operating mode of the pallet. + #[pallet::storage] + #[pallet::getter(fn operating_mode)] + pub type OperatingMode = StorageValue<_, BasicOperatingMode, ValueQuery>; + + #[pallet::hooks] + impl Hooks> for Pallet + where + T::AccountId: AsRef<[u8]>, + { + fn on_initialize(_: BlockNumberFor) -> Weight { + // Remove storage from previous block + Messages::::kill(); + MessageLeaves::::kill(); + // Reserve some weight for the `on_finalize` handler + T::WeightInfo::commit() + } + + fn on_finalize(_: BlockNumberFor) { + Self::commit(); + } + + fn integrity_test() { + let decimals = T::Decimals::get(); + assert!(decimals == 10 || decimals == 12, "Decimals should be 10 or 12"); + } + } + + #[pallet::call] + impl Pallet { + /// Halt or resume all pallet operations. May only be called by root. + #[pallet::call_index(0)] + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operating_mode( + origin: OriginFor, + mode: BasicOperatingMode, + ) -> DispatchResult { + ensure_root(origin)?; + OperatingMode::::put(mode); + Self::deposit_event(Event::OperatingModeChanged { mode }); + Ok(()) + } + } + + impl Pallet { + /// Generate a messages commitment and insert it into the header digest + pub(crate) fn commit() { + let count = MessageLeaves::::decode_len().unwrap_or_default() as u64; + if count == 0 { + return + } + + // Create merkle root of messages + let root = merkle_root::<::Hashing, _>(MessageLeaves::::stream_iter()); + + let digest_item: DigestItem = CustomDigestItem::Snowbridge(root).into(); + + // Insert merkle root into the header digest + >::deposit_log(digest_item); + + Self::deposit_event(Event::MessagesCommitted { root, count }); + } + + /// Process a message delivered by the MessageQueue pallet + pub(crate) fn do_process_message( + _: ProcessMessageOriginOf, + mut message: &[u8], + ) -> Result { + use ProcessMessageError::*; + + // Yield if the maximum number of messages has been processed this block. + // This ensures that the weight of `on_finalize` has a known maximum bound. 
+ ensure!( + MessageLeaves::::decode_len().unwrap_or(0) < + T::MaxMessagesPerBlock::get() as usize, + Yield + ); + + // Decode bytes into versioned message + let versioned_queued_message: VersionedQueuedMessage = + VersionedQueuedMessage::decode(&mut message).map_err(|_| Corrupt)?; + + // Convert versioned message into latest supported message version + let queued_message: QueuedMessage = + versioned_queued_message.try_into().map_err(|_| Unsupported)?; + + // Obtain next nonce + let nonce = >::try_mutate( + queued_message.channel_id, + |nonce| -> Result { + *nonce = nonce.checked_add(1).ok_or(Unsupported)?; + Ok(*nonce) + }, + )?; + + let pricing_params = T::PricingParameters::get(); + let command = queued_message.command.index(); + let params = queued_message.command.abi_encode(); + let max_dispatch_gas = + T::GasMeter::maximum_dispatch_gas_used_at_most(&queued_message.command); + let reward = pricing_params.rewards.remote; + + // Construct the final committed message + let message = CommittedMessage { + channel_id: queued_message.channel_id, + nonce, + command, + params, + max_dispatch_gas, + max_fee_per_gas: pricing_params + .fee_per_gas + .try_into() + .defensive_unwrap_or(u128::MAX), + reward: reward.try_into().defensive_unwrap_or(u128::MAX), + id: queued_message.id, + }; + + // ABI-encode and hash the prepared message + let message_abi_encoded = ethabi::encode(&[message.clone().into()]); + let message_abi_encoded_hash = ::Hashing::hash(&message_abi_encoded); + + Messages::::append(Box::new(message)); + MessageLeaves::::append(message_abi_encoded_hash); + + Self::deposit_event(Event::MessageAccepted { id: queued_message.id, nonce }); + + Ok(true) + } + + /// Calculate total fee in native currency to cover all costs of delivering a message to the + /// remote destination. See module-level documentation for more details. 
+ pub(crate) fn calculate_fee( + gas_used_at_most: u64, + params: PricingParameters, + ) -> Fee { + // Remote fee in ether + let fee = Self::calculate_remote_fee( + gas_used_at_most, + params.fee_per_gas, + params.rewards.remote, + ); + + // downcast to u128 + let fee: u128 = fee.try_into().defensive_unwrap_or(u128::MAX); + + // convert to local currency + let fee = FixedU128::from_inner(fee) + .checked_div(¶ms.exchange_rate) + .expect("exchange rate is not zero; qed") + .into_inner(); + + // adjust fixed point to match local currency + let fee = Self::convert_from_ether_decimals(fee); + + Fee::from((Self::calculate_local_fee(), fee)) + } + + /// Calculate fee in remote currency for dispatching a message on Ethereum + pub(crate) fn calculate_remote_fee( + gas_used_at_most: u64, + fee_per_gas: U256, + reward: U256, + ) -> U256 { + fee_per_gas.saturating_mul(gas_used_at_most.into()).saturating_add(reward) + } + + /// The local component of the message processing fees in native currency + pub(crate) fn calculate_local_fee() -> T::Balance { + T::WeightToFee::weight_to_fee( + &T::WeightInfo::do_process_message().saturating_add(T::WeightInfo::commit_single()), + ) + } + + // 1 DOT has 10 digits of precision + // 1 KSM has 12 digits of precision + // 1 ETH has 18 digits of precision + pub(crate) fn convert_from_ether_decimals(value: u128) -> T::Balance { + let decimals = ETHER_DECIMALS.saturating_sub(T::Decimals::get()) as u32; + let denom = 10u128.saturating_pow(decimals); + value.checked_div(denom).expect("divisor is non-zero; qed").into() + } + } +} diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..dd8fee4e2ed08ec0f3090b765fa882b063a98300 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/mock.rs @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use frame_support::{ + parameter_types, + traits::{Everything, Hooks}, + weights::IdentityFee, +}; + +use snowbridge_core::{ + gwei, meth, + outbound::*, + pricing::{PricingParameters, Rewards}, + ParaId, PRIMARY_GOVERNANCE_CHANNEL, +}; +use sp_core::{ConstU32, ConstU8, H160, H256}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup, Keccak256}, + AccountId32, BuildStorage, FixedU128, +}; +use sp_std::marker::PhantomData; + +type Block = frame_system::mocking::MockBlock; +type AccountId = AccountId32; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system::{Pallet, Call, Storage, Event}, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, + OutboundQueue: crate::{Pallet, Storage, Event}, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Test { + type BaseCallFilter = Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + type Nonce = u64; + type Block = Block; +} + +parameter_types! { + pub const HeapSize: u32 = 32 * 1024; + pub const MaxStale: u32 = 32; + pub static ServiceWeight: Option = Some(Weight::from_parts(100, 100)); +} + +impl pallet_message_queue::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type MessageProcessor = OutboundQueue; + type Size = u32; + type QueueChangeHandler = (); + type HeapSize = HeapSize; + type MaxStale = MaxStale; + type ServiceWeight = ServiceWeight; + type QueuePausedQuery = (); +} + +parameter_types! { + pub const OwnParaId: ParaId = ParaId::new(1013); + pub Parameters: PricingParameters = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: gwei(20), + rewards: Rewards { local: DOT, remote: meth(1) } + }; +} + +pub const DOT: u128 = 10_000_000_000; + +impl crate::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Hashing = Keccak256; + type MessageQueue = MessageQueue; + type Decimals = ConstU8<12>; + type MaxMessagePayloadSize = ConstU32<1024>; + type MaxMessagesPerBlock = ConstU32<20>; + type GasMeter = ConstantGasMeter; + type Balance = u128; + type PricingParameters = Parameters; + type Channels = Everything; + type WeightToFee = IdentityFee; + type WeightInfo = (); +} + +fn setup() { + System::set_block_number(1); +} + +pub fn new_tester() -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext: sp_io::TestExternalities = storage.into(); + ext.execute_with(setup); + ext +} + +pub fn run_to_end_of_next_block() { + // finish current block + MessageQueue::on_finalize(System::block_number()); + OutboundQueue::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + // start next block + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + OutboundQueue::on_initialize(System::block_number()); + MessageQueue::on_initialize(System::block_number()); + // finish next block + MessageQueue::on_finalize(System::block_number()); + OutboundQueue::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); +} + +pub fn mock_governance_message() -> Message +where + T: Config, +{ + let _marker = PhantomData::; // for clippy + + Message { + id: None, + channel_id: PRIMARY_GOVERNANCE_CHANNEL, + command: Command::Upgrade { + impl_address: H160::zero(), + impl_code_hash: H256::zero(), + initializer: None, + }, + } +} + +// Message should fail validation as it is too large +pub fn mock_invalid_governance_message() -> Message +where + T: Config, +{ + let _marker = PhantomData::; // for clippy + + Message { + id: None, + channel_id: PRIMARY_GOVERNANCE_CHANNEL, + command: Command::Upgrade { + 
impl_address: H160::zero(),
+            impl_code_hash: H256::zero(),
+            initializer: Some(Initializer {
+                params: (0..1000).map(|_| 1u8).collect::<Vec<u8>>(),
+                maximum_required_gas: 0,
+            }),
+        },
+    }
+}
+
+pub fn mock_message(sibling_para_id: u32) -> Message {
+    Message {
+        id: None,
+        channel_id: ParaId::from(sibling_para_id).into(),
+        command: Command::AgentExecute {
+            agent_id: Default::default(),
+            command: AgentExecuteCommand::TransferToken {
+                token: Default::default(),
+                recipient: Default::default(),
+                amount: 0,
+            },
+        },
+    }
+}
diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/process_message_impl.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/process_message_impl.rs
new file mode 100644
index 0000000000000000000000000000000000000000..575ed9e0e7c225a8be4b3ad09f67a26975f5a94a
--- /dev/null
+++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/process_message_impl.rs
@@ -0,0 +1,23 @@
+//! Implementation for [`frame_support::traits::ProcessMessage`]
+use super::*;
+use crate::weights::WeightInfo;
+use frame_support::{
+    traits::{ProcessMessage, ProcessMessageError},
+    weights::WeightMeter,
+};
+
+impl<T: Config> ProcessMessage for Pallet<T> {
+    type Origin = AggregateMessageOrigin;
+    fn process_message(
+        message: &[u8],
+        origin: Self::Origin,
+        meter: &mut WeightMeter,
+        _: &mut [u8; 32],
+    ) -> Result<bool, ProcessMessageError> {
+        let weight = T::WeightInfo::do_process_message();
+        if meter.try_consume(weight).is_err() {
+            return Err(ProcessMessageError::Overweight(weight))
+        }
+        Self::do_process_message(origin, message)
+    }
+}
diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/send_message_impl.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/send_message_impl.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a84e2c520e59000ab44ae6e160ba6071a263bf99
--- /dev/null
+++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/send_message_impl.rs
@@ -0,0 +1,98 @@
+//! Implementation for [`snowbridge_core::outbound::SendMessage`]
+use super::*;
+use bridge_hub_common::AggregateMessageOrigin;
+use codec::Encode;
+use frame_support::{
+    ensure,
+    traits::{EnqueueMessage, Get},
+    CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound,
+};
+use frame_system::unique;
+use snowbridge_core::{
+    outbound::{
+        Fee, Message, QueuedMessage, SendError, SendMessage, SendMessageFeeProvider,
+        VersionedQueuedMessage,
+    },
+    ChannelId, PRIMARY_GOVERNANCE_CHANNEL,
+};
+use sp_core::H256;
+use sp_runtime::BoundedVec;
+
+/// The maximal length of an enqueued message, as determined by the MessageQueue pallet
+pub type MaxEnqueuedMessageSizeOf<T> =
+    <<T as Config>::MessageQueue as EnqueueMessage<AggregateMessageOrigin>>::MaxMessageLen;
+
+#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound)]
+pub struct Ticket<T>
+where
+    T: Config,
+{
+    pub message_id: H256,
+    pub channel_id: ChannelId,
+    pub message: BoundedVec<u8, MaxEnqueuedMessageSizeOf<T>>,
+}
+
+impl<T> SendMessage for Pallet<T>
+where
+    T: Config,
+{
+    type Ticket = Ticket<T>;
+
+    fn validate(
+        message: &Message,
+    ) -> Result<(Self::Ticket, Fee<<Self as SendMessageFeeProvider>::Balance>), SendError> {
+        // The inner payload should not be too large
+        let payload = message.command.abi_encode();
+        ensure!(
+            payload.len() < T::MaxMessagePayloadSize::get() as usize,
+            SendError::MessageTooLarge
+        );
+
+        // Ensure there is a registered channel we can transmit this message on
+        ensure!(T::Channels::contains(&message.channel_id), SendError::InvalidChannel);
+
+        // Generate a unique message id unless one is provided
+        let message_id: H256 = message
+            .id
+            .unwrap_or_else(|| unique((message.channel_id, &message.command)).into());
+
+        let gas_used_at_most = T::GasMeter::maximum_gas_used_at_most(&message.command);
+        let fee = Self::calculate_fee(gas_used_at_most, T::PricingParameters::get());
+
+        let queued_message: VersionedQueuedMessage = QueuedMessage {
+            id: message_id,
+            channel_id: message.channel_id,
+            command: message.command.clone(),
+        }
+        .into();
+        // The whole message should not be too large
+        let encoded = queued_message.encode().try_into().map_err(|_| SendError::MessageTooLarge)?;
+
+        let ticket = Ticket { message_id, channel_id: message.channel_id, message: encoded };
+
+        Ok((ticket, fee))
+    }
+
+    fn deliver(ticket: Self::Ticket) -> Result<H256, SendError> {
+        let origin = AggregateMessageOrigin::Snowbridge(ticket.channel_id);
+
+        if ticket.channel_id != PRIMARY_GOVERNANCE_CHANNEL {
+            ensure!(!Self::operating_mode().is_halted(), SendError::Halted);
+        }
+
+        let message = ticket.message.as_bounded_slice();
+
+        T::MessageQueue::enqueue_message(message, origin);
+        Self::deposit_event(Event::MessageQueued { id: ticket.message_id });
+        Ok(ticket.message_id)
+    }
+}
+
+impl<T: Config> SendMessageFeeProvider for Pallet<T> {
+    type Balance = T::Balance;
+
+    /// The local component of the message processing fees in native currency
+    fn local_fee() -> Self::Balance {
+        Self::calculate_local_fee()
+    }
+}
diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/test.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/test.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0028d75e7b79eea5ea17947f52b07af32558610b
--- /dev/null
+++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/test.rs
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork
+use crate::{mock::*, *};
+
+use frame_support::{
+    assert_err, assert_noop, assert_ok,
+    traits::{Hooks, ProcessMessage, ProcessMessageError},
+    weights::WeightMeter,
+};
+
+use codec::Encode;
+use
snowbridge_core::{ + outbound::{Command, SendError, SendMessage}, + ParaId, +}; +use sp_arithmetic::FixedU128; +use sp_core::H256; +use sp_runtime::FixedPointNumber; + +#[test] +fn submit_messages_and_commit() { + new_tester().execute_with(|| { + for para_id in 1000..1004 { + let message = mock_message(para_id); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + assert_ok!(OutboundQueue::deliver(ticket)); + } + + ServiceWeight::set(Some(Weight::MAX)); + run_to_end_of_next_block(); + + for para_id in 1000..1004 { + let origin: ParaId = (para_id as u32).into(); + let channel_id: ChannelId = origin.into(); + assert_eq!(Nonce::::get(channel_id), 1); + } + + let digest = System::digest(); + let digest_items = digest.logs(); + assert!(digest_items.len() == 1 && digest_items[0].as_other().is_some()); + assert_eq!(Messages::::decode_len(), Some(4)); + }); +} + +#[test] +fn submit_message_fail_too_large() { + new_tester().execute_with(|| { + let message = mock_invalid_governance_message::(); + assert_err!(OutboundQueue::validate(&message), SendError::MessageTooLarge); + }); +} + +#[test] +fn convert_from_ether_decimals() { + assert_eq!( + OutboundQueue::convert_from_ether_decimals(1_000_000_000_000_000_000), + 1_000_000_000_000 + ); +} + +#[test] +fn commit_exits_early_if_no_processed_messages() { + new_tester().execute_with(|| { + // on_finalize should do nothing, nor should it panic + OutboundQueue::on_finalize(System::block_number()); + + let digest = System::digest(); + let digest_items = digest.logs(); + assert_eq!(digest_items.len(), 0); + }); +} + +#[test] +fn process_message_yields_on_max_messages_per_block() { + new_tester().execute_with(|| { + for _ in 0..::MaxMessagesPerBlock::get() { + MessageLeaves::::append(H256::zero()) + } + + let channel_id: ChannelId = ParaId::from(1000).into(); + let origin = AggregateMessageOrigin::Snowbridge(channel_id); + let message = QueuedMessage { + id: Default::default(), + channel_id, + command: Command::Upgrade { + impl_address: Default::default(), + impl_code_hash: Default::default(), + initializer: None, + }, + } + .encode(); + + let mut meter = WeightMeter::new(); + + assert_noop!( + OutboundQueue::process_message(message.as_slice(), origin, &mut meter, &mut [0u8; 32]), + ProcessMessageError::Yield + ); + }) +} + +#[test] +fn process_message_fails_on_max_nonce_reached() { + new_tester().execute_with(|| { + let sibling_id = 1000; + let channel_id: ChannelId = ParaId::from(sibling_id).into(); + let origin = AggregateMessageOrigin::Snowbridge(channel_id); + let message: QueuedMessage = QueuedMessage { + id: H256::zero(), + channel_id, + command: mock_message(sibling_id).command, + }; + let versioned_queued_message: VersionedQueuedMessage = message.try_into().unwrap(); + let encoded = versioned_queued_message.encode(); + let mut meter = WeightMeter::with_limit(Weight::MAX); + + Nonce::::set(channel_id, u64::MAX); + + assert_noop!( + OutboundQueue::process_message(encoded.as_slice(), origin, &mut meter, &mut [0u8; 32]), + ProcessMessageError::Unsupported + ); + }) +} + +#[test] +fn process_message_fails_on_overweight_message() { + new_tester().execute_with(|| { + let sibling_id = 1000; + let channel_id: ChannelId = ParaId::from(sibling_id).into(); + let origin = AggregateMessageOrigin::Snowbridge(channel_id); + let message: QueuedMessage = QueuedMessage { + id: H256::zero(), + channel_id, + command: mock_message(sibling_id).command, + }; + let versioned_queued_message: VersionedQueuedMessage = message.try_into().unwrap(); + let encoded = 
versioned_queued_message.encode(); + let mut meter = WeightMeter::with_limit(Weight::from_parts(1, 1)); + assert_noop!( + OutboundQueue::process_message(encoded.as_slice(), origin, &mut meter, &mut [0u8; 32]), + ProcessMessageError::Overweight(::WeightInfo::do_process_message()) + ); + }) +} + +// Governance messages should be able to bypass a halted operating mode +// Other message sends should fail when halted +#[test] +fn submit_upgrade_message_success_when_queue_halted() { + new_tester().execute_with(|| { + // halt the outbound queue + OutboundQueue::set_operating_mode(RuntimeOrigin::root(), BasicOperatingMode::Halted) + .unwrap(); + + // submit a high priority message from bridge_hub should success + let message = mock_governance_message::(); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + assert_ok!(OutboundQueue::deliver(ticket)); + + // submit a low priority message from asset_hub will fail as pallet is halted + let message = mock_message(1000); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + assert_noop!(OutboundQueue::deliver(ticket), SendError::Halted); + }); +} + +#[test] +fn governance_message_does_not_get_the_chance_to_processed_in_same_block_when_congest_of_low_priority_sibling_messages( +) { + use snowbridge_core::PRIMARY_GOVERNANCE_CHANNEL; + use AggregateMessageOrigin::*; + + let sibling_id: u32 = 1000; + let sibling_channel_id: ChannelId = ParaId::from(sibling_id).into(); + + new_tester().execute_with(|| { + // submit a lot of low priority messages from asset_hub which will need multiple blocks to + // execute(20 messages for each block so 40 required at least 2 blocks) + let max_messages = 40; + for _ in 0..max_messages { + // submit low priority message + let message = mock_message(sibling_id); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + OutboundQueue::deliver(ticket).unwrap(); + } + + let footprint = MessageQueue::footprint(Snowbridge(sibling_channel_id)); + assert_eq!(footprint.storage.count, (max_messages) as u64); + + let message = mock_governance_message::(); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + OutboundQueue::deliver(ticket).unwrap(); + + // move to next block + ServiceWeight::set(Some(Weight::MAX)); + run_to_end_of_next_block(); + + // first process 20 messages from sibling channel + let footprint = MessageQueue::footprint(Snowbridge(sibling_channel_id)); + assert_eq!(footprint.storage.count, 40 - 20); + + // and governance message does not have the chance to execute in same block + let footprint = MessageQueue::footprint(Snowbridge(PRIMARY_GOVERNANCE_CHANNEL)); + assert_eq!(footprint.storage.count, 1); + + // move to next block + ServiceWeight::set(Some(Weight::MAX)); + run_to_end_of_next_block(); + + // now governance message get executed in this block + let footprint = MessageQueue::footprint(Snowbridge(PRIMARY_GOVERNANCE_CHANNEL)); + assert_eq!(footprint.storage.count, 0); + + // and this time process 19 messages from sibling channel so we have 1 message left + let footprint = MessageQueue::footprint(Snowbridge(sibling_channel_id)); + assert_eq!(footprint.storage.count, 1); + + // move to the next block, the last 1 message from sibling channel get executed + ServiceWeight::set(Some(Weight::MAX)); + run_to_end_of_next_block(); + let footprint = MessageQueue::footprint(Snowbridge(sibling_channel_id)); + assert_eq!(footprint.storage.count, 0); + }); +} + +#[test] +fn convert_local_currency() { + new_tester().execute_with(|| { + let fee: u128 = 1_000_000; + let fee1 = 
FixedU128::from_inner(fee).into_inner(); + let fee2 = FixedU128::from(fee) + .into_inner() + .checked_div(FixedU128::accuracy()) + .expect("accuracy is not zero; qed"); + assert_eq!(fee, fee1); + assert_eq!(fee, fee2); + }); +} + +#[test] +fn encode_digest_item_with_correct_index() { + new_tester().execute_with(|| { + let digest_item: DigestItem = CustomDigestItem::Snowbridge(H256::default()).into(); + let enum_prefix = match digest_item { + DigestItem::Other(data) => data[0], + _ => u8::MAX, + }; + assert_eq!(enum_prefix, 0); + }); +} + +#[test] +fn encode_digest_item() { + new_tester().execute_with(|| { + let digest_item: DigestItem = CustomDigestItem::Snowbridge([5u8; 32].into()).into(); + let digest_item_raw = digest_item.encode(); + assert_eq!(digest_item_raw[0], 0); // DigestItem::Other + assert_eq!(digest_item_raw[2], 0); // CustomDigestItem::Snowbridge + assert_eq!( + digest_item_raw, + [ + 0, 132, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5 + ] + ); + }); +} diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/types.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..07803ed9b738bee40cd1a0981f4f75d1674596ef --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/types.rs @@ -0,0 +1,99 @@ +use codec::{Decode, Encode, MaxEncodedLen}; +use ethabi::Token; +use frame_support::traits::ProcessMessage; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; +use sp_arithmetic::FixedU128; +use sp_core::H256; +use sp_runtime::{traits::Zero, RuntimeDebug}; +use sp_std::prelude::*; + +use super::Pallet; + +use snowbridge_core::ChannelId; +pub use snowbridge_outbound_queue_merkle_tree::MerkleProof; + +pub type ProcessMessageOriginOf = as ProcessMessage>::Origin; + +pub const LOG_TARGET: &str = "snowbridge-outbound-queue"; + +/// Message which has been assigned a nonce and will be committed at the end of a block +#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)] +pub struct CommittedMessage { + /// Message channel + pub channel_id: ChannelId, + /// Unique nonce to prevent replaying messages + #[codec(compact)] + pub nonce: u64, + /// Command to execute in the Gateway contract + pub command: u8, + /// Params for the command + pub params: Vec, + /// Maximum gas allowed for message dispatch + #[codec(compact)] + pub max_dispatch_gas: u64, + /// Maximum fee per gas + #[codec(compact)] + pub max_fee_per_gas: u128, + /// Reward in ether for delivering this message, in addition to the gas refund + #[codec(compact)] + pub reward: u128, + /// Message ID (Used for tracing messages across route, has no role in consensus) + pub id: H256, +} + +/// Convert message into an ABI-encoded form for delivery to the InboundQueue contract on Ethereum +impl From for Token { + fn from(x: CommittedMessage) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(Vec::from(x.channel_id.as_ref())), + Token::Uint(x.nonce.into()), + Token::Uint(x.command.into()), + Token::Bytes(x.params.to_vec()), + Token::Uint(x.max_dispatch_gas.into()), + Token::Uint(x.max_fee_per_gas.into()), + Token::Uint(x.reward.into()), + Token::FixedBytes(Vec::from(x.id.as_ref())), + ]) + } +} + +/// Configuration for fee calculations +#[derive( + Encode, + Decode, + Copy, + Clone, + PartialEq, + RuntimeDebug, + MaxEncodedLen, + TypeInfo, + Serialize, + Deserialize, +)] +pub struct FeeConfigRecord { + /// ETH/DOT exchange rate + pub exchange_rate: 
FixedU128, + /// Ether fee per unit of gas + pub fee_per_gas: u128, + /// Ether reward for delivering message + pub reward: u128, +} + +#[derive(RuntimeDebug)] +pub struct InvalidFeeConfig; + +impl FeeConfigRecord { + pub fn validate(&self) -> Result<(), InvalidFeeConfig> { + if self.exchange_rate == FixedU128::zero() { + return Err(InvalidFeeConfig) + } + if self.fee_per_gas == 0 { + return Err(InvalidFeeConfig) + } + if self.reward == 0 { + return Err(InvalidFeeConfig) + } + Ok(()) + } +} diff --git a/bridges/snowbridge/parachain/pallets/outbound-queue/src/weights.rs b/bridges/snowbridge/parachain/pallets/outbound-queue/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4b6f8439b0f5b97924cdab3d87c8282f6ec7b9d --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/outbound-queue/src/weights.rs @@ -0,0 +1,81 @@ + +//! Autogenerated weights for `snowbridge_outbound_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-19, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `192.168.1.7`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --pallet=snowbridge_outbound_queue +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --template +// ../parachain/templates/module-weight-template.hbs +// --output +// ../parachain/pallets/outbound-queue/src/weights.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `snowbridge_outbound_queue`. +pub trait WeightInfo { + fn do_process_message() -> Weight; + fn commit() -> Weight; + fn commit_single() -> Weight; +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: EthereumOutboundQueue MessageLeaves (r:1 w:1) + /// Proof Skipped: EthereumOutboundQueue MessageLeaves (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: EthereumOutboundQueue PendingHighPriorityMessageCount (r:1 w:1) + /// Proof: EthereumOutboundQueue PendingHighPriorityMessageCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue Nonce (r:1 w:1) + /// Proof: EthereumOutboundQueue Nonce (max_values: None, max_size: Some(20), added: 2495, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue Messages (r:1 w:1) + /// Proof Skipped: EthereumOutboundQueue Messages (max_values: Some(1), max_size: None, mode: Measured) + fn do_process_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3485` + // Minimum execution time: 39_000_000 picoseconds. 
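// These benchmark results are also what drives the local fee: calculate_local_fee() charges
// WeightToFee::weight_to_fee(do_process_message() + commit_single()). Under the mock runtime's
// IdentityFee<u128>, for instance, that works out to roughly 39_000_000 + 9_000_000 units of
// ref_time; production runtimes are expected to plug in their own weight-to-fee polynomial.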
+ Weight::from_parts(39_000_000, 3485) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: EthereumOutboundQueue MessageLeaves (r:1 w:0) + /// Proof Skipped: EthereumOutboundQueue MessageLeaves (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: System Digest (r:1 w:1) + /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) + fn commit() -> Weight { + // Proof Size summary in bytes: + // Measured: `1094` + // Estimated: `2579` + // Minimum execution time: 28_000_000 picoseconds. + Weight::from_parts(28_000_000, 2579) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + + fn commit_single() -> Weight { + // Proof Size summary in bytes: + // Measured: `1094` + // Estimated: `2579` + // Minimum execution time: 9_000_000 picoseconds. + Weight::from_parts(9_000_000, 1586) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/bridges/snowbridge/parachain/pallets/system/Cargo.toml b/bridges/snowbridge/parachain/pallets/system/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4356bf5722056fd0fa13fc0f1d24f82bb458e260 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/Cargo.toml @@ -0,0 +1,83 @@ +[package] +name = "snowbridge-system" +description = "Snowbridge System" +version = "0.1.1" +authors = ["Snowfork "] +edition = "2021" +repository = "https://github.com/Snowfork/snowbridge" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +log = { version = "0.4.20", default-features = false } + +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } + +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } + +ethabi = { git = "https://github.com/Snowfork/ethabi-decode.git", package = "ethabi-decode", branch = "master", default-features = false } +snowbridge-core = { path = "../../primitives/core", default-features = false } + +[dev-dependencies] +hex = "0.4.1" +hex-literal = { version = "0.4.1" } +pallet-balances = { path = "../../../../../substrate/frame/balances" } +sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +polkadot-primitives = { path = "../../../../../polkadot/primitives" } +pallet-message-queue = { path = 
"../../../../../substrate/frame/message-queue" } +snowbridge-outbound-queue = { path = "../outbound-queue" } + +[features] +default = ["std"] +std = [ + "codec/std", + "ethabi/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "snowbridge-core/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "snowbridge-outbound-queue/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-message-queue/try-runtime", + "snowbridge-outbound-queue/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/bridges/snowbridge/parachain/pallets/system/README.md b/bridges/snowbridge/parachain/pallets/system/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e4dc55267d69c47fff971cb0427bcb2e0ff871c --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/README.md @@ -0,0 +1 @@ +License: MIT-0 diff --git a/bridges/snowbridge/parachain/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/parachain/pallets/system/runtime-api/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..97d0735bf63d6697feb2b74482156b4f6c3db3dd --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/runtime-api/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "snowbridge-system-runtime-api" +description = "Snowbridge System Runtime API" +version = "0.1.0" +edition = "2021" +authors = ["Snowfork "] +repository = "https://github.com/Snowfork/snowbridge" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +sp-core = { path = "../../../../../../substrate/primitives/core", default-features = false } +sp-std = { path = "../../../../../../substrate/primitives/std", default-features = false } +sp-api = { path = "../../../../../../substrate/primitives/api", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false } +snowbridge-core = { path = "../../../primitives/core", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "snowbridge-core/std", + "sp-api/std", + "sp-core/std", + "sp-std/std", + "xcm/std", +] diff --git a/bridges/snowbridge/parachain/pallets/system/runtime-api/src/lib.rs b/bridges/snowbridge/parachain/pallets/system/runtime-api/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d99b456c84885ca649c223663aecb4471880cfe5 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/runtime-api/src/lib.rs @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +#![cfg_attr(not(feature = "std"), no_std)] + +use snowbridge_core::AgentId; +use xcm::VersionedMultiLocation; + +sp_api::decl_runtime_apis! 
{ + pub trait ControlApi + { + fn agent_id(location: VersionedMultiLocation) -> Option; + } +} diff --git a/bridges/snowbridge/parachain/pallets/system/src/api.rs b/bridges/snowbridge/parachain/pallets/system/src/api.rs new file mode 100644 index 0000000000000000000000000000000000000000..245e6eea1c1467e75f5e56184808152157bdeefd --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/src/api.rs @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Helpers for implementing runtime api + +use snowbridge_core::AgentId; +use xcm::{prelude::*, VersionedMultiLocation}; + +use crate::{agent_id_of, Config}; + +pub fn agent_id(location: VersionedMultiLocation) -> Option +where + Runtime: Config, +{ + let location: MultiLocation = location.try_into().ok()?; + agent_id_of::(&location).ok() +} diff --git a/bridges/snowbridge/parachain/pallets/system/src/benchmarking.rs b/bridges/snowbridge/parachain/pallets/system/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..8d26408b38e5ecf9258558534f3da930e38f3fbd --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/src/benchmarking.rs @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Benchmarking setup for pallet-template +use super::*; + +#[allow(unused)] +use crate::Pallet as SnowbridgeControl; +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; +use snowbridge_core::{eth, outbound::OperatingMode}; +use sp_runtime::SaturatedConversion; +use xcm::prelude::*; + +#[allow(clippy::result_large_err)] +fn fund_sovereign_account(para_id: ParaId) -> Result<(), BenchmarkError> { + let amount: BalanceOf = (10_000_000_000_000_u64).saturated_into::().saturated_into(); + let sovereign_account = sibling_sovereign_account::(para_id); + T::Token::mint_into(&sovereign_account, amount)?; + Ok(()) +} + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn upgrade() -> Result<(), BenchmarkError> { + let impl_address = H160::repeat_byte(1); + let impl_code_hash = H256::repeat_byte(1); + + // Assume 256 bytes passed to initializer + let params: Vec = (0..256).map(|_| 1u8).collect(); + + #[extrinsic_call] + _( + RawOrigin::Root, + impl_address, + impl_code_hash, + Some(Initializer { params, maximum_required_gas: 100000 }), + ); + + Ok(()) + } + + #[benchmark] + fn set_operating_mode() -> Result<(), BenchmarkError> { + #[extrinsic_call] + _(RawOrigin::Root, OperatingMode::RejectingOutboundMessages); + + Ok(()) + } + + #[benchmark] + fn set_pricing_parameters() -> Result<(), BenchmarkError> { + let params = T::DefaultPricingParameters::get(); + + #[extrinsic_call] + _(RawOrigin::Root, params); + + Ok(()) + } + + #[benchmark] + fn create_agent() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let origin = T::Helper::make_xcm_origin(origin_location); + fund_sovereign_account::(origin_para_id.into())?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin); + + Ok(()) + } + + #[benchmark] + fn create_channel() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let origin = T::Helper::make_xcm_origin(origin_location); + fund_sovereign_account::(origin_para_id.into())?; + + SnowbridgeControl::::create_agent(origin.clone())?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, 
OperatingMode::Normal); + + Ok(()) + } + + #[benchmark] + fn update_channel() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let origin = T::Helper::make_xcm_origin(origin_location); + fund_sovereign_account::(origin_para_id.into())?; + SnowbridgeControl::::create_agent(origin.clone())?; + SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, OperatingMode::RejectingOutboundMessages); + + Ok(()) + } + + #[benchmark] + fn force_update_channel() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let origin = T::Helper::make_xcm_origin(origin_location); + let channel_id: ChannelId = ParaId::from(origin_para_id).into(); + + fund_sovereign_account::(origin_para_id.into())?; + SnowbridgeControl::::create_agent(origin.clone())?; + SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; + + #[extrinsic_call] + _(RawOrigin::Root, channel_id, OperatingMode::RejectingOutboundMessages); + + Ok(()) + } + + #[benchmark] + fn transfer_native_from_agent() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let origin = T::Helper::make_xcm_origin(origin_location); + fund_sovereign_account::(origin_para_id.into())?; + SnowbridgeControl::::create_agent(origin.clone())?; + SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, H160::default(), 1); + + Ok(()) + } + + #[benchmark] + fn force_transfer_native_from_agent() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let origin = T::Helper::make_xcm_origin(origin_location); + fund_sovereign_account::(origin_para_id.into())?; + SnowbridgeControl::::create_agent(origin.clone())?; + + let versioned_location: VersionedMultiLocation = origin_location.into(); + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(versioned_location), H160::default(), 1); + + Ok(()) + } + + #[benchmark] + fn set_token_transfer_fees() -> Result<(), BenchmarkError> { + #[extrinsic_call] + _(RawOrigin::Root, 1, 1, eth(1)); + + Ok(()) + } + + impl_benchmark_test_suite!( + SnowbridgeControl, + crate::mock::new_test_ext(true), + crate::mock::Test + ); +} diff --git a/bridges/snowbridge/parachain/pallets/system/src/lib.rs b/bridges/snowbridge/parachain/pallets/system/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..0042093ee662033318e68d353b1eebf8f268a458 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/src/lib.rs @@ -0,0 +1,681 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Governance API for controlling the Ethereum side of the bridge +//! +//! # Extrinsics +//! +//! ## Agents +//! +//! Agents are smart contracts on Ethereum that act as proxies for consensus systems on Polkadot +//! networks. +//! +//! * [`Call::create_agent`]: Create agent for a sibling parachain +//! * [`Call::transfer_native_from_agent`]: Withdraw ether from an agent +//! +//! The `create_agent` extrinsic should be called via an XCM `Transact` instruction from the sibling +//! parachain. +//! +//! ## Channels +//! +//! 
Each sibling parachain has its own dedicated messaging channel for sending and receiving +//! messages. As a prerequisite to creating a channel, the sibling should have already created +//! an agent using the `create_agent` extrinsic. +//! +//! * [`Call::create_channel`]: Create channel for a sibling +//! * [`Call::update_channel`]: Update a channel for a sibling +//! +//! ## Governance +//! +//! Only Polkadot governance itself can call these extrinsics. Delivery fees are waived. +//! +//! * [`Call::upgrade`]`: Upgrade the gateway contract +//! * [`Call::set_operating_mode`]: Update the operating mode of the gateway contract +//! * [`Call::force_update_channel`]: Allow root to update a channel for a sibling +//! * [`Call::force_transfer_native_from_agent`]: Allow root to withdraw ether from an agent +//! +//! Typically, Polkadot governance will use the `force_transfer_native_from_agent` and +//! `force_update_channel` and extrinsics to manage agents and channels for system parachains. +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +pub mod migration; + +pub mod api; +pub mod weights; +pub use weights::*; + +use frame_support::{ + pallet_prelude::*, + traits::{ + fungible::{Inspect, Mutate}, + tokens::Preservation, + Contains, EnsureOrigin, + }, +}; +use frame_system::pallet_prelude::*; +use snowbridge_core::{ + meth, + outbound::{Command, Initializer, Message, OperatingMode, SendError, SendMessage}, + sibling_sovereign_account, AgentId, Channel, ChannelId, ParaId, + PricingParameters as PricingParametersRecord, PRIMARY_GOVERNANCE_CHANNEL, + SECONDARY_GOVERNANCE_CHANNEL, +}; +use sp_core::{RuntimeDebug, H160, H256}; +use sp_io::hashing::blake2_256; +use sp_runtime::{traits::BadOrigin, DispatchError, SaturatedConversion}; +use sp_std::prelude::*; +use xcm::prelude::*; +use xcm_executor::traits::ConvertLocation; + +#[cfg(feature = "runtime-benchmarks")] +use frame_support::traits::OriginTrait; + +pub use pallet::*; + +pub type BalanceOf = + <::Token as Inspect<::AccountId>>::Balance; +pub type AccountIdOf = ::AccountId; +pub type PricingParametersOf = PricingParametersRecord>; + +/// Ensure origin location is a sibling +fn ensure_sibling(location: &MultiLocation) -> Result<(ParaId, H256), DispatchError> +where + T: Config, +{ + match location { + MultiLocation { parents: 1, interior: X1(Parachain(para_id)) } => { + let agent_id = agent_id_of::(location)?; + Ok(((*para_id).into(), agent_id)) + }, + _ => Err(BadOrigin.into()), + } +} + +/// Hash the location to produce an agent id +fn agent_id_of(location: &MultiLocation) -> Result { + T::AgentIdOf::convert_location(location).ok_or(Error::::LocationConversionFailed.into()) +} + +#[cfg(feature = "runtime-benchmarks")] +pub trait BenchmarkHelper +where + O: OriginTrait, +{ + fn make_xcm_origin(location: MultiLocation) -> O; +} + +/// Whether a fee should be withdrawn to an account for sending an outbound message +#[derive(Clone, PartialEq, RuntimeDebug)] +pub enum PaysFee +where + T: Config, +{ + /// Fully charge includes (local + remote fee) + Yes(AccountIdOf), + /// Partially charge includes local fee only + Partial(AccountIdOf), + /// No charge + No, +} + +#[frame_support::pallet] +pub mod pallet { + use snowbridge_core::StaticLookup; + use sp_core::U256; + + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + 
IsType<::RuntimeEvent>; + + /// Send messages to Ethereum + type OutboundQueue: SendMessage>; + + /// Origin check for XCM locations that can create agents + type SiblingOrigin: EnsureOrigin; + + /// Converts MultiLocation to AgentId + type AgentIdOf: ConvertLocation; + + /// Token reserved for control operations + type Token: Mutate; + + /// TreasuryAccount to collect fees + #[pallet::constant] + type TreasuryAccount: Get; + + /// Number of decimal places of local currency + type DefaultPricingParameters: Get>; + + /// Cost of delivering a message from Ethereum + type InboundDeliveryCost: Get>; + + type WeightInfo: WeightInfo; + + #[cfg(feature = "runtime-benchmarks")] + type Helper: BenchmarkHelper; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An Upgrade message was sent to the Gateway + Upgrade { + impl_address: H160, + impl_code_hash: H256, + initializer_params_hash: Option, + }, + /// An CreateAgent message was sent to the Gateway + CreateAgent { + location: Box, + agent_id: AgentId, + }, + /// An CreateChannel message was sent to the Gateway + CreateChannel { + channel_id: ChannelId, + agent_id: AgentId, + }, + /// An UpdateChannel message was sent to the Gateway + UpdateChannel { + channel_id: ChannelId, + mode: OperatingMode, + }, + /// An SetOperatingMode message was sent to the Gateway + SetOperatingMode { + mode: OperatingMode, + }, + /// An TransferNativeFromAgent message was sent to the Gateway + TransferNativeFromAgent { + agent_id: AgentId, + recipient: H160, + amount: u128, + }, + /// A SetTokenTransferFees message was sent to the Gateway + SetTokenTransferFees { + create_asset_xcm: u128, + transfer_asset_xcm: u128, + register_token: U256, + }, + PricingParametersChanged { + params: PricingParametersOf, + }, + } + + #[pallet::error] + pub enum Error { + LocationConversionFailed, + AgentAlreadyCreated, + NoAgent, + ChannelAlreadyCreated, + NoChannel, + UnsupportedLocationVersion, + InvalidLocation, + Send(SendError), + InvalidTokenTransferFees, + InvalidPricingParameters, + } + + /// The set of registered agents + #[pallet::storage] + #[pallet::getter(fn agents)] + pub type Agents = StorageMap<_, Twox64Concat, AgentId, (), OptionQuery>; + + /// The set of registered channels + #[pallet::storage] + #[pallet::getter(fn channels)] + pub type Channels = StorageMap<_, Twox64Concat, ChannelId, Channel, OptionQuery>; + + #[pallet::storage] + #[pallet::getter(fn parameters)] + pub type PricingParameters = + StorageValue<_, PricingParametersOf, ValueQuery, T::DefaultPricingParameters>; + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + // Own parachain id + pub para_id: ParaId, + // AssetHub's parachain id + pub asset_hub_para_id: ParaId, + #[serde(skip)] + pub _config: PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + Pallet::::initialize(self.para_id, self.asset_hub_para_id).expect("infallible; qed"); + } + } + + #[pallet::call] + impl Pallet { + /// Sends command to the Gateway contract to upgrade itself with a new implementation + /// contract + /// + /// Fee required: No + /// + /// - `origin`: Must be `Root`. + /// - `impl_address`: The address of the implementation contract. + /// - `impl_code_hash`: The codehash of the implementation contract. + /// - `initializer`: Optionally call an initializer on the implementation contract. 
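// As a rough sketch of how governance would dispatch this call (the pallet name follows the
// mock runtimes in this change and the gas figure mirrors the benchmark setup; both are only
// examples):
//
//   EthereumSystem::upgrade(
//       RuntimeOrigin::root(),
//       impl_address,
//       impl_code_hash,
//       Some(Initializer { params, maximum_required_gas: 100_000 }),
//   )?;
//
// The optional initializer's `maximum_required_gas` feeds the GasMeter estimate used when the
// resulting outbound message is priced.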
+ #[pallet::call_index(0)] + #[pallet::weight((T::WeightInfo::upgrade(), DispatchClass::Operational))] + pub fn upgrade( + origin: OriginFor, + impl_address: H160, + impl_code_hash: H256, + initializer: Option, + ) -> DispatchResult { + ensure_root(origin)?; + + let initializer_params_hash: Option = + initializer.as_ref().map(|i| H256::from(blake2_256(i.params.as_ref()))); + let command = Command::Upgrade { impl_address, impl_code_hash, initializer }; + Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; + + Self::deposit_event(Event::::Upgrade { + impl_address, + impl_code_hash, + initializer_params_hash, + }); + Ok(()) + } + + /// Sends a message to the Gateway contract to change its operating mode + /// + /// Fee required: No + /// + /// - `origin`: Must be `MultiLocation` + #[pallet::call_index(1)] + #[pallet::weight((T::WeightInfo::set_operating_mode(), DispatchClass::Operational))] + pub fn set_operating_mode(origin: OriginFor, mode: OperatingMode) -> DispatchResult { + ensure_root(origin)?; + + let command = Command::SetOperatingMode { mode }; + Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; + + Self::deposit_event(Event::::SetOperatingMode { mode }); + Ok(()) + } + + /// Set pricing parameters on both sides of the bridge + /// + /// Fee required: No + /// + /// - `origin`: Must be root + #[pallet::call_index(2)] + #[pallet::weight((T::WeightInfo::set_pricing_parameters(), DispatchClass::Operational))] + pub fn set_pricing_parameters( + origin: OriginFor, + params: PricingParametersOf, + ) -> DispatchResult { + ensure_root(origin)?; + params.validate().map_err(|_| Error::::InvalidPricingParameters)?; + PricingParameters::::put(params.clone()); + + let command = Command::SetPricingParameters { + exchange_rate: params.exchange_rate.into(), + delivery_cost: T::InboundDeliveryCost::get().saturated_into::(), + }; + Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; + + Self::deposit_event(Event::PricingParametersChanged { params }); + Ok(()) + } + + /// Sends a command to the Gateway contract to instantiate a new agent contract representing + /// `origin`. + /// + /// Fee required: Yes + /// + /// - `origin`: Must be `MultiLocation` of a sibling parachain + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::create_agent())] + pub fn create_agent(origin: OriginFor) -> DispatchResult { + let origin_location: MultiLocation = T::SiblingOrigin::ensure_origin(origin)?; + + // Ensure that origin location is some consensus system on a sibling parachain + let (para_id, agent_id) = ensure_sibling::(&origin_location)?; + + // Record the agent id or fail if it has already been created + ensure!(!Agents::::contains_key(agent_id), Error::::AgentAlreadyCreated); + Agents::::insert(agent_id, ()); + + let command = Command::CreateAgent { agent_id }; + let pays_fee = PaysFee::::Yes(sibling_sovereign_account::(para_id)); + Self::send(SECONDARY_GOVERNANCE_CHANNEL, command, pays_fee)?; + + Self::deposit_event(Event::::CreateAgent { + location: Box::new(origin_location), + agent_id, + }); + Ok(()) + } + + /// Sends a message to the Gateway contract to create a new channel representing `origin` + /// + /// Fee required: Yes + /// + /// This extrinsic is permissionless, so a fee is charged to prevent spamming and pay + /// for execution costs on the remote side. + /// + /// The message is sent over the bridge on BridgeHub's own channel to the Gateway. 
+ /// + /// - `origin`: Must be `MultiLocation` + /// - `mode`: Initial operating mode of the channel + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::create_channel())] + pub fn create_channel(origin: OriginFor, mode: OperatingMode) -> DispatchResult { + let origin_location: MultiLocation = T::SiblingOrigin::ensure_origin(origin)?; + + // Ensure that origin location is a sibling parachain + let (para_id, agent_id) = ensure_sibling::(&origin_location)?; + + let channel_id: ChannelId = para_id.into(); + + ensure!(Agents::::contains_key(agent_id), Error::::NoAgent); + ensure!(!Channels::::contains_key(channel_id), Error::::ChannelAlreadyCreated); + + let channel = Channel { agent_id, para_id }; + Channels::::insert(channel_id, channel); + + let command = Command::CreateChannel { channel_id, agent_id, mode }; + let pays_fee = PaysFee::::Yes(sibling_sovereign_account::(para_id)); + Self::send(SECONDARY_GOVERNANCE_CHANNEL, command, pays_fee)?; + + Self::deposit_event(Event::::CreateChannel { channel_id, agent_id }); + Ok(()) + } + + /// Sends a message to the Gateway contract to update a channel configuration + /// + /// The origin must already have a channel initialized, as this message is sent over it. + /// + /// A partial fee will be charged for local processing only. + /// + /// - `origin`: Must be `MultiLocation` + /// - `mode`: Initial operating mode of the channel + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::update_channel())] + pub fn update_channel(origin: OriginFor, mode: OperatingMode) -> DispatchResult { + let origin_location: MultiLocation = T::SiblingOrigin::ensure_origin(origin)?; + + // Ensure that origin location is a sibling parachain + let (para_id, _) = ensure_sibling::(&origin_location)?; + + let channel_id: ChannelId = para_id.into(); + + ensure!(Channels::::contains_key(channel_id), Error::::NoChannel); + + let command = Command::UpdateChannel { channel_id, mode }; + let pays_fee = PaysFee::::Partial(sibling_sovereign_account::(para_id)); + + // Parachains send the update message on their own channel + Self::send(channel_id, command, pays_fee)?; + + Self::deposit_event(Event::::UpdateChannel { channel_id, mode }); + Ok(()) + } + + /// Sends a message to the Gateway contract to update an arbitrary channel + /// + /// Fee required: No + /// + /// - `origin`: Must be root + /// - `channel_id`: ID of channel + /// - `mode`: Initial operating mode of the channel + /// - `outbound_fee`: Fee charged to users for sending outbound messages to Polkadot + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::force_update_channel())] + pub fn force_update_channel( + origin: OriginFor, + channel_id: ChannelId, + mode: OperatingMode, + ) -> DispatchResult { + ensure_root(origin)?; + + ensure!(Channels::::contains_key(channel_id), Error::::NoChannel); + + let command = Command::UpdateChannel { channel_id, mode }; + Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; + + Self::deposit_event(Event::::UpdateChannel { channel_id, mode }); + Ok(()) + } + + /// Sends a message to the Gateway contract to transfer ether from an agent to `recipient`. + /// + /// A partial fee will be charged for local processing only. 
+ /// + /// - `origin`: Must be `MultiLocation` + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::transfer_native_from_agent())] + pub fn transfer_native_from_agent( + origin: OriginFor, + recipient: H160, + amount: u128, + ) -> DispatchResult { + let origin_location: MultiLocation = T::SiblingOrigin::ensure_origin(origin)?; + + // Ensure that origin location is some consensus system on a sibling parachain + let (para_id, agent_id) = ensure_sibling::(&origin_location)?; + + // Since the origin is also the owner of the channel, they only need to pay + // the local processing fee. + let pays_fee = PaysFee::::Partial(sibling_sovereign_account::(para_id)); + + Self::do_transfer_native_from_agent( + agent_id, + para_id.into(), + recipient, + amount, + pays_fee, + ) + } + + /// Sends a message to the Gateway contract to transfer ether from an agent to `recipient`. + /// + /// Privileged. Can only be called by root. + /// + /// Fee required: No + /// + /// - `origin`: Must be root + /// - `location`: Location used to resolve the agent + /// - `recipient`: Recipient of funds + /// - `amount`: Amount to transfer + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::force_transfer_native_from_agent())] + pub fn force_transfer_native_from_agent( + origin: OriginFor, + location: Box, + recipient: H160, + amount: u128, + ) -> DispatchResult { + ensure_root(origin)?; + + // Ensure that location is some consensus system on a sibling parachain + let location: MultiLocation = + (*location).try_into().map_err(|_| Error::::UnsupportedLocationVersion)?; + let (_, agent_id) = + ensure_sibling::(&location).map_err(|_| Error::::InvalidLocation)?; + + let pays_fee = PaysFee::::No; + + Self::do_transfer_native_from_agent( + agent_id, + PRIMARY_GOVERNANCE_CHANNEL, + recipient, + amount, + pays_fee, + ) + } + + /// Sends a message to the Gateway contract to update fee related parameters for + /// token transfers. + /// + /// Privileged. Can only be called by root. + /// + /// Fee required: No + /// + /// - `origin`: Must be root + /// - `create_asset_xcm`: The XCM execution cost for creating a new asset class on AssetHub, + /// in DOT + /// - `transfer_asset_xcm`: The XCM execution cost for performing a reserve transfer on + /// AssetHub, in DOT + /// - `register_token`: The Ether fee for registering a new token, to discourage spamming + #[pallet::call_index(9)] + #[pallet::weight((T::WeightInfo::set_token_transfer_fees(), DispatchClass::Operational))] + pub fn set_token_transfer_fees( + origin: OriginFor, + create_asset_xcm: u128, + transfer_asset_xcm: u128, + register_token: U256, + ) -> DispatchResult { + ensure_root(origin)?; + + // Basic validation of new costs. Particularly for token registration, we want to ensure + // its relatively expensive to discourage spamming. Like at least 100 USD. 
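// For scale: meth(100) is 100 milli-ether, i.e. 10^17 wei or 0.1 ETH, so the check below demands
// a registration fee above 0.1 ETH, which was roughly the "100 USD" order of magnitude intended.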
+ ensure!( + create_asset_xcm > 0 && transfer_asset_xcm > 0 && register_token > meth(100), + Error::::InvalidTokenTransferFees + ); + + let command = Command::SetTokenTransferFees { + create_asset_xcm, + transfer_asset_xcm, + register_token, + }; + Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; + + Self::deposit_event(Event::::SetTokenTransferFees { + create_asset_xcm, + transfer_asset_xcm, + register_token, + }); + Ok(()) + } + } + + impl Pallet { + /// Send `command` to the Gateway on the Channel identified by `channel_id` + fn send(channel_id: ChannelId, command: Command, pays_fee: PaysFee) -> DispatchResult { + let message = Message { id: None, channel_id, command }; + let (ticket, fee) = + T::OutboundQueue::validate(&message).map_err(|err| Error::::Send(err))?; + + let payment = match pays_fee { + PaysFee::Yes(account) => Some((account, fee.total())), + PaysFee::Partial(account) => Some((account, fee.local)), + PaysFee::No => None, + }; + + if let Some((payer, fee)) = payment { + T::Token::transfer( + &payer, + &T::TreasuryAccount::get(), + fee, + Preservation::Preserve, + )?; + } + + T::OutboundQueue::deliver(ticket).map_err(|err| Error::::Send(err))?; + Ok(()) + } + + /// Issue a `Command::TransferNativeFromAgent` command. The command will be sent on the + /// channel `channel_id` + pub fn do_transfer_native_from_agent( + agent_id: H256, + channel_id: ChannelId, + recipient: H160, + amount: u128, + pays_fee: PaysFee, + ) -> DispatchResult { + ensure!(Agents::::contains_key(agent_id), Error::::NoAgent); + + let command = Command::TransferNativeFromAgent { agent_id, recipient, amount }; + Self::send(channel_id, command, pays_fee)?; + + Self::deposit_event(Event::::TransferNativeFromAgent { + agent_id, + recipient, + amount, + }); + Ok(()) + } + + /// Initializes agents and channels. + pub fn initialize(para_id: ParaId, asset_hub_para_id: ParaId) -> Result<(), DispatchError> { + // Asset Hub + let asset_hub_location: MultiLocation = + ParentThen(X1(Parachain(asset_hub_para_id.into()))).into(); + let asset_hub_agent_id = agent_id_of::(&asset_hub_location)?; + let asset_hub_channel_id: ChannelId = asset_hub_para_id.into(); + Agents::::insert(asset_hub_agent_id, ()); + Channels::::insert( + asset_hub_channel_id, + Channel { agent_id: asset_hub_agent_id, para_id: asset_hub_para_id }, + ); + + // Governance channels + let bridge_hub_agent_id = agent_id_of::(&MultiLocation::here())?; + // Agent for BridgeHub + Agents::::insert(bridge_hub_agent_id, ()); + + // Primary governance channel + Channels::::insert( + PRIMARY_GOVERNANCE_CHANNEL, + Channel { agent_id: bridge_hub_agent_id, para_id }, + ); + + // Secondary governance channel + Channels::::insert( + SECONDARY_GOVERNANCE_CHANNEL, + Channel { agent_id: bridge_hub_agent_id, para_id }, + ); + + Ok(()) + } + + /// Checks if the pallet has been initialized. 
+ pub(crate) fn is_initialized() -> bool { + let primary_exists = Channels::::contains_key(PRIMARY_GOVERNANCE_CHANNEL); + let secondary_exists = Channels::::contains_key(SECONDARY_GOVERNANCE_CHANNEL); + primary_exists && secondary_exists + } + } + + impl StaticLookup for Pallet { + type Source = ChannelId; + type Target = Channel; + fn lookup(channel_id: Self::Source) -> Option { + Channels::::get(channel_id) + } + } + + impl Contains for Pallet { + fn contains(channel_id: &ChannelId) -> bool { + Channels::::get(channel_id).is_some() + } + } + + impl Get> for Pallet { + fn get() -> PricingParametersOf { + PricingParameters::::get() + } + } +} diff --git a/bridges/snowbridge/parachain/pallets/system/src/migration.rs b/bridges/snowbridge/parachain/pallets/system/src/migration.rs new file mode 100644 index 0000000000000000000000000000000000000000..ee94fc091bd1ecb0511789c998e65c0b8f665451 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/src/migration.rs @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Governance API for controlling the Ethereum side of the bridge +use super::*; +use frame_support::traits::OnRuntimeUpgrade; +use log; + +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + +pub mod v0 { + use frame_support::{pallet_prelude::*, weights::Weight}; + + use super::*; + + const LOG_TARGET: &str = "ethereum_system::migration"; + + pub struct InitializeOnUpgrade( + sp_std::marker::PhantomData<(T, BridgeHubParaId, AssetHubParaId)>, + ); + impl OnRuntimeUpgrade + for InitializeOnUpgrade + where + T: Config, + BridgeHubParaId: Get, + AssetHubParaId: Get, + { + fn on_runtime_upgrade() -> Weight { + if !Pallet::::is_initialized() { + Pallet::::initialize( + BridgeHubParaId::get().into(), + AssetHubParaId::get().into(), + ) + .expect("infallible; qed"); + log::info!( + target: LOG_TARGET, + "Ethereum system initialized." + ); + T::DbWeight::get().reads_writes(2, 5) + } else { + log::info!( + target: LOG_TARGET, + "Ethereum system already initialized. Skipping." + ); + T::DbWeight::get().reads(2) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + if !Pallet::::is_initialized() { + log::info!( + target: LOG_TARGET, + "Agents and channels not initialized. Initialization will run." + ); + } else { + log::info!( + target: LOG_TARGET, + "Agents and channels are initialized. Initialization will not run." + ); + } + Ok(vec![]) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { + frame_support::ensure!( + Pallet::::is_initialized(), + "Agents and channels were not initialized." 
+ ); + Ok(()) + } + } +} diff --git a/bridges/snowbridge/parachain/pallets/system/src/mock.rs b/bridges/snowbridge/parachain/pallets/system/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..7a4f61189305d004481ce2edf5f10759bd6936ee --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/src/mock.rs @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate as snowbridge_system; +use frame_support::{ + parameter_types, + traits::{tokens::fungible::Mutate, ConstU128, ConstU16, ConstU64, ConstU8}, + weights::IdentityFee, + PalletId, +}; +use sp_core::H256; +use xcm_executor::traits::ConvertLocation; + +use snowbridge_core::{ + gwei, meth, outbound::ConstantGasMeter, sibling_sovereign_account, AgentId, AllowSiblingsOnly, + ParaId, PricingParameters, Rewards, +}; +use sp_runtime::{ + traits::{AccountIdConversion, BlakeTwo256, IdentityLookup, Keccak256}, + AccountId32, BuildStorage, FixedU128, +}; +use xcm::prelude::*; + +#[cfg(feature = "runtime-benchmarks")] +use crate::BenchmarkHelper; + +type Block = frame_system::mocking::MockBlock; +type Balance = u128; + +pub type AccountId = AccountId32; + +// A stripped-down version of pallet-xcm that only inserts an XCM origin into the runtime +#[allow(dead_code)] +#[frame_support::pallet] +mod pallet_xcm_origin { + use frame_support::{ + pallet_prelude::*, + traits::{Contains, OriginTrait}, + }; + use xcm::latest::prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeOrigin: From + From<::RuntimeOrigin>; + } + + // Insert this custom Origin into the aggregate RuntimeOrigin + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] + pub struct Origin(pub MultiLocation); + + impl From for Origin { + fn from(location: MultiLocation) -> Origin { + Origin(location) + } + } + + /// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and + /// filter the contained location + pub struct EnsureXcm(PhantomData); + impl, F: Contains> EnsureOrigin for EnsureXcm + where + O::PalletsOrigin: From + TryInto, + { + type Success = MultiLocation; + + fn try_origin(outer: O) -> Result { + outer.try_with_caller(|caller| { + caller.try_into().and_then(|o| match o { + Origin(location) if F::contains(&location) => Ok(location), + o => Err(o.into()), + }) + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin(MultiLocation { parents: 1, interior: X1(Parachain(2000)) }))) + } + } +} + +// Configure a mock runtime to test the pallet. 
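// How this origin is consumed: the system pallet's Config::SiblingOrigin is this EnsureXcm
// filter (instantiated below with AllowSiblingsOnly), so only locations of the form
// MultiLocation { parents: 1, interior: X1(Parachain(id)) } are accepted; ensure_sibling() then
// derives the ParaId and AgentId from the accepted location.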
+frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + XcmOrigin: pallet_xcm_origin::{Pallet, Origin}, + OutboundQueue: snowbridge_outbound_queue::{Pallet, Call, Storage, Event}, + EthereumSystem: snowbridge_system, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = ConstU16<42>; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + type Nonce = u64; + type Block = Block; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU128<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); + type MaxHolds = (); +} + +impl pallet_xcm_origin::Config for Test { + type RuntimeOrigin = RuntimeOrigin; +} + +parameter_types! { + pub const HeapSize: u32 = 32 * 1024; + pub const MaxStale: u32 = 32; + pub static ServiceWeight: Option = Some(Weight::from_parts(100, 100)); +} + +impl pallet_message_queue::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type MessageProcessor = OutboundQueue; + type Size = u32; + type QueueChangeHandler = (); + type HeapSize = HeapSize; + type MaxStale = MaxStale; + type ServiceWeight = ServiceWeight; + type QueuePausedQuery = (); +} + +parameter_types! { + pub const MaxMessagePayloadSize: u32 = 1024; + pub const MaxMessagesPerBlock: u32 = 20; + pub const OwnParaId: ParaId = ParaId::new(1013); +} + +impl snowbridge_outbound_queue::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Hashing = Keccak256; + type MessageQueue = MessageQueue; + type Decimals = ConstU8<10>; + type MaxMessagePayloadSize = MaxMessagePayloadSize; + type MaxMessagesPerBlock = MaxMessagesPerBlock; + type GasMeter = ConstantGasMeter; + type Balance = u128; + type PricingParameters = EthereumSystem; + type Channels = EthereumSystem; + type WeightToFee = IdentityFee; + type WeightInfo = (); +} + +parameter_types! { + pub const SS58Prefix: u8 = 42; + pub const AnyNetwork: Option = None; + pub const RelayNetwork: Option = Some(NetworkId::Kusama); + pub const RelayLocation: MultiLocation = MultiLocation::parent(); + pub UniversalLocation: InteriorMultiLocation = + X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(1013)); +} + +pub const DOT: u128 = 10_000_000_000; + +parameter_types! 
{ + pub TreasuryAccount: AccountId = PalletId(*b"py/trsry").into_account_truncating(); + pub Fee: u64 = 1000; + pub const RococoNetwork: NetworkId = NetworkId::Rococo; + pub const InitialFunding: u128 = 1_000_000_000_000; + pub AssetHubParaId: ParaId = ParaId::new(1000); + pub TestParaId: u32 = 2000; + pub Parameters: PricingParameters = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: gwei(20), + rewards: Rewards { local: DOT, remote: meth(1) } + }; + pub const InboundDeliveryCost: u128 = 1_000_000_000; + +} + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelper for () { + fn make_xcm_origin(location: MultiLocation) -> RuntimeOrigin { + RuntimeOrigin::from(pallet_xcm_origin::Origin(location)) + } +} + +impl crate::Config for Test { + type RuntimeEvent = RuntimeEvent; + type OutboundQueue = OutboundQueue; + type SiblingOrigin = pallet_xcm_origin::EnsureXcm; + type AgentIdOf = snowbridge_core::AgentIdOf; + type TreasuryAccount = TreasuryAccount; + type Token = Balances; + type DefaultPricingParameters = Parameters; + type WeightInfo = (); + type InboundDeliveryCost = InboundDeliveryCost; + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext(genesis_build: bool) -> sp_io::TestExternalities { + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + if genesis_build { + crate::GenesisConfig:: { + para_id: OwnParaId::get(), + asset_hub_para_id: AssetHubParaId::get(), + _config: Default::default(), + } + .assimilate_storage(&mut storage) + .unwrap(); + } + + let mut ext: sp_io::TestExternalities = storage.into(); + let initial_amount = InitialFunding::get(); + let test_para_id = TestParaId::get(); + let sovereign_account = sibling_sovereign_account::(test_para_id.into()); + let treasury_account = TreasuryAccount::get(); + ext.execute_with(|| { + System::set_block_number(1); + Balances::mint_into(&AccountId32::from([0; 32]), initial_amount).unwrap(); + Balances::mint_into(&sovereign_account, initial_amount).unwrap(); + Balances::mint_into(&treasury_account, initial_amount).unwrap(); + }); + ext +} + +// Test helpers + +pub fn make_xcm_origin(location: MultiLocation) -> RuntimeOrigin { + pallet_xcm_origin::Origin(location).into() +} + +pub fn make_agent_id(location: MultiLocation) -> AgentId { + ::AgentIdOf::convert_location(&location) + .expect("convert location") +} diff --git a/bridges/snowbridge/parachain/pallets/system/src/tests.rs b/bridges/snowbridge/parachain/pallets/system/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..e07481c1e33e5a9496c441383f2ba2908390f8e6 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/src/tests.rs @@ -0,0 +1,664 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate::{mock::*, *}; +use frame_support::{assert_noop, assert_ok}; +use hex_literal::hex; +use snowbridge_core::{eth, sibling_sovereign_account_raw}; +use sp_core::H256; +use sp_runtime::{AccountId32, DispatchError::BadOrigin, TokenError}; + +#[test] +fn create_agent() { + new_test_ext(true).execute_with(|| { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let agent_id = make_agent_id(origin_location); + let sovereign_account = sibling_sovereign_account::(origin_para_id.into()); + + // fund sovereign account of origin + let _ = Balances::mint_into(&sovereign_account, 10000); + 
+ assert!(!Agents::::contains_key(agent_id)); + + let origin = make_xcm_origin(origin_location); + assert_ok!(EthereumSystem::create_agent(origin)); + + assert!(Agents::::contains_key(agent_id)); + }); +} + +#[test] +fn test_agent_for_here() { + new_test_ext(true).execute_with(|| { + let origin_location = MultiLocation::here(); + let agent_id = make_agent_id(origin_location); + assert_eq!( + agent_id, + hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), + ) + }); +} + +#[test] +fn create_agent_fails_on_funds_unavailable() { + new_test_ext(true).execute_with(|| { + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(2000)) }; + let origin = make_xcm_origin(origin_location); + // Reset balance of sovereign_account to zero so to trigger the FundsUnavailable error + let sovereign_account = sibling_sovereign_account::(2000.into()); + Balances::set_balance(&sovereign_account, 0); + assert_noop!(EthereumSystem::create_agent(origin), TokenError::FundsUnavailable); + }); +} + +#[test] +fn create_agent_bad_origin() { + new_test_ext(true).execute_with(|| { + // relay chain location not allowed + assert_noop!( + EthereumSystem::create_agent(make_xcm_origin(MultiLocation { + parents: 1, + interior: Here, + })), + BadOrigin, + ); + + // local account location not allowed + assert_noop!( + EthereumSystem::create_agent(make_xcm_origin(MultiLocation { + parents: 0, + interior: X1(Junction::AccountId32 { network: None, id: [67u8; 32] }), + })), + BadOrigin, + ); + + // Signed origin not allowed + assert_noop!( + EthereumSystem::create_agent(RuntimeOrigin::signed([14; 32].into())), + BadOrigin + ); + + // None origin not allowed + assert_noop!(EthereumSystem::create_agent(RuntimeOrigin::none()), BadOrigin); + }); +} + +#[test] +fn upgrade_as_root() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let address: H160 = Default::default(); + let code_hash: H256 = Default::default(); + + assert_ok!(EthereumSystem::upgrade(origin, address, code_hash, None)); + + System::assert_last_event(RuntimeEvent::EthereumSystem(crate::Event::Upgrade { + impl_address: address, + impl_code_hash: code_hash, + initializer_params_hash: None, + })); + }); +} + +#[test] +fn upgrade_as_signed_fails() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::signed(AccountId32::new([0; 32])); + let address: H160 = Default::default(); + let code_hash: H256 = Default::default(); + + assert_noop!(EthereumSystem::upgrade(origin, address, code_hash, None), BadOrigin); + }); +} + +#[test] +fn upgrade_with_params() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let address: H160 = Default::default(); + let code_hash: H256 = Default::default(); + let initializer: Option = + Some(Initializer { params: [0; 256].into(), maximum_required_gas: 10000 }); + assert_ok!(EthereumSystem::upgrade(origin, address, code_hash, initializer)); + }); +} + +#[test] +fn set_operating_mode() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let mode = OperatingMode::RejectingOutboundMessages; + + assert_ok!(EthereumSystem::set_operating_mode(origin, mode)); + + System::assert_last_event(RuntimeEvent::EthereumSystem(crate::Event::SetOperatingMode { + mode, + })); + }); +} + +#[test] +fn set_operating_mode_as_signed_fails() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::signed([14; 32].into()); + let mode = OperatingMode::RejectingOutboundMessages; + + 
assert_noop!(EthereumSystem::set_operating_mode(origin, mode), BadOrigin); + }); +} + +#[test] +fn set_pricing_parameters() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let mut params = Parameters::get(); + params.rewards.local = 7; + + assert_ok!(EthereumSystem::set_pricing_parameters(origin, params)); + + assert_eq!(PricingParameters::::get().rewards.local, 7); + }); +} + +#[test] +fn set_pricing_parameters_as_signed_fails() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::signed([14; 32].into()); + let params = Parameters::get(); + + assert_noop!(EthereumSystem::set_pricing_parameters(origin, params), BadOrigin); + }); +} + +#[test] +fn set_pricing_parameters_invalid() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let mut params = Parameters::get(); + params.rewards.local = 0; + + assert_noop!( + EthereumSystem::set_pricing_parameters(origin.clone(), params), + Error::::InvalidPricingParameters + ); + + let mut params = Parameters::get(); + params.exchange_rate = 0u128.into(); + assert_noop!( + EthereumSystem::set_pricing_parameters(origin.clone(), params), + Error::::InvalidPricingParameters + ); + params = Parameters::get(); + params.fee_per_gas = sp_core::U256::zero(); + assert_noop!( + EthereumSystem::set_pricing_parameters(origin.clone(), params), + Error::::InvalidPricingParameters + ); + params = Parameters::get(); + params.rewards.local = 0; + assert_noop!( + EthereumSystem::set_pricing_parameters(origin.clone(), params), + Error::::InvalidPricingParameters + ); + params = Parameters::get(); + params.rewards.remote = sp_core::U256::zero(); + assert_noop!( + EthereumSystem::set_pricing_parameters(origin, params), + Error::::InvalidPricingParameters + ); + }); +} + +#[test] +fn set_token_transfer_fees() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + + assert_ok!(EthereumSystem::set_token_transfer_fees(origin, 1, 1, eth(1))); + }); +} + +#[test] +fn set_token_transfer_fees_root_only() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::signed([14; 32].into()); + + assert_noop!(EthereumSystem::set_token_transfer_fees(origin, 1, 1, 1.into()), BadOrigin); + }); +} + +#[test] +fn set_token_transfer_fees_invalid() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + + assert_noop!( + EthereumSystem::set_token_transfer_fees(origin, 0, 0, 0.into()), + Error::::InvalidTokenTransferFees + ); + }); +} + +#[test] +fn create_channel() { + new_test_ext(true).execute_with(|| { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let sovereign_account = sibling_sovereign_account::(origin_para_id.into()); + let origin = make_xcm_origin(origin_location); + + // fund sovereign account of origin + let _ = Balances::mint_into(&sovereign_account, 10000); + + assert_ok!(EthereumSystem::create_agent(origin.clone())); + assert_ok!(EthereumSystem::create_channel(origin, OperatingMode::Normal)); + }); +} + +#[test] +fn create_channel_fail_already_exists() { + new_test_ext(true).execute_with(|| { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let sovereign_account = sibling_sovereign_account::(origin_para_id.into()); + let origin = make_xcm_origin(origin_location); + + // fund sovereign account of origin + let _ = Balances::mint_into(&sovereign_account, 10000); + + 
assert_ok!(EthereumSystem::create_agent(origin.clone())); + assert_ok!(EthereumSystem::create_channel(origin.clone(), OperatingMode::Normal)); + + assert_noop!( + EthereumSystem::create_channel(origin, OperatingMode::Normal), + Error::::ChannelAlreadyCreated + ); + }); +} + +#[test] +fn create_channel_bad_origin() { + new_test_ext(true).execute_with(|| { + // relay chain location not allowed + assert_noop!( + EthereumSystem::create_channel( + make_xcm_origin(MultiLocation { parents: 1, interior: Here }), + OperatingMode::Normal, + ), + BadOrigin, + ); + + // child of sibling location not allowed + assert_noop!( + EthereumSystem::create_channel( + make_xcm_origin(MultiLocation { + parents: 1, + interior: X2( + Parachain(2000), + Junction::AccountId32 { network: None, id: [67u8; 32] } + ), + }), + OperatingMode::Normal, + ), + BadOrigin, + ); + + // local account location not allowed + assert_noop!( + EthereumSystem::create_channel( + make_xcm_origin(MultiLocation { + parents: 0, + interior: X1(Junction::AccountId32 { network: None, id: [67u8; 32] }), + }), + OperatingMode::Normal, + ), + BadOrigin, + ); + + // Signed origin not allowed + assert_noop!( + EthereumSystem::create_channel( + RuntimeOrigin::signed([14; 32].into()), + OperatingMode::Normal, + ), + BadOrigin + ); + + // None origin not allowed + assert_noop!(EthereumSystem::create_agent(RuntimeOrigin::none()), BadOrigin); + }); +} + +#[test] +fn update_channel() { + new_test_ext(true).execute_with(|| { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let sovereign_account = sibling_sovereign_account::(origin_para_id.into()); + let origin = make_xcm_origin(origin_location); + + // First create the channel + let _ = Balances::mint_into(&sovereign_account, 10000); + assert_ok!(EthereumSystem::create_agent(origin.clone())); + assert_ok!(EthereumSystem::create_channel(origin.clone(), OperatingMode::Normal)); + + // Now try to update it + assert_ok!(EthereumSystem::update_channel(origin, OperatingMode::Normal)); + + System::assert_last_event(RuntimeEvent::EthereumSystem(crate::Event::UpdateChannel { + channel_id: ParaId::from(2000).into(), + mode: OperatingMode::Normal, + })); + }); +} + +#[test] +fn update_channel_bad_origin() { + new_test_ext(true).execute_with(|| { + let mode = OperatingMode::Normal; + + // relay chain location not allowed + assert_noop!( + EthereumSystem::update_channel( + make_xcm_origin(MultiLocation { parents: 1, interior: Here }), + mode, + ), + BadOrigin, + ); + + // child of sibling location not allowed + assert_noop!( + EthereumSystem::update_channel( + make_xcm_origin(MultiLocation { + parents: 1, + interior: X2( + Parachain(2000), + Junction::AccountId32 { network: None, id: [67u8; 32] } + ), + }), + mode, + ), + BadOrigin, + ); + + // local account location not allowed + assert_noop!( + EthereumSystem::update_channel( + make_xcm_origin(MultiLocation { + parents: 0, + interior: X1(Junction::AccountId32 { network: None, id: [67u8; 32] }), + }), + mode, + ), + BadOrigin, + ); + + // Signed origin not allowed + assert_noop!( + EthereumSystem::update_channel(RuntimeOrigin::signed([14; 32].into()), mode), + BadOrigin + ); + + // None origin not allowed + assert_noop!(EthereumSystem::update_channel(RuntimeOrigin::none(), mode), BadOrigin); + }); +} + +#[test] +fn update_channel_fails_not_exist() { + new_test_ext(true).execute_with(|| { + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(2000)) }; + let origin = 
make_xcm_origin(origin_location); + + // Now try to update it + assert_noop!( + EthereumSystem::update_channel(origin, OperatingMode::Normal), + Error::::NoChannel + ); + }); +} + +#[test] +fn force_update_channel() { + new_test_ext(true).execute_with(|| { + let origin_para_id = 2000; + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(origin_para_id)) }; + let sovereign_account = sibling_sovereign_account::(origin_para_id.into()); + let origin = make_xcm_origin(origin_location); + + let channel_id: ChannelId = ParaId::from(origin_para_id).into(); + + // First create the channel + let _ = Balances::mint_into(&sovereign_account, 10000); + assert_ok!(EthereumSystem::create_agent(origin.clone())); + assert_ok!(EthereumSystem::create_channel(origin.clone(), OperatingMode::Normal)); + + // Now try to force update it + let force_origin = RuntimeOrigin::root(); + assert_ok!(EthereumSystem::force_update_channel( + force_origin, + channel_id, + OperatingMode::Normal, + )); + + System::assert_last_event(RuntimeEvent::EthereumSystem(crate::Event::UpdateChannel { + channel_id: ParaId::from(2000).into(), + mode: OperatingMode::Normal, + })); + }); +} + +#[test] +fn force_update_channel_bad_origin() { + new_test_ext(true).execute_with(|| { + let mode = OperatingMode::Normal; + + // signed origin not allowed + assert_noop!( + EthereumSystem::force_update_channel( + RuntimeOrigin::signed([14; 32].into()), + ParaId::from(1000).into(), + mode, + ), + BadOrigin, + ); + }); +} + +#[test] +fn transfer_native_from_agent() { + new_test_ext(true).execute_with(|| { + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(2000)) }; + let origin = make_xcm_origin(origin_location); + let recipient: H160 = [27u8; 20].into(); + let amount = 103435; + + // First create the agent and channel + assert_ok!(EthereumSystem::create_agent(origin.clone())); + assert_ok!(EthereumSystem::create_channel(origin, OperatingMode::Normal)); + + let origin = make_xcm_origin(origin_location); + assert_ok!(EthereumSystem::transfer_native_from_agent(origin, recipient, amount),); + + System::assert_last_event(RuntimeEvent::EthereumSystem( + crate::Event::TransferNativeFromAgent { + agent_id: make_agent_id(origin_location), + recipient, + amount, + }, + )); + }); +} + +#[test] +fn force_transfer_native_from_agent() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let location = MultiLocation { parents: 1, interior: X1(Parachain(2000)) }; + let versioned_location: Box = Box::new(location.into()); + let recipient: H160 = [27u8; 20].into(); + let amount = 103435; + + // First create the agent + Agents::::insert(make_agent_id(location), ()); + + assert_ok!(EthereumSystem::force_transfer_native_from_agent( + origin, + versioned_location, + recipient, + amount + ),); + + System::assert_last_event(RuntimeEvent::EthereumSystem( + crate::Event::TransferNativeFromAgent { + agent_id: make_agent_id(location), + recipient, + amount, + }, + )); + }); +} + +#[test] +fn force_transfer_native_from_agent_bad_origin() { + new_test_ext(true).execute_with(|| { + let recipient: H160 = [27u8; 20].into(); + let amount = 103435; + + // signed origin not allowed + assert_noop!( + EthereumSystem::force_transfer_native_from_agent( + RuntimeOrigin::signed([14; 32].into()), + Box::new( + MultiLocation { + parents: 1, + interior: X2( + Parachain(2000), + Junction::AccountId32 { network: None, id: [67u8; 32] } + ), + } + .into() + ), + recipient, + amount, + ), + BadOrigin, + ); + }); +} + +// NOTE: 
The following tests are not actually tests and are more about obtaining location +// conversions for devops purposes. They need to be removed here and incorporated into a command +// line utility. + +#[ignore] +#[test] +fn check_sibling_sovereign_account() { + new_test_ext(true).execute_with(|| { + let para_id = 1001; + let sovereign_account = sibling_sovereign_account::(para_id.into()); + let sovereign_account_raw = sibling_sovereign_account_raw(para_id.into()); + println!( + "Sovereign account for parachain {}: {:#?}", + para_id, + hex::encode(sovereign_account.clone()) + ); + assert_eq!(sovereign_account, sovereign_account_raw.into()); + }); +} + +#[test] +fn charge_fee_for_create_agent() { + new_test_ext(true).execute_with(|| { + let para_id: u32 = TestParaId::get(); + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(para_id)) }; + let origin = make_xcm_origin(origin_location); + let sovereign_account = sibling_sovereign_account::(para_id.into()); + let (_, agent_id) = ensure_sibling::(&origin_location).unwrap(); + + let initial_sovereign_balance = Balances::balance(&sovereign_account); + assert_ok!(EthereumSystem::create_agent(origin.clone())); + let fee_charged = initial_sovereign_balance - Balances::balance(&sovereign_account); + + assert_ok!(EthereumSystem::create_channel(origin, OperatingMode::Normal)); + + // assert sovereign_balance decreased by (fee.base_fee + fee.delivery_fee) + let message = Message { + id: None, + channel_id: ParaId::from(para_id).into(), + command: Command::CreateAgent { agent_id }, + }; + let (_, fee) = OutboundQueue::validate(&message).unwrap(); + assert_eq!(fee.local + fee.remote, fee_charged); + + // and treasury_balance increased + let treasury_balance = Balances::balance(&TreasuryAccount::get()); + assert!(treasury_balance > InitialFunding::get()); + + let final_sovereign_balance = Balances::balance(&sovereign_account); + // (sovereign_balance + treasury_balance) keeps the same + assert_eq!(final_sovereign_balance + treasury_balance, { InitialFunding::get() * 2 }); + }); +} + +#[test] +fn charge_fee_for_transfer_native_from_agent() { + new_test_ext(true).execute_with(|| { + let para_id: u32 = TestParaId::get(); + let origin_location = MultiLocation { parents: 1, interior: X1(Parachain(para_id)) }; + let recipient: H160 = [27u8; 20].into(); + let amount = 103435; + let origin = make_xcm_origin(origin_location); + let (_, agent_id) = ensure_sibling::(&origin_location).unwrap(); + + let sovereign_account = sibling_sovereign_account::(para_id.into()); + + // create_agent & create_channel first + assert_ok!(EthereumSystem::create_agent(origin.clone())); + assert_ok!(EthereumSystem::create_channel(origin.clone(), OperatingMode::Normal)); + + // assert sovereign_balance decreased by only the base_fee + let sovereign_balance_before = Balances::balance(&sovereign_account); + assert_ok!(EthereumSystem::transfer_native_from_agent(origin.clone(), recipient, amount)); + let message = Message { + id: None, + channel_id: ParaId::from(para_id).into(), + command: Command::TransferNativeFromAgent { agent_id, recipient, amount }, + }; + let (_, fee) = OutboundQueue::validate(&message).unwrap(); + let sovereign_balance_after = Balances::balance(&sovereign_account); + assert_eq!(sovereign_balance_after + fee.local, sovereign_balance_before); + }); +} + +#[test] +fn charge_fee_for_upgrade() { + new_test_ext(true).execute_with(|| { + let para_id: u32 = TestParaId::get(); + let origin = RuntimeOrigin::root(); + let address: H160 = Default::default(); 
+ let code_hash: H256 = Default::default(); + let initializer: Option = + Some(Initializer { params: [0; 256].into(), maximum_required_gas: 10000 }); + assert_ok!(EthereumSystem::upgrade(origin, address, code_hash, initializer.clone())); + + // assert sovereign_balance does not change as we do not charge for sudo operations + let sovereign_account = sibling_sovereign_account::(para_id.into()); + let sovereign_balance = Balances::balance(&sovereign_account); + assert_eq!(sovereign_balance, InitialFunding::get()); + }); +} + +#[test] +fn genesis_build_initializes_correctly() { + new_test_ext(true).execute_with(|| { + assert!(EthereumSystem::is_initialized(), "Ethereum uninitialized."); + }); +} + +#[test] +fn no_genesis_build_is_uninitialized() { + new_test_ext(false).execute_with(|| { + assert!(!EthereumSystem::is_initialized(), "Ethereum initialized."); + }); +} diff --git a/bridges/snowbridge/parachain/pallets/system/src/weights.rs b/bridges/snowbridge/parachain/pallets/system/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..6e532a0d8a8c19339cc39574aeb09668468f34e9 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/system/src/weights.rs @@ -0,0 +1,249 @@ + +//! Autogenerated weights for `snowbridge_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-09, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `crake.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// bridge-hub-rococo-dev +// --pallet=snowbridge_system +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --template +// ../parachain/templates/module-weight-template.hbs +// --output +// ../parachain/pallets/control/src/weights.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `snowbridge_system`. +pub trait WeightInfo { + fn upgrade() -> Weight; + fn create_agent() -> Weight; + fn create_channel() -> Weight; + fn update_channel() -> Weight; + fn force_update_channel() -> Weight; + fn set_operating_mode() -> Weight; + fn transfer_native_from_agent() -> Weight; + fn force_transfer_native_from_agent() -> Weight; + fn set_token_transfer_fees() -> Weight; + fn set_pricing_parameters() -> Weight; +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(44_000_000, 3517) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: EthereumSystem Agents (r:1 w:1) + /// Proof: EthereumSystem Agents (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn create_agent() -> Weight { + // Proof Size summary in bytes: + // Measured: `187` + // Estimated: `6196` + // Minimum execution time: 85_000_000 picoseconds. 
+ Weight::from_parts(85_000_000, 6196) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: EthereumSystem Agents (r:1 w:0) + /// Proof: EthereumSystem Agents (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: EthereumSystem Channels (r:1 w:1) + /// Proof: EthereumSystem Channels (max_values: None, max_size: Some(12), added: 2487, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn create_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `602` + // Estimated: `69050` + // Minimum execution time: 83_000_000 picoseconds. + Weight::from_parts(83_000_000, 69050) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + /// Storage: EthereumSystem Channels (r:1 w:0) + /// Proof: EthereumSystem Channels (max_values: None, max_size: Some(12), added: 2487, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn update_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `256` + // Estimated: `6044` + // Minimum execution time: 40_000_000 picoseconds. 
+ Weight::from_parts(40_000_000, 6044) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: EthereumSystem Channels (r:1 w:0) + /// Proof: EthereumSystem Channels (max_values: None, max_size: Some(12), added: 2487, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn force_update_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `256` + // Estimated: `6044` + // Minimum execution time: 41_000_000 picoseconds. + Weight::from_parts(41_000_000, 6044) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn set_operating_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 31_000_000 picoseconds. + Weight::from_parts(31_000_000, 3517) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: EthereumSystem Agents (r:1 w:0) + /// Proof: EthereumSystem Agents (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn transfer_native_from_agent() -> Weight { + // Proof Size summary in bytes: + // Measured: `252` + // Estimated: `6044` + // Minimum execution time: 45_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 6044) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: EthereumSystem Agents (r:1 w:0) + /// Proof: EthereumSystem Agents (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn force_transfer_native_from_agent() -> Weight { + // Proof Size summary in bytes: + // Measured: `252` + // Estimated: `6044` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(42_000_000, 6044) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn set_token_transfer_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 31_000_000 picoseconds. + Weight::from_parts(42_000_000, 3517) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn set_pricing_parameters() -> Weight { + // Proof Size summary in bytes: + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 31_000_000 picoseconds. 
+ Weight::from_parts(42_000_000, 3517) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } +} diff --git a/bridges/snowbridge/parachain/primitives/beacon/Cargo.toml b/bridges/snowbridge/parachain/primitives/beacon/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e81e0208ba14da57ce7b935ecd697e188d2b8596 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "snowbridge-beacon-primitives" +description = "Snowbridge Beacon Primitives" +version = "0.0.1" +authors = ["Snowfork "] +edition = "2021" +license = "Apache-2.0" + +[dependencies] +serde = { version = "1.0.188", optional = true, features = ["derive"] } +hex = { version = "0.4", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +rlp = { version = "0.5", default-features = false } + +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } + +ssz_rs = { version = "0.9.0", default-features = false } +ssz_rs_derive = { version = "0.9.0", default-features = false } +byte-slice-cast = { version = "1.2.1", default-features = false } + +snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } +static_assertions = { version = "1.1.0" } +milagro_bls = { git = "https://github.com/snowfork/milagro_bls", default-features = false, rev = "a6d66e4eb89015e352fb1c9f7b661ecdbb5b2176" } + +[dev-dependencies] +hex-literal = { version = "0.4.1" } + +[features] +default = ["std"] +std = [ + "byte-slice-cast/std", + "codec/std", + "frame-support/std", + "frame-system/std", + "hex/std", + "milagro_bls/std", + "rlp/std", + "scale-info/std", + "serde", + "snowbridge-ethereum/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "ssz_rs/std", +] diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/bits.rs b/bridges/snowbridge/parachain/primitives/beacon/src/bits.rs new file mode 100644 index 0000000000000000000000000000000000000000..72b7135ee2939bdabb98c9c06df801c43c3db230 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/bits.rs @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use sp_std::{convert::TryInto, prelude::*}; +use ssz_rs::{Bitvector, Deserialize}; + +pub fn decompress_sync_committee_bits< + const SYNC_COMMITTEE_SIZE: usize, + const SYNC_COMMITTEE_BITS_SIZE: usize, +>( + input: [u8; SYNC_COMMITTEE_BITS_SIZE], +) -> [u8; SYNC_COMMITTEE_SIZE] { + Bitvector::<{ SYNC_COMMITTEE_SIZE }>::deserialize(&input) + .expect("checked statically; qed") + .iter() + .map(|bit| u8::from(bit == true)) + .collect::>() + .try_into() + .expect("checked statically; qed") +} diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/bls.rs b/bridges/snowbridge/parachain/primitives/beacon/src/bls.rs new file mode 100644 index 
0000000000000000000000000000000000000000..589b72e67348f70122ad2362b8bc51474cb577ac
--- /dev/null
+++ b/bridges/snowbridge/parachain/primitives/beacon/src/bls.rs
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork
+use crate::{PublicKey, Signature};
+use codec::{Decode, Encode};
+use frame_support::{ensure, PalletError};
+pub use milagro_bls::{
+	AggregatePublicKey, AggregateSignature, PublicKey as PublicKeyPrepared,
+	Signature as SignaturePrepared,
+};
+use scale_info::TypeInfo;
+use sp_core::H256;
+use sp_runtime::RuntimeDebug;
+use sp_std::prelude::*;
+
+#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, TypeInfo, RuntimeDebug, PalletError)]
+pub enum BlsError {
+	InvalidSignature,
+	InvalidPublicKey,
+	InvalidAggregatePublicKeys,
+	SignatureVerificationFailed,
+}
+
+/// fast_aggregate_verify optimized with aggregate key subtracting absent ones.
+pub fn fast_aggregate_verify(
+	aggregate_pubkey: &PublicKeyPrepared,
+	absent_pubkeys: &Vec<PublicKeyPrepared>,
+	message: H256,
+	signature: &Signature,
+) -> Result<(), BlsError> {
+	let agg_sig = prepare_aggregate_signature(signature)?;
+	let agg_key = prepare_aggregate_pubkey_from_absent(aggregate_pubkey, absent_pubkeys)?;
+	fast_aggregate_verify_pre_aggregated(agg_sig, agg_key, message)
+}
+
+/// Decompress one public key into a point in G1.
+pub fn prepare_milagro_pubkey(pubkey: &PublicKey) -> Result<PublicKeyPrepared, BlsError> {
+	PublicKeyPrepared::from_bytes_unchecked(&pubkey.0).map_err(|_| BlsError::InvalidPublicKey)
+}
+
+/// Prepare for G1 public keys.
+pub fn prepare_g1_pubkeys(pubkeys: &[PublicKey]) -> Result<Vec<PublicKeyPrepared>, BlsError> {
+	pubkeys
+		.iter()
+		// Deserialize one public key from compressed bytes
+		.map(prepare_milagro_pubkey)
+		.collect::<Result<Vec<PublicKeyPrepared>, BlsError>>()
+}
+
+/// Prepare for G1 AggregatePublicKey.
+pub fn prepare_aggregate_pubkey(
+	pubkeys: &[PublicKeyPrepared],
+) -> Result<AggregatePublicKey, BlsError> {
+	AggregatePublicKey::into_aggregate(pubkeys).map_err(|_| BlsError::InvalidPublicKey)
+}
+
+/// Prepare for G1 AggregatePublicKey.
+pub fn prepare_aggregate_pubkey_from_absent(
+	aggregate_key: &PublicKeyPrepared,
+	absent_pubkeys: &Vec<PublicKeyPrepared>,
+) -> Result<AggregatePublicKey, BlsError> {
+	let mut aggregate_pubkey = AggregatePublicKey::from_public_key(aggregate_key);
+	if !absent_pubkeys.is_empty() {
+		let absent_aggregate_key = prepare_aggregate_pubkey(absent_pubkeys)?;
+		aggregate_pubkey.point.sub(&absent_aggregate_key.point);
+	}
+	Ok(AggregatePublicKey { point: aggregate_pubkey.point })
+}
+
+/// Prepare for G2 AggregateSignature, normally more expensive than G1 operation.
+pub fn prepare_aggregate_signature(signature: &Signature) -> Result<AggregateSignature, BlsError> {
+	Ok(AggregateSignature::from_signature(
+		&SignaturePrepared::from_bytes(&signature.0).map_err(|_| BlsError::InvalidSignature)?,
+	))
+}
+
+/// fast_aggregate_verify_pre_aggregated which is the most expensive call in beacon light client.
+pub fn fast_aggregate_verify_pre_aggregated( + agg_sig: AggregateSignature, + aggregate_key: AggregatePublicKey, + message: H256, +) -> Result<(), BlsError> { + ensure!( + agg_sig.fast_aggregate_verify_pre_aggregated(&message[..], &aggregate_key), + BlsError::SignatureVerificationFailed + ); + Ok(()) +} diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/config.rs b/bridges/snowbridge/parachain/primitives/beacon/src/config.rs new file mode 100644 index 0000000000000000000000000000000000000000..aa5fda706f9934148a74683d1bfa4ffc872ce8e2 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/config.rs @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +pub const MAX_PROOF_SIZE: u32 = 20; + +pub const FEE_RECIPIENT_SIZE: usize = 20; +pub const EXTRA_DATA_SIZE: usize = 32; +pub const LOGS_BLOOM_SIZE: usize = 256; + +pub const PUBKEY_SIZE: usize = 48; +pub const SIGNATURE_SIZE: usize = 96; diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/lib.rs b/bridges/snowbridge/parachain/primitives/beacon/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..3527e1ff0d195a5c4c3b988f17f4eebc67bd0e52 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/lib.rs @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod bits; +pub mod bls; +pub mod config; +pub mod merkle_proof; +pub mod receipt; +pub mod ssz; +pub mod types; +pub mod updates; + +#[cfg(feature = "std")] +mod serde_utils; + +pub use types::{ + BeaconHeader, CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, + ExecutionPayloadHeader, FinalizedHeaderState, Fork, ForkData, ForkVersion, ForkVersions, Mode, + PublicKey, Signature, SigningData, SyncAggregate, SyncCommittee, SyncCommitteePrepared, +}; +pub use updates::{CheckpointUpdate, ExecutionHeaderUpdate, NextSyncCommitteeUpdate, Update}; + +pub use bits::decompress_sync_committee_bits; +pub use bls::{ + fast_aggregate_verify, prepare_aggregate_pubkey, prepare_aggregate_pubkey_from_absent, + prepare_aggregate_signature, prepare_g1_pubkeys, AggregatePublicKey, AggregateSignature, + BlsError, PublicKeyPrepared, SignaturePrepared, +}; +pub use merkle_proof::verify_merkle_branch; +pub use receipt::verify_receipt_proof; diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/merkle_proof.rs b/bridges/snowbridge/parachain/primitives/beacon/src/merkle_proof.rs new file mode 100644 index 0000000000000000000000000000000000000000..a6ee6e9452c39d766565765e5aa5682e323c3f34 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/merkle_proof.rs @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use sp_core::H256; +use sp_io::hashing::sha2_256; + +/// Specified by +/// with improvements from +pub fn verify_merkle_branch( + leaf: H256, + branch: &[H256], + index: usize, + depth: usize, + root: H256, +) -> bool { + // verify the proof length + if branch.len() != depth { + return false + } + // verify the computed merkle root + root == compute_merkle_root(leaf, branch, index) +} + +fn compute_merkle_root(leaf: H256, proof: &[H256], index: usize) -> H256 { + let mut value: [u8; 32] = leaf.into(); + for (i, node) in proof.iter().enumerate() { + let mut data = [0u8; 64]; + if generalized_index_bit(index, i) { + // right node + data[0..32].copy_from_slice(node.as_bytes()); + 
data[32..64].copy_from_slice(&value); + value = sha2_256(&data); + } else { + // left node + data[0..32].copy_from_slice(&value); + data[32..64].copy_from_slice(node.as_bytes()); + value = sha2_256(&data); + } + } + value.into() +} + +/// Spec: +fn generalized_index_bit(index: usize, position: usize) -> bool { + index & (1 << position) > 0 +} + +/// Spec: +pub const fn subtree_index(generalized_index: usize) -> usize { + generalized_index % (1 << generalized_index_length(generalized_index)) +} + +/// Spec: +pub const fn generalized_index_length(generalized_index: usize) -> usize { + match generalized_index.checked_ilog2() { + Some(v) => v as usize, + None => panic!("checked statically; qed"), + } +} diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/receipt.rs b/bridges/snowbridge/parachain/primitives/beacon/src/receipt.rs new file mode 100644 index 0000000000000000000000000000000000000000..0588f3f73f715b417476e5b9a9dd02b62388e585 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/receipt.rs @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use sp_core::H256; +use sp_io::hashing::keccak_256; +use sp_std::prelude::*; + +use snowbridge_ethereum::{mpt, Receipt}; + +pub fn verify_receipt_proof( + receipts_root: H256, + proof: &[Vec], +) -> Option> { + match apply_merkle_proof(proof) { + Some((root, data)) if root == receipts_root => Some(rlp::decode(&data)), + Some((_, _)) => None, + None => None, + } +} + +fn apply_merkle_proof(proof: &[Vec]) -> Option<(H256, Vec)> { + let mut iter = proof.iter().rev(); + let first_bytes = match iter.next() { + Some(b) => b, + None => return None, + }; + let item_to_prove: mpt::ShortNode = rlp::decode(first_bytes).ok()?; + + let final_hash: Option<[u8; 32]> = iter.try_fold(keccak_256(first_bytes), |acc, x| { + let node: Box = x.as_slice().try_into().ok()?; + if (*node).contains_hash(acc.into()) { + return Some(keccak_256(x)) + } + None + }); + + final_hash.map(|hash| (hash.into(), item_to_prove.value)) +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + #[test] + fn test_verify_receipt_proof() { + let root: H256 = + hex!("fd5e397a84884641f53c496804f24b5276cbb8c5c9cfc2342246be8e3ce5ad02").into(); + + // Valid proof + let proof_receipt5 = vec!( + hex!("f90131a0b5ba404eb5a6a88e56579f4d37ef9813b5ad7f86f0823ff3b407ac5a6bb465eca0398ead2655e78e03c127ce22c5830e90f18b1601ec055f938336c084feb915a9a026d322c26e46c50942c1aabde50e36df5cde572aed650ce73ea3182c6e90a02ca00600a356135f4db1db0d9842264cdff2652676f881669e91e316c0b6dd783011a0837f1deb4075336da320388c1edfffc56c448a43f4a5ba031300d32a7b509fc5a01c3ac82fd65b4aba7f9afaf604d9c82ec7e2deb573a091ae235751bc5c0c288da05d454159d9071b0f68b6e0503d290f23ac7602c1db0c569dee4605d8f5298f09a00bbed10350ec954448df795f6fd46e3faefc800ede061b3840eedc6e2b07a74da0acb02d26a3650f2064c14a435fdf1f668d8655daf455ebdf671713a7c089b3898080808080808080").to_vec(), + 
hex!("f901f180a00046a08d4f0bdbdc6b31903086ce323182bce6725e7d9415f7ff91ee8f4820bda0e7cd26ad5f3d2771e4b5ab788e268a14a10209f94ee918eb6c829d21d3d11c1da00d4a56d9e9a6751874fd86c7e3cb1c6ad5a848da62751325f478978a00ea966ea064b81920c8f04a8a1e21f53a8280e739fbb7b00b2ab92493ca3f610b70e8ac85a0b1040ed4c55a73178b76abb16f946ce5bebd6b93ab873c83327df54047d12c27a0de6485e9ac58dc6e2b04b4bb38f562684f0b1a2ee586cc11079e7d9a9dc40b32a0d394f4d3532c3124a65fa36e69147e04fd20453a72ee9c50660f17e13ce9df48a066501003fc3e3478efd2803cd0eded6bbe9243ca01ba754d6327071ddbcbc649a0b2684e518f325fee39fc8ea81b68f3f5c785be00d087f3bed8857ae2ee8da26ea071060a5c52042e8d7ce21092f8ecf06053beb9a0b773a6f91a30c4220aa276b2a0fc22436632574ccf6043d0986dede27ea94c9ca9a3bb5ec03ce776a4ddef24a9a05a8a1d6698c4e7d8cc3a2506cb9b12ea9a079c9c7099bc919dc804033cc556e4a0170c468b0716fd36d161f0bf05875f15756a2976de92f9efe7716320509d79c9a0182f909a90cab169f3efb62387f9cccdd61440acc4deec42f68a4f7ca58075c7a055cf0e9202ac75689b76318f1171f3a44465eddc06aae0713bfb6b34fdd27b7980").to_vec(), + hex!("f904de20b904daf904d701830652f0b9010004200000000000000000000080020000000000010000000000010000000000000000000000000000000000000000000002000000080000000000000000200000000000000000000000000008000000220000000000400010000000000000000000000000000000000000000000000000000000000000040000000010000100000000000800000000004000000000000000000000000000080000004000000000020000000000020000000000000000000000000000000000000000000004000000000002000000000100000000000000000000000000001000000002000020000010200000000000010000000000000000000000000000000000000010000000f903ccf89b9421130f34829b4c343142047a28ce96ec07814b15f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa00000000000000000000000007d843005c7433c16b27ff939cb37471541561ebda0000000000000000000000000e9c1281aae66801fa35ec404d5f2aea393ff6988a000000000000000000000000000000000000000000000000000000005d09b7380f89b9421130f34829b4c343142047a28ce96ec07814b15f863a08c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925a00000000000000000000000007d843005c7433c16b27ff939cb37471541561ebda00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da0ffffffffffffffffffffffffffffffffffffffffffffffffffffffcc840c6920f89b94c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa0000000000000000000000000e9c1281aae66801fa35ec404d5f2aea393ff6988a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da000000000000000000000000000000000000000000000000003e973b5a5d1078ef87994e9c1281aae66801fa35ec404d5f2aea393ff6988e1a01c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1b840000000000000000000000000000000000000000000000000000001f1420ad1d40000000000000000000000000000000000000000000000014ad400879d159a38f8fc94e9c1281aae66801fa35ec404d5f2aea393ff6988f863a0d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488db88000000000000000000000000000000000000000000000000000000005d415f3320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e973b5a5d1078ef87a94c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f842a07fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da000000000000000000000000000000000000000000000000003e973b5a5d1078e").to_vec(), + ); + 
assert!(verify_receipt_proof(root, &proof_receipt5).is_some()); + + // Various invalid proofs + let proof_empty: Vec> = vec![]; + let proof_missing_full_node = vec![proof_receipt5[0].clone(), proof_receipt5[2].clone()]; + let proof_missing_short_node1 = vec![proof_receipt5[0].clone(), proof_receipt5[1].clone()]; + let proof_missing_short_node2 = vec![proof_receipt5[0].clone()]; + let proof_invalid_encoding = vec![proof_receipt5[2][2..].to_vec()]; + let proof_no_full_node = vec![proof_receipt5[2].clone(), proof_receipt5[2].clone()]; + assert!(verify_receipt_proof(root, &proof_empty).is_none()); + assert!(verify_receipt_proof(root, &proof_missing_full_node).is_none()); + + assert_eq!( + verify_receipt_proof(root, &proof_missing_short_node1), + Some(Err(rlp::DecoderError::Custom("Unsupported receipt type"))) + ); + + assert_eq!( + verify_receipt_proof(root, &proof_missing_short_node2), + Some(Err(rlp::DecoderError::Custom("Unsupported receipt type"))) + ); + + assert!(verify_receipt_proof(root, &proof_invalid_encoding).is_none()); + assert!(verify_receipt_proof(root, &proof_no_full_node).is_none()); + } + + #[test] + fn test_verify_receipt_proof_with_intermediate_short_node() { + let root: H256 = + hex!("d128e3a57142d2bf15bc0cbcac7ad54f40750d571b5c3097e425882c10c9ba66").into(); + + let proof_receipt263 = vec![ + hex!("f90131a00d3cb8d3f57ac1c0e12918a2ebe0cafed8c273577b9dd73e7ed1079b403ef494a0678b9835b834f8a287c0dd33a8fca9146e456ca688555ed4ec1361a2180b778da0fe42da181a46677a043b3d9d4b8bb05a6a17b7b5c010c17e7c1d31cfb7c4f911a0c89f0e2c53241cdb578e1f2b4caf6ba36e00500bdc57fecd66b84a6a58394c19a086c3c1fae5a0575940b5d38e111c469d07883106c26856f3ef608469a2081f13a06c5992ff00aab6226a70a032fd2f571ba22f797321f45e2daa73020d638d21b0a050861e9503ef68728f6c90a44f7fe1bceb2a9bdab6957bbe7136166bd849561ea006aa6eaca8a07e57176e9aa41e6a09edfb7678d1a112404e0ec779d7e567e82ea0bb0b430d303ba21b0af11c487b8a218bd75db54c98940b3f11bad8ff47cad3ef8080808080808080").to_vec(), + hex!("f871a0246de222036ee6a03329b0105da0a6b3f916fc95a9ed5a403a581a0c4d74242ca0ac108a49a88b57a05ac34a108b39f1e45f6f167f2b9fbc8d52fb58e2e5a6af1ea0fcfe07ac2ccd3c28b6eab68d1bce112f6f6dbd9023e4ec3c05b96615aa803d798080808080808080808080808080").to_vec(), + hex!("e4820001a04fff54398cad4d05ea6abfd8b0f3b4fe14c04d7ff5f5211c5b927d9cf72ac1d8").to_vec(), + hex!("f851a096d010643ca2d47412ca66898286b5f2412963b9ec051b33e570d575914c9c5ca028cd24c652989542fe89479ec6388eac4592432242af5ba97563b3ac7c71c019808080808080808080808080808080").to_vec(), + 
hex!("f90211a0bb35a84c5b1dcb78ec9d32614912c696e62df77bebf9ab326ee55b5d3acdde46a01084b30dac8df0accfcd0fd6330b7f6fc72a4651246d0694be9162151686a620a03eed50afdce7909d784c6157c445a444c806b5f23d31f3b63786f600c84a95b2a0af5232f1df6c6d41879804d081abe867002abe26ba3e5f8e0254a83a54769831a0607915fb13dd5da594256389a45007a67a7f7a86e95d38d8462792b6c98a722ea00e1260fda1730f2738c650ce2bfba83857bc10f8fb119ebc4fb39acba24e6fbaa0d11de17e417327457812675ca3b84ae8e1b64827abfe01420953697c8313d5b1a05fcaf2f7a88f76336a0c32ffc78acb87ae2005454bd25d658035331be3173b46a03f94f4952ab9e650f83cfd0e7f367b1bcc493aacf39a06f16c4a2e1b5605da48a0bdb4ec79785ca8ae22d60f1bbd42d707b4d7ec4aff231a3ebab755e315b35053a043a67c3f2bcef37c8f47a673adcb7061007a553696d1092408601c11b2e6846aa0c519d5af48cae87c7f4538845417c9735813bee892a6fe2dda79f5c414e8576aa0f7058256e09589501d7c231d739e61c84a850e139690989d24fda6058b432e98a081a52faab520978cb19ce14400dba0cd5bcdc4e5a3c0740678aa8f97ee0e5c56a0bcecc61cadeae52518e3b68a48af4b11603dfd9d99d99d7985efa6d2de44f904a02cba4accfc6f39bc5adb6d4440eb6358b4a5103ef93298e4e694f1f940f8b48280").to_vec(), + hex!("f901ae20b901aaf901a70183bb444eb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000001000000000000000000000000000100000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000010000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000080000000000000000000000000000000000000000000000002000000000000000000081000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000f89df89b94dac17f958d2ee523a2206206994597c13d831ec7f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa00000000000000000000000002e514404ff6823f1b46a8318a709251db414e5e1a000000000000000000000000055021c55847c00d764357a352e5803237d328954a0000000000000000000000000000000000000000000000000000000000201c370").to_vec(), + ]; + assert!(verify_receipt_proof(root, &proof_receipt263).is_some()); + } +} diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/serde_utils.rs b/bridges/snowbridge/parachain/primitives/beacon/src/serde_utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..07f5cbe724ed92bbda1d0cc7ded1a60c92a38cf0 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/serde_utils.rs @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use sp_core::U256; + +use core::fmt::Formatter; +use serde::{Deserialize, Deserializer}; + +// helper to deserialize arbitrary arrays like [T; N] +pub mod arrays { + use std::{convert::TryInto, marker::PhantomData}; + + use serde::{ + de::{SeqAccess, Visitor}, + ser::SerializeTuple, + Deserialize, Deserializer, Serialize, Serializer, + }; + + pub fn serialize( + data: &[T; N], + ser: S, + ) -> Result { + let mut s = ser.serialize_tuple(N)?; + for item in data { + s.serialize_element(item)?; + } + s.end() + } + + struct ArrayVisitor(PhantomData); + + impl<'de, T, const N: usize> Visitor<'de> for ArrayVisitor + where + T: Deserialize<'de>, + { + type Value = [T; N]; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str(&format!("an array of length {}", N)) + } + + #[inline] + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + // can be optimized using MaybeUninit + let mut data = Vec::with_capacity(N); + for _ in 0..N { + match (seq.next_element())? 
{ + Some(val) => data.push(val), + None => return Err(serde::de::Error::invalid_length(N, &self)), + } + } + match data.try_into() { + Ok(arr) => Ok(arr), + Err(_) => unreachable!(), + } + } + } + + pub fn deserialize<'de, D, T, const N: usize>(deserializer: D) -> Result<[T; N], D::Error> + where + D: Deserializer<'de>, + T: Deserialize<'de>, + { + deserializer.deserialize_tuple(N, ArrayVisitor::(PhantomData)) + } +} + +pub(crate) fn from_hex_to_bytes<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + + let str_without_0x = match s.strip_prefix("0x") { + Some(val) => val, + None => &s, + }; + + let hex_bytes = match hex::decode(str_without_0x) { + Ok(bytes) => bytes, + Err(e) => return Err(serde::de::Error::custom(e.to_string())), + }; + + Ok(hex_bytes) +} + +pub(crate) fn from_int_to_u256<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let number = u128::deserialize(deserializer)?; + + Ok(U256::from(number)) +} + +pub struct HexVisitor(); + +impl<'de, const LENGTH: usize> serde::de::Visitor<'de> for HexVisitor { + type Value = [u8; LENGTH]; + + fn expecting(&self, formatter: &mut Formatter) -> sp_std::fmt::Result { + formatter.write_str("a hex string with an '0x' prefix") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + let stripped = match v.strip_prefix("0x") { + Some(stripped) => stripped, + None => v, + }; + + let decoded = match hex::decode(stripped) { + Ok(decoded) => decoded, + Err(e) => return Err(serde::de::Error::custom(e.to_string())), + }; + if decoded.len() != LENGTH { + return Err(serde::de::Error::custom("publickey expected to be 48 characters")) + } + + let data: Self::Value = decoded + .try_into() + .map_err(|_e| serde::de::Error::custom("hex data has unexpected length"))?; + + Ok(data) + } +} diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/ssz.rs b/bridges/snowbridge/parachain/primitives/beacon/src/ssz.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f8b19ca8892ceceaff6e039712b67ff9cb98d2f --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/ssz.rs @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate::{ + config::{EXTRA_DATA_SIZE, FEE_RECIPIENT_SIZE, LOGS_BLOOM_SIZE, PUBKEY_SIZE, SIGNATURE_SIZE}, + types::{ + BeaconHeader, ExecutionPayloadHeader, ForkData, SigningData, SyncAggregate, SyncCommittee, + }, +}; +use byte_slice_cast::AsByteSlice; +use sp_core::H256; +use sp_std::{vec, vec::Vec}; +use ssz_rs::{ + prelude::{List, Vector}, + Bitvector, Deserialize, DeserializeError, SimpleSerialize, SimpleSerializeError, Sized, U256, +}; +use ssz_rs_derive::SimpleSerialize as SimpleSerializeDerive; + +#[derive(Default, SimpleSerializeDerive, Clone, Debug)] +pub struct SSZBeaconBlockHeader { + pub slot: u64, + pub proposer_index: u64, + pub parent_root: [u8; 32], + pub state_root: [u8; 32], + pub body_root: [u8; 32], +} + +impl From for SSZBeaconBlockHeader { + fn from(beacon_header: BeaconHeader) -> Self { + SSZBeaconBlockHeader { + slot: beacon_header.slot, + proposer_index: beacon_header.proposer_index, + parent_root: beacon_header.parent_root.to_fixed_bytes(), + state_root: beacon_header.state_root.to_fixed_bytes(), + body_root: beacon_header.body_root.to_fixed_bytes(), + } + } +} + +#[derive(Default, SimpleSerializeDerive, Clone)] +pub struct SSZSyncCommittee { + pub pubkeys: Vector, COMMITTEE_SIZE>, + pub aggregate_pubkey: 
Vector, +} + +impl From> + for SSZSyncCommittee +{ + fn from(sync_committee: SyncCommittee) -> Self { + let mut pubkeys_vec = Vec::new(); + + for pubkey in sync_committee.pubkeys.iter() { + // The only thing that can go wrong in the conversion from vec to Vector (ssz type) is + // that the Vector size is 0, or that the given data to create the Vector from does not + // match the expected size N. Because these sizes are statically checked (i.e. + // PublicKey's size is 48, and const PUBKEY_SIZE is 48, it is impossible for "try_from" + // to return an error condition. + let conv_pubkey = Vector::::try_from(pubkey.0.to_vec()) + .expect("checked statically; qed"); + + pubkeys_vec.push(conv_pubkey); + } + + let pubkeys = Vector::, { COMMITTEE_SIZE }>::try_from(pubkeys_vec) + .expect("checked statically; qed"); + + let aggregate_pubkey = + Vector::::try_from(sync_committee.aggregate_pubkey.0.to_vec()) + .expect("checked statically; qed"); + + SSZSyncCommittee { pubkeys, aggregate_pubkey } + } +} + +#[derive(Default, Debug, SimpleSerializeDerive, Clone)] +pub struct SSZSyncAggregate { + pub sync_committee_bits: Bitvector, + pub sync_committee_signature: Vector, +} + +impl + From> for SSZSyncAggregate +{ + fn from(sync_aggregate: SyncAggregate) -> Self { + SSZSyncAggregate { + sync_committee_bits: Bitvector::::deserialize( + &sync_aggregate.sync_committee_bits, + ) + .expect("checked statically; qed"), + sync_committee_signature: Vector::::try_from( + sync_aggregate.sync_committee_signature.0.to_vec(), + ) + .expect("checked statically; qed"), + } + } +} + +#[derive(Default, SimpleSerializeDerive, Clone)] +pub struct SSZForkData { + pub current_version: [u8; 4], + pub genesis_validators_root: [u8; 32], +} + +impl From for SSZForkData { + fn from(fork_data: ForkData) -> Self { + SSZForkData { + current_version: fork_data.current_version, + genesis_validators_root: fork_data.genesis_validators_root, + } + } +} + +#[derive(Default, SimpleSerializeDerive, Clone)] +pub struct SSZSigningData { + pub object_root: [u8; 32], + pub domain: [u8; 32], +} + +impl From for SSZSigningData { + fn from(signing_data: SigningData) -> Self { + SSZSigningData { + object_root: signing_data.object_root.into(), + domain: signing_data.domain.into(), + } + } +} + +#[derive(Default, SimpleSerializeDerive, Clone, Debug)] +pub struct SSZExecutionPayloadHeader { + pub parent_hash: [u8; 32], + pub fee_recipient: Vector, + pub state_root: [u8; 32], + pub receipts_root: [u8; 32], + pub logs_bloom: Vector, + pub prev_randao: [u8; 32], + pub block_number: u64, + pub gas_limit: u64, + pub gas_used: u64, + pub timestamp: u64, + pub extra_data: List, + pub base_fee_per_gas: U256, + pub block_hash: [u8; 32], + pub transactions_root: [u8; 32], + pub withdrawals_root: [u8; 32], +} + +impl TryFrom for SSZExecutionPayloadHeader { + type Error = SimpleSerializeError; + + fn try_from(payload: ExecutionPayloadHeader) -> Result { + Ok(SSZExecutionPayloadHeader { + parent_hash: payload.parent_hash.to_fixed_bytes(), + fee_recipient: Vector::::try_from( + payload.fee_recipient.to_fixed_bytes().to_vec(), + ) + .expect("checked statically; qed"), + state_root: payload.state_root.to_fixed_bytes(), + receipts_root: payload.receipts_root.to_fixed_bytes(), + // Logs bloom bytes size is not constrained, so here we do need to check the try_from + // error + logs_bloom: Vector::::try_from(payload.logs_bloom) + .map_err(|(_, err)| err)?, + prev_randao: payload.prev_randao.to_fixed_bytes(), + block_number: payload.block_number, + gas_limit: 
payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + // Extra data bytes size is not constrained, so here we do need to check the try_from + // error + extra_data: List::::try_from(payload.extra_data) + .map_err(|(_, err)| err)?, + base_fee_per_gas: U256::from_bytes_le( + payload + .base_fee_per_gas + .as_byte_slice() + .try_into() + .expect("checked in prep; qed"), + ), + block_hash: payload.block_hash.to_fixed_bytes(), + transactions_root: payload.transactions_root.to_fixed_bytes(), + withdrawals_root: payload.withdrawals_root.to_fixed_bytes(), + }) + } +} + +pub fn hash_tree_root(mut object: T) -> Result { + match object.hash_tree_root() { + Ok(node) => { + let fixed_bytes: [u8; 32] = + node.as_ref().try_into().expect("Node is a newtype over [u8; 32]; qed"); + Ok(fixed_bytes.into()) + }, + Err(err) => Err(err.into()), + } +} diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/types.rs b/bridges/snowbridge/parachain/primitives/beacon/src/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..f893551d9d1720bf12a32982c6783c457dbc92ba --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/types.rs @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound}; +use scale_info::TypeInfo; +use sp_core::{H160, H256, U256}; +use sp_runtime::RuntimeDebug; +use sp_std::{boxed::Box, prelude::*}; + +use crate::config::{PUBKEY_SIZE, SIGNATURE_SIZE}; + +#[cfg(feature = "std")] +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +#[cfg(feature = "std")] +use crate::serde_utils::HexVisitor; + +use crate::ssz::{ + hash_tree_root, SSZBeaconBlockHeader, SSZExecutionPayloadHeader, SSZForkData, SSZSigningData, + SSZSyncAggregate, SSZSyncCommittee, +}; +use ssz_rs::SimpleSerializeError; + +pub use crate::bits::decompress_sync_committee_bits; + +use crate::bls::{prepare_g1_pubkeys, prepare_milagro_pubkey, BlsError}; +use milagro_bls::PublicKey as PublicKeyPrepared; + +pub type ValidatorIndex = u64; +pub type ForkVersion = [u8; 4]; + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct ForkVersions { + pub genesis: Fork, + pub altair: Fork, + pub bellatrix: Fork, + pub capella: Fork, +} + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct Fork { + pub version: [u8; 4], + pub epoch: u64, +} + +#[derive(Copy, Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct PublicKey(pub [u8; PUBKEY_SIZE]); + +impl Default for PublicKey { + fn default() -> Self { + PublicKey([0u8; PUBKEY_SIZE]) + } +} + +impl From<[u8; PUBKEY_SIZE]> for PublicKey { + fn from(v: [u8; PUBKEY_SIZE]) -> Self { + Self(v) + } +} + +impl MaxEncodedLen for PublicKey { + fn max_encoded_len() -> usize { + PUBKEY_SIZE + } +} + +#[cfg(feature = "std")] +impl<'de> Deserialize<'de> for PublicKey { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_str(HexVisitor::()).map(|v| v.into()) + } +} + +#[cfg(feature = "std")] +impl Serialize for PublicKey { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&self.0) + } +} + +#[derive(Copy, Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct Signature(pub [u8; SIGNATURE_SIZE]); + +impl Default for Signature { + fn default() -> Self { + Signature([0u8; 
SIGNATURE_SIZE]) + } +} + +impl From<[u8; SIGNATURE_SIZE]> for Signature { + fn from(v: [u8; SIGNATURE_SIZE]) -> Self { + Self(v) + } +} + +#[cfg(feature = "std")] +impl<'de> Deserialize<'de> for Signature { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_str(HexVisitor::()).map(|v| v.into()) + } +} + +#[derive(Copy, Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)] +pub struct ExecutionHeaderState { + pub beacon_block_root: H256, + pub beacon_slot: u64, + pub block_hash: H256, + pub block_number: u64, +} + +#[derive(Copy, Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)] +pub struct FinalizedHeaderState { + pub beacon_block_root: H256, + pub beacon_slot: u64, +} + +#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug)] +pub struct ForkData { + // 1 or 0 bit, indicates whether a sync committee participated in a vote + pub current_version: [u8; 4], + pub genesis_validators_root: [u8; 32], +} + +impl ForkData { + pub fn hash_tree_root(&self) -> Result { + hash_tree_root::(self.clone().into()) + } +} + +#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug)] +pub struct SigningData { + pub object_root: H256, + pub domain: H256, +} + +impl SigningData { + pub fn hash_tree_root(&self) -> Result { + hash_tree_root::(self.clone().into()) + } +} + +/// Sync committee as it is stored in the runtime storage. +#[derive( + Encode, Decode, PartialEqNoBound, CloneNoBound, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen, +)] +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +#[codec(mel_bound())] +pub struct SyncCommittee { + #[cfg_attr(feature = "std", serde(with = "crate::serde_utils::arrays"))] + pub pubkeys: [PublicKey; COMMITTEE_SIZE], + pub aggregate_pubkey: PublicKey, +} + +impl Default for SyncCommittee { + fn default() -> Self { + SyncCommittee { + pubkeys: [Default::default(); COMMITTEE_SIZE], + aggregate_pubkey: Default::default(), + } + } +} + +impl SyncCommittee { + pub fn hash_tree_root(&self) -> Result { + hash_tree_root::>(self.clone().into()) + } +} + +/// Prepared G1 public key of sync committee as it is stored in the runtime storage. +#[derive(Clone, PartialEq, Eq, Encode, Decode, TypeInfo, MaxEncodedLen)] +pub struct SyncCommitteePrepared { + pub root: H256, + pub pubkeys: Box<[PublicKeyPrepared; COMMITTEE_SIZE]>, + pub aggregate_pubkey: PublicKeyPrepared, +} + +impl Default for SyncCommitteePrepared { + fn default() -> Self { + SyncCommitteePrepared { + root: H256::default(), + pubkeys: Box::new([PublicKeyPrepared::default(); COMMITTEE_SIZE]), + aggregate_pubkey: PublicKeyPrepared::default(), + } + } +} + +impl TryFrom<&SyncCommittee> + for SyncCommitteePrepared +{ + type Error = BlsError; + + fn try_from(sync_committee: &SyncCommittee) -> Result { + let g1_pubkeys = prepare_g1_pubkeys(&sync_committee.pubkeys)?; + let sync_committee_root = sync_committee.hash_tree_root().expect("checked statically; qed"); + + Ok(SyncCommitteePrepared:: { + pubkeys: g1_pubkeys.try_into().expect("checked statically; qed"), + aggregate_pubkey: prepare_milagro_pubkey(&sync_committee.aggregate_pubkey)?, + root: sync_committee_root, + }) + } +} + +/// Beacon block header as it is stored in the runtime storage. The block root is the +/// Merkleization of a BeaconHeader. 
+#[derive( + Copy, Clone, Default, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen, +)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct BeaconHeader { + // The slot for which this block is created. Must be greater than the slot of the block defined + // by parent root. + pub slot: u64, + // The index of the validator that proposed the block. + pub proposer_index: ValidatorIndex, + // The block root of the parent block, forming a block chain. + pub parent_root: H256, + // The hash root of the post state of running the state transition through this block. + pub state_root: H256, + // The hash root of the beacon block body + pub body_root: H256, +} + +impl BeaconHeader { + pub fn hash_tree_root(&self) -> Result { + hash_tree_root::((*self).into()) + } +} + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] +#[cfg_attr( + feature = "std", + derive(Deserialize), + serde( + try_from = "IntermediateSyncAggregate", + deny_unknown_fields, + bound(serialize = ""), + bound(deserialize = "") + ) +)] +#[codec(mel_bound())] +pub struct SyncAggregate { + pub sync_committee_bits: [u8; COMMITTEE_BITS_SIZE], + pub sync_committee_signature: Signature, +} + +impl Default + for SyncAggregate +{ + fn default() -> Self { + SyncAggregate { + sync_committee_bits: [0; COMMITTEE_BITS_SIZE], + sync_committee_signature: Default::default(), + } + } +} + +impl + SyncAggregate +{ + pub fn hash_tree_root(&self) -> Result { + hash_tree_root::>(self.clone().into()) + } +} + +/// Serde deserialization helper for SyncAggregate +#[cfg(feature = "std")] +#[derive(Deserialize)] +struct IntermediateSyncAggregate { + #[cfg_attr(feature = "std", serde(deserialize_with = "crate::serde_utils::from_hex_to_bytes"))] + pub sync_committee_bits: Vec, + pub sync_committee_signature: Signature, +} + +#[cfg(feature = "std")] +impl + TryFrom for SyncAggregate +{ + type Error = String; + + fn try_from(other: IntermediateSyncAggregate) -> Result { + Ok(Self { + sync_committee_bits: other + .sync_committee_bits + .try_into() + .map_err(|_| "unexpected length".to_owned())?, + sync_committee_signature: other.sync_committee_signature, + }) + } +} + +/// ExecutionPayloadHeader +/// +#[derive( + Default, Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, +)] +#[cfg_attr( + feature = "std", + derive(Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +#[codec(mel_bound())] +pub struct ExecutionPayloadHeader { + pub parent_hash: H256, + pub fee_recipient: H160, + pub state_root: H256, + pub receipts_root: H256, + #[cfg_attr(feature = "std", serde(deserialize_with = "crate::serde_utils::from_hex_to_bytes"))] + pub logs_bloom: Vec, + pub prev_randao: H256, + pub block_number: u64, + pub gas_limit: u64, + pub gas_used: u64, + pub timestamp: u64, + #[cfg_attr(feature = "std", serde(deserialize_with = "crate::serde_utils::from_hex_to_bytes"))] + pub extra_data: Vec, + #[cfg_attr(feature = "std", serde(deserialize_with = "crate::serde_utils::from_int_to_u256"))] + pub base_fee_per_gas: U256, + pub block_hash: H256, + pub transactions_root: H256, + pub withdrawals_root: H256, +} + +impl ExecutionPayloadHeader { + pub fn hash_tree_root(&self) -> Result { + hash_tree_root::(self.clone().try_into()?) 
+ } +} + +#[derive( + Default, + Encode, + Decode, + CloneNoBound, + PartialEqNoBound, + RuntimeDebugNoBound, + TypeInfo, + MaxEncodedLen, +)] +pub struct CompactExecutionHeader { + pub parent_hash: H256, + #[codec(compact)] + pub block_number: u64, + pub state_root: H256, + pub receipts_root: H256, +} + +impl From for CompactExecutionHeader { + fn from(execution_payload: ExecutionPayloadHeader) -> Self { + Self { + parent_hash: execution_payload.parent_hash, + block_number: execution_payload.block_number, + state_root: execution_payload.state_root, + receipts_root: execution_payload.receipts_root, + } + } +} + +#[derive( + Default, + Encode, + Decode, + Copy, + Clone, + PartialEqNoBound, + RuntimeDebugNoBound, + TypeInfo, + MaxEncodedLen, +)] +pub struct CompactBeaconState { + #[codec(compact)] + pub slot: u64, + pub block_roots_root: H256, +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + #[test] + pub fn test_hash_beacon_header1() { + let hash_root = BeaconHeader { + slot: 3, + proposer_index: 2, + parent_root: hex!("796ea53efb534eab7777809cc5ee2d84e7f25024b9d0c4d7e5bcaab657e4bdbd") + .into(), + state_root: hex!("ba3ff080912be5c9c158b2e962c1b39a91bc0615762ba6fa2ecacafa94e9ae0a") + .into(), + body_root: hex!("a18d7fcefbb74a177c959160e0ee89c23546482154e6831237710414465dcae5") + .into(), + } + .hash_tree_root(); + + assert!(hash_root.is_ok()); + assert_eq!( + hash_root.unwrap(), + hex!("7d42595818709e805dd2fa710a2d2c1f62576ef1ab7273941ac9130fb94b91f7").into() + ); + } + + #[test] + pub fn test_hash_beacon_header2() { + let hash_root = BeaconHeader { + slot: 3476424, + proposer_index: 314905, + parent_root: hex!("c069d7b49cffd2b815b0fb8007eb9ca91202ea548df6f3db60000f29b2489f28") + .into(), + state_root: hex!("444d293e4533501ee508ad608783a7d677c3c566f001313e8a02ce08adf590a3") + .into(), + body_root: hex!("6508a0241047f21ba88f05d05b15534156ab6a6f8e029a9a5423da429834e04a") + .into(), + } + .hash_tree_root(); + + assert!(hash_root.is_ok()); + assert_eq!( + hash_root.unwrap(), + hex!("0aa41166ff01e58e111ac8c42309a738ab453cf8d7285ed8477b1c484acb123e").into() + ); + } + + #[test] + pub fn test_hash_fork_data() { + let hash_root = ForkData { + current_version: hex!("83f38a34"), + genesis_validators_root: hex!( + "22370bbbb358800f5711a10ea9845284272d8493bed0348cab87b8ab1e127930" + ), + } + .hash_tree_root(); + + assert!(hash_root.is_ok()); + assert_eq!( + hash_root.unwrap(), + hex!("57c12c4246bc7152b174b51920506bf943eff9c7ffa50b9533708e9cc1f680fc").into() + ); + } + + #[test] + pub fn test_hash_signing_data() { + let hash_root = SigningData { + object_root: hex!("63654cbe64fc07853f1198c165dd3d49c54fc53bc417989bbcc66da15f850c54") + .into(), + domain: hex!("037da907d1c3a03c0091b2254e1480d9b1783476e228ab29adaaa8f133e08f7a").into(), + } + .hash_tree_root(); + + assert!(hash_root.is_ok()); + assert_eq!( + hash_root.unwrap(), + hex!("b9eb2caf2d691b183c2d57f322afe505c078cd08101324f61c3641714789a54e").into() + ); + } + + #[test] + pub fn test_hash_sync_aggregate() { + let hash_root = SyncAggregate::<512, 64>{ + sync_committee_bits: hex!("cefffffefffffff767fffbedffffeffffeeffdffffdebffffff7f7dbdf7fffdffffbffcfffdff79dfffbbfefff2ffffff7ddeff7ffffc98ff7fbfffffffffff7"), + sync_committee_signature: hex!("8af1a8577bba419fe054ee49b16ed28e081dda6d3ba41651634685e890992a0b675e20f8d9f2ec137fe9eb50e838aa6117f9f5410e2e1024c4b4f0e098e55144843ce90b7acde52fe7b94f2a1037342c951dc59f501c92acf7ed944cb6d2b5f7").into(), + }.hash_tree_root(); + + assert!(hash_root.is_ok()); + assert_eq!( + 
hash_root.unwrap(), + hex!("e6dcad4f60ce9ff8a587b110facbaf94721f06cd810b6d8bf6cffa641272808d").into() + ); + } + + #[test] + pub fn test_hash_execution_payload() { + let hash_root = + ExecutionPayloadHeader{ + parent_hash: hex!("eadee5ab098dde64e9fd02ae5858064bad67064070679625b09f8d82dec183f7").into(), + fee_recipient: hex!("f97e180c050e5ab072211ad2c213eb5aee4df134").into(), + state_root: hex!("564fa064c2a324c2b5978d7fdfc5d4224d4f421a45388af1ed405a399c845dff").into(), + receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").to_vec(), + prev_randao: hex!("6bf538bdfbdf1c96ff528726a40658a91d0bda0f1351448c4c4f3604db2a0ccf").into(), + block_number: 477434, + gas_limit: 8154925, + gas_used: 0, + timestamp: 1652816940, + extra_data: vec![], + base_fee_per_gas: U256::from(7_i16), + block_hash: hex!("cd8df91b4503adb8f2f1c7a4f60e07a1f1a2cbdfa2a95bceba581f3ff65c1968").into(), + transactions_root: hex!("7ffe241ea60187fdb0187bfa22de35d1f9bed7ab061d9401fd47e34a54fbede1").into(), + withdrawals_root: hex!("28ba1834a3a7b657460ce79fa3a1d909ab8828fd557659d4d0554a9bdbc0ec30").into(), + }.hash_tree_root(); + assert!(hash_root.is_ok()); + } +} + +/// Operating modes for beacon client +#[derive(Encode, Decode, Copy, Clone, PartialEq, RuntimeDebug, TypeInfo)] +pub enum Mode { + Active, + Blocked, +} diff --git a/bridges/snowbridge/parachain/primitives/beacon/src/updates.rs b/bridges/snowbridge/parachain/primitives/beacon/src/updates.rs new file mode 100644 index 0000000000000000000000000000000000000000..9a78b4f1e2d3de4af23c27c7e4fc111a79e22dc6 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/beacon/src/updates.rs @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use codec::{Decode, Encode}; +use frame_support::{CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound}; +use scale_info::TypeInfo; +use sp_core::H256; +use sp_std::prelude::*; + +use crate::types::{BeaconHeader, ExecutionPayloadHeader, SyncAggregate, SyncCommittee}; + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] +#[cfg_attr( + feature = "std", + derive(serde::Serialize, serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct CheckpointUpdate { + pub header: BeaconHeader, + pub current_sync_committee: SyncCommittee, + pub current_sync_committee_branch: Vec, + pub validators_root: H256, + pub block_roots_root: H256, + pub block_roots_branch: Vec, +} + +impl Default for CheckpointUpdate { + fn default() -> Self { + CheckpointUpdate { + header: Default::default(), + current_sync_committee: Default::default(), + current_sync_committee_branch: Default::default(), + validators_root: Default::default(), + block_roots_root: Default::default(), + block_roots_branch: Default::default(), + } + } +} + +#[derive( + Default, Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, +)] +#[cfg_attr( + feature = "std", + 
derive(serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct Update { + /// A recent header attesting to the finalized header, using its `state_root`. + pub attested_header: BeaconHeader, + /// The signing data that the sync committee produced for this attested header, including + /// who participated in the vote and the resulting signature. + pub sync_aggregate: SyncAggregate, + /// The slot at which the sync aggregate can be found, typically attested_header.slot + 1, if + /// the next slot block was not missed. + pub signature_slot: u64, + /// The next sync committee for the next sync committee period, if present. + pub next_sync_committee_update: Option>, + /// The latest finalized header. + pub finalized_header: BeaconHeader, + /// The merkle proof testifying to the finalized header, using the `attested_header.state_root` + /// as tree root. + pub finality_branch: Vec, + /// The finalized_header's `block_roots` root in the beacon state, used for ancestry proofs. + pub block_roots_root: H256, + /// The merkle path to prove the `block_roots_root` value. + pub block_roots_branch: Vec, +} + +#[derive( + Default, Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, +)] +#[cfg_attr( + feature = "std", + derive(serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct NextSyncCommitteeUpdate { + pub next_sync_committee: SyncCommittee, + pub next_sync_committee_branch: Vec, +} + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] +#[cfg_attr( + feature = "std", + derive(serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct ExecutionHeaderUpdate { + /// Header for the beacon block containing the execution payload + pub header: BeaconHeader, + /// Proof that `header` is an ancestor of a finalized header + pub ancestry_proof: Option, + /// Execution header to be imported + pub execution_header: ExecutionPayloadHeader, + /// Merkle proof that execution payload is contained within `header` + pub execution_branch: Vec, +} + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] +#[cfg_attr( + feature = "std", + derive(serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct AncestryProof { + /// Merkle proof that `header` is an ancestor of `finalized_header` + pub header_branch: Vec, + /// Root of a finalized block that has already been imported into the light client + pub finalized_block_root: H256, +} diff --git a/bridges/snowbridge/parachain/primitives/core/Cargo.toml b/bridges/snowbridge/parachain/primitives/core/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..262fc60b0cba37ade7959aae1e60959746023326 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/core/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "snowbridge-core" +description = "Snowbridge Core" +version = "0.1.1" +authors = ["Snowfork "] +edition = "2021" +license = "Apache-2.0" + +[dependencies] +serde = { version = "1.0.188", optional = true, features = ["alloc", "derive"], default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +hex-literal = { version = "0.4.1" } + +polkadot-parachain-primitives = { path = 
"../../../../../polkadot/parachain", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } + +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } + +snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } + +ethabi = { git = "https://github.com/Snowfork/ethabi-decode.git", package = "ethabi-decode", branch = "master", default-features = false } + +[dev-dependencies] +hex = { version = "0.4.3" } + +[features] +default = ["std"] +std = [ + "codec/std", + "ethabi/std", + "frame-support/std", + "frame-system/std", + "polkadot-parachain-primitives/std", + "scale-info/std", + "serde/std", + "snowbridge-beacon-primitives/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm-builder/std", + "xcm/std", +] +serde = ["dep:serde", "scale-info/serde"] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", +] diff --git a/bridges/snowbridge/parachain/primitives/core/src/inbound.rs b/bridges/snowbridge/parachain/primitives/core/src/inbound.rs new file mode 100644 index 0000000000000000000000000000000000000000..4b04470ad02615d86f1a1c530f3cbed809649328 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/core/src/inbound.rs @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Types for representing inbound messages + +use codec::{Decode, Encode}; +use frame_support::PalletError; +use scale_info::TypeInfo; +use sp_core::{H160, H256}; +use sp_runtime::RuntimeDebug; +use sp_std::vec::Vec; + +/// A trait for verifying inbound messages from Ethereum. 
+pub trait Verifier {
+	fn verify(event: &Log, proof: &Proof) -> Result<(), VerificationError>;
+}
+
+#[derive(Clone, Encode, Decode, RuntimeDebug, PalletError, TypeInfo)]
+#[cfg_attr(feature = "std", derive(PartialEq))]
+pub enum VerificationError {
+	/// Execution header is missing
+	HeaderNotFound,
+	/// Event log was not found in the verified transaction receipt
+	LogNotFound,
+	/// Event log has an invalid format
+	InvalidLog,
+	/// Unable to verify the transaction receipt with the provided proof
+	InvalidProof,
+}
+
+pub type MessageNonce = u64;
+
+/// A bridge message from the Gateway contract on Ethereum
+#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
+pub struct Message {
+	/// Event log emitted by Gateway contract
+	pub event_log: Log,
+	/// Inclusion proof for a transaction receipt containing the event log
+	pub proof: Proof,
+}
+
+const MAX_TOPICS: usize = 4;
+
+#[derive(Clone, RuntimeDebug)]
+pub enum LogValidationError {
+	TooManyTopics,
+}
+
+/// Event log
+#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
+pub struct Log {
+	pub address: H160,
+	pub topics: Vec<H256>,
+	pub data: Vec<u8>,
+}
+
+impl Log {
+	pub fn validate(&self) -> Result<(), LogValidationError> {
+		if self.topics.len() > MAX_TOPICS {
+			return Err(LogValidationError::TooManyTopics)
+		}
+		Ok(())
+	}
+}
+
+/// Inclusion proof for a transaction receipt
+#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
+pub struct Proof {
+	// The block hash of the block in which the receipt was included.
+	pub block_hash: H256,
+	// The index of the transaction (and receipt) within the block.
+	pub tx_index: u32,
+	// Proof keys and values (receipts tree)
+	pub data: (Vec<Vec<u8>>, Vec<Vec<u8>>),
+}
diff --git a/bridges/snowbridge/parachain/primitives/core/src/lib.rs b/bridges/snowbridge/parachain/primitives/core/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ecbc3bb365fce14d233ae17ca36cf690c135af13
--- /dev/null
+++ b/bridges/snowbridge/parachain/primitives/core/src/lib.rs
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork
+//! # Core
+//!
+//!
Common traits and types +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(test)] +mod tests; + +pub mod inbound; +pub mod operating_mode; +pub mod outbound; +pub mod pricing; +pub mod ringbuffer; + +pub use polkadot_parachain_primitives::primitives::{ + Id as ParaId, IsSystem, Sibling as SiblingParaId, +}; +pub use ringbuffer::{RingBufferMap, RingBufferMapImpl}; +pub use sp_core::U256; + +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::Contains; +use hex_literal::hex; +use scale_info::TypeInfo; +use sp_core::H256; +use sp_io::hashing::keccak_256; +use sp_runtime::{traits::AccountIdConversion, RuntimeDebug}; +use sp_std::prelude::*; +use xcm::prelude::{ + Junction::Parachain, + Junctions::{Here, X1}, + MultiLocation, +}; +use xcm_builder::{DescribeAllTerminal, DescribeFamily, DescribeLocation, HashedDescription}; + +/// The ID of an agent contract +pub type AgentId = H256; +pub use operating_mode::BasicOperatingMode; + +pub use pricing::{PricingParameters, Rewards}; + +pub fn sibling_sovereign_account(para_id: ParaId) -> T::AccountId +where + T: frame_system::Config, +{ + SiblingParaId::from(para_id).into_account_truncating() +} + +pub fn sibling_sovereign_account_raw(para_id: ParaId) -> [u8; 32] { + SiblingParaId::from(para_id).into_account_truncating() +} + +pub struct AllowSiblingsOnly; +impl Contains for AllowSiblingsOnly { + fn contains(location: &MultiLocation) -> bool { + matches!(location, MultiLocation { parents: 1, interior: X1(Parachain(_)) }) + } +} + +pub fn gwei(x: u128) -> U256 { + U256::from(1_000_000_000u128).saturating_mul(x.into()) +} + +pub fn meth(x: u128) -> U256 { + U256::from(1_000_000_000_000_000u128).saturating_mul(x.into()) +} + +pub fn eth(x: u128) -> U256 { + U256::from(1_000_000_000_000_000_000u128).saturating_mul(x.into()) +} + +pub const ROC: u128 = 1_000_000_000_000; + +/// Identifier for a message channel +#[derive( + Clone, Copy, Encode, Decode, PartialEq, Eq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo, +)] +pub struct ChannelId([u8; 32]); + +/// Deterministically derive a ChannelId for a sibling parachain +/// Generator: keccak256("para" + big_endian_bytes(para_id)) +/// +/// The equivalent generator on the Solidity side is in +/// contracts/src/Types.sol:into(). 
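For a concrete instance of the generator described above (the implementation follows immediately below): sibling parachain 1000 yields keccak256(b"para" ++ 0x000003e8), the value pinned by the `generate_channel_id` test later in this diff. A small self-contained check, assuming only `sp_io` and `hex-literal`, both already dependencies of this crate:

use hex_literal::hex;
use sp_io::hashing::keccak_256;

#[test]
fn channel_id_preimage_for_para_1000() {
	// keccak256("para" ++ big_endian(1000)) -- the same preimage built by
	// derive_channel_id_for_sibling below.
	let preimage: Vec<u8> = b"para".iter().copied().chain(1000u32.to_be_bytes()).collect();
	assert_eq!(
		keccak_256(&preimage),
		hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539")
	);
}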
+fn derive_channel_id_for_sibling(para_id: ParaId) -> ChannelId {
+	let para_id: u32 = para_id.into();
+	let para_id_bytes: [u8; 4] = para_id.to_be_bytes();
+	let prefix: [u8; 4] = *b"para";
+	let preimage: Vec<u8> = prefix.into_iter().chain(para_id_bytes).collect();
+	keccak_256(&preimage).into()
+}
+
+impl ChannelId {
+	pub const fn new(id: [u8; 32]) -> Self {
+		ChannelId(id)
+	}
+}
+
+impl From<ParaId> for ChannelId {
+	fn from(value: ParaId) -> Self {
+		derive_channel_id_for_sibling(value)
+	}
+}
+
+impl From<[u8; 32]> for ChannelId {
+	fn from(value: [u8; 32]) -> Self {
+		ChannelId(value)
+	}
+}
+
+impl From<ChannelId> for [u8; 32] {
+	fn from(value: ChannelId) -> Self {
+		value.0
+	}
+}
+
+impl<'a> From<&'a [u8; 32]> for ChannelId {
+	fn from(value: &'a [u8; 32]) -> Self {
+		ChannelId(*value)
+	}
+}
+
+impl From<H256> for ChannelId {
+	fn from(value: H256) -> Self {
+		ChannelId(value.into())
+	}
+}
+
+impl AsRef<[u8]> for ChannelId {
+	fn as_ref(&self) -> &[u8] {
+		&self.0
+	}
+}
+
+#[derive(Clone, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+pub struct Channel {
+	/// ID of the agent contract deployed on Ethereum
+	pub agent_id: AgentId,
+	/// ID of the parachain who will receive or send messages using this channel
+	pub para_id: ParaId,
+}
+
+pub trait StaticLookup {
+	/// Type to lookup from.
+	type Source;
+	/// Type to lookup into.
+	type Target;
+	/// Attempt a lookup.
+	fn lookup(s: Self::Source) -> Option<Self::Target>;
+}
+
+/// Channel for high-priority governance commands
+pub const PRIMARY_GOVERNANCE_CHANNEL: ChannelId =
+	ChannelId::new(hex!("0000000000000000000000000000000000000000000000000000000000000001"));
+
+/// Channel for lower-priority governance commands
+pub const SECONDARY_GOVERNANCE_CHANNEL: ChannelId =
+	ChannelId::new(hex!("0000000000000000000000000000000000000000000000000000000000000002"));
+
+pub struct DescribeHere;
+impl DescribeLocation for DescribeHere {
+	fn describe_location(l: &MultiLocation) -> Option<Vec<u8>> {
+		match (l.parents, l.interior) {
+			(0, Here) => Some(Vec::<u8>::new().encode()),
+			_ => None,
+		}
+	}
+}
+
+/// Creates an AgentId from a MultiLocation. An AgentId is a unique mapping to an Agent contract on
+/// Ethereum which acts as the sovereign account for the MultiLocation.
+pub type AgentIdOf = HashedDescription<H256, (DescribeHere, DescribeFamily<DescribeAllTerminal>)>;
diff --git a/bridges/snowbridge/parachain/primitives/core/src/operating_mode.rs b/bridges/snowbridge/parachain/primitives/core/src/operating_mode.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9894e587ef5e7ff31d19a0b5593c3c78cb7c1f99
--- /dev/null
+++ b/bridges/snowbridge/parachain/primitives/core/src/operating_mode.rs
@@ -0,0 +1,25 @@
+use codec::{Decode, Encode, MaxEncodedLen};
+use scale_info::TypeInfo;
+use sp_runtime::RuntimeDebug;
+
+/// Basic operating modes for a bridges module (Normal/Halted).
+#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub enum BasicOperatingMode {
+	/// Normal mode, when all operations are allowed.
+	Normal,
+	/// The pallet is halted. All non-governance operations are disabled.
+ Halted, +} + +impl Default for BasicOperatingMode { + fn default() -> Self { + Self::Normal + } +} + +impl BasicOperatingMode { + pub fn is_halted(&self) -> bool { + *self == BasicOperatingMode::Halted + } +} diff --git a/bridges/snowbridge/parachain/primitives/core/src/outbound.rs b/bridges/snowbridge/parachain/primitives/core/src/outbound.rs new file mode 100644 index 0000000000000000000000000000000000000000..bce123878d3a456fc8b50f841cd64516a5d58dee --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/core/src/outbound.rs @@ -0,0 +1,413 @@ +use codec::{Decode, Encode}; +use frame_support::PalletError; +use scale_info::TypeInfo; +use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; +use sp_core::{RuntimeDebug, H256}; +pub use v1::{AgentExecuteCommand, Command, Initializer, Message, OperatingMode, QueuedMessage}; + +/// Enqueued outbound messages need to be versioned to prevent data corruption +/// or loss after forkless runtime upgrades +#[derive(Encode, Decode, TypeInfo, Clone, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub enum VersionedQueuedMessage { + V1(QueuedMessage), +} + +impl TryFrom for QueuedMessage { + type Error = (); + fn try_from(x: VersionedQueuedMessage) -> Result { + use VersionedQueuedMessage::*; + match x { + V1(x) => Ok(x), + } + } +} + +impl> From for VersionedQueuedMessage { + fn from(x: T) -> Self { + VersionedQueuedMessage::V1(x.into()) + } +} + +mod v1 { + use crate::{pricing::UD60x18, ChannelId}; + use codec::{Decode, Encode}; + use ethabi::Token; + use scale_info::TypeInfo; + use sp_core::{RuntimeDebug, H160, H256, U256}; + use sp_std::{borrow::ToOwned, vec, vec::Vec}; + + /// A message which can be accepted by implementations of `/[`SendMessage`\]` + #[derive(Encode, Decode, TypeInfo, Clone, RuntimeDebug)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub struct Message { + /// ID for this message. One will be automatically generated if not provided. + /// + /// When this message is created from an XCM message, the ID should be extracted + /// from the `SetTopic` instruction. + /// + /// The ID plays no role in bridge consensus, and is purely meant for message tracing. + pub id: Option, + /// The message channel ID + pub channel_id: ChannelId, + /// The stable ID for a receiving gateway contract + pub command: Command, + } + + /// The operating mode of Channels and Gateway contract on Ethereum. + #[derive(Copy, Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)] + pub enum OperatingMode { + /// Normal operations. Allow sending and receiving messages. + Normal, + /// Reject outbound messages. This allows receiving governance messages but does now allow + /// enqueuing of new messages from the Ethereum side. This can be used to close off an + /// deprecated channel or pause the bridge for upgrade operations. 
+ RejectingOutboundMessages, + } + + /// A command which is executable by the Gateway contract on Ethereum + #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub enum Command { + /// Execute a sub-command within an agent for a consensus system in Polkadot + AgentExecute { + /// The ID of the agent + agent_id: H256, + /// The sub-command to be executed + command: AgentExecuteCommand, + }, + /// Upgrade the Gateway contract + Upgrade { + /// Address of the new implementation contract + impl_address: H160, + /// Codehash of the implementation contract + impl_code_hash: H256, + /// Optionally invoke an initializer in the implementation contract + initializer: Option, + }, + /// Create an agent representing a consensus system on Polkadot + CreateAgent { + /// The ID of the agent, derived from the `MultiLocation` of the consensus system on + /// Polkadot + agent_id: H256, + }, + /// Create bidirectional messaging channel to a parachain + CreateChannel { + /// The ID of the channel + channel_id: ChannelId, + /// The agent ID of the parachain + agent_id: H256, + /// Initial operating mode + mode: OperatingMode, + }, + /// Update the configuration of a channel + UpdateChannel { + /// The ID of the channel + channel_id: ChannelId, + /// The new operating mode + mode: OperatingMode, + }, + /// Set the global operating mode of the Gateway contract + SetOperatingMode { + /// The new operating mode + mode: OperatingMode, + }, + /// Transfer ether from an agent contract to a recipient account + TransferNativeFromAgent { + /// The agent ID + agent_id: H256, + /// The recipient of the ether + recipient: H160, + /// The amount to transfer + amount: u128, + }, + /// Set token fees of the Gateway contract + SetTokenTransferFees { + /// The fee(DOT) for the cost of creating asset on AssetHub + create_asset_xcm: u128, + /// The fee(DOT) for the cost of sending asset on AssetHub + transfer_asset_xcm: u128, + /// The fee(Ether) for register token to discourage spamming + register_token: U256, + }, + /// Set pricing parameters + SetPricingParameters { + // ETH/DOT exchange rate + exchange_rate: UD60x18, + // Cost of delivering a message from Ethereum to BridgeHub, in ROC/KSM/DOT + delivery_cost: u128, + }, + } + + impl Command { + /// Compute the enum variant index + pub fn index(&self) -> u8 { + match self { + Command::AgentExecute { .. } => 0, + Command::Upgrade { .. } => 1, + Command::CreateAgent { .. } => 2, + Command::CreateChannel { .. } => 3, + Command::UpdateChannel { .. } => 4, + Command::SetOperatingMode { .. } => 5, + Command::TransferNativeFromAgent { .. } => 6, + Command::SetTokenTransferFees { .. } => 7, + Command::SetPricingParameters { .. } => 8, + } + } + + /// ABI-encode the Command. + pub fn abi_encode(&self) -> Vec { + match self { + Command::AgentExecute { agent_id, command } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(agent_id.as_bytes().to_owned()), + Token::Bytes(command.abi_encode()), + ])]), + Command::Upgrade { impl_address, impl_code_hash, initializer, .. 
} => + ethabi::encode(&[Token::Tuple(vec![ + Token::Address(*impl_address), + Token::FixedBytes(impl_code_hash.as_bytes().to_owned()), + initializer + .clone() + .map_or(Token::Bytes(vec![]), |i| Token::Bytes(i.params)), + ])]), + Command::CreateAgent { agent_id } => + ethabi::encode(&[Token::Tuple(vec![Token::FixedBytes( + agent_id.as_bytes().to_owned(), + )])]), + Command::CreateChannel { channel_id, agent_id, mode } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(channel_id.as_ref().to_owned()), + Token::FixedBytes(agent_id.as_bytes().to_owned()), + Token::Uint(U256::from((*mode) as u64)), + ])]), + Command::UpdateChannel { channel_id, mode } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(channel_id.as_ref().to_owned()), + Token::Uint(U256::from((*mode) as u64)), + ])]), + Command::SetOperatingMode { mode } => + ethabi::encode(&[Token::Tuple(vec![Token::Uint(U256::from((*mode) as u64))])]), + Command::TransferNativeFromAgent { agent_id, recipient, amount } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(agent_id.as_bytes().to_owned()), + Token::Address(*recipient), + Token::Uint(U256::from(*amount)), + ])]), + Command::SetTokenTransferFees { + create_asset_xcm, + transfer_asset_xcm, + register_token, + } => ethabi::encode(&[Token::Tuple(vec![ + Token::Uint(U256::from(*create_asset_xcm)), + Token::Uint(U256::from(*transfer_asset_xcm)), + Token::Uint(*register_token), + ])]), + Command::SetPricingParameters { exchange_rate, delivery_cost } => + ethabi::encode(&[Token::Tuple(vec![ + Token::Uint(exchange_rate.clone().into_inner()), + Token::Uint(U256::from(*delivery_cost)), + ])]), + } + } + } + + /// Representation of a call to the initializer of an implementation contract. + /// The initializer has the following ABI signature: `initialize(bytes)`. + #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] + pub struct Initializer { + /// ABI-encoded params of type `bytes` to pass to the initializer + pub params: Vec, + /// The initializer is allowed to consume this much gas at most. + pub maximum_required_gas: u64, + } + + /// A Sub-command executable within an agent + #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub enum AgentExecuteCommand { + /// Transfer ERC20 tokens + TransferToken { + /// Address of the ERC20 token + token: H160, + /// The recipient of the tokens + recipient: H160, + /// The amount of tokens to transfer + amount: u128, + }, + } + + impl AgentExecuteCommand { + fn index(&self) -> u8 { + match self { + AgentExecuteCommand::TransferToken { .. 
} => 0, + } + } + + /// ABI-encode the sub-command + pub fn abi_encode(&self) -> Vec { + match self { + AgentExecuteCommand::TransferToken { token, recipient, amount } => + ethabi::encode(&[ + Token::Uint(self.index().into()), + Token::Bytes(ethabi::encode(&[ + Token::Address(*token), + Token::Address(*recipient), + Token::Uint(U256::from(*amount)), + ])), + ]), + } + } + } + + /// Message which is awaiting processing in the MessageQueue pallet + #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub struct QueuedMessage { + /// Message ID + pub id: H256, + /// Channel ID + pub channel_id: ChannelId, + /// Command to execute in the Gateway contract + pub command: Command, + } +} + +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +/// Fee for delivering message +pub struct Fee +where + Balance: BaseArithmetic + Unsigned + Copy, +{ + /// Fee to cover cost of processing the message locally + pub local: Balance, + /// Fee to cover cost processing the message remotely + pub remote: Balance, +} + +impl Fee +where + Balance: BaseArithmetic + Unsigned + Copy, +{ + pub fn total(&self) -> Balance { + self.local.saturating_add(self.remote) + } +} + +impl From<(Balance, Balance)> for Fee +where + Balance: BaseArithmetic + Unsigned + Copy, +{ + fn from((local, remote): (Balance, Balance)) -> Self { + Self { local, remote } + } +} + +/// A trait for sending messages to Ethereum +pub trait SendMessage: SendMessageFeeProvider { + type Ticket: Clone + Encode + Decode; + + /// Validate an outbound message and return a tuple: + /// 1. Ticket for submitting the message + /// 2. Delivery fee + fn validate( + message: &Message, + ) -> Result<(Self::Ticket, Fee<::Balance>), SendError>; + + /// Submit the message ticket for eventual delivery to Ethereum + fn deliver(ticket: Self::Ticket) -> Result; +} + +pub trait Ticket: Encode + Decode + Clone { + fn message_id(&self) -> H256; +} + +/// A trait for getting the local costs associated with sending a message. +pub trait SendMessageFeeProvider { + type Balance: BaseArithmetic + Unsigned + Copy; + + /// The local component of the message processing fees in native currency + fn local_fee() -> Self::Balance; +} + +/// Reasons why sending to Ethereum could not be initiated +#[derive(Copy, Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, PalletError, TypeInfo)] +pub enum SendError { + /// Message is too large to be safely executed on Ethereum + MessageTooLarge, + /// The bridge has been halted for maintenance + Halted, + /// Invalid Channel + InvalidChannel, +} + +pub trait GasMeter { + /// All the gas used for submitting a message to Ethereum, minus the cost of dispatching + /// the command within the message + const MAXIMUM_BASE_GAS: u64; + + fn maximum_gas_used_at_most(command: &Command) -> u64 { + Self::MAXIMUM_BASE_GAS + Self::maximum_dispatch_gas_used_at_most(command) + } + + /// Measures the maximum amount of gas a command payload will require to dispatch, AFTER + /// validation & verification. 
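As a worked example of the split between base and dispatch gas (the trait method is declared next, and the concrete figures come from `ConstantGasMeter` below): a `CreateAgent` command is budgeted 185_000 base gas plus 275_000 dispatch gas, 460_000 in total. A minimal sketch under those figures:

use snowbridge_core::outbound::{Command, ConstantGasMeter, GasMeter};
use sp_core::H256;

#[test]
fn create_agent_gas_budget() {
	// Default trait impl: MAXIMUM_BASE_GAS + maximum_dispatch_gas_used_at_most(&cmd).
	let cmd = Command::CreateAgent { agent_id: H256::zero() };
	assert_eq!(ConstantGasMeter::maximum_gas_used_at_most(&cmd), 185_000 + 275_000);
}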
+ fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64; +} + +/// A meter that assigns a constant amount of gas for the execution of a command +/// +/// The gas figures are extracted from this report: +/// > forge test --match-path test/Gateway.t.sol --gas-report +/// +/// A healthy buffer is added on top of these figures to account for: +/// * The EIP-150 63/64 rule +/// * Future EVM upgrades that may increase gas cost +pub struct ConstantGasMeter; + +impl GasMeter for ConstantGasMeter { + // The base transaction cost, which includes: + // 21_000 transaction cost, roughly worst case 64_000 for calldata, and 100_000 + // for message verification + const MAXIMUM_BASE_GAS: u64 = 185_000; + + fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64 { + match command { + Command::CreateAgent { .. } => 275_000, + Command::CreateChannel { .. } => 100_000, + Command::UpdateChannel { .. } => 50_000, + Command::TransferNativeFromAgent { .. } => 60_000, + Command::SetOperatingMode { .. } => 40_000, + Command::AgentExecute { command, .. } => match command { + // Execute IERC20.transferFrom + // + // Worst-case assumptions are important: + // * No gas refund for clearing storage slot of source account in ERC20 contract + // * Assume dest account in ERC20 contract does not yet have a storage slot + // * ERC20.transferFrom possibly does other business logic besides updating balances + AgentExecuteCommand::TransferToken { .. } => 100_000, + }, + Command::Upgrade { initializer, .. } => { + let initializer_max_gas = match *initializer { + Some(Initializer { maximum_required_gas, .. }) => maximum_required_gas, + None => 0, + }; + // total maximum gas must also include the gas used for updating the proxy before + // the the initializer is called. + 50_000 + initializer_max_gas + }, + Command::SetTokenTransferFees { .. } => 60_000, + Command::SetPricingParameters { .. 
} => 60_000, + } + } +} + +impl GasMeter for () { + const MAXIMUM_BASE_GAS: u64 = 1; + + fn maximum_dispatch_gas_used_at_most(_: &Command) -> u64 { + 1 + } +} + +pub const ETHER_DECIMALS: u8 = 18; diff --git a/bridges/snowbridge/parachain/primitives/core/src/pricing.rs b/bridges/snowbridge/parachain/primitives/core/src/pricing.rs new file mode 100644 index 0000000000000000000000000000000000000000..33aeda6d15c4701ce4594b1e783d7aa69f84cc8e --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/core/src/pricing.rs @@ -0,0 +1,67 @@ +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_arithmetic::traits::{BaseArithmetic, Unsigned, Zero}; +use sp_core::U256; +use sp_runtime::{FixedU128, RuntimeDebug}; +use sp_std::prelude::*; + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct PricingParameters { + /// ETH/DOT exchange rate + pub exchange_rate: FixedU128, + /// Relayer rewards + pub rewards: Rewards, + /// Ether (wei) fee per gas unit + pub fee_per_gas: U256, +} + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct Rewards { + /// Local reward in DOT + pub local: Balance, + /// Remote reward in ETH (wei) + pub remote: U256, +} + +#[derive(RuntimeDebug)] +pub struct InvalidPricingParameters; + +impl PricingParameters +where + Balance: BaseArithmetic + Unsigned + Copy, +{ + pub fn validate(&self) -> Result<(), InvalidPricingParameters> { + if self.exchange_rate == FixedU128::zero() { + return Err(InvalidPricingParameters) + } + if self.fee_per_gas == U256::zero() { + return Err(InvalidPricingParameters) + } + if self.rewards.local.is_zero() { + return Err(InvalidPricingParameters) + } + if self.rewards.remote.is_zero() { + return Err(InvalidPricingParameters) + } + Ok(()) + } +} + +/// Holder for fixed point number implemented in +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub struct UD60x18(U256); + +impl From for UD60x18 { + fn from(value: FixedU128) -> Self { + // Both FixedU128 and UD60x18 have 18 decimal places + let inner: u128 = value.into_inner(); + UD60x18(inner.into()) + } +} + +impl UD60x18 { + pub fn into_inner(self) -> U256 { + self.0 + } +} diff --git a/bridges/snowbridge/parachain/primitives/core/src/ringbuffer.rs b/bridges/snowbridge/parachain/primitives/core/src/ringbuffer.rs new file mode 100644 index 0000000000000000000000000000000000000000..dcee20359a78ebc27fd5ce5c1479f8241d8583e0 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/core/src/ringbuffer.rs @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use codec::FullCodec; +use core::{cmp::Ord, marker::PhantomData, ops::Add}; +use frame_support::storage::{types::QueryKindTrait, StorageMap, StorageValue}; +use sp_core::{Get, GetDefault}; +use sp_runtime::traits::{One, Zero}; + +/// Trait object presenting the ringbuffer interface. +pub trait RingBufferMap +where + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, +{ + /// Insert a map entry. 
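For intuition about the bounded behaviour implemented further below in `RingBufferMapImpl::insert`: the write index advances by one and wraps to zero once it would reach the bound, so at most `bound` keys are retained and the oldest entry is evicted. A standalone sketch of just that index rule (not the trait method declared next):

// Mirrors the index update in RingBufferMapImpl::insert below (Index = u32 here).
fn next_index(current: u32, bound: u32) -> u32 {
	if current + 1 >= bound {
		0
	} else {
		current + 1
	}
}

#[test]
fn index_wraps_at_bound() {
	// With bound = 3 the index cycles 1, 2, 0, 1, ...
	assert_eq!(next_index(0, 3), 1);
	assert_eq!(next_index(1, 3), 2);
	assert_eq!(next_index(2, 3), 0);
}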
+	fn insert(k: Key, v: Value);
+
+	/// Check if map contains a key
+	fn contains_key(k: Key) -> bool;
+
+	/// Get the value of the key
+	fn get(k: Key) -> QueryKind::Query;
+}
+
+pub struct RingBufferMapImpl<Index, B, CurrentIndex, Intermediate, M, QueryKind>(
+	PhantomData<(Index, B, CurrentIndex, Intermediate, M, QueryKind)>,
+);
+
+/// Ringbuffer implementation based on `RingBufferTransient`
+impl<Key, Value, Index, B, CurrentIndex, Intermediate, M, QueryKind>
+	RingBufferMap<Key, Value, QueryKind>
+	for RingBufferMapImpl<Index, B, CurrentIndex, Intermediate, M, QueryKind>
+where
+	Key: FullCodec + Clone,
+	Value: FullCodec,
+	Index: Ord + One + Zero + Add<Output = Index> + Copy + FullCodec + Eq,
+	B: Get<Index>,
+	CurrentIndex: StorageValue<Index, Query = Index>,
+	Intermediate: StorageMap<Index, Key, Query = Key>,
+	M: StorageMap<Key, Value, Query = QueryKind::Query>,
+	QueryKind: QueryKindTrait<Value, GetDefault>,
+{
+	/// Insert a map entry.
+	fn insert(k: Key, v: Value) {
+		let bound = B::get();
+		let mut current_index = CurrentIndex::get();
+
+		// Adding one here as bound denotes number of items but our index starts with zero.
+		if (current_index + Index::one()) >= bound {
+			current_index = Index::zero();
+		} else {
+			current_index = current_index + Index::one();
+		}
+
+		// Deleting earlier entry if it exists
+		if Intermediate::contains_key(current_index) {
+			let older_key = Intermediate::get(current_index);
+			M::remove(older_key);
+		}
+
+		Intermediate::insert(current_index, k.clone());
+		CurrentIndex::set(current_index);
+		M::insert(k, v);
+	}
+
+	/// Check if map contains a key
+	fn contains_key(k: Key) -> bool {
+		M::contains_key(k)
+	}
+
+	/// Get the value associated with key
+	fn get(k: Key) -> M::Query {
+		M::get(k)
+	}
+}
diff --git a/bridges/snowbridge/parachain/primitives/core/src/tests.rs b/bridges/snowbridge/parachain/primitives/core/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..725fff1a9c941ae2e270d591aabd80fc8fa95b54
--- /dev/null
+++ b/bridges/snowbridge/parachain/primitives/core/src/tests.rs
@@ -0,0 +1,13 @@
+use crate::{ChannelId, ParaId};
+use hex_literal::hex;
+
+const EXPECT_CHANNEL_ID: [u8; 32] =
+	hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539");
+
+// The Solidity equivalent code is tested in Gateway.t.sol:testDeriveChannelID
+#[test]
+fn generate_channel_id() {
+	let para_id: ParaId = 1000.into();
+	let channel_id: ChannelId = para_id.into();
+	assert_eq!(channel_id, EXPECT_CHANNEL_ID.into());
+}
diff --git a/bridges/snowbridge/parachain/primitives/core/tests/fixtures/packet.scale b/bridges/snowbridge/parachain/primitives/core/tests/fixtures/packet.scale
new file mode 100644
index 0000000000000000000000000000000000000000..d5f6696ea69fffd243e7b5b8eb5cef9a7943802c
Binary files /dev/null and b/bridges/snowbridge/parachain/primitives/core/tests/fixtures/packet.scale differ
diff --git a/bridges/snowbridge/parachain/primitives/core/tests/mod.rs b/bridges/snowbridge/parachain/primitives/core/tests/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2da5d2df182e9411c48427ef09048b83e18aca55
--- /dev/null
+++ b/bridges/snowbridge/parachain/primitives/core/tests/mod.rs
@@ -0,0 +1,14 @@
+#[cfg(test)]
+mod tests {
+	use frame_support::traits::Contains;
+	use snowbridge_core::AllowSiblingsOnly;
+	use xcm::prelude::{Junction::Parachain, Junctions::X1, MultiLocation};
+
+	#[test]
+	fn allow_siblings_predicate_only_allows_siblings() {
+		let sibling = MultiLocation::new(1, X1(Parachain(1000)));
+		let child = MultiLocation::new(0, X1(Parachain(1000)));
+		assert!(AllowSiblingsOnly::contains(&sibling), "Sibling returns true.");
+		assert!(!AllowSiblingsOnly::contains(&child), "Child returns false.");
+	}
+}
diff --git a/bridges/snowbridge/parachain/primitives/ethereum/.cargo/config.toml
b/bridges/snowbridge/parachain/primitives/ethereum/.cargo/config.toml new file mode 100644 index 0000000000000000000000000000000000000000..4ec2f3b8620332641758c95f2c1c685e261cba42 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/ethereum/.cargo/config.toml @@ -0,0 +1,2 @@ +[target.wasm32-unknown-unknown] +runner = 'wasm-bindgen-test-runner' diff --git a/bridges/snowbridge/parachain/primitives/ethereum/Cargo.toml b/bridges/snowbridge/parachain/primitives/ethereum/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..1eff2632b099836614dee1d4c8976599162af967 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/ethereum/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "snowbridge-ethereum" +description = "Snowbridge Ethereum" +version = "0.1.0" +authors = ["Snowfork "] +edition = "2021" +license = "Apache-2.0" + +[dependencies] +serde = { version = "1.0.188", optional = true, features = ["derive"] } +serde-big-array = { version = "0.3.2", optional = true, features = ["const-generics"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +ethbloom = { version = "0.13.0", default-features = false } +ethereum-types = { version = "0.14.1", default-features = false, features = ["codec", "rlp", "serialize"] } +hex = { package = "rustc-hex", version = "2.1.0", default-features = false } +hex-literal = { version = "0.4.1", default-features = false } +parity-bytes = { version = "0.1.2", default-features = false } +rlp = { version = "0.5.2", default-features = false } + +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } + +ethabi = { git = "https://github.com/snowfork/ethabi-decode.git", package = "ethabi-decode", branch = "master", default-features = false } + +[dev-dependencies] +wasm-bindgen-test = "0.3.19" +rand = "0.8.5" +serde_json = "1.0.96" + +[features] +default = ["std"] +expensive_tests = [] +std = [ + "codec/std", + "ethabi/std", + "ethbloom/std", + "ethereum-types/std", + "hex/std", + "parity-bytes/std", + "rlp/std", + "scale-info/std", + "serde", + "serde-big-array", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/bridges/snowbridge/parachain/primitives/ethereum/src/header.rs b/bridges/snowbridge/parachain/primitives/ethereum/src/header.rs new file mode 100644 index 0000000000000000000000000000000000000000..f0b51f8c79de8fa3f1b37205c38d8a8640771f0c --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/ethereum/src/header.rs @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use codec::{Decode, Encode}; +use ethbloom::Bloom as EthBloom; +use hex_literal::hex; +use parity_bytes::Bytes; +use rlp::RlpStream; +use scale_info::TypeInfo; +use sp_io::hashing::keccak_256; +use sp_runtime::RuntimeDebug; +use sp_std::{convert::TryInto, prelude::*}; + +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "std")] +use serde_big_array::BigArray; + +use ethereum_types::{Address, H256, H64, U256}; + +use crate::{mpt, receipt}; + +/// Complete block header id. 
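+/// Pairs the block number with the block hash, uniquely identifying a header.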
+#[derive(Clone, Copy, Default, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct HeaderId { + /// Header number. + pub number: u64, + /// Header hash. + pub hash: H256, +} + +const EMPTY_OMMERS_HASH: [u8; 32] = + hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"); + +/// An Ethereum block header. +#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct Header { + /// Parent block hash. + pub parent_hash: H256, + /// Block timestamp. + pub timestamp: u64, + /// Block number. + pub number: u64, + /// Block author. + pub author: Address, + + /// Transactions root. + pub transactions_root: H256, + /// Block ommers hash. + pub ommers_hash: H256, + /// Block extra data. + pub extra_data: Bytes, + + /// State root. + pub state_root: H256, + /// Block receipts root. + pub receipts_root: H256, + /// Block bloom. + pub logs_bloom: Bloom, + /// Gas used for contracts execution. + pub gas_used: U256, + /// Block gas limit. + pub gas_limit: U256, + + /// Block difficulty. + pub difficulty: U256, + /// Vector of post-RLP-encoded fields. + pub seal: Vec, + + // Base fee per gas (EIP-1559), only in headers from the London hardfork onwards. + pub base_fee: Option, +} + +impl Header { + /// Compute hash of this header (keccak of the RLP with seal). + pub fn compute_hash(&self) -> H256 { + keccak_256(&self.rlp(true)).into() + } + + /// Compute hash of the truncated header i.e. excluding seal. + pub fn compute_partial_hash(&self) -> H256 { + keccak_256(&self.rlp(false)).into() + } + + pub fn check_receipt_proof( + &self, + proof: &[Vec], + ) -> Option> { + match self.apply_merkle_proof(proof) { + Some((root, data)) if root == self.receipts_root => Some(rlp::decode(&data)), + Some((_, _)) => None, + None => None, + } + } + + pub fn apply_merkle_proof(&self, proof: &[Vec]) -> Option<(H256, Vec)> { + let mut iter = proof.iter().rev(); + let first_bytes = match iter.next() { + Some(b) => b, + None => return None, + }; + let item_to_prove: mpt::ShortNode = rlp::decode(first_bytes).ok()?; + + let final_hash: Option<[u8; 32]> = iter.try_fold(keccak_256(first_bytes), |acc, x| { + let node: Box = x.as_slice().try_into().ok()?; + if (*node).contains_hash(acc.into()) { + return Some(keccak_256(x)) + } + None + }); + + final_hash.map(|hash| (hash.into(), item_to_prove.value)) + } + + pub fn mix_hash(&self) -> Option { + let bytes: Bytes = self.decoded_seal_field(0, 32)?; + let size = bytes.len(); + let mut mix_hash = [0u8; 32]; + for i in 0..size { + mix_hash[31 - i] = bytes[size - 1 - i]; + } + Some(mix_hash.into()) + } + + pub fn nonce(&self) -> Option { + let bytes: Bytes = self.decoded_seal_field(1, 8)?; + let size = bytes.len(); + let mut nonce = [0u8; 8]; + for i in 0..size { + nonce[7 - i] = bytes[size - 1 - i]; + } + Some(nonce.into()) + } + + pub fn has_ommers(&self) -> bool { + self.ommers_hash != EMPTY_OMMERS_HASH.into() + } + + fn decoded_seal_field(&self, index: usize, max_len: usize) -> Option { + let bytes: Bytes = rlp::decode(self.seal.get(index)?).ok()?; + if bytes.len() > max_len { + return None + } + Some(bytes) + } + + /// Returns header RLP with or without seals. 
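+	///
+	/// The thirteen classic header fields are appended in their canonical order, the seal
+	/// fields are appended only when `with_seal` is true, and the optional `base_fee` is
+	/// appended last.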
+ /// For EIP-1559 baseFee addition refer to: + /// + fn rlp(&self, with_seal: bool) -> Bytes { + let mut s = RlpStream::new(); + + let stream_length_without_seal = if self.base_fee.is_some() { 14 } else { 13 }; + + if with_seal { + s.begin_list(stream_length_without_seal + self.seal.len()); + } else { + s.begin_list(stream_length_without_seal); + } + + s.append(&self.parent_hash); + s.append(&self.ommers_hash); + s.append(&self.author); + s.append(&self.state_root); + s.append(&self.transactions_root); + s.append(&self.receipts_root); + s.append(&EthBloom::from(self.logs_bloom.0)); + s.append(&self.difficulty); + s.append(&self.number); + s.append(&self.gas_limit); + s.append(&self.gas_used); + s.append(&self.timestamp); + s.append(&self.extra_data); + + if with_seal { + for b in &self.seal { + s.append_raw(b, 1); + } + } + + if let Some(base_fee) = self.base_fee { + s.append(&base_fee); + } + + s.out().to_vec() + } +} + +/// Logs bloom. +#[derive(Clone, Debug, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct Bloom(#[cfg_attr(feature = "std", serde(with = "BigArray"))] [u8; 256]); + +impl<'a> From<&'a [u8; 256]> for Bloom { + fn from(buffer: &'a [u8; 256]) -> Bloom { + Bloom(*buffer) + } +} + +impl PartialEq for Bloom { + fn eq(&self, other: &Bloom) -> bool { + self.0.iter().zip(other.0.iter()).all(|(l, r)| l == r) + } +} + +impl Default for Bloom { + fn default() -> Self { + Bloom([0; 256]) + } +} + +impl rlp::Decodable for Bloom { + fn decode(rlp: &rlp::Rlp) -> Result { + let v: Vec = rlp.as_val()?; + match v.len() { + 256 => { + let mut bytes = [0u8; 256]; + bytes.copy_from_slice(&v); + Ok(Self(bytes)) + }, + _ => Err(rlp::DecoderError::Custom("Expected 256 bytes")), + } + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn bloom_decode_rlp() { + let raw_bloom = hex!( + " + b901000420000000000000000000008002000000000001000000000001000000000000000000 + 0000000000000000000000000002000000080000000000000000200000000000000000000000 + 0000080000002200000000004000100000000000000000000000000000000000000000000000 + 0000000000000004000000001000010000000000080000000000400000000000000000000000 + 0000080000004000000000020000000000020000000000000000000000000000000000000000 + 0000040000000000020000000001000000000000000000000000000010000000020000200000 + 10200000000000010000000000000000000000000000000000000010000000 + " + ); + let expected_bytes = &raw_bloom[3..]; + let bloom: Bloom = rlp::decode(&raw_bloom).unwrap(); + assert_eq!(bloom.0, expected_bytes); + } + + #[test] + fn header_compute_hash_poa() { + // PoA header + let header = Header { + parent_hash: Default::default(), + timestamp: 0, + number: 0, + author: Default::default(), + transactions_root: hex!( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ) + .into(), + ommers_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") + .into(), + extra_data: vec![], + state_root: hex!("eccf6b74c2bcbe115c71116a23fe963c54406010c244d9650526028ad3e32cce") + .into(), + receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + .into(), + logs_bloom: Default::default(), + gas_used: Default::default(), + gas_limit: 0x222222.into(), + difficulty: 0x20000.into(), + seal: vec![vec![0x80], { + let mut vec = vec![0xb8, 0x41]; + vec.resize(67, 0); + vec + }], + base_fee: None, + }; + assert_eq!( + header.compute_hash().as_bytes(), + hex!("9ff57c7fa155853586382022f0982b71c51fa313a0942f8c456300896643e890"), + ); + } + + 
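+	// Editor's sketch, not part of the original changeset: `has_ommers` simply compares
+	// against EMPTY_OMMERS_HASH, which is the keccak-256 hash of an empty RLP list (0xc0),
+	// i.e. the ommers hash of a block without uncles.
+	#[test]
+	fn header_without_ommers_is_detected() {
+		let header = Header { ommers_hash: EMPTY_OMMERS_HASH.into(), ..Default::default() };
+		assert!(!header.has_ommers());
+		assert_eq!(keccak_256(&rlp::encode_list::<u8, u8>(&[])), EMPTY_OMMERS_HASH);
+	}
+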
#[test] + fn header_compute_hash_pow() { + // + let nonce = hex!("6935bbe7b63c4f8e").to_vec(); + let mix_hash = + hex!("be3adfb0087be62b28b716e2cdf3c79329df5caa04c9eee035d35b5d52102815").to_vec(); + let header = Header { + parent_hash: hex!("bede0bddd6f32c895fc505ffe0c39d9bde58e9a5272f31a3dee448b796edcbe3") + .into(), + timestamp: 1603160977, + number: 11090290, + author: hex!("ea674fdde714fd979de3edf0f56aa9716b898ec8").into(), + transactions_root: hex!( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ) + .into(), + ommers_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") + .into(), + extra_data: hex!("65746865726d696e652d61736961312d33").to_vec(), + state_root: hex!("7dcb8aca872b712bad81df34a89d4efedc293566ffc3eeeb5cbcafcc703e42c9") + .into(), + receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + .into(), + logs_bloom: Default::default(), + gas_used: 0.into(), + gas_limit: 0xbe8c19.into(), + difficulty: 0xbc140caa61087i64.into(), + seal: vec![rlp::encode(&mix_hash).to_vec(), rlp::encode(&nonce).to_vec()], + base_fee: None, + }; + assert_eq!( + header.compute_hash().as_bytes(), + hex!("0f9bdc91c2e0140acb873330742bda8c8181fa3add91fe7ae046251679cedef7"), + ); + } + + #[test] + fn header_pow_seal_fields_extracted_correctly() { + let nonce: H64 = hex!("6935bbe7b63c4f8e").into(); + let mix_hash: H256 = + hex!("be3adfb0087be62b28b716e2cdf3c79329df5caa04c9eee035d35b5d52102815").into(); + let header = Header { + seal: vec![ + rlp::encode(&mix_hash.0.to_vec()).to_vec(), + rlp::encode(&nonce.0.to_vec()).to_vec(), + ], + ..Default::default() + }; + + assert_eq!(header.nonce().unwrap(), nonce); + assert_eq!(header.mix_hash().unwrap(), mix_hash); + } + + #[test] + fn header_pow_seal_fields_return_none_for_invalid_values() { + let nonce = hex!("696935bbe7b63c4f8e").to_vec(); + let mix_hash = + hex!("bebe3adfb0087be62b28b716e2cdf3c79329df5caa04c9eee035d35b5d52102815").to_vec(); + let mut header = Header { + seal: vec![rlp::encode(&mix_hash).to_vec(), rlp::encode(&nonce).to_vec()], + ..Default::default() + }; + assert_eq!(header.nonce(), None); + assert_eq!(header.mix_hash(), None); + + header.seal = Vec::new(); + assert_eq!(header.nonce(), None); + assert_eq!(header.mix_hash(), None); + } + + #[test] + fn header_check_receipt_proof() { + let header = Header { + receipts_root: hex!("fd5e397a84884641f53c496804f24b5276cbb8c5c9cfc2342246be8e3ce5ad02") + .into(), + ..Default::default() + }; + + // Valid proof + let proof_receipt5 = vec!( + hex!("f90131a0b5ba404eb5a6a88e56579f4d37ef9813b5ad7f86f0823ff3b407ac5a6bb465eca0398ead2655e78e03c127ce22c5830e90f18b1601ec055f938336c084feb915a9a026d322c26e46c50942c1aabde50e36df5cde572aed650ce73ea3182c6e90a02ca00600a356135f4db1db0d9842264cdff2652676f881669e91e316c0b6dd783011a0837f1deb4075336da320388c1edfffc56c448a43f4a5ba031300d32a7b509fc5a01c3ac82fd65b4aba7f9afaf604d9c82ec7e2deb573a091ae235751bc5c0c288da05d454159d9071b0f68b6e0503d290f23ac7602c1db0c569dee4605d8f5298f09a00bbed10350ec954448df795f6fd46e3faefc800ede061b3840eedc6e2b07a74da0acb02d26a3650f2064c14a435fdf1f668d8655daf455ebdf671713a7c089b3898080808080808080").to_vec(), + 
hex!("f901f180a00046a08d4f0bdbdc6b31903086ce323182bce6725e7d9415f7ff91ee8f4820bda0e7cd26ad5f3d2771e4b5ab788e268a14a10209f94ee918eb6c829d21d3d11c1da00d4a56d9e9a6751874fd86c7e3cb1c6ad5a848da62751325f478978a00ea966ea064b81920c8f04a8a1e21f53a8280e739fbb7b00b2ab92493ca3f610b70e8ac85a0b1040ed4c55a73178b76abb16f946ce5bebd6b93ab873c83327df54047d12c27a0de6485e9ac58dc6e2b04b4bb38f562684f0b1a2ee586cc11079e7d9a9dc40b32a0d394f4d3532c3124a65fa36e69147e04fd20453a72ee9c50660f17e13ce9df48a066501003fc3e3478efd2803cd0eded6bbe9243ca01ba754d6327071ddbcbc649a0b2684e518f325fee39fc8ea81b68f3f5c785be00d087f3bed8857ae2ee8da26ea071060a5c52042e8d7ce21092f8ecf06053beb9a0b773a6f91a30c4220aa276b2a0fc22436632574ccf6043d0986dede27ea94c9ca9a3bb5ec03ce776a4ddef24a9a05a8a1d6698c4e7d8cc3a2506cb9b12ea9a079c9c7099bc919dc804033cc556e4a0170c468b0716fd36d161f0bf05875f15756a2976de92f9efe7716320509d79c9a0182f909a90cab169f3efb62387f9cccdd61440acc4deec42f68a4f7ca58075c7a055cf0e9202ac75689b76318f1171f3a44465eddc06aae0713bfb6b34fdd27b7980").to_vec(), + hex!("f904de20b904daf904d701830652f0b9010004200000000000000000000080020000000000010000000000010000000000000000000000000000000000000000000002000000080000000000000000200000000000000000000000000008000000220000000000400010000000000000000000000000000000000000000000000000000000000000040000000010000100000000000800000000004000000000000000000000000000080000004000000000020000000000020000000000000000000000000000000000000000000004000000000002000000000100000000000000000000000000001000000002000020000010200000000000010000000000000000000000000000000000000010000000f903ccf89b9421130f34829b4c343142047a28ce96ec07814b15f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa00000000000000000000000007d843005c7433c16b27ff939cb37471541561ebda0000000000000000000000000e9c1281aae66801fa35ec404d5f2aea393ff6988a000000000000000000000000000000000000000000000000000000005d09b7380f89b9421130f34829b4c343142047a28ce96ec07814b15f863a08c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925a00000000000000000000000007d843005c7433c16b27ff939cb37471541561ebda00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da0ffffffffffffffffffffffffffffffffffffffffffffffffffffffcc840c6920f89b94c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa0000000000000000000000000e9c1281aae66801fa35ec404d5f2aea393ff6988a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da000000000000000000000000000000000000000000000000003e973b5a5d1078ef87994e9c1281aae66801fa35ec404d5f2aea393ff6988e1a01c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1b840000000000000000000000000000000000000000000000000000001f1420ad1d40000000000000000000000000000000000000000000000014ad400879d159a38f8fc94e9c1281aae66801fa35ec404d5f2aea393ff6988f863a0d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488db88000000000000000000000000000000000000000000000000000000005d415f3320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e973b5a5d1078ef87a94c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f842a07fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da000000000000000000000000000000000000000000000000003e973b5a5d1078e").to_vec(), + ); + 
assert!(header.check_receipt_proof(&proof_receipt5).is_some()); + + // Various invalid proofs + let proof_empty: Vec> = vec![]; + let proof_missing_full_node = vec![proof_receipt5[0].clone(), proof_receipt5[2].clone()]; + let proof_missing_short_node1 = vec![proof_receipt5[0].clone(), proof_receipt5[1].clone()]; + let proof_missing_short_node2 = vec![proof_receipt5[0].clone()]; + let proof_invalid_encoding = vec![proof_receipt5[2][2..].to_vec()]; + let proof_no_full_node = vec![proof_receipt5[2].clone(), proof_receipt5[2].clone()]; + assert!(header.check_receipt_proof(&proof_empty).is_none()); + assert!(header.check_receipt_proof(&proof_missing_full_node).is_none()); + + assert_eq!( + header.check_receipt_proof(&proof_missing_short_node1), + Some(Err(rlp::DecoderError::Custom("Unsupported receipt type"))) + ); + + assert_eq!( + header.check_receipt_proof(&proof_missing_short_node2), + Some(Err(rlp::DecoderError::Custom("Unsupported receipt type"))) + ); + + assert!(header.check_receipt_proof(&proof_invalid_encoding).is_none()); + assert!(header.check_receipt_proof(&proof_no_full_node).is_none()); + } + + #[test] + fn header_check_receipt_proof_with_intermediate_short_node() { + let header = Header { + receipts_root: hex!("d128e3a57142d2bf15bc0cbcac7ad54f40750d571b5c3097e425882c10c9ba66") + .into(), + ..Default::default() + }; + + let proof_receipt263 = vec![ + hex!("f90131a00d3cb8d3f57ac1c0e12918a2ebe0cafed8c273577b9dd73e7ed1079b403ef494a0678b9835b834f8a287c0dd33a8fca9146e456ca688555ed4ec1361a2180b778da0fe42da181a46677a043b3d9d4b8bb05a6a17b7b5c010c17e7c1d31cfb7c4f911a0c89f0e2c53241cdb578e1f2b4caf6ba36e00500bdc57fecd66b84a6a58394c19a086c3c1fae5a0575940b5d38e111c469d07883106c26856f3ef608469a2081f13a06c5992ff00aab6226a70a032fd2f571ba22f797321f45e2daa73020d638d21b0a050861e9503ef68728f6c90a44f7fe1bceb2a9bdab6957bbe7136166bd849561ea006aa6eaca8a07e57176e9aa41e6a09edfb7678d1a112404e0ec779d7e567e82ea0bb0b430d303ba21b0af11c487b8a218bd75db54c98940b3f11bad8ff47cad3ef8080808080808080").to_vec(), + hex!("f871a0246de222036ee6a03329b0105da0a6b3f916fc95a9ed5a403a581a0c4d74242ca0ac108a49a88b57a05ac34a108b39f1e45f6f167f2b9fbc8d52fb58e2e5a6af1ea0fcfe07ac2ccd3c28b6eab68d1bce112f6f6dbd9023e4ec3c05b96615aa803d798080808080808080808080808080").to_vec(), + hex!("e4820001a04fff54398cad4d05ea6abfd8b0f3b4fe14c04d7ff5f5211c5b927d9cf72ac1d8").to_vec(), + hex!("f851a096d010643ca2d47412ca66898286b5f2412963b9ec051b33e570d575914c9c5ca028cd24c652989542fe89479ec6388eac4592432242af5ba97563b3ac7c71c019808080808080808080808080808080").to_vec(), + 
hex!("f90211a0bb35a84c5b1dcb78ec9d32614912c696e62df77bebf9ab326ee55b5d3acdde46a01084b30dac8df0accfcd0fd6330b7f6fc72a4651246d0694be9162151686a620a03eed50afdce7909d784c6157c445a444c806b5f23d31f3b63786f600c84a95b2a0af5232f1df6c6d41879804d081abe867002abe26ba3e5f8e0254a83a54769831a0607915fb13dd5da594256389a45007a67a7f7a86e95d38d8462792b6c98a722ea00e1260fda1730f2738c650ce2bfba83857bc10f8fb119ebc4fb39acba24e6fbaa0d11de17e417327457812675ca3b84ae8e1b64827abfe01420953697c8313d5b1a05fcaf2f7a88f76336a0c32ffc78acb87ae2005454bd25d658035331be3173b46a03f94f4952ab9e650f83cfd0e7f367b1bcc493aacf39a06f16c4a2e1b5605da48a0bdb4ec79785ca8ae22d60f1bbd42d707b4d7ec4aff231a3ebab755e315b35053a043a67c3f2bcef37c8f47a673adcb7061007a553696d1092408601c11b2e6846aa0c519d5af48cae87c7f4538845417c9735813bee892a6fe2dda79f5c414e8576aa0f7058256e09589501d7c231d739e61c84a850e139690989d24fda6058b432e98a081a52faab520978cb19ce14400dba0cd5bcdc4e5a3c0740678aa8f97ee0e5c56a0bcecc61cadeae52518e3b68a48af4b11603dfd9d99d99d7985efa6d2de44f904a02cba4accfc6f39bc5adb6d4440eb6358b4a5103ef93298e4e694f1f940f8b48280").to_vec(), + hex!("f901ae20b901aaf901a70183bb444eb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000001000000000000000000000000000100000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000010000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000080000000000000000000000000000000000000000000000002000000000000000000081000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000f89df89b94dac17f958d2ee523a2206206994597c13d831ec7f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa00000000000000000000000002e514404ff6823f1b46a8318a709251db414e5e1a000000000000000000000000055021c55847c00d764357a352e5803237d328954a0000000000000000000000000000000000000000000000000000000000201c370").to_vec(), + ]; + assert!(header.check_receipt_proof(&proof_receipt263).is_some()); + } +} diff --git a/bridges/snowbridge/parachain/primitives/ethereum/src/lib.rs b/bridges/snowbridge/parachain/primitives/ethereum/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a10ea9abb7723180321e546e9e177e32685d94b --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/ethereum/src/lib.rs @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod header; +pub mod log; +pub mod mpt; +pub mod receipt; + +pub use ethereum_types::{Address, H160, H256, H64, U256}; + +pub use header::{Bloom, Header, HeaderId}; +pub use log::Log; +pub use receipt::Receipt; + +#[derive(Debug)] +pub enum DecodeError { + // Unexpected RLP data + InvalidRLP(rlp::DecoderError), + // Data does not match expected ABI + InvalidABI(ethabi::Error), + // Invalid message payload + InvalidPayload, +} + +impl From for DecodeError { + fn from(err: rlp::DecoderError) -> Self { + DecodeError::InvalidRLP(err) + } +} + +impl From for DecodeError { + fn from(err: ethabi::Error) -> Self { + DecodeError::InvalidABI(err) + } +} diff --git a/bridges/snowbridge/parachain/primitives/ethereum/src/log.rs b/bridges/snowbridge/parachain/primitives/ethereum/src/log.rs new file mode 100644 index 0000000000000000000000000000000000000000..7b8e35bb1133ec105cf8eaf080aadb2f55b7e02b --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/ethereum/src/log.rs @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: 
Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use codec::{Decode, Encode}; +use ethereum_types::{H160, H256}; +use sp_std::prelude::*; + +#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq)] +pub struct Log { + pub address: H160, + pub topics: Vec, + pub data: Vec, +} + +impl rlp::Decodable for Log { + /// We need to implement rlp::Decodable manually as the derive macro RlpDecodable + /// didn't seem to generate the correct code for parsing our logs. + fn decode(rlp: &rlp::Rlp) -> Result { + let mut iter = rlp.iter(); + + let address: H160 = match iter.next() { + Some(data) => data.as_val()?, + None => return Err(rlp::DecoderError::Custom("Expected log address")), + }; + + let topics: Vec = match iter.next() { + Some(data) => data.as_list()?, + None => return Err(rlp::DecoderError::Custom("Expected log topics")), + }; + + let data: Vec = match iter.next() { + Some(data) => data.data()?.to_vec(), + None => return Err(rlp::DecoderError::Custom("Expected log data")), + }; + + Ok(Self { address, topics, data }) + } +} + +#[cfg(test)] +mod tests { + + use super::Log; + use hex_literal::hex; + + const RAW_LOG: [u8; 605] = hex!( + " + f9025a941cfd66659d44cfe2e627c5742ba7477a3284cffae1a0266413be5700ce8dd5ac6b9a7dfb + abe99b3e45cae9a68ac2757858710b401a38b9022000000000000000000000000000000000000000 + 00000000000000000000000060000000000000000000000000000000000000000000000000000000 + 00000000c00000000000000000000000000000000000000000000000000000000000000100000000 + 00000000000000000000000000000000000000000000000000000000283163466436363635394434 + 34636665324536323763353734324261373437376133323834634666410000000000000000000000 + 00000000000000000000000000000000000000000000000000000000000000000000000000000000 + 000000000773656e6445544800000000000000000000000000000000000000000000000000000000 + 00000000000000000000000000000000000000000000000000000001000000000000000000000000 + 00cffeaaf7681c89285d65cfbe808b80e50269657300000000000000000000000000000000000000 + 000000000000000000000000a0000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000a000000 + 00000000000000000000000000000000000000000000000000000000020000000000000000000000 + 00000000000000000000000000000000000000002f3146524d4d3850456957585961783772705336 + 5834585a5831614141785357783143724b5479725659685632346667000000000000000000000000 + 0000000000 + " + ); + + #[test] + fn decode_log() { + let log: Log = rlp::decode(&RAW_LOG).unwrap(); + assert_eq!(log.address.as_bytes(), hex!["1cfd66659d44cfe2e627c5742ba7477a3284cffa"]); + assert_eq!( + log.topics[0].as_bytes(), + hex!["266413be5700ce8dd5ac6b9a7dfbabe99b3e45cae9a68ac2757858710b401a38"] + ); + } +} diff --git a/bridges/snowbridge/parachain/primitives/ethereum/src/mpt.rs b/bridges/snowbridge/parachain/primitives/ethereum/src/mpt.rs new file mode 100644 index 0000000000000000000000000000000000000000..9a2dae486dcc05ee5c078e0794ee2d27193eb207 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/ethereum/src/mpt.rs @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Helper types to work with Ethereum's Merkle Patricia Trie nodes + +use ethereum_types::H256; +use sp_std::{convert::TryFrom, prelude::*}; + +pub trait Node { + fn contains_hash(&self, hash: H256) -> bool; +} + +impl TryFrom<&[u8]> for Box { + type Error = rlp::DecoderError; + + fn try_from(bytes: &[u8]) -> Result, Self::Error> { + let rlp = rlp::Rlp::new(bytes); + match rlp.item_count()? 
{ + 2 => { + let node: ShortNode = rlp.as_val()?; + Ok(Box::new(node)) + }, + 17 => { + let node: FullNode = rlp.as_val()?; + Ok(Box::new(node)) + }, + _ => Err(rlp::DecoderError::Custom("Invalid number of list elements")), + } + } +} + +/// Intermediate trie node with children (refers to node with same name in Geth). +/// This struct only handles the proof representation, i.e. a child is either empty +/// or a 32-byte hash of its subtree. +pub struct FullNode { + pub children: Vec>, +} + +impl rlp::Decodable for FullNode { + fn decode(rlp: &rlp::Rlp) -> Result { + let children: Vec> = rlp + .iter() + .map(|item| { + let v: Vec = item.as_val()?; + match v.len() { + 0 => Ok(None), + 32 => { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(&v); + Ok(Some(bytes.into())) + }, + _ => Err(rlp::DecoderError::Custom("Expected 32-byte hash or empty child")), + } + }) + .collect::>()?; + + Ok(Self { children }) + } +} + +impl Node for FullNode { + fn contains_hash(&self, hash: H256) -> bool { + self.children.iter().any(|h| Some(hash) == *h) + } +} + +/// Trie node where `value` is either the RLP-encoded item we're +/// proving or an intermediate hash (refers to node with same name in Geth) +/// Proof verification should return `value`. `key` is an implementation +/// detail of the trie. +pub struct ShortNode { + pub key: Vec, + pub value: Vec, +} + +impl rlp::Decodable for ShortNode { + fn decode(rlp: &rlp::Rlp) -> Result { + let mut iter = rlp.iter(); + + let key: Vec = match iter.next() { + Some(data) => data.as_val()?, + None => return Err(rlp::DecoderError::Custom("Expected key bytes")), + }; + + let value: Vec = match iter.next() { + Some(data) => data.as_val()?, + None => return Err(rlp::DecoderError::Custom("Expected value bytes")), + }; + + Ok(Self { key, value }) + } +} + +impl Node for ShortNode { + fn contains_hash(&self, hash: H256) -> bool { + self.value == hash.0 + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use hex_literal::hex; + + const RAW_PROOF: [&[u8]; 3] = [ + &hex!("f90131a0b5ba404eb5a6a88e56579f4d37ef9813b5ad7f86f0823ff3b407ac5a6bb465eca0398ead2655e78e03c127ce22c5830e90f18b1601ec055f938336c084feb915a9a026d322c26e46c50942c1aabde50e36df5cde572aed650ce73ea3182c6e90a02ca00600a356135f4db1db0d9842264cdff2652676f881669e91e316c0b6dd783011a0837f1deb4075336da320388c1edfffc56c448a43f4a5ba031300d32a7b509fc5a01c3ac82fd65b4aba7f9afaf604d9c82ec7e2deb573a091ae235751bc5c0c288da05d454159d9071b0f68b6e0503d290f23ac7602c1db0c569dee4605d8f5298f09a00bbed10350ec954448df795f6fd46e3faefc800ede061b3840eedc6e2b07a74da0acb02d26a3650f2064c14a435fdf1f668d8655daf455ebdf671713a7c089b3898080808080808080"), + 
&hex!("f901f180a00046a08d4f0bdbdc6b31903086ce323182bce6725e7d9415f7ff91ee8f4820bda0e7cd26ad5f3d2771e4b5ab788e268a14a10209f94ee918eb6c829d21d3d11c1da00d4a56d9e9a6751874fd86c7e3cb1c6ad5a848da62751325f478978a00ea966ea064b81920c8f04a8a1e21f53a8280e739fbb7b00b2ab92493ca3f610b70e8ac85a0b1040ed4c55a73178b76abb16f946ce5bebd6b93ab873c83327df54047d12c27a0de6485e9ac58dc6e2b04b4bb38f562684f0b1a2ee586cc11079e7d9a9dc40b32a0d394f4d3532c3124a65fa36e69147e04fd20453a72ee9c50660f17e13ce9df48a066501003fc3e3478efd2803cd0eded6bbe9243ca01ba754d6327071ddbcbc649a0b2684e518f325fee39fc8ea81b68f3f5c785be00d087f3bed8857ae2ee8da26ea071060a5c52042e8d7ce21092f8ecf06053beb9a0b773a6f91a30c4220aa276b2a0fc22436632574ccf6043d0986dede27ea94c9ca9a3bb5ec03ce776a4ddef24a9a05a8a1d6698c4e7d8cc3a2506cb9b12ea9a079c9c7099bc919dc804033cc556e4a0170c468b0716fd36d161f0bf05875f15756a2976de92f9efe7716320509d79c9a0182f909a90cab169f3efb62387f9cccdd61440acc4deec42f68a4f7ca58075c7a055cf0e9202ac75689b76318f1171f3a44465eddc06aae0713bfb6b34fdd27b7980"), + &hex!("f904de20b904daf904d701830652f0b9010004200000000000000000000080020000000000010000000000010000000000000000000000000000000000000000000002000000080000000000000000200000000000000000000000000008000000220000000000400010000000000000000000000000000000000000000000000000000000000000040000000010000100000000000800000000004000000000000000000000000000080000004000000000020000000000020000000000000000000000000000000000000000000004000000000002000000000100000000000000000000000000001000000002000020000010200000000000010000000000000000000000000000000000000010000000f903ccf89b9421130f34829b4c343142047a28ce96ec07814b15f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa00000000000000000000000007d843005c7433c16b27ff939cb37471541561ebda0000000000000000000000000e9c1281aae66801fa35ec404d5f2aea393ff6988a000000000000000000000000000000000000000000000000000000005d09b7380f89b9421130f34829b4c343142047a28ce96ec07814b15f863a08c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925a00000000000000000000000007d843005c7433c16b27ff939cb37471541561ebda00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da0ffffffffffffffffffffffffffffffffffffffffffffffffffffffcc840c6920f89b94c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa0000000000000000000000000e9c1281aae66801fa35ec404d5f2aea393ff6988a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da000000000000000000000000000000000000000000000000003e973b5a5d1078ef87994e9c1281aae66801fa35ec404d5f2aea393ff6988e1a01c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1b840000000000000000000000000000000000000000000000000000001f1420ad1d40000000000000000000000000000000000000000000000014ad400879d159a38f8fc94e9c1281aae66801fa35ec404d5f2aea393ff6988f863a0d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488db88000000000000000000000000000000000000000000000000000000005d415f3320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e973b5a5d1078ef87a94c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f842a07fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da000000000000000000000000000000000000000000000000003e973b5a5d1078e"), + ]; + + #[test] + fn 
decode_full_node() { + let node1: FullNode = rlp::decode(RAW_PROOF[0]).unwrap(); + let node2: FullNode = rlp::decode(RAW_PROOF[1]).unwrap(); + assert_eq!(node1.children.len(), 17); + assert_eq!(node2.children.len(), 17); + assert_eq!(node1.children.iter().filter(|c| c.is_none()).count(), 8); + assert_eq!(node2.children.iter().filter(|c| c.is_none()).count(), 2); + + let result: Result = rlp::decode(RAW_PROOF[2]); + assert!(result.is_err()); + } + + #[test] + fn decode_short_node() { + // key + item value + let node: ShortNode = rlp::decode(RAW_PROOF[2]).unwrap(); + assert_eq!(node.key, vec![32]); + assert!(!node.value.is_empty()); + + // key + item hash + let node: ShortNode = rlp::decode(&hex!( + "e4820001a04fff54398cad4d05ea6abfd8b0f3b4fe14c04d7ff5f5211c5b927d9cf72ac1d8" + )) + .unwrap(); + assert_eq!(node.key, vec![0, 1]); + assert_eq!( + node.value, + hex!("4fff54398cad4d05ea6abfd8b0f3b4fe14c04d7ff5f5211c5b927d9cf72ac1d8").to_vec() + ); + } +} diff --git a/bridges/snowbridge/parachain/primitives/ethereum/src/receipt.rs b/bridges/snowbridge/parachain/primitives/ethereum/src/receipt.rs new file mode 100644 index 0000000000000000000000000000000000000000..665a93dbb1e213c0752cf4b64dfea5469a7513bd --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/ethereum/src/receipt.rs @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate::{Bloom, Log}; +use codec::{Decode, Encode}; +use sp_runtime::RuntimeDebug; +use sp_std::prelude::*; + +#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug)] +pub struct Receipt { + pub post_state_or_status: Vec, + pub cumulative_gas_used: u64, + pub bloom: Bloom, + pub logs: Vec, +} + +impl Receipt { + pub fn contains_log(&self, log: &Log) -> bool { + self.logs.iter().any(|l| l == log) + } + + fn decode_list(rlp: &rlp::Rlp) -> Result { + let mut iter = rlp.iter(); + + let post_state_or_status: Vec = match iter.next() { + Some(data) => data.as_val()?, + None => return Err(rlp::DecoderError::Custom("Expected receipt post state or status")), + }; + + let cumulative_gas_used: u64 = match iter.next() { + Some(data) => data.as_val()?, + None => return Err(rlp::DecoderError::Custom("Expected receipt cumulative gas used")), + }; + + let bloom: Bloom = match iter.next() { + Some(data) => data.as_val()?, + None => return Err(rlp::DecoderError::Custom("Expected receipt bloom")), + }; + + let logs: Vec = match iter.next() { + Some(data) => data.as_list()?, + None => return Err(rlp::DecoderError::Custom("Expected receipt logs")), + }; + + Ok(Self { post_state_or_status, cumulative_gas_used, bloom, logs }) + } +} + +impl rlp::Decodable for Receipt { + fn decode(rlp: &rlp::Rlp) -> Result { + if rlp.is_data() { + // Typed receipt + let data = rlp.as_raw(); + match data[0] { + // 1 = EIP-2930, 2 = EIP-1559 + 1 | 2 => { + let receipt_rlp = &rlp::Rlp::new(&data[1..]); + if !receipt_rlp.is_list() { + return Err(rlp::DecoderError::RlpExpectedToBeList) + } + Self::decode_list(&rlp::Rlp::new(&data[1..])) + }, + _ => Err(rlp::DecoderError::Custom("Unsupported receipt type")), + } + } else if rlp.is_list() { + // Legacy receipt + Self::decode_list(rlp) + } else { + Err(rlp::DecoderError::RlpExpectedToBeList) + } + } +} + +#[cfg(test)] +mod tests { + + use super::Receipt; + use hex_literal::hex; + + const RAW_RECEIPT: [u8; 1242] = hex!( + " + f904d701830652f0b901000420000000000000000000008002000000000001000000000001000000 + 00000000000000000000000000000000000000020000000800000000000000002000000000000000 + 
00000000000008000000220000000000400010000000000000000000000000000000000000000000 + 00000000000000000004000000001000010000000000080000000000400000000000000000000000 + 00000800000040000000000200000000000200000000000000000000000000000000000000000000 + 04000000000002000000000100000000000000000000000000001000000002000020000010200000 + 000000010000000000000000000000000000000000000010000000f903ccf89b9421130f34829b4c + 343142047a28ce96ec07814b15f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a116 + 28f55a4df523b3efa00000000000000000000000007d843005c7433c16b27ff939cb37471541561e + bda0000000000000000000000000e9c1281aae66801fa35ec404d5f2aea393ff6988a00000000000 + 0000000000000000000000000000000000000000000005d09b7380f89b9421130f34829b4c343142 + 047a28ce96ec07814b15f863a08c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200a + c8c7c3b925a00000000000000000000000007d843005c7433c16b27ff939cb37471541561ebda000 + 00000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da0ffffffffffffffff + ffffffffffffffffffffffffffffffffffffffcc840c6920f89b94c02aaa39b223fe8d0a0e5c4f27 + ead9083c756cc2f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523 + b3efa0000000000000000000000000e9c1281aae66801fa35ec404d5f2aea393ff6988a000000000 + 00000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da00000000000000000000000 + 0000000000000000000000000003e973b5a5d1078ef87994e9c1281aae66801fa35ec404d5f2aea3 + 93ff6988e1a01c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1b840 + 000000000000000000000000000000000000000000000000000001f1420ad1d40000000000000000 + 000000000000000000000000000000014ad400879d159a38f8fc94e9c1281aae66801fa35ec404d5 + f2aea393ff6988f863a0d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159 + d822a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da000000000 + 00000000000000007a250d5630b4cf539739df2c5dacb4c659f2488db88000000000000000000000 + 000000000000000000000000000000000005d415f332000000000000000000000000000000000000 + 00000000000000000000000000000000000000000000000000000000000000000000000000000000 + 00000000000000000000000000000000000000000000000000000000000003e973b5a5d1078ef87a + 94c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f842a07fcf532c15f0a6db0bd6d0e038bea71d + 30d808c7d98cb3bf7268a95bf5081b65a00000000000000000000000007a250d5630b4cf539739df + 2c5dacb4c659f2488da000000000000000000000000000000000000000000000000003e973b5a5d1 + 078e + " + ); + + #[test] + fn decode_legacy_receipt() { + let receipt: Receipt = rlp::decode(&RAW_RECEIPT).unwrap(); + assert_eq!(receipt.post_state_or_status, vec!(1)); + assert_eq!(receipt.cumulative_gas_used, 414448); + assert_eq!( + receipt.bloom, + (&hex!( + " + 042000000000000000000000800200000000000100000000000100000000000000000000 + 000000000000000000000000020000000800000000000000002000000000000000000000 + 000000080000002200000000004000100000000000000000000000000000000000000000 + 000000000000000000000400000000100001000000000008000000000040000000000000 + 000000000000000800000040000000000200000000000200000000000000000000000000 + 000000000000000000040000000000020000000001000000000000000000000000000010 + 000000020000200000102000000000000100000000000000000000000000000000000000 + 10000000 + " + )) + .into(), + ); + assert_eq!(receipt.logs.len(), 6); + } +} diff --git a/bridges/snowbridge/parachain/primitives/router/Cargo.toml b/bridges/snowbridge/parachain/primitives/router/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..7badfebb6068ca1f3ff3feccf18c767a5a722c1c --- /dev/null +++ 
b/bridges/snowbridge/parachain/primitives/router/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "snowbridge-router-primitives" +description = "Snowbridge Router Primitives" +version = "0.1.1" +authors = ["Snowfork "] +edition = "2021" +license = "Apache-2.0" + +[dependencies] +serde = { version = "1.0.188", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +log = { version = "0.4.20", default-features = false } + +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } + +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } + +snowbridge-core = { path = "../../primitives/core", default-features = false } + +ethabi = { git = "https://github.com/Snowfork/ethabi-decode.git", package = "ethabi-decode", branch = "master", default-features = false } + +hex-literal = { version = "0.4.1" } + +[dev-dependencies] +hex = { package = "rustc-hex", version = "2.1.0" } + +[features] +default = ["std"] +std = [ + "codec/std", + "ethabi/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "serde", + "snowbridge-core/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/bridges/snowbridge/parachain/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/parachain/primitives/router/src/inbound/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..a07e0eae5d73d6c2205d5858ad75d631832ab1fe --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/router/src/inbound/mod.rs @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Converts messages from Ethereum to XCM messages + +#[cfg(test)] +mod tests; + +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use frame_support::{traits::tokens::Balance as BalanceT, weights::Weight, PalletError}; +use scale_info::TypeInfo; +use sp_core::{Get, RuntimeDebug, H160}; +use sp_io::hashing::blake2_256; +use sp_runtime::MultiAddress; +use sp_std::prelude::*; +use xcm::prelude::{Junction::AccountKey20, *}; +use xcm_executor::traits::ConvertLocation; + +const MINIMUM_DEPOSIT: u128 = 1; + +/// Messages from Ethereum are versioned. This is because in future, +/// we may want to evolve the protocol so that the ethereum side sends XCM messages directly. 
+/// Instead of having BridgeHub transcode the messages into XCM.
+#[derive(Clone, Encode, Decode, RuntimeDebug)]
+pub enum VersionedMessage {
+	V1(MessageV1),
+}
+
+/// For V1, the ethereum side sends messages which are transcoded into XCM. These messages are
+/// self-contained, in that they can be transcoded using only information in the message.
+#[derive(Clone, Encode, Decode, RuntimeDebug)]
+pub struct MessageV1 {
+	/// EIP-155 chain id of the origin Ethereum network
+	pub chain_id: u64,
+	/// The command originating from the Gateway contract
+	pub command: Command,
+}
+
+#[derive(Clone, Encode, Decode, RuntimeDebug)]
+pub enum Command {
+	/// Register a wrapped token on the AssetHub `ForeignAssets` pallet
+	RegisterToken {
+		/// The address of the ERC20 token to be bridged over to AssetHub
+		token: H160,
+		/// XCM execution fee on AssetHub
+		fee: u128,
+	},
+	/// Send a token to AssetHub or another parachain
+	SendToken {
+		/// The address of the ERC20 token to be bridged over to AssetHub
+		token: H160,
+		/// The destination for the transfer
+		destination: Destination,
+		/// Amount to transfer
+		amount: u128,
+		/// XCM execution fee on AssetHub
+		fee: u128,
+	},
+}
+
+/// Destination for bridged tokens
+#[derive(Clone, Encode, Decode, RuntimeDebug)]
+pub enum Destination {
+	/// The funds will be deposited into account `id` on AssetHub
+	AccountId32 { id: [u8; 32] },
+	/// The funds will be deposited into the sovereign account of destination parachain `para_id` on
+	/// AssetHub. Account `id` on the destination parachain will receive the funds via a
+	/// reserve-backed transfer. See
+	ForeignAccountId32 {
+		para_id: u32,
+		id: [u8; 32],
+		/// XCM execution fee on final destination
+		fee: u128,
+	},
+	/// The funds will be deposited into the sovereign account of destination parachain `para_id` on
+	/// AssetHub. Account `id` on the destination parachain will receive the funds via a
+	/// reserve-backed transfer. See
+	ForeignAccountId20 {
+		para_id: u32,
+		id: [u8; 20],
+		/// XCM execution fee on final destination
+		fee: u128,
+	},
+}
+
+pub struct MessageToXcm<
+	CreateAssetCall,
+	CreateAssetDeposit,
+	InboundQueuePalletInstance,
+	AccountId,
+	Balance,
+> where
+	CreateAssetCall: Get<CallIndex>,
+	CreateAssetDeposit: Get<u128>,
+	Balance: BalanceT,
+{
+	_phantom: PhantomData<(
+		CreateAssetCall,
+		CreateAssetDeposit,
+		InboundQueuePalletInstance,
+		AccountId,
+		Balance,
+	)>,
+}
+
+/// Reason why a message conversion failed.
+#[derive(Copy, Clone, TypeInfo, PalletError, Encode, Decode, RuntimeDebug)]
+pub enum ConvertMessageError {
+	/// The message version is not supported for conversion.
+ UnsupportedVersion, +} + +/// convert the inbound message to xcm which will be forwarded to the destination chain +pub trait ConvertMessage { + type Balance: BalanceT + From; + type AccountId; + /// Converts a versioned message into an XCM message and an optional topicID + fn convert(message: VersionedMessage) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError>; +} + +pub type CallIndex = [u8; 2]; + +impl + ConvertMessage + for MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + > where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + InboundQueuePalletInstance: Get, + Balance: BalanceT + From, + AccountId: Into<[u8; 32]>, +{ + type Balance = Balance; + type AccountId = AccountId; + + fn convert(message: VersionedMessage) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError> { + use Command::*; + use VersionedMessage::*; + match message { + V1(MessageV1 { chain_id, command: RegisterToken { token, fee } }) => + Ok(Self::convert_register_token(chain_id, token, fee)), + V1(MessageV1 { chain_id, command: SendToken { token, destination, amount, fee } }) => + Ok(Self::convert_send_token(chain_id, token, destination, amount, fee)), + } + } +} + +impl + MessageToXcm +where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + InboundQueuePalletInstance: Get, + Balance: BalanceT + From, + AccountId: Into<[u8; 32]>, +{ + fn convert_register_token(chain_id: u64, token: H160, fee: u128) -> (Xcm<()>, Balance) { + let network = Ethereum { chain_id }; + let xcm_fee: MultiAsset = (MultiLocation::parent(), fee).into(); + let deposit: MultiAsset = (MultiLocation::parent(), CreateAssetDeposit::get()).into(); + + let total_amount = fee + CreateAssetDeposit::get(); + let total: MultiAsset = (MultiLocation::parent(), total_amount).into(); + + let bridge_location: MultiLocation = (Parent, Parent, GlobalConsensus(network)).into(); + + let owner = GlobalConsensusEthereumConvertsFor::<[u8; 32]>::from_chain_id(&chain_id); + let asset_id = Self::convert_token_address(network, token); + let create_call_index: [u8; 2] = CreateAssetCall::get(); + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let xcm: Xcm<()> = vec![ + // Teleport required fees. + ReceiveTeleportedAsset(total.into()), + // Pay for execution. + BuyExecution { fees: xcm_fee, weight_limit: Unlimited }, + // Fund the snowbridge sovereign with the required deposit for creation. + DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location }, + // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin` + DescendOrigin(X1(PalletInstance(inbound_queue_pallet_index))), + // Change origin to the bridge. + UniversalOrigin(GlobalConsensus(network)), + // Call create_asset on foreign assets pallet. 
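+			// (Editor's note, illustrative: the SCALE-encoded `call` below is the two-byte
+			// `CreateAssetCall` index followed by the encoded arguments
+			// `(asset_id, MultiAddress::Id(owner), MINIMUM_DEPOSIT)`, i.e. the layout AssetHub
+			// expects for a `ForeignAssets::create` dispatch.)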
+ Transact { + origin_kind: OriginKind::Xcm, + require_weight_at_most: Weight::from_parts(400_000_000, 8_000), + call: ( + create_call_index, + asset_id, + MultiAddress::<[u8; 32], ()>::Id(owner), + MINIMUM_DEPOSIT, + ) + .encode() + .into(), + }, + RefundSurplus, + // Clear the origin so that remaining assets in holding + // are claimable by the physical origin (BridgeHub) + ClearOrigin, + ] + .into(); + + (xcm, total_amount.into()) + } + + fn convert_send_token( + chain_id: u64, + token: H160, + destination: Destination, + amount: u128, + asset_hub_fee: u128, + ) -> (Xcm<()>, Balance) { + let network = Ethereum { chain_id }; + let asset_hub_fee_asset: MultiAsset = (MultiLocation::parent(), asset_hub_fee).into(); + let asset: MultiAsset = (Self::convert_token_address(network, token), amount).into(); + + let (dest_para_id, beneficiary, dest_para_fee) = match destination { + // Final destination is a 32-byte account on AssetHub + Destination::AccountId32 { id } => ( + None, + MultiLocation { parents: 0, interior: X1(AccountId32 { network: None, id }) }, + 0, + ), + // Final destination is a 32-byte account on a sibling of AssetHub + Destination::ForeignAccountId32 { para_id, id, fee } => ( + Some(para_id), + MultiLocation { parents: 0, interior: X1(AccountId32 { network: None, id }) }, + // Total fee needs to cover execution on AssetHub and Sibling + fee, + ), + // Final destination is a 20-byte account on a sibling of AssetHub + Destination::ForeignAccountId20 { para_id, id, fee } => ( + Some(para_id), + MultiLocation { parents: 0, interior: X1(AccountKey20 { network: None, key: id }) }, + // Total fee needs to cover execution on AssetHub and Sibling + fee, + ), + }; + + let total_fees = asset_hub_fee.saturating_add(dest_para_fee); + let total_fee_asset: MultiAsset = (MultiLocation::parent(), total_fees).into(); + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let mut instructions = vec![ + ReceiveTeleportedAsset(total_fee_asset.into()), + BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, + DescendOrigin(X1(PalletInstance(inbound_queue_pallet_index))), + UniversalOrigin(GlobalConsensus(network)), + ReserveAssetDeposited(asset.clone().into()), + ClearOrigin, + ]; + + match dest_para_id { + Some(dest_para_id) => { + let dest_para_fee_asset: MultiAsset = + (MultiLocation::parent(), dest_para_fee).into(); + + instructions.extend(vec![ + // Perform a deposit reserve to send to destination chain. + DepositReserveAsset { + assets: Definite(vec![dest_para_fee_asset.clone(), asset.clone()].into()), + dest: MultiLocation { parents: 1, interior: X1(Parachain(dest_para_id)) }, + xcm: vec![ + // Buy execution on target. + BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited }, + // Deposit asset to beneficiary. + DepositAsset { assets: Definite(asset.into()), beneficiary }, + ] + .into(), + }, + ]); + }, + None => { + instructions.extend(vec![ + // Deposit asset to beneficiary. + DepositAsset { assets: Definite(asset.into()), beneficiary }, + ]); + }, + } + + (instructions.into(), total_fees.into()) + } + + // Convert ERC20 token address to a Multilocation that can be understood by Assets Hub. 
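+	// (Editor's note: relative to AssetHub the result reads
+	// `../../GlobalConsensus(Ethereum { chain_id })/AccountKey20(token)`, so the asset is
+	// identified by its contract address under the Ethereum consensus system.)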
+ fn convert_token_address(network: NetworkId, token: H160) -> MultiLocation { + MultiLocation { + parents: 2, + interior: X2( + GlobalConsensus(network), + AccountKey20 { network: None, key: token.into() }, + ), + } + } +} + +pub struct GlobalConsensusEthereumConvertsFor(PhantomData); +impl ConvertLocation for GlobalConsensusEthereumConvertsFor +where + AccountId: From<[u8; 32]> + Clone, +{ + fn convert_location(location: &MultiLocation) -> Option { + if let MultiLocation { interior: X1(GlobalConsensus(Ethereum { chain_id })), .. } = location + { + Some(Self::from_chain_id(chain_id).into()) + } else { + None + } + } +} + +impl GlobalConsensusEthereumConvertsFor { + pub fn from_chain_id(chain_id: &u64) -> [u8; 32] { + (b"ethereum-chain", chain_id).using_encoded(blake2_256) + } +} diff --git a/bridges/snowbridge/parachain/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/parachain/primitives/router/src/inbound/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..8c96c13cf223bcaf72acc2010137315599ee3258 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/router/src/inbound/tests.rs @@ -0,0 +1,41 @@ +use super::GlobalConsensusEthereumConvertsFor; +use crate::inbound::CallIndex; +use frame_support::parameter_types; +use hex_literal::hex; +use xcm::v3::prelude::*; +use xcm_executor::traits::ConvertLocation; + +const NETWORK: NetworkId = Ethereum { chain_id: 11155111 }; + +parameter_types! { + pub EthereumNetwork: NetworkId = NETWORK; + + pub const CreateAssetCall: CallIndex = [1, 1]; + pub const CreateAssetExecutionFee: u128 = 123; + pub const CreateAssetDeposit: u128 = 891; + pub const SendTokenExecutionFee: u128 = 592; +} + +#[test] +fn test_contract_location_with_network_converts_successfully() { + let expected_account: [u8; 32] = + hex!("ce796ae65569a670d0c1cc1ac12515a3ce21b5fbf729d63d7b289baad070139d"); + let contract_location = MultiLocation { parents: 2, interior: X1(GlobalConsensus(NETWORK)) }; + + let account = + GlobalConsensusEthereumConvertsFor::<[u8; 32]>::convert_location(&contract_location) + .unwrap(); + + assert_eq!(account, expected_account); +} + +#[test] +fn test_contract_location_with_incorrect_location_fails_convert() { + let contract_location = + MultiLocation { parents: 2, interior: X2(GlobalConsensus(Polkadot), Parachain(1000)) }; + + assert_eq!( + GlobalConsensusEthereumConvertsFor::<[u8; 32]>::convert_location(&contract_location), + None, + ); +} diff --git a/bridges/snowbridge/parachain/primitives/router/src/lib.rs b/bridges/snowbridge/parachain/primitives/router/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d9031c69b22b88a8ae9dfbcb8ed3025a36332a82 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/router/src/lib.rs @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod inbound; +pub mod outbound; diff --git a/bridges/snowbridge/parachain/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/parachain/primitives/router/src/outbound/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..c7f2f440834caa41efac3e7cbdf89a50065c0061 --- /dev/null +++ b/bridges/snowbridge/parachain/primitives/router/src/outbound/mod.rs @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Converts XCM messages into simpler commands that can be processed by the Gateway contract + +#[cfg(test)] +mod tests; + +use core::slice::Iter; + +use codec::{Decode, Encode}; + +use frame_support::{ensure, traits::Get}; +use snowbridge_core::{ + outbound::{AgentExecuteCommand, Command, Message, SendMessage}, + ChannelId, ParaId, +}; +use sp_core::{H160, H256}; +use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; +use xcm::v3::prelude::*; +use xcm_executor::traits::{ConvertLocation, ExportXcm}; + +pub struct EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, +>(PhantomData<(UniversalLocation, EthereumNetwork, OutboundQueue, AgentHashedDescription)>); + +impl ExportXcm + for EthereumBlobExporter +where + UniversalLocation: Get, + EthereumNetwork: Get, + OutboundQueue: SendMessage, + AgentHashedDescription: ConvertLocation, +{ + type Ticket = (Vec, XcmHash); + + fn validate( + network: NetworkId, + _channel: u32, + universal_source: &mut Option, + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let expected_network = EthereumNetwork::get(); + let universal_location = UniversalLocation::get(); + + if network != expected_network { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched bridge network {network:?}."); + return Err(SendError::NotApplicable) + } + + let dest = destination.take().ok_or(SendError::MissingArgument)?; + if dest != Here { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched remote destination {dest:?}."); + return Err(SendError::NotApplicable) + } + + let (local_net, local_sub) = universal_source + .take() + .ok_or_else(|| { + log::error!(target: "xcm::ethereum_blob_exporter", "universal source not provided."); + SendError::MissingArgument + })? + .split_global() + .map_err(|()| { + log::error!(target: "xcm::ethereum_blob_exporter", "could not get global consensus from universal source '{universal_source:?}'."); + SendError::Unroutable + })?; + + if Ok(local_net) != universal_location.global_consensus() { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched relay network {local_net:?}."); + return Err(SendError::NotApplicable) + } + + let para_id = match local_sub { + X1(Parachain(para_id)) => para_id, + _ => { + log::error!(target: "xcm::ethereum_blob_exporter", "could not get parachain id from universal source '{local_sub:?}'."); + return Err(SendError::MissingArgument) + }, + }; + + let message = message.take().ok_or_else(|| { + log::error!(target: "xcm::ethereum_blob_exporter", "xcm message not provided."); + SendError::MissingArgument + })?; + + let mut converter = XcmConverter::new(&message, &expected_network); + let (agent_execute_command, message_id) = converter.convert().map_err(|err|{ + log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to pattern matching error '{err:?}'."); + SendError::Unroutable + })?; + + let source_location: MultiLocation = MultiLocation { parents: 1, interior: local_sub }; + let agent_id = match AgentHashedDescription::convert_location(&source_location) { + Some(id) => id, + None => { + log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to not being able to create agent id. 
'{source_location:?}'");
+				return Err(SendError::Unroutable)
+			},
+		};
+
+		let channel_id: ChannelId = ParaId::from(para_id).into();
+
+		let outbound_message = Message {
+			id: Some(message_id.into()),
+			channel_id,
+			command: Command::AgentExecute { agent_id, command: agent_execute_command },
+		};
+
+		// Validate the message.
+		let (ticket, fee) = OutboundQueue::validate(&outbound_message).map_err(|err| {
+			log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue validation of message failed. {err:?}");
+			SendError::Unroutable
+		})?;
+
+		// Convert the fee into a MultiAsset.
+		let fee = MultiAsset::from((MultiLocation::parent(), fee.total())).into();
+
+		Ok(((ticket.encode(), message_id), fee))
+	}
+
+	fn deliver(blob: (Vec<u8>, XcmHash)) -> Result<XcmHash, SendError> {
+		let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref())
+			.map_err(|_| {
+				log::trace!(target: "xcm::ethereum_blob_exporter", "undeliverable due to decoding error");
+				SendError::NotApplicable
+			})?;
+
+		let message_id = OutboundQueue::deliver(ticket).map_err(|_| {
+			log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue submit of message failed");
+			SendError::Transport("other transport error")
+		})?;
+
+		log::info!(target: "xcm::ethereum_blob_exporter", "message delivered {message_id:#?}.");
+		Ok(message_id.into())
+	}
+}
+
+/// Errors that can be thrown during the pattern matching step.
+#[derive(PartialEq, Debug)]
+enum XcmConverterError {
+	UnexpectedEndOfXcm,
+	EndOfXcmMessageExpected,
+	WithdrawAssetExpected,
+	DepositAssetExpected,
+	NoReserveAssets,
+	FilterDoesNotConsumeAllAssets,
+	TooManyAssets,
+	ZeroAssetTransfer,
+	BeneficiaryResolutionFailed,
+	AssetResolutionFailed,
+	InvalidFeeAsset,
+	SetTopicExpected,
+}
+
+macro_rules! match_expression {
+	($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $value:expr $(,)?) => {
+		match $expression {
+			$( $pattern )|+ $( if $guard )? => Some($value),
+			_ => None,
+		}
+	};
+}
+
+struct XcmConverter<'a, Call> {
+	iter: Peekable<Iter<'a, Instruction<Call>>>,
+	ethereum_network: &'a NetworkId,
+}
+impl<'a, Call> XcmConverter<'a, Call> {
+	fn new(message: &'a Xcm<Call>, ethereum_network: &'a NetworkId) -> Self {
+		Self { iter: message.inner().iter().peekable(), ethereum_network }
+	}
+
+	fn convert(&mut self) -> Result<(AgentExecuteCommand, [u8; 32]), XcmConverterError> {
+		// Get withdraw/deposit and build the native tokens unlock message.
+		let result = self.native_tokens_unlock_message()?;
+
+		// All XCM instructions must be consumed before exit.
+		if self.next().is_ok() {
+			return Err(XcmConverterError::EndOfXcmMessageExpected)
+		}
+
+		Ok(result)
+	}
+
+	fn native_tokens_unlock_message(
+		&mut self,
+	) -> Result<(AgentExecuteCommand, [u8; 32]), XcmConverterError> {
+		use XcmConverterError::*;
+
+		// Get the reserve assets from WithdrawAsset.
+		let reserve_assets =
+			match_expression!(self.next()?, WithdrawAsset(reserve_assets), reserve_assets)
+				.ok_or(WithdrawAssetExpected)?;
+
+		// Check if ClearOrigin exists and skip over it.
+		if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() {
+			let _ = self.next();
+		}
+
+		// Get the fee asset item from BuyExecution or continue parsing.
+		let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees);
+		if fee_asset.is_some() {
+			let _ = self.next();
+		}
+
+		let (deposit_assets, beneficiary) = match_expression!(
+			self.next()?,
+			DepositAsset { assets, beneficiary },
+			(assets, beneficiary)
+		)
+		.ok_or(DepositAssetExpected)?;
+
+		// Assert that the beneficiary is an AccountKey20 location.
+		let recipient = match_expression!(
+			beneficiary,
+			MultiLocation { parents: 0, interior: X1(AccountKey20 { network, key }) }
+				if self.network_matches(network),
+			H160(*key)
+		)
+		.ok_or(BeneficiaryResolutionFailed)?;
+
+		// Make sure there are reserved assets.
+		if reserve_assets.len() == 0 {
+			return Err(NoReserveAssets)
+		}
+
+		// Check that the deposit asset filter matches what was reserved.
+		if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) {
+			return Err(FilterDoesNotConsumeAllAssets)
+		}
+
+		// We only support a single asset at a time.
+		ensure!(reserve_assets.len() == 1, TooManyAssets);
+		let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?;
+
+		// If a fee was specified, verify it.
+		if let Some(fee_asset) = fee_asset {
+			// The fee asset must be the same as the reserve asset.
+			if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun {
+				return Err(InvalidFeeAsset)
+			}
+		}
+
+		let (token, amount) = match_expression!(
+			reserve_asset,
+			MultiAsset {
+				id: Concrete(MultiLocation { parents: 0, interior: X1(AccountKey20 { network, key }) }),
+				fun: Fungible(amount)
+			} if self.network_matches(network),
+			(H160(*key), *amount)
+		)
+		.ok_or(AssetResolutionFailed)?;
+
+		// The transfer amount must be greater than 0.
+		ensure!(amount > 0, ZeroAssetTransfer);
+
+		// Check if there is a SetTopic and skip over it if found.
+		let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?;
+
+		Ok((AgentExecuteCommand::TransferToken { token, recipient, amount }, *topic_id))
+	}
+
+	fn next(&mut self) -> Result<&'a Instruction<Call>, XcmConverterError> {
+		self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm)
+	}
+
+	fn peek(&mut self) -> Result<&&'a Instruction<Call>, XcmConverterError> {
+		self.iter.peek().ok_or(XcmConverterError::UnexpectedEndOfXcm)
+	}
+
+	fn network_matches(&self, network: &Option<NetworkId>) -> bool {
+		if let Some(network) = network {
+			network == self.ethereum_network
+		} else {
+			true
+		}
+	}
+}
diff --git a/bridges/snowbridge/parachain/primitives/router/src/outbound/tests.rs b/bridges/snowbridge/parachain/primitives/router/src/outbound/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..153d934c390962bd68c9fe9d43751a37e1d4c756
--- /dev/null
+++ b/bridges/snowbridge/parachain/primitives/router/src/outbound/tests.rs
@@ -0,0 +1,1063 @@
+use frame_support::parameter_types;
+use hex_literal::hex;
+use snowbridge_core::{
+	outbound::{Fee, SendError, SendMessageFeeProvider},
+	AgentIdOf,
+};
+use xcm::v3::prelude::SendError as XcmSendError;
+
+use super::*;
+
+parameter_types!
{ + const MaxMessageSize: u32 = u32::MAX; + const RelayNetwork: NetworkId = Polkadot; + const UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get()), Parachain(1013)); + const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; + const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; +} + +struct MockOkOutboundQueue; +impl SendMessage for MockOkOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Ok(((), Fee { local: 1, remote: 1 })) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } +} + +impl SendMessageFeeProvider for MockOkOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } +} +struct MockErrOutboundQueue; +impl SendMessage for MockErrOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Err(SendError::MessageTooLarge) + } + + fn deliver(_: Self::Ticket) -> Result { + Err(SendError::MessageTooLarge) + } +} + +impl SendMessageFeeProvider for MockErrOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } +} + +#[test] +fn exporter_validate_with_unknown_network_yields_not_applicable() { + let network = Ethereum { chain_id: 1337 }; + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_with_invalid_destination_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_with_x8_destination_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Some(X8( + OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, + )); + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_universal_source_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_without_global_universal_location_yields_unroutable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let 
mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::Unroutable)); +} + +#[test] +fn exporter_validate_without_global_bridge_location_yields_not_applicable() { + let network = NonBridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_with_remote_universal_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some(X2(GlobalConsensus(Kusama), Parachain(1000))); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_para_id_in_source_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Some(X1(GlobalConsensus(Polkadot))); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_complex_para_id_in_source_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some(X3(GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12))); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_without_xcm_message_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some(X2(GlobalConsensus(Polkadot), Parachain(1000))); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_with_max_target_fee_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some(X2(GlobalConsensus(Polkadot), 
Parachain(1000))); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let fee = MultiAsset { id: Concrete(Here.into()), fun: Fungible(1000) }; + let fees: MultiAssets = vec![fee.clone()].into(); + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(fees), + BuyExecution { fees: fee, weight_limit: Unlimited }, + WithdrawAsset(assets), + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: Some(network), key: beneficiary_address }) + .into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + + assert_eq!(result, Err(XcmSendError::Unroutable)); +} + +#[test] +fn exporter_validate_with_unparsable_xcm_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some(X2(GlobalConsensus(Polkadot), Parachain(1000))); + + let channel: u32 = 0; + let fee = MultiAsset { id: Concrete(Here.into()), fun: Fungible(1000) }; + let fees: MultiAssets = vec![fee.clone()].into(); + + let mut message: Option> = + Some(vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into()); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + + assert_eq!(result, Err(XcmSendError::Unroutable)); +} + +#[test] +fn exporter_validate_xcm_success_case_1() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some(X2(GlobalConsensus(Polkadot), Parachain(1000))); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: MultiAssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + >::validate( + network, channel, &mut universal_source, &mut destination, &mut message + ); + + assert!(result.is_ok()); +} + +#[test] +fn exporter_deliver_with_submit_failure_yields_unroutable() { + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockErrOutboundQueue, + AgentIdOf, + >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); + assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) +} + 
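+// A minimal happy-path sketch for `deliver`, mirroring the failure case above. It relies only on
+// the `MockOkOutboundQueue` defined in this file (ticket type `()`, `deliver` returning
+// `H256::zero()`) and is illustrative rather than exhaustive.
+#[test]
+fn exporter_deliver_with_submit_success_yields_message_id() {
+	// An empty blob decodes into the mock's `()` ticket.
+	let result = EthereumBlobExporter::<
+		UniversalLocation,
+		BridgedNetwork,
+		MockOkOutboundQueue,
+		AgentIdOf,
+	>::deliver((vec![], XcmHash::default()));
+	// The mock queue reports `H256::zero()` as the delivered message id.
+	assert_eq!(result, Ok([0u8; 32]));
+}
+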
+#[test] +fn xcm_converter_convert_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + let expected_payload = AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_convert_without_buy_execution_yields_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + let expected_payload = AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = Wild(All); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + let expected_payload = AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = 
hex!("2000000000000000000000000000000000000000"); + + let asset_location = X1(AccountKey20 { network: None, key: token_address }).into(); + let fee_asset = MultiAsset { id: Concrete(asset_location), fun: Fungible(500) }; + + let assets: MultiAssets = + vec![MultiAsset { id: Concrete(asset_location), fun: Fungible(1000) }].into(); + + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + let expected_payload = AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_convert_without_set_topic_yields_set_topic_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + ClearTopic, + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); +} + +#[test] +fn xcm_converter_convert_with_partial_message_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); + + let mut converter = XcmConverter::new(&message, &network); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); +} + +#[test] +fn xcm_converter_with_different_fee_asset_fails() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location = X1(AccountKey20 { network: None, key: token_address }).into(); + let fee_asset = MultiAsset { + id: Concrete(MultiLocation { parents: 0, interior: Here }), + fun: Fungible(1000), + }; + + let assets: MultiAssets = + vec![MultiAsset { id: Concrete(asset_location), fun: Fungible(1000) }].into(); + + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + 
.into(); + let mut converter = XcmConverter::new(&message, &network); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); +} + +#[test] +fn xcm_converter_with_fees_greater_than_reserve_fails() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location = X1(AccountKey20 { network: None, key: token_address }).into(); + let fee_asset = MultiAsset { id: Concrete(asset_location), fun: Fungible(1001) }; + + let assets: MultiAssets = + vec![MultiAsset { id: Concrete(asset_location), fun: Fungible(1000) }].into(); + + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); +} + +#[test] +fn xcm_converter_convert_with_empty_xcm_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let message: Xcm<()> = vec![].into(); + + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); +} + +#[test] +fn xcm_converter_convert_with_extra_instructions_yields_end_of_xcm_message_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ClearError, + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); +} + +#[test] +fn xcm_converter_convert_without_withdraw_asset_yields_withdraw_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result 
= converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::WithdrawAssetExpected)); +} + +#[test] +fn xcm_converter_convert_without_withdraw_asset_yields_deposit_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); +} + +#[test] +fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![].into(); + let filter: MultiAssetFilter = assets.clone().into(); + + let fee = MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); +} + +#[test] +fn xcm_converter_convert_with_two_assets_yields_too_many_assets() { + let network = BridgedNetwork::get(); + + let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let token_address_2: [u8; 20] = hex!("1100000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![ + MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address_1 }).into()), + fun: Fungible(1000), + }, + MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address_2 }).into()), + fun: Fungible(500), + }, + ] + .into(); + let filter: MultiAssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::TooManyAssets)); +} + +#[test] +fn xcm_converter_convert_without_consuming_filter_yields_filter_does_not_consume_all_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + 
.into(); + let filter: MultiAssetFilter = Wild(WildMultiAsset::AllCounted(0)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); +} + +#[test] +fn xcm_converter_convert_with_zero_amount_asset_yields_zero_asset_transfer() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(0), + }] + .into(); + let filter: MultiAssetFilter = Wild(WildMultiAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X3(GlobalConsensus(Polkadot), Parachain(1000), GeneralIndex(0)).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = Wild(WildMultiAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_chain_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete( + X1(AccountKey20 { network: Some(Ethereum { chain_id: 2 }), key: token_address }).into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = Wild(WildMultiAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let 
result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_chain_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete( + X1(AccountKey20 { network: Some(NonBridgedNetwork::get()), key: token_address }).into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = Wild(WildMultiAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { network: None, key: beneficiary_address }).into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_with_non_ethereum_beneficiary_yields_beneficiary_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 32] = + hex!("2000000000000000000000000000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = Wild(WildMultiAsset::AllCounted(1)); + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X3( + GlobalConsensus(Polkadot), + Parachain(1000), + AccountId32 { network: Some(Polkadot), id: beneficiary_address }, + ) + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_with_non_ethereum_chain_beneficiary_yields_beneficiary_resolution_failed() +{ + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: MultiAssets = vec![MultiAsset { + id: Concrete(X1(AccountKey20 { network: None, key: token_address }).into()), + fun: Fungible(1000), + }] + .into(); + let filter: MultiAssetFilter = Wild(WildMultiAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: X1(AccountKey20 { + network: Some(Ethereum { chain_id: 2 }), + key: beneficiary_address, + }) + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::new(&message, &network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); +} + +#[test] +fn test_describe_asset_hub() { + let legacy_location: MultiLocation = + MultiLocation { parents: 0, 
interior: X1(Parachain(1000)) }; + let legacy_agent_id = AgentIdOf::convert_location(&legacy_location).unwrap(); + assert_eq!( + legacy_agent_id, + hex!("72456f48efed08af20e5b317abf8648ac66e86bb90a411d9b0b713f7364b75b4").into() + ); + let location: MultiLocation = MultiLocation { parents: 1, interior: X1(Parachain(1000)) }; + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79").into() + ) +} + +#[test] +fn test_describe_here() { + let location: MultiLocation = MultiLocation { parents: 0, interior: Here }; + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into() + ) +} diff --git a/bridges/snowbridge/parachain/runtime/rococo-common/Cargo.toml b/bridges/snowbridge/parachain/runtime/rococo-common/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..656ed6de26e83acfcfa35adff0ad9aae4ba8ba78 --- /dev/null +++ b/bridges/snowbridge/parachain/runtime/rococo-common/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "snowbridge-rococo-common" +description = "Snowbridge Rococo Common" +version = "0.0.1" +authors = ["Snowfork "] +edition = "2021" +license = "Apache-2.0" + +[dependencies] +log = { version = "0.4.20", default-features = false } + +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } + +[dev-dependencies] + +[features] +default = ["std"] +std = [ + "frame-support/std", + "log/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", +] diff --git a/bridges/snowbridge/parachain/runtime/rococo-common/src/lib.rs b/bridges/snowbridge/parachain/runtime/rococo-common/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..97f0332fe66bafb49461d4a619f05f13a486306c --- /dev/null +++ b/bridges/snowbridge/parachain/runtime/rococo-common/src/lib.rs @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! # Rococo Common +//! +//! Config used for the Rococo asset hub and bridge hub runtimes. +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::parameter_types; +use xcm::opaque::lts::NetworkId; + +pub const INBOUND_QUEUE_MESSAGES_PALLET_INDEX: u8 = 80; + +parameter_types! { + // Network and location for the Ethereum chain. 
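+	// Chain id 11155111 corresponds to the Sepolia testnet.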
+ pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; +} diff --git a/bridges/snowbridge/parachain/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/parachain/runtime/runtime-common/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..b835152cac0d6f74b1995aa3d2ede1184ab010bf --- /dev/null +++ b/bridges/snowbridge/parachain/runtime/runtime-common/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "snowbridge-runtime-common" +description = "Snowbridge Runtime Common" +version = "0.1.1" +authors = ["Snowfork "] +edition = "2021" +license = "Apache-2.0" + +[dependencies] +log = { version = "0.4.20", default-features = false } + +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } + +snowbridge-core = { path = "../../primitives/core", default-features = false } + +[dev-dependencies] + +[features] +default = ["std"] +std = [ + "frame-support/std", + "frame-system/std", + "log/std", + "snowbridge-core/std", + "sp-arithmetic/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/bridges/snowbridge/parachain/runtime/runtime-common/src/lib.rs b/bridges/snowbridge/parachain/runtime/runtime-common/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b7f54d262bbb33c2dec1330c9de55cd399a70e04 --- /dev/null +++ b/bridges/snowbridge/parachain/runtime/runtime-common/src/lib.rs @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! # Runtime Common +//! +//! Common traits and types shared by runtimes. +#![cfg_attr(not(feature = "std"), no_std)] + +use core::marker::PhantomData; +use frame_support::traits::Get; +use snowbridge_core::{outbound::SendMessageFeeProvider, sibling_sovereign_account_raw}; +use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; +use xcm::prelude::*; +use xcm_builder::{deposit_or_burn_fee, HandleFee}; +use xcm_executor::traits::{FeeReason, TransactAsset}; + +/// A `HandleFee` implementation that takes fees from `ExportMessage` XCM instructions +/// to Snowbridge and splits off the remote fee and deposits it to the origin +/// parachain sovereign account. The local fee is then returned back to be handled by +/// the next fee handler in the chain. Most likely the treasury account. 
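+///
+/// Concretely, the remote component is the supplied fee minus `FeeProvider::local_fee()`; it is
+/// deposited (or burned) for the sovereign account of the sibling parachain taken from the XCM
+/// context origin, while only the local component is passed on for further handling.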
+pub struct XcmExportFeeToSibling< + Balance, + AccountId, + FeeAssetLocation, + EthereumNetwork, + AssetTransactor, + FeeProvider, +>( + PhantomData<( + Balance, + AccountId, + FeeAssetLocation, + EthereumNetwork, + AssetTransactor, + FeeProvider, + )>, +); + +impl HandleFee + for XcmExportFeeToSibling< + Balance, + AccountId, + FeeAssetLocation, + EthereumNetwork, + AssetTransactor, + FeeProvider, + > where + Balance: BaseArithmetic + Unsigned + Copy + From + Into, + AccountId: Clone + Into<[u8; 32]> + From<[u8; 32]>, + FeeAssetLocation: Get, + EthereumNetwork: Get, + AssetTransactor: TransactAsset, + FeeProvider: SendMessageFeeProvider, +{ + fn handle_fee( + fees: MultiAssets, + context: Option<&XcmContext>, + reason: FeeReason, + ) -> MultiAssets { + let token_location = FeeAssetLocation::get(); + + // Check the reason to see if this export is for snowbridge. + if !matches!( + reason, + FeeReason::Export { network: bridged_network, destination } + if bridged_network == EthereumNetwork::get() && destination == Here + ) { + return fees + } + + // Get the parachain sovereign from the `context`. + let para_sovereign = if let Some(XcmContext { + origin: Some(MultiLocation { parents: 1, interior }), + .. + }) = context + { + if let Some(Parachain(sibling_para_id)) = interior.first() { + let account: AccountId = + sibling_sovereign_account_raw((*sibling_para_id).into()).into(); + account + } else { + return fees + } + } else { + return fees + }; + + // Get the total fee offered by export message. + let maybe_total_supplied_fee: Option<(usize, Balance)> = fees + .inner() + .iter() + .enumerate() + .filter_map(|(index, asset)| { + if let MultiAsset { id: Concrete(location), fun: Fungible(amount) } = asset { + if *location == token_location { + return Some((index, (*amount).into())) + } + } + None + }) + .next(); + + if let Some((fee_index, total_fee)) = maybe_total_supplied_fee { + let remote_fee = total_fee.saturating_sub(FeeProvider::local_fee()); + if remote_fee > (0u128).into() { + // Refund remote component of fee to physical origin + deposit_or_burn_fee::( + MultiAsset { id: Concrete(token_location), fun: Fungible(remote_fee.into()) } + .into(), + context, + para_sovereign, + ); + // Return remaining fee to the next fee handler in the chain. 
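+				// The asset entry that paid the fee is replaced below with one holding only the
+				// local component (`total_fee - remote_fee`, i.e. `FeeProvider::local_fee()`).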
+ let mut modified_fees = fees.inner().clone(); + modified_fees.remove(fee_index); + modified_fees.push(MultiAsset { + id: Concrete(token_location), + fun: Fungible((total_fee - remote_fee).into()), + }); + return modified_fees.into() + } + } + + log::info!( + target: "xcm::fees", + "XcmExportFeeToSibling skipped: {fees:?}, context: {context:?}, reason: {reason:?}", + ); + fees + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/bridges/snowbridge/parachain/runtime/tests/Cargo.toml similarity index 50% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml rename to bridges/snowbridge/parachain/runtime/tests/Cargo.toml index f71499e0c29184b64a7284e411f4ac8876842d58..da1fe878d935c8f9b1f0c9bf452894341c949cef 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/bridges/snowbridge/parachain/runtime/tests/Cargo.toml @@ -1,172 +1,103 @@ [package] -name = "asset-hub-kusama-runtime" -version = "0.9.420" -authors.workspace = true -edition.workspace = true -description = "Kusama variant of Asset Hub parachain runtime" +name = "snowbridge-runtime-tests" +description = "Snowbridge Runtime Tests" +version = "0.1.0" +authors = ["Snowfork "] +edition = "2021" license = "Apache-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.188", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false} -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} 
-pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false, optional = true } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } 
+sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} -sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false} -# num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "scale-info", "num-traits"] } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = 
false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +cumulus-pallet-aura-ext = { path = "../../../../../cumulus/pallets/aura-ext", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../../cumulus/pallets/dmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../../cumulus/pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../../cumulus/pallets/session-benchmarking", default-features = false } +cumulus-pallet-xcm = { path = "../../../../../cumulus/pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../../cumulus/pallets/xcmp-queue", default-features = false, features = ["bridging"] } +cumulus-primitives-core = { path = "../../../../../cumulus/primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../../../cumulus/primitives/utility", default-features = false } +pallet-collator-selection = { path = "../../../../../cumulus/pallets/collator-selection", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../../../cumulus/parachains/pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../../../cumulus/parachains/common", default-features = false } +parachains-runtimes-test-utils = { path = "../../../../../cumulus/parachains/runtimes/test-utils", default-features = false } +bridge-hub-rococo-runtime = { path = "../../../../../cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } +asset-hub-rococo-runtime = { path = "../../../../../cumulus/parachains/runtimes/assets/asset-hub-rococo", default-features = false } +assets-common = { path = "../../../../../cumulus/parachains/runtimes/assets/common", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -assets-common = { path = "../common", default-features = false } +# Ethereum 
Bridge (Snowbridge) +snowbridge-core = { path = "../../primitives/core", default-features = false } +snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } +snowbridge-router-primitives = { path = "../../primitives/router", default-features = false } +snowbridge-ethereum-beacon-client = { path = "../../pallets/ethereum-beacon-client", default-features = false } +snowbridge-inbound-queue = { path = "../../pallets/inbound-queue", default-features = false } +snowbridge-outbound-queue = { path = "../../pallets/outbound-queue", default-features = false } +snowbridge-outbound-queue-runtime-api = { path = "../../pallets/outbound-queue/runtime-api", default-features = false } +snowbridge-system = { path = "../../pallets/system", default-features = false } +snowbridge-system-runtime-api = { path = "../../pallets/system/runtime-api", default-features = false } [dev-dependencies] -asset-test-utils = { path = "../test-utils" } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +static_assertions = "1.1" +bridge-hub-test-utils = { path = "../../../../../cumulus/parachains/runtimes/bridge-hubs/test-utils" } +bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = ["integrity-test"] } +sp-keyring = { path = "../../../../../substrate/primitives/keyring" } [features] -default = [ "std" ] -# When enabled the `state_version` is set to `1`. -# This means that the chain will start using the new state format. The migration is lazy, so -# it requires to write a storage value to use the new state format. To migrate all the other -# storage values that aren't touched the state migration pallet is added as well. -# This pallet will migrate the entire state, controlled through some account. -# -# This feature should be removed when the main-net will be migrated. 
-state-trie-version-1 = [ "pallet-state-trie-migration" ] -runtime-benchmarks = [ - "assets-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "cumulus-primitives-utility/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-asset-conversion/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-collator-selection/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-multisig/runtime-benchmarks", - "pallet-nft-fractionalization/runtime-benchmarks", - "pallet-nfts/runtime-benchmarks", - "pallet-proxy/runtime-benchmarks", - "pallet-state-trie-migration/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-uniques/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "pallet-xcm-benchmarks/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "cumulus-pallet-xcmp-queue/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-asset-conversion-tx-payment/try-runtime", - "pallet-asset-conversion/try-runtime", - "pallet-assets/try-runtime", - "pallet-aura/try-runtime", - "pallet-authorship/try-runtime", - "pallet-balances/try-runtime", - "pallet-collator-selection/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-multisig/try-runtime", - "pallet-nft-fractionalization/try-runtime", - "pallet-nfts/try-runtime", - "pallet-proxy/try-runtime", - "pallet-session/try-runtime", - "pallet-state-trie-migration/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-uniques/try-runtime", - "pallet-utility/try-runtime", - "pallet-xcm/try-runtime", - "parachain-info/try-runtime", - "polkadot-runtime-common/try-runtime", - "sp-runtime/try-runtime", -] +default = ["std"] std = [ + "asset-hub-rococo-runtime/std", "assets-common/std", + "bridge-hub-rococo-runtime/std", "codec/std", "cumulus-pallet-aura-ext/std", "cumulus-pallet-dmp-queue/std", @@ -176,7 +107,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", "cumulus-primitives-utility/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-executive/std", "frame-support/std", "frame-system-benchmarking?/std", @@ -184,41 +115,44 @@ std = [ "frame-system/std", "frame-try-runtime?/std", "log/std", - "pallet-asset-conversion-tx-payment/std", - "pallet-asset-conversion/std", - "pallet-assets/std", "pallet-aura/std", "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", "pallet-message-queue/std", "pallet-multisig/std", - "pallet-nft-fractionalization/std", - "pallet-nfts-runtime-api/std", - "pallet-nfts/std", - "pallet-proxy/std", 
"pallet-session/std", - "pallet-state-trie-migration/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", - "pallet-uniques/std", "pallet-utility/std", "pallet-xcm-benchmarks?/std", "pallet-xcm/std", "parachain-info/std", "parachains-common/std", + "parachains-runtimes-test-utils/std", "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", - "primitive-types/std", + "rococo-runtime-constants/std", "scale-info/std", + "serde", + "snowbridge-beacon-primitives/std", + "snowbridge-core/std", + "snowbridge-ethereum-beacon-client/std", + "snowbridge-inbound-queue/std", + "snowbridge-outbound-queue-runtime-api/std", + "snowbridge-outbound-queue/std", + "snowbridge-router-primitives/std", + "snowbridge-system-runtime-api/std", + "snowbridge-system/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", + "sp-io/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", @@ -226,11 +160,85 @@ std = [ "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", - "sp-weights/std", - "substrate-wasm-builder", "xcm-builder/std", "xcm-executor/std", "xcm/std", ] -experimental = [ "pallet-aura/experimental" ] +runtime-benchmarks = [ + "asset-hub-rococo-runtime/runtime-benchmarks", + "assets-common/runtime-benchmarks", + "bridge-hub-rococo-runtime/runtime-benchmarks", + "bridge-runtime-common/runtime-benchmarks", + "cumulus-pallet-dmp-queue/runtime-benchmarks", + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-pallet-session-benchmarking/runtime-benchmarks", + "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-utility/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "snowbridge-ethereum-beacon-client/runtime-benchmarks", + "snowbridge-inbound-queue/runtime-benchmarks", + "snowbridge-outbound-queue/runtime-benchmarks", + "snowbridge-router-primitives/runtime-benchmarks", + "snowbridge-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] + +try-runtime = [ + "asset-hub-rococo-runtime/try-runtime", + "bridge-hub-rococo-runtime/try-runtime", + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-dmp-queue/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + "cumulus-pallet-xcm/try-runtime", + "cumulus-pallet-xcmp-queue/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-multisig/try-runtime", + "pallet-session/try-runtime", 
+ "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-utility/try-runtime", + "pallet-xcm/try-runtime", + "parachain-info/try-runtime", + "polkadot-runtime-common/try-runtime", + "snowbridge-ethereum-beacon-client/try-runtime", + "snowbridge-inbound-queue/try-runtime", + "snowbridge-outbound-queue/try-runtime", + "snowbridge-system/try-runtime", + "sp-runtime/try-runtime", +] +beacon-spec-mainnet = [ + "snowbridge-ethereum-beacon-client/beacon-spec-mainnet", +] +experimental = ["pallet-aura/experimental"] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/bridges/snowbridge/parachain/runtime/tests/src/lib.rs b/bridges/snowbridge/parachain/runtime/tests/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..9a5d12e28926b995929242136927cb1e1038f9ec --- /dev/null +++ b/bridges/snowbridge/parachain/runtime/tests/src/lib.rs @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork + +#![cfg(test)] + +mod test_cases; + +use asset_hub_rococo_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; +use bridge_hub_rococo_runtime::{ + xcm_config::XcmConfig, MessageQueueServiceWeight, Runtime, RuntimeEvent, SessionKeys, +}; +use codec::Decode; +use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; +use parachains_common::{AccountId, AuraId}; +use snowbridge_ethereum_beacon_client::WeightInfo; +use sp_core::H160; +use sp_keyring::AccountKeyring::Alice; + +pub fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys { + bridge_hub_test_utils::CollatorSessionKeys::new( + AccountId::from(Alice), + AccountId::from(Alice), + SessionKeys { aura: AuraId::from(Alice.public()) }, + ) +} + +#[test] +pub fn transfer_token_to_ethereum_works() { + test_cases::send_transfer_token_message_success::( + collator_session_keys(), + 1013, + 1000, + H160::random(), + H160::random(), + DefaultBridgeHubEthereumBaseFee::get(), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::EthereumOutboundQueue(event)) => Some(event), + _ => None, + } + }), + ) +} + +#[test] +pub fn unpaid_transfer_token_to_ethereum_fails_with_barrier() { + test_cases::send_unpaid_transfer_token_message::( + collator_session_keys(), + 1013, + 1000, + H160::random(), + H160::random(), + ) +} + +#[test] +pub fn transfer_token_to_ethereum_fee_not_enough() { + test_cases::send_transfer_token_message_failure::( + collator_session_keys(), + 1013, + 1000, + DefaultBridgeHubEthereumBaseFee::get() + 1_000_000_000, + H160::random(), + H160::random(), + // fee not enough + 1_000_000_000, + NotHoldingFees, + ) +} + +#[test] +pub fn transfer_token_to_ethereum_insufficient_fund() { + test_cases::send_transfer_token_message_failure::( + collator_session_keys(), + 1013, + 1000, + 1_000_000_000, + H160::random(), + H160::random(), + DefaultBridgeHubEthereumBaseFee::get(), + FailedToTransactAsset("InsufficientBalance"), + ) +} + +#[test] +fn max_message_queue_service_weight_is_more_than_beacon_extrinsic_weights() { + let max_message_queue_weight = MessageQueueServiceWeight::get(); + let force_checkpoint = + ::WeightInfo::force_checkpoint(); + let submit_checkpoint = + ::WeightInfo::submit(); + 
max_message_queue_weight.all_gt(force_checkpoint); + max_message_queue_weight.all_gt(submit_checkpoint); +} diff --git a/bridges/snowbridge/parachain/runtime/tests/src/test_cases.rs b/bridges/snowbridge/parachain/runtime/tests/src/test_cases.rs new file mode 100644 index 0000000000000000000000000000000000000000..19e45f7a15a7ecdc1dac78d611589c1149fe055b --- /dev/null +++ b/bridges/snowbridge/parachain/runtime/tests/src/test_cases.rs @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork + +//! Module contains predefined test-case scenarios for `Runtime` with bridging capabilities. + +use asset_hub_rococo_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; +use bridge_hub_rococo_runtime::EthereumSystem; +use codec::Encode; +use frame_support::{assert_err, assert_ok, traits::fungible::Mutate}; +use parachains_runtimes_test_utils::{ + AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder, ValidatorIdOf, XcmReceivedFrom, +}; +use sp_core::H160; +use sp_runtime::SaturatedConversion; +use xcm::latest::prelude::*; +use xcm_executor::XcmExecutor; +// Re-export test_case from `parachains-runtimes-test-utils` +pub use parachains_runtimes_test_utils::test_cases::change_storage_constant_by_governance_works; +use xcm::v3::Error::{self, Barrier}; + +type RuntimeHelper = + parachains_runtimes_test_utils::RuntimeHelper; + +pub fn initial_fund(assethub_parachain_id: u32, initial_amount: u128) +where + Runtime: frame_system::Config + pallet_balances::Config, +{ + // fund asset hub sovereign account enough so it can pay fees + let asset_hub_sovereign_account = + snowbridge_core::sibling_sovereign_account::(assethub_parachain_id.into()); + >::mint_into( + &asset_hub_sovereign_account, + initial_amount.saturated_into::>(), + ) + .unwrap(); +} + +pub fn send_transfer_token_message( + assethub_parachain_id: u32, + weth_contract_address: H160, + destination_address: H160, + fee_amount: u128, +) -> Outcome +where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + snowbridge_outbound_queue::Config, + XcmConfig: xcm_executor::Config, +{ + let assethub_parachain_location = MultiLocation::new(1, Parachain(assethub_parachain_id)); + let asset = MultiAsset { + id: Concrete(MultiLocation { + parents: 0, + interior: X1(AccountKey20 { network: None, key: weth_contract_address.into() }), + }), + fun: Fungible(1000000000), + }; + let assets = vec![asset.clone()]; + + let inner_xcm = Xcm(vec![ + WithdrawAsset(MultiAssets::from(assets.clone())), + ClearOrigin, + BuyExecution { fees: asset, weight_limit: Unlimited }, + DepositAsset { + assets: Wild(All), + beneficiary: MultiLocation { + parents: 0, + interior: X1(AccountKey20 { network: None, key: destination_address.into() }), + }, + }, + SetTopic([0; 32]), + ]); + + let fee = MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(fee_amount), + }; + + // prepare transfer token message + let xcm = Xcm(vec![ + WithdrawAsset(MultiAssets::from(vec![fee.clone()])), + BuyExecution { fees: fee, weight_limit: Unlimited }, + ExportMessage { + network: Ethereum { chain_id: 11155111 }, + destination: Here, + xcm: inner_xcm, + }, + ]); + + // execute XCM + let hash = xcm.using_encoded(sp_io::hashing::blake2_256); + XcmExecutor::::execute_xcm( + assethub_parachain_location, + xcm, + hash, + 
RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), + ) +} + +pub fn send_transfer_token_message_success( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + assethub_parachain_id: u32, + weth_contract_address: H160, + destination_address: H160, + fee_amount: u128, + snowbridge_outbound_queue: Box< + dyn Fn(Vec) -> Option>, + >, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + snowbridge_outbound_queue::Config + + snowbridge_system::Config, + XcmConfig: xcm_executor::Config, + ValidatorIdOf: From>, +{ + ExtBuilder::::default() + .with_collators(collator_session_key.collators()) + .with_session_keys(collator_session_key.session_keys()) + .with_para_id(runtime_para_id.into()) + .with_tracing() + .build() + .execute_with(|| { + EthereumSystem::initialize(runtime_para_id.into(), assethub_parachain_id.into()) + .unwrap(); + + // fund asset hub sovereign account enough so it can pay fees + initial_fund::( + assethub_parachain_id, + DefaultBridgeHubEthereumBaseFee::get() + 1_000_000_000, + ); + + let outcome = send_transfer_token_message::( + assethub_parachain_id, + weth_contract_address, + destination_address, + fee_amount, + ); + + assert_ok!(outcome.ensure_complete()); + + // check events + let mut events = >::events() + .into_iter() + .filter_map(|e| snowbridge_outbound_queue(e.event.encode())); + assert!( + events.any(|e| matches!(e, snowbridge_outbound_queue::Event::MessageQueued { .. })) + ); + }); +} + +pub fn send_unpaid_transfer_token_message( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + assethub_parachain_id: u32, + weth_contract_address: H160, + destination_contract: H160, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + snowbridge_outbound_queue::Config, + XcmConfig: xcm_executor::Config, + ValidatorIdOf: From>, +{ + let assethub_parachain_location = MultiLocation::new(1, Parachain(assethub_parachain_id)); + + ExtBuilder::::default() + .with_collators(collator_session_key.collators()) + .with_session_keys(collator_session_key.session_keys()) + .with_para_id(runtime_para_id.into()) + .with_tracing() + .build() + .execute_with(|| { + let asset_hub_sovereign_account = + snowbridge_core::sibling_sovereign_account::(assethub_parachain_id.into()); + + >::mint_into( + &asset_hub_sovereign_account, + 4000000000u32.into(), + ) + .unwrap(); + + let asset = MultiAsset { + id: Concrete(MultiLocation { + parents: 0, + interior: X1(AccountKey20 { network: None, key: weth_contract_address.into() }), + }), + fun: Fungible(1000000000), + }; + let assets = vec![asset.clone()]; + + let inner_xcm = Xcm(vec![ + WithdrawAsset(MultiAssets::from(assets.clone())), + ClearOrigin, + BuyExecution { fees: asset, weight_limit: Unlimited }, + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: MultiLocation { + parents: 0, + interior: X1(AccountKey20 { + network: None, + key: destination_contract.into(), + }), + }, + }, + SetTopic([0; 32]), + ]); + + // prepare transfer token message + let xcm = Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + ExportMessage { + network: Ethereum { chain_id: 11155111 }, + destination: Here, + xcm: inner_xcm, + }, + ]); + + // 
execute XCM + let hash = xcm.using_encoded(sp_io::hashing::blake2_256); + let outcome = XcmExecutor::::execute_xcm( + assethub_parachain_location, + xcm, + hash, + RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), + ); + // check error is barrier + assert_err!(outcome.ensure_complete(), Barrier); + }); +} + +#[allow(clippy::too_many_arguments)] +pub fn send_transfer_token_message_failure( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + assethub_parachain_id: u32, + initial_amount: u128, + weth_contract_address: H160, + destination_address: H160, + fee_amount: u128, + expected_error: Error, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + snowbridge_outbound_queue::Config + + snowbridge_system::Config, + XcmConfig: xcm_executor::Config, + ValidatorIdOf: From>, +{ + ExtBuilder::::default() + .with_collators(collator_session_key.collators()) + .with_session_keys(collator_session_key.session_keys()) + .with_para_id(runtime_para_id.into()) + .with_tracing() + .build() + .execute_with(|| { + EthereumSystem::initialize(runtime_para_id.into(), assethub_parachain_id.into()) + .unwrap(); + + // fund asset hub sovereign account enough so it can pay fees + initial_fund::(assethub_parachain_id, initial_amount); + + let outcome = send_transfer_token_message::( + assethub_parachain_id, + weth_contract_address, + destination_address, + fee_amount, + ); + // check err is NotHoldingFees + assert_err!(outcome.ensure_complete(), expected_error); + }); +} diff --git a/bridges/snowbridge/parachain/scripts/benchmark.sh b/bridges/snowbridge/parachain/scripts/benchmark.sh new file mode 100755 index 0000000000000000000000000000000000000000..c47649b2eebe213e45b2c2a18393dd0dbb85f45f --- /dev/null +++ b/bridges/snowbridge/parachain/scripts/benchmark.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# Example command for updating pallet benchmarking +pushd ../cumulus +cargo run --release --bin polkadot-parachain \ +--features runtime-benchmarks \ +-- \ +benchmark pallet \ +--chain=bridge-hub-rococo-dev \ +--pallet=snowbridge_ethereum_beacon_client \ +--extrinsic="*" \ +--execution=wasm --wasm-execution=compiled \ +--steps 50 --repeat 20 \ +--output ./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_ethereum_beacon_client.rs +popd diff --git a/bridges/snowbridge/parachain/scripts/hexliteral.sh b/bridges/snowbridge/parachain/scripts/hexliteral.sh new file mode 100755 index 0000000000000000000000000000000000000000..e34a2b9b5151a667b10354d987d89a187b807b1c --- /dev/null +++ b/bridges/snowbridge/parachain/scripts/hexliteral.sh @@ -0,0 +1,5 @@ +#!/bin/bash +# Creates a string constant from STDIN +echo "const DATA: &'static str = concat!(" +cat - | fold | sed 's/^.*/\t"&",/' +echo ");" \ No newline at end of file diff --git a/bridges/snowbridge/parachain/scripts/init.sh b/bridges/snowbridge/parachain/scripts/init.sh new file mode 100755 index 0000000000000000000000000000000000000000..1405a41ef333e6af863080d83f854d3edb5fb4fa --- /dev/null +++ b/bridges/snowbridge/parachain/scripts/init.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e + +echo "*** Initializing WASM build environment" + +if [ -z $CI_PROJECT_NAME ] ; then + rustup update nightly + rustup update stable +fi + +rustup target add wasm32-unknown-unknown --toolchain nightly diff --git 
a/bridges/snowbridge/parachain/scripts/make-build-config.sh b/bridges/snowbridge/parachain/scripts/make-build-config.sh new file mode 100755 index 0000000000000000000000000000000000000000..a1b116a5dd0c18e00213599918d7f2825f9cdf2f --- /dev/null +++ b/bridges/snowbridge/parachain/scripts/make-build-config.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cd ../ethereum + +truffle exec scripts/dumpParachainConfig.js | sed '/^Using/d;/^$/d' diff --git a/bridges/snowbridge/parachain/scripts/verify-pallets-build.sh b/bridges/snowbridge/parachain/scripts/verify-pallets-build.sh new file mode 100755 index 0000000000000000000000000000000000000000..f060cf958b75800cc6c8e1e86940b4ffea188db5 --- /dev/null +++ b/bridges/snowbridge/parachain/scripts/verify-pallets-build.sh @@ -0,0 +1,113 @@ +#!/bin/bash + +# A script to remove everything from snowbridge repository/subtree, except: +# +# - parachain +# - readme +# - license + +set -eu + +# show CLI help +function show_help() { + set +x + echo " " + echo Error: $1 + echo "Usage:" + echo " ./scripts/verify-pallets-build.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo" + echo "Options:" + echo " --no-revert Leaves only runtime code on exit" + echo " --ignore-git-state Ignores git actual state" + exit 1 +} + +# parse CLI args +NO_REVERT= +IGNORE_GIT_STATE= +for i in "$@" +do + case $i in + --no-revert) + NO_REVERT=true + shift + ;; + --ignore-git-state) + IGNORE_GIT_STATE=true + shift + ;; + *) + show_help "Unknown option: $i" + ;; + esac +done + +# the script is able to work only on clean git copy, unless we want to ignore this check +[[ ! -z "${IGNORE_GIT_STATE}" ]] || [[ -z "$(git status --porcelain)" ]] || { echo >&2 "The git copy must be clean"; exit 1; } + +# let's avoid any restrictions on where this script can be called for - snowbridge repo may be +# plugged into any other repo folder. So the script (and other stuff that needs to be removed) +# may be located either in call dir, or one of it subdirs. +SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../.." + +# remove everything we think is not required for our needs +rm -rf $SNOWBRIDGE_FOLDER/.cargo +rm -rf $SNOWBRIDGE_FOLDER/.github +rm -rf $SNOWBRIDGE_FOLDER/contracts +rm -rf $SNOWBRIDGE_FOLDER/codecov.yml +rm -rf $SNOWBRIDGE_FOLDER/docs +rm -rf $SNOWBRIDGE_FOLDER/hooks +rm -rf $SNOWBRIDGE_FOLDER/relayer +rm -rf $SNOWBRIDGE_FOLDER/smoketest +rm -rf $SNOWBRIDGE_FOLDER/web +rm -rf $SNOWBRIDGE_FOLDER/.envrc-example +rm -rf $SNOWBRIDGE_FOLDER/.gitbook.yaml +rm -rf $SNOWBRIDGE_FOLDER/.gitignore +rm -rf $SNOWBRIDGE_FOLDER/.gitmodules +rm -rf $SNOWBRIDGE_FOLDER/_typos.toml +rm -rf $SNOWBRIDGE_FOLDER/_codecov.yml +rm -rf $SNOWBRIDGE_FOLDER/flake.lock +rm -rf $SNOWBRIDGE_FOLDER/flake.nix +rm -rf $SNOWBRIDGE_FOLDER/go.work +rm -rf $SNOWBRIDGE_FOLDER/go.work.sum +rm -rf $SNOWBRIDGE_FOLDER/polkadot-sdk +rm -rf $SNOWBRIDGE_FOLDER/rust-toolchain.toml +rm -rf $SNOWBRIDGE_FOLDER/parachain/rustfmt.toml +rm -rf $SNOWBRIDGE_FOLDER/parachain/.gitignore +rm -rf $SNOWBRIDGE_FOLDER/parachain/templates +rm -rf $SNOWBRIDGE_FOLDER/parachain/.config +rm -rf $SNOWBRIDGE_FOLDER/parachain/pallets/ethereum-beacon-client/fuzz + +cd bridges/snowbridge/parachain + +# fix polkadot-sdk paths in Cargo.toml files +find "." 
-name 'Cargo.toml' | while read -r file; do + replace=$(printf '../../' ) + if [[ "$(uname)" = "Darwin" ]] || [[ "$(uname)" = *BSD ]]; then + sed -i '' "s|polkadot-sdk/|$replace|g" "$file" + else + sed -i "s|polkadot-sdk/|$replace|g" "$file" + fi +done + +# let's test if everything we need compiles +cargo check -p snowbridge-ethereum-beacon-client +cargo check -p snowbridge-ethereum-beacon-client --features runtime-benchmarks +cargo check -p snowbridge-ethereum-beacon-client --features try-runtime +cargo check -p snowbridge-inbound-queue +cargo check -p snowbridge-inbound-queue --features runtime-benchmarks +cargo check -p snowbridge-inbound-queue --features try-runtime +cargo check -p snowbridge-outbound-queue +cargo check -p snowbridge-outbound-queue --features runtime-benchmarks +cargo check -p snowbridge-outbound-queue --features try-runtime +cargo check -p snowbridge-system +cargo check -p snowbridge-system --features runtime-benchmarks +cargo check -p snowbridge-system --features try-runtime + +cd - + +# we're removing lock file after all checks are done. Otherwise we may use different +# Substrate/Polkadot/Cumulus commits and our checks will fail +rm -f $SNOWBRIDGE_FOLDER/parachain/Cargo.toml +rm -f $SNOWBRIDGE_FOLDER/parachain/Cargo.lock + +echo "OK" diff --git a/bridges/zombienet/README.md b/bridges/zombienet/README.md index 7f7de770814b1430dfdb1b8ea7261a7563cf2ca9..b601154b624ce69ed921ea6c2453d17c4d37b6c8 100644 --- a/bridges/zombienet/README.md +++ b/bridges/zombienet/README.md @@ -1,4 +1,4 @@ -# Bridges Tests for Local Rococo <> Wococo Bridge +# Bridges Tests for Local Rococo <> Westend Bridge This folder contains [zombienet](https://github.com/paritytech/zombienet/) based integration tests for both onchain and offchain bridges code. Due to some @@ -9,7 +9,7 @@ To start those tests, you need to: - download latest [zombienet release](https://github.com/paritytech/zombienet/releases); -- build Polkadot binary by running `cargo build -p polkadot --release` command in the +- build Polkadot binary by running `cargo build -p polkadot --release --features fast-runtime` command in the [`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; - build Polkadot Parachain binary by running `cargo build -p polkadot-parachain-bin --release` command in the diff --git a/bridges/zombienet/helpers/native-assets-balance-increased.js b/bridges/zombienet/helpers/native-assets-balance-increased.js new file mode 100644 index 0000000000000000000000000000000000000000..9ee1a769e9f2807ed7b73ca9c6aa4b89d5c135f9 --- /dev/null +++ b/bridges/zombienet/helpers/native-assets-balance-increased.js @@ -0,0 +1,20 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + const accountAddress = args[0]; + const initialAccountData = await api.query.system.account(accountAddress); + const initialAccountBalance = initialAccountData.data['free']; + while (true) { + const accountData = await api.query.system.account(accountAddress); + const accountBalance = accountData.data['free']; + if (accountBalance > initialAccountBalance) { + return accountBalance; + } + + // else sleep and retry + await new Promise((resolve) => setTimeout(resolve, 12000)); + } +} + +module.exports = { run } diff --git a/bridges/zombienet/helpers/wait-hrmp-channel-opened.js b/bridges/zombienet/helpers/wait-hrmp-channel-opened.js new file mode 100644 index 
0000000000000000000000000000000000000000..e700cab1d7481d77631e55492e4b0032f4382028 --- /dev/null +++ b/bridges/zombienet/helpers/wait-hrmp-channel-opened.js @@ -0,0 +1,22 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + const sibling = args[0]; + + while (true) { + const messagingStateAsObj = await api.query.parachainSystem.relevantMessagingState(); + const messagingState = api.createType("Option", messagingStateAsObj); + if (messagingState.isSome) { + const egressChannels = messagingState.unwrap().egressChannels; + if (egressChannels.find(x => x[0] == sibling)) { + return; + } + } + + // else sleep and retry + await new Promise((resolve) => setTimeout(resolve, 12000)); + } +} + +module.exports = { run } diff --git a/bridges/zombienet/run-tests.sh b/bridges/zombienet/run-tests.sh index 1fdbc6b8d6111a1338c5629670fe6f417a6d23bd..4f80e06650eed0b4c6bb28114432d3f8a87a46f9 100755 --- a/bridges/zombienet/run-tests.sh +++ b/bridges/zombienet/run-tests.sh @@ -11,11 +11,14 @@ export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_FOLDER/bridges/zombienet/tests export POLKADOT_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot export POLKADOT_PARACHAIN_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot-parachain export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=$POLKADOT_PARACHAIN_BINARY_PATH -export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO=$POLKADOT_PARACHAIN_BINARY_PATH +export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=$POLKADOT_PARACHAIN_BINARY_PATH export ZOMBIENET_BINARY_PATH=~/local_bridge_testing/bin/zombienet-linux +# check if `wait` supports -p flag +if [ `printf "$BASH_VERSION\n5.1" | sort -V | head -n 1` = "5.1" ]; then IS_BASH_5_1=1; else IS_BASH_5_1=0; fi + # bridge configuration -export LANE_ID="00000001" +export LANE_ID="00000002" # tests configuration ALL_TESTS_FOLDER=`mktemp -d` @@ -66,12 +69,19 @@ do # wait until all tests are completed relay_exited=0 for n in `seq 1 $TEST_COPROCS_COUNT`; do - wait -n -p COPROC_PID - exit_code=$? - coproc_name=${TEST_COPROCS[$COPROC_PID, 0]} - coproc_log=${TEST_COPROCS[$COPROC_PID, 1]} - coproc_stdout=$(cat $coproc_log) - relay_exited=$(expr "${coproc_name}" == "relay") + if [ "$IS_BASH_5_1" -eq 1 ]; then + wait -n -p COPROC_PID + exit_code=$? + coproc_name=${TEST_COPROCS[$COPROC_PID, 0]} + coproc_log=${TEST_COPROCS[$COPROC_PID, 1]} + coproc_stdout=$(cat $coproc_log) + relay_exited=$(expr "${coproc_name}" == "relay") + else + wait -n + exit_code=$? 
+ coproc_name="" + coproc_stdout="" + fi echo "Process $coproc_name has finished with exit code: $exit_code" # if exit code is not zero, exit diff --git a/bridges/zombienet/scripts/invoke-script.sh b/bridges/zombienet/scripts/invoke-script.sh index cb21d61ab91db7030553f8879a7d64abe527057a..6a3754a8824017e18409cde031be9a09e9392a75 100755 --- a/bridges/zombienet/scripts/invoke-script.sh +++ b/bridges/zombienet/scripts/invoke-script.sh @@ -1,5 +1,5 @@ #!/bin/bash pushd $POLKADOT_SDK_FOLDER/cumulus/scripts -./bridges_rococo_wococo.sh $1 +./bridges_rococo_westend.sh $1 popd diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..a61f1e039f451f4a5cff99e049d0369d28cced38 --- /dev/null +++ b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl @@ -0,0 +1,34 @@ +Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back +Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml +Creds: config + +# step 1: initialize Westend AH +asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-westend-local" within 240 seconds +asset-hub-westend-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1002" within 400 seconds + +# step 2: initialize Westend bridge hub +bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 120 seconds + +# step 3: relay is started elsewhere - let's wait until with-Rococo GRANPDA pallet is initialized at Westend +bridge-hub-westend-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds + +# step 4: send WND to //Alice on Rococo AH +# (that's a required part of a sibling 0001-asset-transfer-works-westend-to-rococo.zndsl test) +asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds + +# step 5: elsewhere Rococo has sent ROC to //Alice - let's wait for it +asset-hub-westend-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds + +# step 6: check that the relayer //Charlie is rewarded by both our AH and target AH +bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726f,BridgedChain,0" within 300 seconds +bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 300 seconds + +# step 7: send wROC back to Alice at Rococo AH +asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 60 seconds + +# step 8: elsewhere Rococo has sent wWND to //Alice - let's wait for it +# (we wait until //Alice account increases here - there are no other transactionc that may increase it) +asset-hub-westend-collator1: js-script ../helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 600 seconds + +# wait until other network test has completed OR exit with an error too +asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git 
a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl deleted file mode 100644 index a1af2625c1ca569ce2e05310ead3cd14da2a9952..0000000000000000000000000000000000000000 --- a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl +++ /dev/null @@ -1,25 +0,0 @@ -Description: User is able to transfer ROC from Rococo Asset Hub to Wococo Asset Hub -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml -Creds: config - -# step 1: initialize Wococo asset hub -asset-hub-wococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-wococo-local" within 120 seconds - -# step 2: initialize Wococo bridge hub -bridge-hub-wococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-wococo-local" within 120 seconds - -# step 3: relay is started elsewhere - let's wait until with-Rococo GRANPDA pallet is initialized at Wococo -bridge-hub-wococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds - -# step 2: send WOC to Rococo -asset-hub-wococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-wococo-local" within 60 seconds - -# step 3: elsewhere Rococo has sent ROC to //Alice - let's wait for it -asset-hub-wococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds - -# step 4: check that the relayer //Charlie is rewarded by both our AH and target AH -bridge-hub-wococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268726F,BridgedChain,0" within 300 seconds -bridge-hub-wococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268726F,ThisChain,0" within 300 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-wococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..2da5b7a772a7e5dfd61610ee1e02f5227994fdd3 --- /dev/null +++ b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl @@ -0,0 +1,34 @@ +Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back +Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml +Creds: config + +# step 1: initialize Rococo AH +asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 240 seconds +asset-hub-rococo-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1013" within 400 seconds + +# step 2: initialize Rococo bridge hub +bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 120 seconds + +# step 3: relay is started elsewhere - let's wait until with-Westend GRANPDA pallet is initialized at Rococo +bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds + +# step 4: send ROC to //Alice on Westend AH +# (that's a required part of a sibling 0001-asset-transfer-works-rococo-to-westend.zndsl test) +asset-hub-rococo-collator1: run 
../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds + +# step 5: elsewhere Westend has sent WND to //Alice - let's wait for it +asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 600 seconds + +# step 6: check that the relayer //Charlie is rewarded by both our AH and target AH +bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,BridgedChain,0" within 300 seconds +bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 300 seconds + +# step 7: send wWND back to Alice at Westend AH +asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 60 seconds + +# step 8: elsewhere Westend has sent wROC to //Alice - let's wait for it +# (we wait until //Alice account increases here - there are no other transactionc that may increase it) +asset-hub-rococo-collator1: js-script ../helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 600 seconds + +# wait until other network test has completed OR exit with an error too +asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl deleted file mode 100644 index ad2446d58ce743d063246e42ea2e378e753a78bc..0000000000000000000000000000000000000000 --- a/bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl +++ /dev/null @@ -1,25 +0,0 @@ -Description: User is able to transfer WOC from Wococo Asset Hub to Rococo Asset Hub -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml -Creds: config - -# step 1: initialize Rococo asset hub -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 120 seconds - -# step 2: initialize Rococo bridge hub -bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 120 seconds - -# step 3: relay is started elsewhere - let's wait until with-Wococo GRANPDA pallet is initialized at Rococo -bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Wococo,0" within 400 seconds - -# step 4: send ROC to Wococo -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds - -# step 5: elsewhere Wococo has sent WOC to //Alice - let's wait for it -asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Wococo" within 600 seconds - -# step 6: check that the relayer //Charlie is rewarded by both our AH and target AH -bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268776F,BridgedChain,0" within 300 seconds -bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268776F,ThisChain,0" within 300 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-rococo-collator1: run 
../scripts/sync-exit.sh within 600 seconds diff --git a/bridges/zombienet/tests/0001-start-relay.sh b/bridges/zombienet/tests/0001-start-relay.sh old mode 100644 new mode 100755 index fc231fba89595112e84353db36086cf426fd08c3..7be2cf4d5938797b98b86e8abf08ae43a5cee449 --- a/bridges/zombienet/tests/0001-start-relay.sh +++ b/bridges/zombienet/tests/0001-start-relay.sh @@ -1,5 +1,5 @@ #!/bin/bash pushd $POLKADOT_SDK_FOLDER/cumulus/scripts -./bridges_rococo_wococo.sh run-relay +./bridges_rococo_westend.sh run-relay popd diff --git a/cumulus/README.md b/cumulus/README.md index 19f9f3f113dd06655c418a4caf6b30a9cad6ee9f..7e145ad7b4abaf1aea3ecaf18236d231d2599b5d 100644 --- a/cumulus/README.md +++ b/cumulus/README.md @@ -4,7 +4,7 @@ This repository contains both the Cumulus SDK and also specific chains implemented on top of this SDK. -If you only want to run a **Polkadot Parachain Node**, check out our [container section](./docs/container.md). +If you only want to run a **Polkadot Parachain Node**, check out our [container section](./docs/contributor/container.md). ## Cumulus SDK @@ -34,7 +34,7 @@ A Polkadot [collator](https://wiki.polkadot.network/docs/en/learn-collator) for `polkadot-parachain` binary (previously called `polkadot-collator`). You may run `polkadot-parachain` locally after building it or using one of the container option described -[here](./docs/container.md). +[here](./docs/contributor/container.md). ### Relay Chain Interaction To operate a parachain node, a connection to the corresponding relay chain is necessary. This can be achieved in one of @@ -142,8 +142,8 @@ zombienet --provider native spawn ./zombienet/examples/small_network.toml # Clone git clone https://github.com/paritytech/polkadot-sdk -# Compile Polkadot -cargo build --release --bin polkadot +# Compile Polkadot's required binaries +cargo build --release -p polkadot # Generate a raw chain spec ./target/release/polkadot build-spec --chain rococo-local --disable-default-bootnode --raw > rococo-local-cfde.json @@ -158,11 +158,8 @@ cargo build --release --bin polkadot #### Launch the Parachain ```bash -# Clone -git clone https://github.com/paritytech/polkadot-sdk - # Compile -cargo build --release --bin polkadot-parachain +cargo build --release -p polkadot-parachain-bin # Export genesis state ./target/release/polkadot-parachain export-genesis-state > genesis-state @@ -172,15 +169,15 @@ cargo build --release --bin polkadot-parachain # Collator1 ./target/release/polkadot-parachain --collator --alice --force-authoring \ - --tmp --port 40335 --rpc-port 9946 -- --chain ../polkadot/rococo-local-cfde.json --port 30335 + --tmp --port 40335 --rpc-port 9946 -- --chain rococo-local-cfde.json --port 30335 # Collator2 ./target/release/polkadot-parachain --collator --bob --force-authoring \ - --tmp --port 40336 --rpc-port 9947 -- --chain ../polkadot/rococo-local-cfde.json --port 30336 + --tmp --port 40336 --rpc-port 9947 -- --chain rococo-local-cfde.json --port 30336 # Parachain Full Node 1 ./target/release/polkadot-parachain --tmp --port 40337 --rpc-port 9948 -- \ - --chain ../polkadot/rococo-local-cfde.json --port 30337 + --chain rococo-local-cfde.json --port 30337 ``` #### Register the parachain @@ -245,7 +242,7 @@ Once the executable is built, launch collators for each parachain (repeat once e ./target/release/polkadot-parachain --chain $CHAIN --validator ``` -You can also build [using a container](./docs/container.md). +You can also build [using a container](./docs/contributor/container.md). 
### Parachains diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 0f942feb59524d6557d71d58eb617ecf6b3fb52b..e57e7a44a568a3558b362237b04e33acd78cc95d 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -6,8 +6,11 @@ edition.workspace = true description = "Parachain node CLI utilities." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } url = "2.4.0" @@ -18,3 +21,4 @@ sc-chain-spec = { path = "../../../substrate/client/chain-spec" } sc-service = { path = "../../../substrate/client/service" } sp-core = { path = "../../../substrate/primitives/core" } sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs index 1b18ed064373fdd58224b70da022a969ad641eb6..1cebecb004312f553cb49e910841c5f8f2f310b2 100644 --- a/cumulus/client/cli/src/lib.rs +++ b/cumulus/client/cli/src/lib.rs @@ -23,20 +23,18 @@ use std::{ io::{self, Write}, net::SocketAddr, path::PathBuf, + sync::Arc, }; use codec::Encode; use sc_chain_spec::ChainSpec; -use sc_client_api::ExecutorProvider; +use sc_client_api::HeaderBackend; use sc_service::{ config::{PrometheusConfig, TelemetryEndpoints}, BasePath, TransactionPoolOptions, }; use sp_core::hexdisplay::HexDisplay; -use sp_runtime::{ - traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Zero}, - StateVersion, -}; +use sp_runtime::traits::{Block as BlockT, Zero}; use url::Url; /// The `purge-chain` command used to remove the whole chain: the parachain and the relay chain. @@ -129,9 +127,9 @@ impl sc_cli::CliConfiguration for PurgeChainCmd { } } -/// Command for exporting the genesis state of the parachain +/// Command for exporting the genesis head data of the parachain #[derive(Debug, clap::Parser)] -pub struct ExportGenesisStateCommand { +pub struct ExportGenesisHeadCommand { /// Output file name or stdout if unspecified. 
#[arg()] pub output: Option, @@ -145,24 +143,29 @@ pub struct ExportGenesisStateCommand { pub shared_params: sc_cli::SharedParams, } -impl ExportGenesisStateCommand { - /// Run the export-genesis-state command - pub fn run( - &self, - chain_spec: &dyn ChainSpec, - client: &impl ExecutorProvider, - ) -> sc_cli::Result<()> { - let state_version = sc_chain_spec::resolve_state_version_from_wasm( - &chain_spec.build_storage()?, - client.executor(), - )?; - - let block: Block = generate_genesis_block(chain_spec, state_version)?; - let raw_header = block.header().encode(); +impl ExportGenesisHeadCommand { + /// Run the export-genesis-head command + pub fn run(&self, client: Arc) -> sc_cli::Result<()> + where + B: BlockT, + C: HeaderBackend + 'static, + { + let genesis_hash = client.hash(Zero::zero())?.ok_or(sc_cli::Error::Client( + sp_blockchain::Error::Backend( + "Failed to lookup genesis block hash when exporting genesis head data.".into(), + ), + ))?; + let genesis_header = client.header(genesis_hash)?.ok_or(sc_cli::Error::Client( + sp_blockchain::Error::Backend( + "Failed to lookup genesis header by hash when exporting genesis head data.".into(), + ), + ))?; + + let raw_header = genesis_header.encode(); let output_buf = if self.raw { raw_header } else { - format!("0x{:?}", HexDisplay::from(&block.header().encode())).into_bytes() + format!("0x{:?}", HexDisplay::from(&genesis_header.encode())).into_bytes() }; if let Some(output) = &self.output { @@ -175,46 +178,17 @@ impl ExportGenesisStateCommand { } } -/// Generate the genesis block from a given ChainSpec. -pub fn generate_genesis_block( - chain_spec: &dyn ChainSpec, - genesis_state_version: StateVersion, -) -> Result { - let storage = chain_spec.build_storage()?; - - let child_roots = storage.children_default.iter().map(|(sk, child_content)| { - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - child_content.data.clone().into_iter().collect(), - genesis_state_version, - ); - (sk.clone(), state_root.encode()) - }); - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - storage.top.clone().into_iter().chain(child_roots).collect(), - genesis_state_version, - ); - - let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - Vec::new(), - genesis_state_version, - ); - - Ok(Block::new( - <::Header as HeaderT>::new( - Zero::zero(), - extrinsics_root, - state_root, - Default::default(), - Default::default(), - ), - Default::default(), - )) -} - -impl sc_cli::CliConfiguration for ExportGenesisStateCommand { +impl sc_cli::CliConfiguration for ExportGenesisHeadCommand { fn shared_params(&self) -> &sc_cli::SharedParams { &self.shared_params } + + fn base_path(&self) -> sc_cli::Result> { + // As we are just exporting the genesis wasm a tmp database is enough. + // + // As otherwise we may "pollute" the global base path. + Ok(Some(BasePath::new_temp_dir()?)) + } } /// Command for exporting the genesis wasm file. @@ -266,6 +240,13 @@ impl sc_cli::CliConfiguration for ExportGenesisWasmCommand { fn shared_params(&self) -> &sc_cli::SharedParams { &self.shared_params } + + fn base_path(&self) -> sc_cli::Result> { + // As we are just exporting the genesis wasm a tmp database is enough. + // + // As otherwise we may "pollute" the global base path. 
+ Ok(Some(BasePath::new_temp_dir()?)) + } } fn validate_relay_chain_url(arg: &str) -> Result { @@ -296,7 +277,14 @@ pub struct RunCmd { #[arg(long, conflicts_with = "validator")] pub collator: bool, - /// EXPERIMENTAL: Specify an URL to a relay chain full node to communicate with. + /// Creates a less resource-hungry node that retrieves relay chain data from an RPC endpoint. + /// + /// The provided URLs should point to RPC endpoints of the relay chain. + /// This node connects to the remote nodes following the order they were specified in. If the + /// connection fails, it attempts to connect to the next endpoint in the list. + /// + /// Note: This option doesn't stop the node from connecting to the relay chain network but + /// reduces bandwidth use. #[arg( long, value_parser = validate_relay_chain_url, diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index ad9f01ed08395cb52830495718d80ee920bbc32f..5aa260eae1b4c34c158194bbcb299b95448e7a76 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -6,9 +6,12 @@ edition.workspace = true description = "Common node-side functionality and glue code to collate parachain blocks." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" tracing = "0.1.25" @@ -31,7 +34,7 @@ cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } [dev-dependencies] -async-trait = "0.1.73" +async-trait = "0.1.74" # Substrate sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index f440270c9822d1c5dc9a3c99c25687efc44ef034..4c20911c645d4e2039b7bb3ed42b1a2572712c18 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -6,9 +6,12 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -async-trait = "0.1.73" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +async-trait = "0.1.74" +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" tracing = "0.1.37" schnellru = "0.2.1" diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 9dfd14b1cf5e58c593f7590ea70f47bc46151fd9..e7fc7a88640e2c1f9576817549fd7cebff62fb9e 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -6,10 +6,13 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -async-trait = "0.1.73" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } -dyn-clone = "1.0.12" +async-trait = "0.1.74" +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +dyn-clone = "1.0.16" futures = "0.3.28" log = "0.4.20" tracing = "0.1.37" diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index 
08bceabb2bd4a49ac8917e82caf1b97cd9eac183..cebe34e7ea58828372a9261e3be94866e119546a 100644 --- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -111,12 +111,15 @@ impl ParachainConsensus for Box + Send + /// Parachain specific block import. /// -/// This is used to set `block_import_params.fork_choice` to `false` as long as the block origin is -/// not `NetworkInitialSync`. The best block for parachains is determined by the relay chain. -/// Meaning we will update the best block, as it is included by the relay-chain. +/// Specialized block import for parachains. It supports to delay setting the best block until the +/// relay chain has included a candidate in its best block. By default the delayed best block +/// setting is disabled. The block import also monitors the imported blocks and prunes by default if +/// there are too many blocks at the same height. Too many blocks at the same height can for example +/// happen if the relay chain is rejecting the parachain blocks in the validation. pub struct ParachainBlockImport { inner: BI, monitor: Option>>, + delayed_best_block: bool, } impl> ParachainBlockImport { @@ -141,13 +144,27 @@ impl> ParachainBlockImport let monitor = level_limit.map(|level_limit| SharedData::new(LevelMonitor::new(level_limit, backend))); - Self { inner, monitor } + Self { inner, monitor, delayed_best_block: false } + } + + /// Create a new instance which delays setting the best block. + /// + /// The number of leaves per level limit is set to `LevelLimit::Default`. + pub fn new_with_delayed_best_block(inner: BI, backend: Arc) -> Self { + Self { + delayed_best_block: true, + ..Self::new_with_limit(inner, backend, LevelLimit::Default) + } } } impl Clone for ParachainBlockImport { fn clone(&self) -> Self { - ParachainBlockImport { inner: self.inner.clone(), monitor: self.monitor.clone() } + ParachainBlockImport { + inner: self.inner.clone(), + monitor: self.monitor.clone(), + delayed_best_block: self.delayed_best_block, + } } } @@ -182,11 +199,13 @@ where params.finalized = true; } - // Best block is determined by the relay chain, or if we are doing the initial sync - // we import all blocks as new best. - params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( - params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, - )); + if self.delayed_best_block { + // Best block is determined by the relay chain, or if we are doing the initial sync + // we import all blocks as new best. 
+ params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( + params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, + )); + } let maybe_lock = self.monitor.as_ref().map(|monitor_lock| { let mut monitor = monitor_lock.shared_data_locked(); diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 9658a0add790da01ad9fe83f11956d1fbd88523a..597d1ab2acc2cff42d3230898c1129a7ba63b6f3 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -1124,7 +1124,8 @@ fn find_potential_parents_aligned_with_pending() { let backend = Arc::new(Backend::new_test(1000, 1)); let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); - let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let relay_parent = relay_hash_from_block_num(10); // Choose different relay parent for alternative chain to get new hashes. @@ -1279,7 +1280,8 @@ fn find_potential_parents_aligned_no_pending() { let backend = Arc::new(Backend::new_test(1000, 1)); let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); - let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let relay_parent = relay_hash_from_block_num(10); // Choose different relay parent for alternative chain to get new hashes. diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index 4cfba66cec371835f36afefc84eb8abbc50c1a91..107f466ca1049623c14c009f060deb010c2d8ac5 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -6,9 +6,12 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] anyhow = "1.0" -async-trait = "0.1.73" +async-trait = "0.1.74" thiserror = "1.0.48" # Substrate diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index de280e6e9a890fd7e4680b3ab8a1111d6a02b717..d7702809779db34e0cf47f953e45475e37b674c4 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -6,8 +6,11 @@ authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -async-trait = "0.1.73" +async-trait = "0.1.74" futures = "0.3.28" parking_lot = "0.12.1" tracing = "0.1.37" diff --git a/cumulus/client/consensus/relay-chain/src/import_queue.rs b/cumulus/client/consensus/relay-chain/src/import_queue.rs index 9ee03b95904c60fb3b26f60e54023c13c88a2808..f44f440932437ff0cf658f7644bbf2e23670f8cd 100644 --- a/cumulus/client/consensus/relay-chain/src/import_queue.rs +++ b/cumulus/client/consensus/relay-chain/src/import_queue.rs @@ -55,6 +55,10 @@ where &mut self, mut block_params: BlockImportParams, ) -> Result, String> { + block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( + block_params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, + )); + // Skip checks that include execution, if being told so, or when importing only state. 
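Both fork-choice hunks above reduce to one rule: a block only becomes the new best block when it arrives during initial network sync, and `ParachainBlockImport` now applies that rule only when the delayed-best-block mode is enabled, leaving the verifier's choice untouched otherwise. A small sketch of that decision, using local stand-ins for `BlockOrigin` and `ForkChoiceStrategy` rather than the real types:

/// Local stand-ins for `sp_consensus::BlockOrigin` and
/// `sc_consensus::ForkChoiceStrategy`, hypothetical and trimmed to this sketch.
#[derive(PartialEq)]
enum BlockOrigin {
    NetworkInitialSync,
    NetworkBroadcast,
    Own,
}

#[derive(Debug, PartialEq)]
enum ForkChoice {
    /// `true` means "make this block the new best block".
    Custom(bool),
}

/// With delayed best block enabled, a block is only made best when it arrives
/// during initial sync; otherwise best is set once the relay chain includes it.
fn fork_choice(delayed_best_block: bool, origin: BlockOrigin) -> Option<ForkChoice> {
    delayed_best_block.then(|| ForkChoice::Custom(origin == BlockOrigin::NetworkInitialSync))
}

fn main() {
    assert_eq!(
        fork_choice(true, BlockOrigin::NetworkInitialSync),
        Some(ForkChoice::Custom(true))
    );
    assert_eq!(fork_choice(true, BlockOrigin::Own), Some(ForkChoice::Custom(false)));
    // In the default (non-delayed) mode the block import leaves the
    // verifier's fork choice untouched.
    assert_eq!(fork_choice(false, BlockOrigin::NetworkBroadcast), None);
}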
// // This is done for example when gap syncing and it is expected that the block after the gap @@ -100,7 +104,6 @@ where } block_params.post_hash = Some(block_params.header.hash()); - Ok(block_params) } } diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 08956f9f6c641f7cf8ee4f2b21d5846fb8611c49..edd349155fa6b2f6659f445683c7b3605ac14386 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -6,9 +6,12 @@ description = "Cumulus-specific networking protocol" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -async-trait = "0.1.73" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +async-trait = "0.1.74" +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" parking_lot = "0.12.1" diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index e407b33e0e2e73aa5f0b266c1d40516acfaa8d9c..ad55e0e9c4b846306abf3101c89fc4d4c35e2d7a 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -6,8 +6,11 @@ description = "Cumulus-specific networking protocol" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" rand = "0.8.5" @@ -29,7 +32,7 @@ polkadot-primitives = { path = "../../../polkadot/primitives" } # Cumulus cumulus-primitives-core = { path = "../../primitives/core" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -async-trait = "0.1.73" +async-trait = "0.1.74" [dev-dependencies] tokio = { version = "1.32.0", features = ["macros"] } diff --git a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs index 322b19c796a8c10191f3bfb261b293aba4348944..2c635320ff4ae6f68f33bb9da5ca545098851f65 100644 --- a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs +++ b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs @@ -16,12 +16,12 @@ use sp_runtime::traits::Block as BlockT; -use polkadot_node_primitives::AvailableData; +use polkadot_node_primitives::PoV; use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt}; -use std::{collections::HashSet, pin::Pin}; +use std::{collections::HashSet, pin::Pin, sync::Arc}; use crate::RecoveryHandle; @@ -30,9 +30,8 @@ use crate::RecoveryHandle; /// This handles the candidate recovery and tracks the activate recoveries. pub(crate) struct ActiveCandidateRecovery { /// The recoveries that are currently being executed. - recoveries: FuturesUnordered< - Pin)> + Send>>, - >, + recoveries: + FuturesUnordered>)> + Send>>>, /// The block hashes of the candidates currently being recovered. 
candidates: HashSet, recovery_handle: Box, @@ -68,7 +67,7 @@ impl ActiveCandidateRecovery { self.recoveries.push( async move { match rx.await { - Ok(Ok(res)) => (block_hash, Some(res)), + Ok(Ok(res)) => (block_hash, Some(res.pov)), Ok(Err(error)) => { tracing::debug!( target: crate::LOG_TARGET, @@ -93,8 +92,8 @@ impl ActiveCandidateRecovery { /// Waits for the next recovery. /// - /// If the returned [`AvailableData`] is `None`, it means that the recovery failed. - pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option) { + /// If the returned [`PoV`] is `None`, it means that the recovery failed. + pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option>) { loop { if let Some(res) = self.recoveries.next().await { self.candidates.remove(&res.0); diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index b050bc66799c7aa84db67374dc801890e0e7ce25..32aba6c8993a6da67cd3adeb394e87e6c067b46c 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -51,7 +51,7 @@ use sc_consensus::import_queue::{ImportQueueService, IncomingBlock}; use sp_consensus::{BlockOrigin, BlockStatus, SyncOracle}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use polkadot_node_primitives::{AvailableData, POV_BOMB_LIMIT}; +use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT}; use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{ @@ -346,15 +346,11 @@ where } /// Handle a recovered candidate. - async fn handle_candidate_recovered( - &mut self, - block_hash: Block::Hash, - available_data: Option, - ) { - let available_data = match available_data { - Some(data) => { + async fn handle_candidate_recovered(&mut self, block_hash: Block::Hash, pov: Option<&PoV>) { + let pov = match pov { + Some(pov) => { self.candidates_in_retry.remove(&block_hash); - data + pov }, None => if self.candidates_in_retry.insert(block_hash) { @@ -373,18 +369,16 @@ where }, }; - let raw_block_data = match sp_maybe_compressed_blob::decompress( - &available_data.pov.block_data.0, - POV_BOMB_LIMIT, - ) { - Ok(r) => r, - Err(error) => { - tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV"); + let raw_block_data = + match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) { + Ok(r) => r, + Err(error) => { + tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV"); - self.reset_candidate(block_hash); - return - }, - }; + self.reset_candidate(block_hash); + return + }, + }; let block_data = match ParachainBlockData::::decode(&mut &raw_block_data[..]) { Ok(d) => d, @@ -416,6 +410,7 @@ where ?block_hash, parent_hash = ?parent, parent_scheduled_for_recovery, + waiting_blocks = self.waiting_for_parent.len(), "Waiting for recovery of parent.", ); @@ -448,13 +443,13 @@ where _ => (), } - self.import_block(block).await; + self.import_block(block); } /// Import the given `block`. /// /// This will also recursivley drain `waiting_for_parent` and import them as well. - async fn import_block(&mut self, block: Block) { + fn import_block(&mut self, block: Block) { let mut blocks = VecDeque::new(); tracing::debug!(target: LOG_TARGET, block_hash = ?block.hash(), "Importing block retrieved using pov_recovery"); @@ -557,7 +552,6 @@ where }; futures::pin_mut!(pending_candidates); - loop { select! 
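`handle_candidate_recovered` above retries a failed recovery exactly once, using `candidates_in_retry` to remember which block hashes have already failed. A stripped-down sketch of that bookkeeping, with `u64` standing in for the block hash and a boolean for the recovered `PoV`:

use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum RecoveryAction {
    /// First failure for this block: push it back into the recovery queue.
    Retry,
    /// Second failure: give up and reset the candidate.
    GiveUp,
    /// Recovery succeeded: continue with decompressing and importing the PoV.
    Import,
}

/// Decide what to do after a recovery attempt finished. `pov_recovered` stands
/// in for the `Option<&PoV>` handed back by the recovery subsystem.
fn on_recovery_result(
    candidates_in_retry: &mut HashSet<u64>,
    block_hash: u64,
    pov_recovered: bool,
) -> RecoveryAction {
    if pov_recovered {
        candidates_in_retry.remove(&block_hash);
        RecoveryAction::Import
    } else if candidates_in_retry.insert(block_hash) {
        // `insert` returns `true` only the first time this hash fails.
        RecoveryAction::Retry
    } else {
        RecoveryAction::GiveUp
    }
}

fn main() {
    let mut in_retry = HashSet::new();
    assert_eq!(on_recovery_result(&mut in_retry, 1, false), RecoveryAction::Retry);
    assert_eq!(on_recovery_result(&mut in_retry, 1, false), RecoveryAction::GiveUp);
    assert_eq!(on_recovery_result(&mut in_retry, 1, true), RecoveryAction::Import);
}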
{ pending_candidate = pending_candidates.next() => { @@ -579,6 +573,17 @@ where imported = imported_blocks.next() => { if let Some(imported) = imported { self.clear_waiting_recovery(&imported.hash); + + // We need to double check that no blocks are waiting for this block. + // Can happen when a waiting child block is queued to wait for parent while the parent block is still + // in the import queue. + if let Some(waiting_blocks) = self.waiting_for_parent.remove(&imported.hash) { + for block in waiting_blocks { + tracing::debug!(target: LOG_TARGET, block_hash = ?block.hash(), resolved_parent = ?imported.hash, "Found new waiting child block during import, queuing."); + self.import_block(block); + } + }; + } else { tracing::debug!(target: LOG_TARGET, "Imported blocks stream ended"); return; @@ -595,10 +600,10 @@ where next_to_recover = self.candidate_recovery_queue.next_recovery().fuse() => { self.recover_candidate(next_to_recover).await; }, - (block_hash, available_data) = + (block_hash, pov) = self.active_candidate_recovery.wait_for_recovery().fuse() => { - self.handle_candidate_recovered(block_hash, available_data).await; + self.handle_candidate_recovered(block_hash, pov.as_deref()).await; }, } } diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 87f0eabd9b5cdc4f13ecb3e39acff305982bee6b..63f4c915474363467205c5d9b49226f56fb30f27 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -6,8 +6,11 @@ edition.workspace = true description = "Implementation of the RelayChainInterface trait for Polkadot full-nodes." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] -async-trait = "0.1.73" +async-trait = "0.1.74" futures = "0.3.28" futures-timer = "3.0.2" @@ -39,7 +42,7 @@ sp-keyring = { path = "../../../substrate/primitives/keyring" } # Polkadot polkadot-primitives = { path = "../../../polkadot/primitives" } polkadot-test-client = { path = "../../../polkadot/node/test/client" } -metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features = ["futures_channel"] } # Cumulus cumulus-test-service = { path = "../../test/service" } diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 42a56b649f035bdf10aa18099fe3851a3b5d238e..d384c9d9bd22028e835c80870306b2b0bf88121b 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -283,14 +283,15 @@ fn build_polkadot_full_node( config, polkadot_service::NewFullParams { is_parachain_node, - grandpa_pause: None, // Disable BEEFY. It should not be required by the internal relay chain node. enable_beefy: false, + force_authoring_backoff: false, jaeger_agent: None, telemetry_worker_handle, // Cumulus doesn't spawn PVF workers, so we can disable version checks. 
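The extra check above handles children that were queued in `waiting_for_parent` while their parent was still sitting in the import queue; `import_block` then drains such children iteratively instead of recursing. A self-contained sketch of that drain, with `u64` hashes standing in for whole blocks:

use std::collections::{HashMap, VecDeque};

/// Import `block` and then drain every block that was waiting for it (and,
/// transitively, for any block imported along the way). The `Vec<u64>` values
/// are the queued child blocks keyed by the parent hash they wait on.
fn import_with_waiting_children(
    waiting_for_parent: &mut HashMap<u64, Vec<u64>>,
    block: u64,
) -> Vec<u64> {
    let mut imported = Vec::new();
    let mut queue = VecDeque::from([block]);

    while let Some(current) = queue.pop_front() {
        imported.push(current);
        if let Some(children) = waiting_for_parent.remove(&current) {
            queue.extend(children);
        }
    }
    imported
}

fn main() {
    // Block 2 waits for 1, block 3 waits for 2.
    let mut waiting = HashMap::from([(1, vec![2]), (2, vec![3])]);
    assert_eq!(import_with_waiting_children(&mut waiting, 1), vec![1, 2, 3]);
    assert!(waiting.is_empty());
}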
node_version: None, + secure_validator_mode: false, workers_path: None, workers_names: None, diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index c9d50afe8fa9d225ae9cd07396b065f0af0fda3a..5100119a2e49f41e7822f9810589b3813f484328 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Common interface for different relay chain datasources." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] polkadot-overseer = { path = "../../../polkadot/node/overseer" } @@ -17,7 +20,7 @@ sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sc-client-api = { path = "../../../substrate/client/api" } futures = "0.3.28" -async-trait = "0.1.73" +async-trait = "0.1.74" thiserror = "1.0.48" jsonrpsee-core = "0.16.2" parity-scale-codec = "3.6.4" diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 53173fb4118944cf4dddf0cbaac8d626b51dcd25..45b958998bd8447416d47c56b1aa1d9ef07b90a7 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Minimal node implementation to be used in tandem with RPC or light-client mode." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] # polkadot deps polkadot-primitives = { path = "../../../polkadot/primitives" } @@ -19,6 +22,7 @@ polkadot-collator-protocol = { path = "../../../polkadot/node/network/collator-p polkadot-network-bridge = { path = "../../../polkadot/node/network/bridge" } polkadot-node-collation-generation = { path = "../../../polkadot/node/collation-generation" } polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } +polkadot-node-core-chain-api = { path = "../../../polkadot/node/core/chain-api" } polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } # substrate deps @@ -26,6 +30,7 @@ sc-authority-discovery = { path = "../../../substrate/client/authority-discovery sc-network = { path = "../../../substrate/client/network" } sc-network-common = { path = "../../../substrate/client/network/common" } sc-service = { path = "../../../substrate/client/service" } +sc-client-api = { path = "../../../substrate/client/api" } substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } sc-tracing = { path = "../../../substrate/client/tracing" } sc-utils = { path = "../../../substrate/client/utils" } @@ -33,6 +38,8 @@ sp-api = { path = "../../../substrate/primitives/api" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } sp-consensus = { path = "../../../substrate/primitives/consensus/common" } sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +tokio = { version = "1.32.0", features = ["macros"] } # cumulus deps cumulus-relay-chain-interface = { path = "../relay-chain-interface" } @@ -41,6 +48,6 @@ cumulus-primitives-core = { path = "../../primitives/core" } array-bytes = "6.1" tracing = "0.1.37" -async-trait = "0.1.73" +async-trait = "0.1.74" futures = "0.3.28" - +parking_lot = "0.12.1" diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs 
b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 1e78df7115435449e6beb89224f0af32f3b3b988..ab56b62c4ca59b3058e9724032e2e09264a45640 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -20,13 +20,17 @@ use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use cumulus_relay_chain_rpc_interface::RelayChainRpcClient; use futures::{Stream, StreamExt}; use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; -use polkadot_overseer::RuntimeApiSubsystemClient; +use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; +use sc_client_api::AuxStore; use sp_api::{ApiError, RuntimeApiInfo}; +use sp_blockchain::Info; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; #[derive(Clone)] pub struct BlockChainRpcClient { @@ -53,6 +57,64 @@ impl BlockChainRpcClient { } } +#[async_trait::async_trait] +impl ChainApiBackend for BlockChainRpcClient { + async fn header( + &self, + hash: ::Hash, + ) -> sp_blockchain::Result::Header>> { + Ok(self.rpc_client.chain_get_header(Some(hash)).await?) + } + + async fn info(&self) -> sp_blockchain::Result> { + let (best_header_opt, genesis_hash, finalized_head) = futures::try_join!( + self.rpc_client.chain_get_header(None), + self.rpc_client.chain_get_head(Some(0)), + self.rpc_client.chain_get_finalized_head() + )?; + let best_header = best_header_opt.ok_or_else(|| { + RelayChainError::GenericError( + "Unable to retrieve best header from relay chain.".to_string(), + ) + })?; + + let finalized_header = + self.rpc_client.chain_get_header(Some(finalized_head)).await?.ok_or_else(|| { + RelayChainError::GenericError( + "Unable to retrieve finalized header from relay chain.".to_string(), + ) + })?; + Ok(Info { + best_hash: best_header.hash(), + best_number: best_header.number, + genesis_hash, + finalized_hash: finalized_head, + finalized_number: finalized_header.number, + finalized_state: Some((finalized_header.hash(), finalized_header.number)), + number_leaves: 1, + block_gap: None, + }) + } + + async fn number( + &self, + hash: ::Hash, + ) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + Ok(self + .rpc_client + .chain_get_header(Some(hash)) + .await? + .map(|maybe_header| maybe_header.number)) + } + + async fn hash( + &self, + number: NumberFor, + ) -> sp_blockchain::Result::Hash>> { + Ok(self.rpc_client.chain_get_block_hash(number.into()).await?) + } +} + #[async_trait::async_trait] impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn validators( @@ -364,6 +426,22 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { ) -> Result, ApiError> { Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) } + + /// Approval voting configuration parameters + async fn approval_voting_params( + &self, + at: Hash, + session_index: polkadot_primitives::SessionIndex, + ) -> Result { + Ok(self + .rpc_client + .parachain_host_staging_approval_voting_params(at, session_index) + .await?) + } + + async fn node_features(&self, at: Hash) -> Result { + Ok(self.rpc_client.parachain_host_node_features(at).await?) 
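`ChainApiBackend::info` above is assembled from a handful of relay-chain RPC lookups (best header, genesis hash, finalized head and its header), issued concurrently with `futures::try_join!`. A synchronous, simplified sketch of just the assembly step, with hypothetical integer hashes and a trimmed-down `ChainInfo` in place of `sp_blockchain::Info`:

/// Trimmed-down stand-in for `sp_blockchain::Info`, keeping only the fields
/// the sketch below fills in.
#[derive(Debug)]
struct ChainInfo {
    best_hash: u64,
    best_number: u32,
    genesis_hash: u64,
    finalized_hash: u64,
    finalized_number: u32,
}

/// Assemble the chain info from already fetched values. In the real
/// implementation the lookups are RPC calls run concurrently; here they are
/// plain parameters.
fn assemble_chain_info(
    best_header: Option<(u64, u32)>, // best header as (hash, number), may be missing
    genesis_hash: u64,               // hash of block zero
    finalized: (u64, u32),           // finalized head hash plus its header's number
) -> Result<ChainInfo, String> {
    let (best_hash, best_number) = best_header
        .ok_or_else(|| "Unable to retrieve best header from relay chain.".to_owned())?;
    Ok(ChainInfo {
        best_hash,
        best_number,
        genesis_hash,
        finalized_hash: finalized.0,
        finalized_number: finalized.1,
    })
}

fn main() {
    let info = assemble_chain_info(Some((0xbe57, 42)), 0x9e0e, (0xf1a1, 40)).unwrap();
    println!("{info:?}");
}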
+ } } #[async_trait::async_trait] @@ -398,3 +476,25 @@ impl BlockChainRpcClient { Ok(self.rpc_client.get_finalized_heads_stream()?.boxed()) } } + +// Implementation required by ChainApiSubsystem +// but never called in our case. +impl AuxStore for BlockChainRpcClient { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + _insert: I, + _delete: D, + ) -> sp_blockchain::Result<()> { + unimplemented!("Not supported on the RPC collator") + } + + fn get_aux(&self, _key: &[u8]) -> sp_blockchain::Result>> { + unimplemented!("Not supported on the RPC collator") + } +} diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 379217e4a638e625d5b3a70400ae28792a1a7318..5f5bf338ef9907756adb1eab3f0541e870677fe5 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -15,7 +15,8 @@ // along with Polkadot. If not, see . use futures::{select, StreamExt}; -use std::sync::Arc; +use parking_lot::Mutex; +use std::{collections::HashMap, sync::Arc}; use polkadot_availability_recovery::AvailabilityRecoverySubsystem; use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide}; @@ -24,10 +25,11 @@ use polkadot_network_bridge::{ NetworkBridgeTx as NetworkBridgeTxSubsystem, }; use polkadot_node_collation_generation::CollationGenerationSubsystem; +use polkadot_node_core_chain_api::ChainApiSubsystem; use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; use polkadot_node_core_runtime_api::RuntimeApiSubsystem; use polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, + peer_set::{PeerSet, PeerSetProtocolNames}, request_response::{ v1::{self, AvailableDataFetchingRequest}, v2, IncomingRequestReceiver, ReqProtocolNames, @@ -41,7 +43,7 @@ use polkadot_overseer::{ use polkadot_primitives::CollatorPair; use sc_authority_discovery::Service as AuthorityDiscoveryService; -use sc_network::NetworkStateInfo; +use sc_network::{NetworkStateInfo, NotificationService}; use sc_service::TaskManager; use sc_utils::mpsc::tracing_unbounded; @@ -76,6 +78,8 @@ pub(crate) struct CollatorOverseerGenArgs<'a> { pub req_protocol_names: ReqProtocolNames, /// Peerset protocols name mapping pub peer_set_protocol_names: PeerSetProtocolNames, + /// Notification services for validation/collation protocols. 
+ pub notification_services: HashMap>, } fn build_overseer( @@ -93,6 +97,7 @@ fn build_overseer( collator_pair, req_protocol_names, peer_set_protocol_names, + notification_services, }: CollatorOverseerGenArgs<'_>, ) -> Result< (Overseer, Arc>, OverseerHandle), @@ -100,9 +105,11 @@ fn build_overseer( > { let spawner = SpawnGlue(spawner); let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); + let builder = Overseer::builder() .availability_distribution(DummySubsystem) - .availability_recovery(AvailabilityRecoverySubsystem::with_availability_store_skip( + .availability_recovery(AvailabilityRecoverySubsystem::for_collator( available_data_req_receiver, Metrics::register(registry)?, )) @@ -112,7 +119,7 @@ fn build_overseer( .candidate_backing(DummySubsystem) .candidate_validation(DummySubsystem) .pvf_checker(DummySubsystem) - .chain_api(DummySubsystem) + .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) .collator_protocol({ let side = ProtocolSide::Collator { @@ -130,6 +137,8 @@ fn build_overseer( sync_oracle, network_bridge_metrics.clone(), peer_set_protocol_names.clone(), + notification_services, + notification_sinks.clone(), )) .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service, @@ -137,6 +146,7 @@ fn build_overseer( network_bridge_metrics, req_protocol_names, peer_set_protocol_names, + notification_sinks, )) .provisioner(DummySubsystem) .runtime_api(RuntimeApiSubsystem::new( diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 8801f93640c181cbf27084656d9d7f28cabff1d7..d121d2d3356765d9327fdaa0a8c0563c3917266f 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -21,7 +21,7 @@ use cumulus_relay_chain_rpc_interface::{RelayChainRpcClient, RelayChainRpcInterf use network::build_collator_network; use polkadot_network_bridge::{peer_sets_info, IsAuthority}; use polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, + peer_set::{PeerSet, PeerSetProtocolNames}, request_response::{ v1, v2, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames, }, @@ -175,10 +175,13 @@ async fn new_minimal_relay_chain( let peer_set_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - - for config in peer_sets_info(is_authority, &peer_set_protocol_names) { - net_config.add_notification_protocol(config); - } + let notification_services = peer_sets_info(is_authority, &peer_set_protocol_names) + .into_iter() + .map(|(config, (peerset, service))| { + net_config.add_notification_protocol(config); + (peerset, service) + }) + .collect::>>(); let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = @@ -218,6 +221,7 @@ async fn new_minimal_relay_chain( collator_pair, req_protocol_names: request_protocol_names, peer_set_protocol_names, + notification_services, }; let overseer_handle = diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs index 
813dca47a0398365f935424f79bcd95169d6f2a9..95785063c1aeb6649d7154fa39e4e111e226def3 100644 --- a/cumulus/client/relay-chain-minimal-node/src/network.rs +++ b/cumulus/client/relay-chain-minimal-node/src/network.rs @@ -26,10 +26,9 @@ use sc_network::{ NetworkService, }; -use sc_network::config::FullNetworkConfiguration; +use sc_network::{config::FullNetworkConfiguration, NotificationService}; use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake}; use sc_service::{error::Error, Configuration, NetworkStarter, SpawnTaskHandle}; -use sc_utils::mpsc::tracing_unbounded; use std::{iter, sync::Arc}; @@ -45,7 +44,7 @@ pub(crate) fn build_collator_network( Error, > { let protocol_id = config.protocol_id(); - let block_announce_config = get_block_announce_proto_config::( + let (block_announce_config, _notification_service) = get_block_announce_proto_config::( protocol_id.clone(), &None, Roles::from(&config.role), @@ -69,8 +68,6 @@ pub(crate) fn build_collator_network( let peer_store_handle = peer_store.handle(); spawn_handle.spawn("peer-store", Some("networking"), peer_store.run()); - // RX is not used for anything because syncing is not started for the minimal node - let (tx, _rx) = tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let network_params = sc_network::config::Params:: { role: config.role.clone(), executor: { @@ -86,7 +83,6 @@ pub(crate) fn build_collator_network( protocol_id, metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, - tx, }; let network_worker = sc_network::NetworkWorker::new(network_params)?; @@ -150,7 +146,7 @@ fn get_block_announce_proto_config( best_number: NumberFor, best_hash: B::Hash, genesis_hash: B::Hash, -) -> NonDefaultSetConfig { +) -> (NonDefaultSetConfig, Box) { let block_announces_protocol = { let genesis_hash = genesis_hash.as_ref(); if let Some(ref fork_id) = fork_id { @@ -160,12 +156,11 @@ fn get_block_announce_proto_config( } }; - NonDefaultSetConfig { - notifications_protocol: block_announces_protocol.into(), - fallback_names: iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()) - .collect(), - max_notification_size: 1024 * 1024, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + NonDefaultSetConfig::new( + block_announces_protocol.into(), + iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()).collect(), + 1024 * 1024, + Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( roles, best_number, best_hash, @@ -173,11 +168,11 @@ fn get_block_announce_proto_config( ))), // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement // protocol is still hardcoded into the peerset. - set_config: SetConfig { + SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Deny, }, - } + ) } diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 93fe57145b0c28e9f95a5b2cab1fad6105f61fad..2295bffddcf84c138f1949faa3b61854b7ff616b 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true description = "Implementation of the RelayChainInterface trait that connects to a remote RPC-node." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true [dependencies] polkadot-overseer = { path = "../../../polkadot/node/overseer" } @@ -20,6 +22,7 @@ sp-authority-discovery = { path = "../../../substrate/primitives/authority-disco sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sp-storage = { path = "../../../substrate/primitives/storage" } sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-version = { path = "../../../substrate/primitives/version" } sc-client-api = { path = "../../../substrate/client/api" } sc-rpc-api = { path = "../../../substrate/client/rpc-api" } sc-service = { path = "../../../substrate/client/service" } @@ -32,12 +35,12 @@ futures-timer = "3.0.2" parity-scale-codec = "3.6.4" jsonrpsee = { version = "0.16.2", features = ["ws-client"] } tracing = "0.1.37" -async-trait = "0.1.73" +async-trait = "0.1.74" url = "2.4.0" serde_json = "1.0.108" -serde = "1.0.188" +serde = "1.0.193" schnellru = "0.2.1" -smoldot = { version = "0.11.0", default_features = false, features = ["std"]} +smoldot = { version = "0.11.0", default_features = false, features = ["std"] } smoldot-light = { version = "0.9.0", default_features = false, features = ["std"] } either = "1.8.1" thiserror = "1.0.48" diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 90af334e1334fe03323f8c4124e6590daa69ba09..c64fff77a29fd016d7e1723ab461dc8082770682 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -31,7 +31,9 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, + BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, @@ -44,10 +46,10 @@ use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use sc_client_api::StorageData; use sc_rpc_api::{state::ReadProof, system::Health}; use sc_service::TaskManager; -use sp_api::RuntimeVersion; use sp_consensus_babe::Epoch; use sp_core::sp_std::collections::btree_map::BTreeMap; use sp_storage::StorageKey; +use sp_version::RuntimeVersion; use crate::{ light_client_worker::{build_smoldot_client, LightClientRpcWorker}, @@ -597,6 +599,14 @@ impl RelayChainRpcClient { .await } + pub async fn parachain_host_node_features( + &self, + at: RelayHash, + ) -> Result { + self.call_remote_runtime_function("ParachainHost_node_features", at, None::<()>) + .await + } + pub async fn parachain_host_disabled_validators( &self, at: RelayHash, @@ -615,6 +625,19 @@ impl RelayChainRpcClient { } #[allow(missing_docs)] + pub async fn parachain_host_staging_approval_voting_params( + &self, + at: RelayHash, + _session_index: SessionIndex, + ) -> Result { + self.call_remote_runtime_function( + "ParachainHost_staging_approval_voting_params", + at, + None::<()>, + ) + .await + } + pub async fn parachain_host_para_backing_state( &self, at: RelayHash, diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 
f80c65128d5263f4d7809383476557b9df4ab1b4..997413ad0da8302c615dc5b6738d7871580425ea 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Common functions used to assemble the components of a parachain node." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +[lints] +workspace = true + [dependencies] futures = "0.3.28" @@ -38,7 +41,7 @@ cumulus-client-consensus-common = { path = "../consensus/common" } cumulus-client-pov-recovery = { path = "../pov-recovery" } cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } - diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index f8ebca11c8c1fbc05c290b436e3e8abd99771eb3..950e59aff24ec19a2e0ed3b22215d6c77fb7dc5e 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -52,6 +52,8 @@ use sp_core::{traits::SpawnNamed, Decode}; use sp_runtime::traits::{Block as BlockT, BlockIdTo, Header}; use std::{sync::Arc, time::Duration}; +pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size; + // Given the sporadic nature of the explicit recovery operation and the // possibility to retry infinite times this value is more than enough. // In practice here we expect no more than one queued messages. diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index c9d82ead1ebcb9df30fc95ec2c257a8025dd5a44..14dcd10ddfcbfb42f38e068ac7b75c7ee6356f51 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -6,19 +6,22 @@ edition.workspace = true description = "AURA consensus extension pallet for parachains" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false} -pallet-timestamp= { path = "../../../substrate/frame/timestamp", default-features = false} -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false} -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features 
= false } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Cumulus cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false } @@ -29,7 +32,7 @@ cumulus-pallet-parachain-system = { path = "../parachain-system", default-featur cumulus-pallet-parachain-system = { path = "../parachain-system" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-parachain-system/std", diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 68e4a681c2b179fa5b2d7bf3fe0d90cdc9befd77..9c2af8893ca11ecf005be4c14ee1b718a3674f5c 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -9,6 +9,9 @@ readme = "README.md" repository.workspace = true version = "3.0.0" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,15 +21,15 @@ codec = { default-features = false, features = ["derive"], package = "parity-sca rand = { version = "0.8.5", features = ["std_rng"], default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} -pallet-session = { path = "../../../substrate/frame/session", default-features = false} +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } +pallet-session = { path = "../../../substrate/frame/session", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } [dev-dependencies] sp-core = { path = "../../../substrate/primitives/core" } @@ -39,7 +42,7 @@ pallet-balances = { path = "../../../substrate/frame/balances" } pallet-aura = { path = "../../../substrate/frame/aura" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -75,4 +78,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/pallets/collator-selection/src/benchmarking.rs b/cumulus/pallets/collator-selection/src/benchmarking.rs index 
49999dc114df7bdc94dca2f8c563d7c1d8bb6083..fa95303495dd1039f93f79baf93dc5301a31840d 100644 --- a/cumulus/pallets/collator-selection/src/benchmarking.rs +++ b/cumulus/pallets/collator-selection/src/benchmarking.rs @@ -25,14 +25,11 @@ use codec::Decode; use frame_benchmarking::{ account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, }; -use frame_support::{ - dispatch::DispatchResult, - traits::{Currency, EnsureOrigin, Get, ReservableCurrency}, -}; +use frame_support::traits::{Currency, EnsureOrigin, Get, ReservableCurrency}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, RawOrigin}; use pallet_authorship::EventHandler; use pallet_session::{self as session, SessionManager}; -use sp_std::prelude::*; +use sp_std::{cmp, prelude::*}; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -94,7 +91,7 @@ fn register_candidates(count: u32) { assert!(>::get() > 0u32.into(), "Bond cannot be zero!"); for who in candidates { - T::Currency::make_free_balance_be(&who, >::get() * 2u32.into()); + T::Currency::make_free_balance_be(&who, >::get() * 3u32.into()); >::register_as_candidate(RawOrigin::Signed(who).into()).unwrap(); } } @@ -107,8 +104,11 @@ fn min_candidates() -> u32 { fn min_invulnerables() -> u32 { let min_collators = T::MinEligibleCollators::get(); - let candidates_length = >::get().len(); - min_collators.saturating_sub(candidates_length.try_into().unwrap()) + let candidates_length = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + min_collators.saturating_sub(candidates_length) } #[benchmarks(where T: pallet_authorship::Config + session::Config)] @@ -160,22 +160,19 @@ mod benchmarks { .unwrap(); } // ... and register them. - for (who, _) in candidates { + for (who, _) in candidates.iter() { let deposit = >::get(); - T::Currency::make_free_balance_be(&who, deposit * 1000_u32.into()); - let incoming = CandidateInfo { who: who.clone(), deposit }; - >::try_mutate(|candidates| -> DispatchResult { - if !candidates.iter().any(|candidate| candidate.who == who) { - T::Currency::reserve(&who, deposit)?; - candidates.try_push(incoming).expect("we've respected the bounded vec limit"); - >::insert( - who.clone(), - frame_system::Pallet::::block_number() + T::KickThreshold::get(), - ); - } - Ok(()) + T::Currency::make_free_balance_be(who, deposit * 1000_u32.into()); + >::try_mutate(|list| { + list.try_push(CandidateInfo { who: who.clone(), deposit }).unwrap(); + Ok::<(), BenchmarkError>(()) }) - .expect("only returns ok"); + .unwrap(); + T::Currency::reserve(who, deposit)?; + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); } // now we need to fill up invulnerables @@ -226,10 +223,27 @@ mod benchmarks { } #[benchmark] - fn set_candidacy_bond() -> Result<(), BenchmarkError> { - let bond_amount: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + fn set_candidacy_bond( + c: Linear<0, { T::MaxCandidates::get() }>, + k: Linear<0, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + let initial_bond_amount: BalanceOf = T::Currency::minimum_balance() * 2u32.into(); + >::put(initial_bond_amount); + register_validators::(c); + register_candidates::(c); + let kicked = cmp::min(k, c); let origin = T::UpdateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let bond_amount = if k > 0 { + >::mutate(|candidates| { + for info in candidates.iter_mut().skip(kicked as usize) { + info.deposit = T::Currency::minimum_balance() * 3u32.into(); + 
} + }); + T::Currency::minimum_balance() * 3u32.into() + } else { + T::Currency::minimum_balance() + }; #[extrinsic_call] _(origin as T::RuntimeOrigin, bond_amount); @@ -238,6 +252,35 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn update_bond( + c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + >::put(T::Currency::minimum_balance()); + >::put(c); + + register_validators::(c); + register_candidates::(c); + + let caller = >::get()[0].who.clone(); + v2::whitelist!(caller); + + let bond_amount: BalanceOf = + T::Currency::minimum_balance() + T::Currency::minimum_balance(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), bond_amount); + + assert_last_event::( + Event::CandidateBondUpdated { account_id: caller, deposit: bond_amount }.into(), + ); + assert!( + >::get().iter().last().unwrap().deposit == + T::Currency::minimum_balance() * 2u32.into() + ); + Ok(()) + } + // worse case is when we have all the max-candidate slots filled except one, and we fill that // one. #[benchmark] @@ -267,6 +310,36 @@ mod benchmarks { ); } + #[benchmark] + fn take_candidate_slot(c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>) { + >::put(T::Currency::minimum_balance()); + >::put(1); + + register_validators::(c); + register_candidates::(c); + + let caller: T::AccountId = whitelisted_caller(); + let bond: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + T::Currency::make_free_balance_be(&caller, bond); + + >::set_keys( + RawOrigin::Signed(caller.clone()).into(), + keys::(c + 1), + Vec::new(), + ) + .unwrap(); + + let target = >::get().iter().last().unwrap().who.clone(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), bond / 2u32.into(), target.clone()); + + assert_last_event::( + Event::CandidateReplaced { old: target, new: caller, deposit: bond / 2u32.into() } + .into(), + ); + } + // worse case is the last candidate leaving. 
#[benchmark] fn leave_intent(c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>) { @@ -276,7 +349,7 @@ mod benchmarks { register_validators::(c); register_candidates::(c); - let leaving = >::get().last().unwrap().who.clone(); + let leaving = >::get().iter().last().unwrap().who.clone(); v2::whitelist!(leaving); #[extrinsic_call] @@ -323,31 +396,37 @@ mod benchmarks { let new_block: BlockNumberFor = 1800u32.into(); let zero_block: BlockNumberFor = 0u32.into(); - let candidates = >::get(); + let candidates: Vec = >::get() + .iter() + .map(|candidate_info| candidate_info.who.clone()) + .collect(); let non_removals = c.saturating_sub(r); for i in 0..c { - >::insert(candidates[i as usize].who.clone(), zero_block); + >::insert(candidates[i as usize].clone(), zero_block); } if non_removals > 0 { for i in 0..non_removals { - >::insert(candidates[i as usize].who.clone(), new_block); + >::insert(candidates[i as usize].clone(), new_block); } } else { for i in 0..c { - >::insert(candidates[i as usize].who.clone(), new_block); + >::insert(candidates[i as usize].clone(), new_block); } } let min_candidates = min_candidates::(); - let pre_length = >::get().len(); + let pre_length = >::decode_len().unwrap_or_default(); frame_system::Pallet::::set_block_number(new_block); - assert!(>::get().len() == c as usize); - + let current_length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + assert!(c == current_length); #[block] { as SessionManager<_>>::new_session(0); @@ -357,16 +436,20 @@ mod benchmarks { // candidates > removals and remaining candidates > min candidates // => remaining candidates should be shorter than before removal, i.e. some were // actually removed. - assert!(>::get().len() < pre_length); + assert!(>::decode_len().unwrap_or_default() < pre_length); } else if c > r && non_removals < min_candidates { // candidates > removals and remaining candidates would be less than min candidates // => remaining candidates should equal min candidates, i.e. some were removed up to // the minimum, but then any more were "forced" to stay in candidates. - assert!(>::get().len() == min_candidates as usize); + let current_length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + assert!(min_candidates == current_length); } else { // removals >= candidates, non removals must == 0 // can't remove more than exist - assert!(>::get().len() == pre_length); + assert!(>::decode_len().unwrap_or_default() == pre_length); } } diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 24493ce9d9cdc7f451f2e997615962ee5f0662e0..7449f4d68c7eacc8b07fa45f2f991c13f3d5713b 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -35,16 +35,36 @@ //! //! 1. [`Invulnerables`]: a set of collators appointed by governance. These accounts will always be //! collators. -//! 2. [`Candidates`]: these are *candidates to the collation task* and may or may not be elected as -//! a final collator. +//! 2. [`CandidateList`]: these are *candidates to the collation task* and may or may not be elected +//! as a final collator. //! -//! The current implementation resolves congestion of [`Candidates`] in a first-come-first-serve -//! manner. +//! The current implementation resolves congestion of [`CandidateList`] through a simple auction +//! mechanism. Candidates bid for the collator slots and at the end of the session, the auction ends +//! 
and the top candidates are selected to become collators. The number of selected candidates is +//! determined by the value of `DesiredCandidates`. +//! +//! Before the list reaches full capacity, candidates can register by placing the minimum bond +//! through `register_as_candidate`. Then, if an account wants to participate in the collator slot +//! auction, they have to replace an existing candidate by placing a greater deposit through +//! `take_candidate_slot`. Existing candidates can increase their bids through `update_bond`. +//! +//! At any point, an account can take the place of another account in the candidate list if they put +//! up a greater deposit than the target. While new joiners would like to deposit as little as +//! possible to participate in the auction, the replacement threat incentivizes candidates to bid as +//! close to their budget as possible in order to avoid being replaced. +//! +//! Candidates which are not on "winning" slots in the list can also decrease their deposits through +//! `update_bond`, but candidates who are on top slots and try to decrease their deposits will fail +//! in order to enforce auction mechanics and have meaningful bids. //! //! Candidates will not be allowed to get kicked or `leave_intent` if the total number of collators //! would fall below `MinEligibleCollators`. This is to ensure that some collators will always //! exist, i.e. someone is eligible to produce a block. //! +//! When a new session starts, candidates with the highest deposits will be selected in order until +//! the desired number of collators is reached. Candidates can increase or decrease their deposits +//! between sessions in order to ensure they receive a slot in the collator list. +//! //! ### Rewards //! //! The Collator Selection pallet maintains an on-chain account (the "Pot"). In each block, the @@ -56,8 +76,8 @@ //! //! To initiate rewards, an ED needs to be transferred to the pot address. //! -//! Note: Eventually the Pot distribution may be modified as discussed in -//! [this issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). +//! Note: Eventually the Pot distribution may be modified as discussed in [this +//! issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). #![cfg_attr(not(feature = "std"), no_std)] @@ -182,9 +202,12 @@ pub mod pallet { /// The (community, limited) collation candidates. `Candidates` and `Invulnerables` should be /// mutually exclusive. + /// + /// This list is sorted in ascending order by deposit and when the deposits are equal, the least + /// recently updated is considered greater. #[pallet::storage] - #[pallet::getter(fn candidates)] - pub type Candidates = StorageValue< + #[pallet::getter(fn candidate_list)] + pub type CandidateList = StorageValue< _, BoundedVec>, T::MaxCandidates>, ValueQuery, @@ -261,8 +284,12 @@ pub mod pallet { NewCandidacyBond { bond_amount: BalanceOf }, /// A new candidate joined. CandidateAdded { account_id: T::AccountId, deposit: BalanceOf }, + /// Bond of a candidate updated. + CandidateBondUpdated { account_id: T::AccountId, deposit: BalanceOf }, /// A candidate was removed. CandidateRemoved { account_id: T::AccountId }, + /// An account was replaced in the candidate list by another one. + CandidateReplaced { old: T::AccountId, new: T::AccountId, deposit: BalanceOf }, /// An account was unable to be added to the Invulnerables because they did not have keys /// registered. Other Invulnerables may have been set. 
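The storage doc above pins down the list order: ascending by deposit, with ties ranking the most recently updated entry lower. A plausible reading of that rule as a plain `Vec` sketch; the insertion position and the winner selection follow the doc comments above, not the pallet code itself:

/// Insert `(who, deposit)` into a candidate list kept in ascending order of
/// deposit, where a freshly (re)inserted entry ranks *below* existing entries
/// with the same deposit ("the least recently updated is considered greater").
fn insert_candidate(list: &mut Vec<(&'static str, u128)>, who: &'static str, deposit: u128) {
    let pos = list
        .iter()
        .position(|(_, existing)| *existing >= deposit)
        .unwrap_or(list.len());
    list.insert(pos, (who, deposit));
}

fn main() {
    let mut list = Vec::new();
    insert_candidate(&mut list, "alice", 30);
    insert_candidate(&mut list, "bob", 10);
    insert_candidate(&mut list, "carol", 30); // ties with alice, ends up below her
    assert_eq!(list, vec![("bob", 10), ("carol", 30), ("alice", 30)]);

    // Collators for the next session come from the end of the list, i.e. the
    // highest bidders win the `DesiredCandidates` slots.
    let desired_candidates: usize = 2;
    let winners: Vec<_> = list.iter().rev().take(desired_candidates).map(|(who, _)| *who).collect();
    assert_eq!(winners, vec!["alice", "carol"]);
}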
InvalidInvulnerableSkipped { account_id: T::AccountId }, @@ -288,12 +315,38 @@ pub mod pallet { NoAssociatedValidatorId, /// Validator ID is not yet registered. ValidatorNotRegistered, + /// Could not insert in the candidate list. + InsertToCandidateListFailed, + /// Could not remove from the candidate list. + RemoveFromCandidateListFailed, + /// New deposit amount would be below the minimum candidacy bond. + DepositTooLow, + /// Could not update the candidate list. + UpdateCandidateListFailed, + /// Deposit amount is too low to take the target's slot in the candidate list. + InsufficientBond, + /// The target account to be replaced in the candidate list is not a candidate. + TargetIsNotCandidate, + /// The updated deposit amount is equal to the amount already reserved. + IdenticalDeposit, + /// Cannot lower candidacy bond while occupying a future collator slot in the list. + InvalidUnreserve, } #[pallet::hooks] impl Hooks> for Pallet { fn integrity_test() { assert!(T::MinEligibleCollators::get() > 0, "chain must require at least one collator"); + assert!( + T::MaxInvulnerables::get().saturating_add(T::MaxCandidates::get()) >= + T::MinEligibleCollators::get(), + "invulnerables and candidates must be able to satisfy collator demand" + ); + } + + #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() } } @@ -307,8 +360,8 @@ pub mod pallet { /// acceptable Invulnerables, and is not proposing a _set_ of new Invulnerables. /// /// This call does not maintain mutual exclusivity of `Invulnerables` and `Candidates`. It - /// is recommended to use a batch of `add_invulnerable` and `remove_invulnerable` instead. - /// A `batch_all` can also be used to enforce atomicity. If any candidates are included in + /// is recommended to use a batch of `add_invulnerable` and `remove_invulnerable` instead. A + /// `batch_all` can also be used to enforce atomicity. If any candidates are included in /// `new`, they should be removed with `remove_invulnerable_candidate` after execution. /// /// Must be called by the `UpdateOrigin`. @@ -319,8 +372,9 @@ pub mod pallet { // don't wipe out the collator set if new.is_empty() { + // Casting `u32` to `usize` should be safe on all machines running this. ensure!( - Candidates::::decode_len().unwrap_or_default() >= + CandidateList::::decode_len().unwrap_or_default() >= T::MinEligibleCollators::get() as usize, Error::::TooFewEligibleCollators ); @@ -401,17 +455,47 @@ pub mod pallet { /// Set the candidacy bond amount. /// + /// If the candidacy bond is increased by this call, all current candidates which have a + /// deposit lower than the new bond will be kicked from the list and get their deposits + /// back. + /// /// The origin for this call must be the `UpdateOrigin`. 
#[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::set_candidacy_bond())] + #[pallet::weight(T::WeightInfo::set_candidacy_bond( + T::MaxCandidates::get(), + T::MaxCandidates::get() + ))] pub fn set_candidacy_bond( origin: OriginFor, bond: BalanceOf, ) -> DispatchResultWithPostInfo { T::UpdateOrigin::ensure_origin(origin)?; - >::put(bond); + let bond_increased = >::mutate(|old_bond| -> bool { + let bond_increased = *old_bond < bond; + *old_bond = bond; + bond_increased + }); + let initial_len = >::decode_len().unwrap_or_default(); + let kicked = (bond_increased && initial_len > 0) + .then(|| { + // Closure below returns the number of candidates which were kicked because + // their deposits were lower than the new candidacy bond. + >::mutate(|candidates| -> usize { + let first_safe_candidate = candidates + .iter() + .position(|candidate| candidate.deposit >= bond) + .unwrap_or(initial_len); + let kicked_candidates = candidates.drain(..first_safe_candidate); + for candidate in kicked_candidates { + T::Currency::unreserve(&candidate.who, candidate.deposit); + >::remove(candidate.who); + } + first_safe_candidate + }) + }) + .unwrap_or_default(); Self::deposit_event(Event::NewCandidacyBond { bond_amount: bond }); - Ok(().into()) + Ok(Some(T::WeightInfo::set_candidacy_bond(initial_len as u32, kicked as u32)).into()) } /// Register this account as a collator candidate. The account must (a) already have @@ -424,8 +508,11 @@ pub mod pallet { let who = ensure_signed(origin)?; // ensure we are below limit. - let length = >::decode_len().unwrap_or_default(); - ensure!((length as u32) < Self::desired_candidates(), Error::::TooManyCandidates); + let length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + ensure!(length < T::MaxCandidates::get(), Error::::TooManyCandidates); ensure!(!Self::invulnerables().contains(&who), Error::::AlreadyInvulnerable); let validator_key = T::ValidatorIdOf::convert(who.clone()) @@ -437,25 +524,27 @@ pub mod pallet { let deposit = Self::candidacy_bond(); // First authored block is current block plus kick threshold to handle session delay - let incoming = CandidateInfo { who: who.clone(), deposit }; - - let current_count = - >::try_mutate(|candidates| -> Result { - if candidates.iter().any(|candidate| candidate.who == who) { - Err(Error::::AlreadyCandidate)? - } else { - T::Currency::reserve(&who, deposit)?; - candidates.try_push(incoming).map_err(|_| Error::::TooManyCandidates)?; - >::insert( - who.clone(), - frame_system::Pallet::::block_number() + T::KickThreshold::get(), - ); - Ok(candidates.len()) - } - })?; + >::try_mutate(|candidates| -> Result<(), DispatchError> { + ensure!( + !candidates.iter().any(|candidate_info| candidate_info.who == who), + Error::::AlreadyCandidate + ); + T::Currency::reserve(&who, deposit)?; + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + candidates + .try_insert(0, CandidateInfo { who: who.clone(), deposit }) + .map_err(|_| Error::::InsertToCandidateListFailed)?; + Ok(()) + })?; Self::deposit_event(Event::CandidateAdded { account_id: who, deposit }); - Ok(Some(T::WeightInfo::register_as_candidate(current_count as u32)).into()) + // Safe to do unchecked add here because we ensure above that `length < + // T::MaxCandidates::get()`, and since `T::MaxCandidates` is `u32` it can be at most + // `u32::MAX`, therefore `length + 1` cannot overflow. 
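+ // Note: new registrants are inserted at index 0, i.e. at the bottom of the ascending list,
+ // with exactly the minimum bond; they can raise their bid afterwards through `update_bond`
+ // or be displaced through `take_candidate_slot`.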
+ Ok(Some(T::WeightInfo::register_as_candidate(length + 1)).into()) } /// Deregister `origin` as a collator candidate. Note that the collator can only leave on @@ -468,13 +557,14 @@ pub mod pallet { pub fn leave_intent(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!( - Self::eligible_collators() > T::MinEligibleCollators::get() as usize, + Self::eligible_collators() > T::MinEligibleCollators::get(), Error::::TooFewEligibleCollators ); + let length = >::decode_len().unwrap_or_default(); // Do remove their last authored block. - let current_count = Self::try_remove_candidate(&who, true)?; + Self::try_remove_candidate(&who, true)?; - Ok(Some(T::WeightInfo::leave_intent(current_count as u32)).into()) + Ok(Some(T::WeightInfo::leave_intent(length.saturating_sub(1) as u32)).into()) } /// Add a new account `who` to the list of `Invulnerables` collators. `who` must have @@ -521,7 +611,7 @@ pub mod pallet { .unwrap_or_default() .try_into() .unwrap_or(T::MaxInvulnerables::get().saturating_sub(1)), - Candidates::::decode_len() + >::decode_len() .unwrap_or_default() .try_into() .unwrap_or(T::MaxCandidates::get()), @@ -540,7 +630,7 @@ pub mod pallet { T::UpdateOrigin::ensure_origin(origin)?; ensure!( - Self::eligible_collators() > T::MinEligibleCollators::get() as usize, + Self::eligible_collators() > T::MinEligibleCollators::get(), Error::::TooFewEligibleCollators ); @@ -554,6 +644,154 @@ pub mod pallet { Self::deposit_event(Event::InvulnerableRemoved { account_id: who }); Ok(()) } + + /// Update the candidacy bond of collator candidate `origin` to a new amount `new_deposit`. + /// + /// Setting a `new_deposit` that is lower than the current deposit while `origin` is + /// occupying a top-`DesiredCandidates` slot is not allowed. + /// + /// This call will fail if `origin` is not a collator candidate, the updated bond is lower + /// than the minimum candidacy bond, and/or the amount cannot be reserved. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::update_bond(T::MaxCandidates::get()))] + pub fn update_bond( + origin: OriginFor, + new_deposit: BalanceOf, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(new_deposit >= >::get(), Error::::DepositTooLow); + // The function below will try to mutate the `CandidateList` entry for the caller to + // update their deposit to the new value of `new_deposit`. The return value is the + // position of the entry in the list, used for weight calculation. + let length = + >::try_mutate(|candidates| -> Result { + let idx = candidates + .iter() + .position(|candidate_info| candidate_info.who == who) + .ok_or_else(|| Error::::NotCandidate)?; + let candidate_count = candidates.len(); + // Remove the candidate from the list. + let mut info = candidates.remove(idx); + let old_deposit = info.deposit; + if new_deposit > old_deposit { + T::Currency::reserve(&who, new_deposit - old_deposit)?; + } else if new_deposit < old_deposit { + // Casting `u32` to `usize` should be safe on all machines running this. + ensure!( + idx.saturating_add(>::get() as usize) < + candidate_count, + Error::::InvalidUnreserve + ); + T::Currency::unreserve(&who, old_deposit - new_deposit); + } else { + return Err(Error::::IdenticalDeposit.into()) + } + + // Update the deposit and insert the candidate in the correct spot in the list. 
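+ // Illustrative example (hypothetical deposits): with the ascending list
+ // [(A, 10), (B, 20), (C, 30)], if A raises its deposit to 25 it is removed and re-inserted
+ // between B and C, giving [(B, 20), (A, 25), (C, 30)].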
+ info.deposit = new_deposit; + let new_pos = candidates + .iter() + .position(|candidate| candidate.deposit >= new_deposit) + .unwrap_or_else(|| candidates.len()); + candidates + .try_insert(new_pos, info) + .map_err(|_| Error::::InsertToCandidateListFailed)?; + + Ok(candidate_count) + })?; + + Self::deposit_event(Event::CandidateBondUpdated { + account_id: who, + deposit: new_deposit, + }); + Ok(Some(T::WeightInfo::update_bond(length as u32)).into()) + } + + /// The caller `origin` replaces a candidate `target` in the collator candidate list by + /// reserving `deposit`. The amount `deposit` reserved by the caller must be greater than + /// the existing bond of the target it is trying to replace. + /// + /// This call will fail if the caller is already a collator candidate or invulnerable, the + /// caller does not have registered session keys, the target is not a collator candidate, + /// and/or the `deposit` amount cannot be reserved. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::take_candidate_slot(T::MaxCandidates::get()))] + pub fn take_candidate_slot( + origin: OriginFor, + deposit: BalanceOf, + target: T::AccountId, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + ensure!(!Self::invulnerables().contains(&who), Error::::AlreadyInvulnerable); + ensure!(deposit >= Self::candidacy_bond(), Error::::InsufficientBond); + + let validator_key = T::ValidatorIdOf::convert(who.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + ensure!( + T::ValidatorRegistration::is_registered(&validator_key), + Error::::ValidatorNotRegistered + ); + + let length = >::decode_len().unwrap_or_default(); + // The closure below iterates through all elements of the candidate list to ensure that + // the caller isn't already a candidate and to find the target it's trying to replace in + // the list. The return value is a tuple of the position of the candidate to be replaced + // in the list along with its candidate information. + let target_info = >::try_mutate( + |candidates| -> Result>, DispatchError> { + // Find the position in the list of the candidate that is being replaced. + let mut target_info_idx = None; + let mut new_info_idx = None; + for (idx, candidate_info) in candidates.iter().enumerate() { + // While iterating through the candidates trying to find the target, + // also ensure on the same pass that our caller isn't already a + // candidate. + ensure!(candidate_info.who != who, Error::::AlreadyCandidate); + // If we find our target, update the position but do not stop the + // iteration since we're also checking that the caller isn't already a + // candidate. + if candidate_info.who == target { + target_info_idx = Some(idx); + } + // Find the spot where the new candidate would be inserted in the current + // version of the list. + if new_info_idx.is_none() && candidate_info.deposit >= deposit { + new_info_idx = Some(idx); + } + } + let target_info_idx = + target_info_idx.ok_or(Error::::TargetIsNotCandidate)?; + + // Remove the old candidate from the list. + let target_info = candidates.remove(target_info_idx); + ensure!(deposit > target_info.deposit, Error::::InsufficientBond); + + // We have removed one element before `new_info_idx`, so the position we have to + // insert to is reduced by 1. + let new_pos = new_info_idx + .map(|i| i.saturating_sub(1)) + .unwrap_or_else(|| candidates.len()); + let new_info = CandidateInfo { who: who.clone(), deposit }; + // Insert the new candidate in the correct spot in the list. 
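+ // Illustrative example (hypothetical deposits): for the ascending list
+ // [(A, 5), (B, 10), (C, 30)] and a caller bidding 20 for A's slot, `new_info_idx` is found
+ // as 2 on the full list; removing A shifts the insertion point to `new_pos` = 1, so the
+ // result is [(B, 10), (caller, 20), (C, 30)].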
+ candidates + .try_insert(new_pos, new_info) + .expect("candidate count previously decremented; qed"); + + Ok(target_info) + }, + )?; + T::Currency::reserve(&who, deposit)?; + T::Currency::unreserve(&target_info.who, target_info.deposit); + >::remove(target_info.who.clone()); + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + + Self::deposit_event(Event::CandidateReplaced { old: target, new: who, deposit }); + Ok(Some(T::WeightInfo::take_candidate_slot(length as u32)).into()) + } } impl Pallet { @@ -564,84 +802,122 @@ pub mod pallet { /// Return the total number of accounts that are eligible collators (candidates and /// invulnerables). - fn eligible_collators() -> usize { - Candidates::::decode_len() + fn eligible_collators() -> u32 { + >::decode_len() .unwrap_or_default() .saturating_add(Invulnerables::::decode_len().unwrap_or_default()) + .try_into() + .unwrap_or(u32::MAX) } /// Removes a candidate if they exist and sends them back their deposit. fn try_remove_candidate( who: &T::AccountId, remove_last_authored: bool, - ) -> Result { - let current_count = - >::try_mutate(|candidates| -> Result { - let index = candidates - .iter() - .position(|candidate| candidate.who == *who) - .ok_or(Error::::NotCandidate)?; - let candidate = candidates.remove(index); - T::Currency::unreserve(who, candidate.deposit); - if remove_last_authored { - >::remove(who.clone()) - }; - Ok(candidates.len()) - })?; + ) -> Result<(), DispatchError> { + >::try_mutate(|candidates| -> Result<(), DispatchError> { + let idx = candidates + .iter() + .position(|candidate_info| candidate_info.who == *who) + .ok_or(Error::::NotCandidate)?; + let deposit = candidates[idx].deposit; + T::Currency::unreserve(who, deposit); + candidates.remove(idx); + if remove_last_authored { + >::remove(who.clone()) + }; + Ok(()) + })?; Self::deposit_event(Event::CandidateRemoved { account_id: who.clone() }); - Ok(current_count) + Ok(()) } /// Assemble the current set of candidates and invulnerables into the next collator set. /// /// This is done on the fly, as frequent as we are told to do so, as the session manager. - pub fn assemble_collators( - candidates: BoundedVec, - ) -> Vec { + pub fn assemble_collators() -> Vec { + // Casting `u32` to `usize` should be safe on all machines running this. + let desired_candidates = >::get() as usize; let mut collators = Self::invulnerables().to_vec(); - collators.extend(candidates); + collators.extend( + >::get() + .iter() + .rev() + .cloned() + .take(desired_candidates) + .map(|candidate_info| candidate_info.who), + ); collators } /// Kicks out candidates that did not produce a block in the kick threshold and refunds /// their deposits. - pub fn kick_stale_candidates( - candidates: BoundedVec>, T::MaxCandidates>, - ) -> BoundedVec { + /// + /// Return value is the number of candidates left in the list. + pub fn kick_stale_candidates(candidates: impl IntoIterator) -> u32 { let now = frame_system::Pallet::::block_number(); let kick_threshold = T::KickThreshold::get(); let min_collators = T::MinEligibleCollators::get(); candidates .into_iter() .filter_map(|c| { - let last_block = >::get(c.who.clone()); + let last_block = >::get(c.clone()); let since_last = now.saturating_sub(last_block); - let is_invulnerable = Self::invulnerables().contains(&c.who); + let is_invulnerable = Self::invulnerables().contains(&c); let is_lazy = since_last >= kick_threshold; if is_invulnerable { - // They are invulnerable. No reason for them to be in Candidates also. 
+ // They are invulnerable. No reason for them to be in `CandidateList` also. // We don't even care about the min collators here, because an Account // should not be a collator twice. - let _ = Self::try_remove_candidate(&c.who, false); + let _ = Self::try_remove_candidate(&c, false); None } else { - if Self::eligible_collators() <= min_collators as usize || !is_lazy { + if Self::eligible_collators() <= min_collators || !is_lazy { // Either this is a good collator (not lazy) or we are at the minimum // that the system needs. They get to stay. - Some(c.who) + Some(c) } else { // This collator has not produced a block recently enough. Bye bye. - let _ = Self::try_remove_candidate(&c.who, true); + let _ = Self::try_remove_candidate(&c, true); None } } }) - .collect::>() + .count() .try_into() .expect("filter_map operation can't result in a bounded vec larger than its original; qed") } + + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + /// + /// # Invariants + /// + /// ## `DesiredCandidates` + /// + /// * The current desired candidate count should not exceed the candidate list capacity. + /// * The number of selected candidates together with the invulnerables must be greater than + /// or equal to the minimum number of eligible collators. + #[cfg(any(test, feature = "try-runtime"))] + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + let desired_candidates = >::get(); + + frame_support::ensure!( + desired_candidates <= T::MaxCandidates::get(), + "Shouldn't demand more candidates than the pallet config allows." + ); + + frame_support::ensure!( + desired_candidates.saturating_add(T::MaxInvulnerables::get()) >= + T::MinEligibleCollators::get(), + "Invulnerable set together with desired candidates should be able to meet the collator quota." + ); + + Ok(()) + } } /// Keep track of number of authored blocks per authority, uncles are counted as well since @@ -677,14 +953,23 @@ pub mod pallet { >::block_number(), ); - let candidates = Self::candidates(); - let candidates_len_before = candidates.len(); - let active_candidates = Self::kick_stale_candidates(candidates); - let removed = candidates_len_before - active_candidates.len(); - let result = Self::assemble_collators(active_candidates); + // The `expect` below is safe because the list is a `BoundedVec` with a max size of + // `T::MaxCandidates`, which is a `u32`. When `decode_len` returns `Some(len)`, `len` + // must be valid and at most `u32::MAX`, which must always be able to convert to `u32`. 
+ let candidates_len_before: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .expect("length is at most `T::MaxCandidates`, so it must fit in `u32`; qed"); + let active_candidates_count = Self::kick_stale_candidates( + >::get() + .iter() + .map(|candidate_info| candidate_info.who.clone()), + ); + let removed = candidates_len_before.saturating_sub(active_candidates_count); + let result = Self::assemble_collators(); frame_system::Pallet::::register_extra_weight_unchecked( - T::WeightInfo::new_session(candidates_len_before as u32, removed as u32), + T::WeightInfo::new_session(candidates_len_before, removed), DispatchClass::Mandatory, ); Some(result) diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 46143674bb39991f55394b1c730bae38d9da29e1..ab9ad5ec11a21420c289f92803124b7106a48554 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -16,7 +16,7 @@ use super::*; use crate as collator_selection; use frame_support::{ - ord_parameter_types, parameter_types, + derive_impl, ord_parameter_types, parameter_types, traits::{ConstBool, ConstU32, ConstU64, FindAuthor, ValidatorRegistration}, PalletId, }; @@ -50,6 +50,7 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/cumulus/pallets/collator-selection/src/tests.rs b/cumulus/pallets/collator-selection/src/tests.rs index d4dae513df375145c35a83d7af274e581a94a60b..ed2044ccdfad7def46ab92180e9b177643ac8679 100644 --- a/cumulus/pallets/collator-selection/src/tests.rs +++ b/cumulus/pallets/collator-selection/src/tests.rs @@ -28,7 +28,7 @@ fn basic_setup_works() { assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert!(CollatorSelection::candidates().is_empty()); + assert_eq!(>::get().iter().count(), 0); // genesis should sort input assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); }); @@ -202,7 +202,8 @@ fn candidate_to_invulnerable_works() { initialize_to_block(1); assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_eq!(Balances::free_balance(3), 100); @@ -226,7 +227,7 @@ fn candidate_to_invulnerable_works() { )); assert!(CollatorSelection::invulnerables().to_vec().contains(&3)); assert_eq!(Balances::free_balance(3), 100); - assert_eq!(CollatorSelection::candidates().len(), 1); + assert_eq!(>::get().iter().count(), 1); assert_ok!(CollatorSelection::add_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -240,7 +241,8 @@ fn candidate_to_invulnerable_works() { )); assert!(CollatorSelection::invulnerables().to_vec().contains(&4)); assert_eq!(Balances::free_balance(4), 100); - assert_eq!(CollatorSelection::candidates().len(), 0); + + assert_eq!(>::get().iter().count(), 0); }); } @@ -266,42 +268,230 @@ fn set_desired_candidates_works() { } #[test] -fn set_candidacy_bond() { +fn set_candidacy_bond_empty_candidate_list() { new_test_ext().execute_with(|| { // given assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); - // can set + // can decrease without candidates 
assert_ok!(CollatorSelection::set_candidacy_bond( RuntimeOrigin::signed(RootAccount::get()), 7 )); assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert!(>::get().is_empty()); // rejects bad origin. assert_noop!(CollatorSelection::set_candidacy_bond(RuntimeOrigin::signed(1), 8), BadOrigin); + + // can increase without candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + assert_eq!(CollatorSelection::candidacy_bond(), 20); }); } #[test] -fn cannot_register_candidate_if_too_many() { +fn set_candidacy_bond_with_one_candidate() { new_test_ext().execute_with(|| { - // reset desired candidates: - >::put(0); + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); - // can't accept anyone anymore. - assert_noop!( - CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3)), - Error::::TooManyCandidates, + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can decrease with one candidate + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can increase past initial deposit, should kick existing candidate + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn set_candidacy_bond_with_many_candidates_same_deposit() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); + + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 10 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] ); - // reset desired candidates: - >::put(1); + // can decrease with multiple candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] + ); + + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] + ); + + // can increase past initial deposit, should kick existing candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn 
set_candidacy_bond_with_many_candidates_different_deposits() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); + + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 20 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 30 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 20)); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can decrease with multiple candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can increase to 4's deposit, should kick 3 + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 20); + assert_eq!( + >::get(), + vec![candidate_4.clone(), candidate_5.clone()] + ); + + // can increase past 4's deposit, should kick 4 + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 25 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 25); + assert_eq!(>::get(), vec![candidate_5.clone()]); + + // lowering the minimum deposit should have no effect + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 5 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 5); + assert_eq!(>::get(), vec![candidate_5.clone()]); + + // add 3 and 4 back but with higher deposits than minimum + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 20)); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can increase the deposit above the current max in the list, all candidates should be + // kicked + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 40 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 40); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn cannot_register_candidate_if_too_many() { + new_test_ext().execute_with(|| { + >::put(1); + + // MaxCandidates: u32 = 20 + // Aside from 3, 4, and 5, create enough accounts to have 21 potential + // candidates. 
+ for i in 6..=23 { + Balances::make_free_balance_be(&i, 100); + let key = MockSessionKeys { aura: UintAuthorityId(i) }; + Session::set_keys(RuntimeOrigin::signed(i).into(), key, Vec::new()).unwrap(); + } + + for c in 3..=22 { + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(c))); + } - // but no more assert_noop!( - CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5)), + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(23)), Error::::TooManyCandidates, ); }) @@ -310,7 +500,7 @@ fn cannot_register_candidate_if_too_many() { #[test] fn cannot_unregister_candidate_if_too_few() { new_test_ext().execute_with(|| { - assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_ok!(CollatorSelection::remove_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -368,8 +558,12 @@ fn cannot_register_dupe_candidate() { new_test_ext().execute_with(|| { // can add 3 as candidate assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + // tuple of (id, deposit). let addition = CandidateInfo { who: 3, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![addition]); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![addition] + ); assert_eq!(CollatorSelection::last_authored_block(3), 10); assert_eq!(Balances::free_balance(3), 90); @@ -404,7 +598,8 @@ fn register_as_candidate_works() { // given assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); // take two endowed, non-invulnerables accounts. @@ -417,140 +612,888 @@ fn register_as_candidate_works() { assert_eq!(Balances::free_balance(3), 90); assert_eq!(Balances::free_balance(4), 90); - assert_eq!(CollatorSelection::candidates().len(), 2); + assert_eq!(>::get().iter().count(), 2); }); } #[test] -fn leave_intent() { +fn cannot_take_candidate_slot_if_invulnerable() { new_test_ext().execute_with(|| { - // register a candidate. - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - assert_eq!(Balances::free_balance(3), 90); - - // register too so can leave above min candidates - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); - assert_eq!(Balances::free_balance(5), 90); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); - // cannot leave if not candidate. + // can't 1 because it is invulnerable. 
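+ // (that is, account 1 cannot call `take_candidate_slot` at all, because it is already an
+ // invulnerable collator.)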
assert_noop!( - CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), - Error::::NotCandidate + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(1), 50u64.into(), 2), + Error::::AlreadyInvulnerable, ); - - // bond is returned - assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); - assert_eq!(Balances::free_balance(3), 100); - assert_eq!(CollatorSelection::last_authored_block(3), 0); - }); + }) } #[test] -fn authorship_event_handler() { +fn cannot_take_candidate_slot_if_keys_not_registered() { new_test_ext().execute_with(|| { - // put 100 in the pot + 5 for ED - Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(42), 50u64.into(), 3), + Error::::ValidatorNotRegistered + ); + }) +} - // 4 is the default author. - assert_eq!(Balances::free_balance(4), 100); +#[test] +fn cannot_take_candidate_slot_if_duplicate() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - // triggers `note_author` - Authorship::on_initialize(1); + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 10 }; + let actual_candidates = + >::get().iter().cloned().collect::>(); + assert_eq!(actual_candidates, vec![candidate_4, candidate_3]); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(CollatorSelection::last_authored_block(4), 10); + assert_eq!(Balances::free_balance(3), 90); - let collator = CandidateInfo { who: 4, deposit: 10 }; + // but no more + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(3), 50u64.into(), 4), + Error::::AlreadyCandidate, + ); + }) +} - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 0); +#[test] +fn cannot_take_candidate_slot_if_target_invalid() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_3] + ); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 100); - // half of the pot goes to the collator who's the author (4 in tests). - assert_eq!(Balances::free_balance(4), 140); - // half + ED stays. - assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); - }); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 50u64.into(), 5), + Error::::TargetIsNotCandidate, + ); + }) } #[test] -fn fees_edgecases() { +fn cannot_take_candidate_slot_if_poor() { new_test_ext().execute_with(|| { - // Nothing panics, no reward when no ED in balance - Authorship::on_initialize(1); - // put some money into the pot at ED - Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); - // 4 is the default author. 
- assert_eq!(Balances::free_balance(4), 100); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - // triggers `note_author` - Authorship::on_initialize(1); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(33), 0); - let collator = CandidateInfo { who: 4, deposit: 10 }; + // works + assert_ok!(CollatorSelection::take_candidate_slot( + RuntimeOrigin::signed(3), + 20u64.into(), + 4 + )); - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 0); - // Nothing received - assert_eq!(Balances::free_balance(4), 90); - // all fee stays - assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + // poor + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(33), 30u64.into(), 3), + BalancesError::::InsufficientBalance, + ); }); } #[test] -fn session_management_works() { +fn cannot_take_candidate_slot_if_insufficient_deposit() { new_test_ext().execute_with(|| { - initialize_to_block(1); - - assert_eq!(SessionChangeBlock::get(), 0); - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - initialize_to_block(4); - - assert_eq!(SessionChangeBlock::get(), 0); - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - // add a new collator assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - - // session won't see this. - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - // but we have a new candidate. - assert_eq!(CollatorSelection::candidates().len(), 1); - - initialize_to_block(10); - assert_eq!(SessionChangeBlock::get(), 10); - // pallet-session has 1 session delay; current validators are the same. - assert_eq!(Session::validators(), vec![1, 2]); - // queued ones are changed, and now we have 3. - assert_eq!(Session::queued_keys().len(), 3); - // session handlers (aura, et. al.) cannot see this yet. - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - initialize_to_block(20); - assert_eq!(SessionChangeBlock::get(), 20); - // changed are now reflected to session handlers. 
- assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60u64.into())); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 5u64.into(), 3), + Error::::InsufficientBond, + ); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); }); } #[test] -fn kick_mechanism() { +fn cannot_take_candidate_slot_if_deposit_less_than_target() { new_test_ext().execute_with(|| { - // add a new collator assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - initialize_to_block(10); - assert_eq!(CollatorSelection::candidates().len(), 2); - initialize_to_block(20); - assert_eq!(SessionChangeBlock::get(), 20); - // 4 authored this block, gets to stay 3 was kicked - assert_eq!(CollatorSelection::candidates().len(), 1); - // 3 will be kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); - let collator = CandidateInfo { who: 4, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 20); - initialize_to_block(30); - // 3 gets kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); - // kicked collator gets funds back + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60u64.into())); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 20u64.into(), 3), + Error::::InsufficientBond, + ); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + }); +} + +#[test] +fn take_candidate_slot_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_eq!(>::get().iter().count(), 3); + + Balances::make_free_balance_be(&6, 100); + let key = MockSessionKeys { aura: UintAuthorityId(6) }; + Session::set_keys(RuntimeOrigin::signed(6).into(), key, Vec::new()).unwrap(); + + assert_ok!(CollatorSelection::take_candidate_slot( + RuntimeOrigin::signed(6), + 50u64.into(), + 4 + )); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 90); + assert_eq!(Balances::free_balance(6), 50); + + // tuple of (id, deposit). 
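+ // Expected order after the replacement, still ascending by deposit: 5 and 3 keep their bonds
+ // of 10 (3 ranks higher because, having registered earlier, it is the least recently updated
+ // entry), and 6 sits on top with its 50 deposit.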
+ let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_6 = CandidateInfo { who: 6, deposit: 50 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + let mut actual_candidates = + >::get().iter().cloned().collect::>(); + actual_candidates.sort_by(|info_1, info_2| info_1.deposit.cmp(&info_2.deposit)); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_5, candidate_3, candidate_6] + ); + }); +} + +#[test] +fn increase_candidacy_bond_non_candidate_account() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 20), + Error::::NotCandidate + ); + }); +} + +#[test] +fn increase_candidacy_bond_insufficient_balance() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 110), + BalancesError::::InsufficientBalance + ); + + assert_eq!(Balances::free_balance(3), 90); + }); +} + +#[test] +fn increase_candidacy_bond_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 40)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 60)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 60); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 60); + }); +} + +#[test] +fn decrease_candidacy_bond_non_candidate_account() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + + assert_eq!(Balances::free_balance(5), 100); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10), + Error::::NotCandidate + ); + assert_eq!(Balances::free_balance(5), 100); + }); +} + +#[test] +fn decrease_candidacy_bond_insufficient_funds() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 60)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 40); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 0), + Error::::DepositTooLow + ); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(4), 5), + Error::::DepositTooLow + ); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 9), + Error::::DepositTooLow + ); + + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 40); + }); +} + +#[test] +fn decrease_candidacy_bond_occupying_top_slot() { + new_test_ext().execute_with(|| { + assert_eq!(CollatorSelection::desired_candidates(), 2); + // Register 3 candidates. + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + // And update their bids. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 30u64.into())); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30u64.into())); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60u64.into())); + + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 30 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 60 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_4, candidate_3, candidate_5] + ); + + // Candidates 5 and 3 can't decrease their deposits because they are the 2 top candidates. + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 29), + Error::::InvalidUnreserve, + ); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 29), + Error::::InvalidUnreserve, + ); + // But candidate 4 should have be able to decrease the deposit up to the minimum. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 29u64.into())); + + // Make candidate 4 outbid candidate 3, taking their spot as the second highest bid. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 35u64.into())); + + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 35 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 60 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_3, candidate_4, candidate_5] + ); + + // Now candidates 5 and 4 are the 2 top candidates, so they can't decrease their deposits. 
+ assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 34), + Error::::InvalidUnreserve, + ); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(4), 34), + Error::::InvalidUnreserve, + ); + // Candidate 3 should have be able to decrease the deposit up to the minimum now that + // they've fallen out of the top spots. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10u64.into())); + }); +} + +#[test] +fn decrease_candidacy_bond_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + }); +} + +#[test] +fn update_candidacy_bond_with_identical_amount() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20), + Error::::IdenticalDeposit + ); + assert_eq!(Balances::free_balance(3), 80); + }); +} + +#[test] +fn candidate_list_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 20)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 30)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 25)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10)); + + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 25 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_5, candidate_4, candidate_3] + ); + }); +} + +#[test] +fn leave_intent() { + new_test_ext().execute_with(|| { + // register a candidate. + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 90); + + // register too so can leave above min candidates + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_eq!(Balances::free_balance(5), 90); + + // cannot leave if not candidate. + assert_noop!( + CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), + Error::::NotCandidate + ); + + // bond is returned + assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(CollatorSelection::last_authored_block(3), 0); + }); +} + +#[test] +fn authorship_event_handler() { + new_test_ext().execute_with(|| { + // put 100 in the pot + 5 for ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + + // 4 is the default author. 
+ assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + // triggers `note_author` + Authorship::on_initialize(1); + + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + + // half of the pot goes to the collator who's the author (4 in tests). + assert_eq!(Balances::free_balance(4), 140); + // half + ED stays. + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); + }); +} + +#[test] +fn fees_edgecases() { + new_test_ext().execute_with(|| { + // Nothing panics, no reward when no ED in balance + Authorship::on_initialize(1); + // put some money into the pot at ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); + // 4 is the default author. + assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + // triggers `note_author` + Authorship::on_initialize(1); + + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + // Nothing received + assert_eq!(Balances::free_balance(4), 90); + // all fee stays + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + }); +} + +#[test] +fn session_management_single_candidate() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 1); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 3. + assert_eq!(Session::queued_keys().len(), 3); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + }); +} + +#[test] +fn session_management_max_candidates() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. 
+ assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + }); +} + +#[test] +fn session_management_increase_bid_with_list_update() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5, 3]); + }); +} + +#[test] +fn session_management_candidate_list_eager_sort() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. 
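+ // (5 takes one slot with its 60 bid; 3 beats 4 for the other slot because, at an equal bond
+ // of 10, the earlier-registered candidate ranks higher.)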
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5, 3]); + }); +} + +#[test] +fn session_management_reciprocal_outbidding() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + initialize_to_block(5); + + // candidates 3 and 4 saw they were outbid and preemptively bid more + // than 5 in the next block. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 70)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 70)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4, 3]); + }); +} + +#[test] +fn session_management_decrease_bid_after_auction() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + initialize_to_block(5); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 70)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 70)); + + initialize_to_block(5); + + // candidate 5 saw it was outbid and wants to take back its bid, but + // not entirely so they still keep their place in the candidate list + // in case there is an opportunity in the future. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. 
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4, 3]); + }); +} + +#[test] +fn kick_mechanism() { + new_test_ext().execute_with(|| { + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + initialize_to_block(10); + assert_eq!(>::get().iter().count(), 2); + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // 4 authored this block, gets to stay 3 was kicked + assert_eq!(>::get().iter().count(), 1); + // 3 will be kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 20); + initialize_to_block(30); + // 3 gets kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); + // kicked collator gets funds back assert_eq!(Balances::free_balance(3), 100); }); } @@ -559,7 +1502,8 @@ fn kick_mechanism() { fn should_not_kick_mechanism_too_few() { new_test_ext().execute_with(|| { // remove the invulnerables and add new collators 3 and 5 - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_ok!(CollatorSelection::remove_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -573,30 +1517,34 @@ fn should_not_kick_mechanism_too_few() { )); initialize_to_block(10); - assert_eq!(CollatorSelection::candidates().len(), 2); + assert_eq!(>::get().iter().count(), 2); initialize_to_block(20); assert_eq!(SessionChangeBlock::get(), 20); // 4 authored this block, 3 is kicked, 5 stays because of too few collators - assert_eq!(CollatorSelection::candidates().len(), 1); + assert_eq!(>::get().iter().count(), 1); // 3 will be kicked after 1 session delay assert_eq!(SessionHandlerCollators::get(), vec![3, 5]); - let collator = CandidateInfo { who: 5, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator]); + // tuple of (id, deposit). + let collator = CandidateInfo { who: 3, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); assert_eq!(CollatorSelection::last_authored_block(4), 20); initialize_to_block(30); // 3 gets kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![5]); + assert_eq!(SessionHandlerCollators::get(), vec![3]); // kicked collator gets funds back - assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(5), 100); }); } #[test] fn should_kick_invulnerables_from_candidates_on_session_change() { new_test_ext().execute_with(|| { - assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(>::get().iter().count(), 0); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); assert_eq!(Balances::free_balance(3), 90); @@ -606,16 +1554,22 @@ fn should_kick_invulnerables_from_candidates_on_session_change() { vec![1, 2, 3] )); + // tuple of (id, deposit). 
let collator_3 = CandidateInfo { who: 3, deposit: 10 }; let collator_4 = CandidateInfo { who: 4, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator_3, collator_4.clone()]); + let actual_candidates = + >::get().iter().cloned().collect::>(); + assert_eq!(actual_candidates, vec![collator_4.clone(), collator_3]); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2, 3]); // session change initialize_to_block(10); // 3 is removed from candidates - assert_eq!(CollatorSelection::candidates(), vec![collator_4]); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator_4] + ); // but not from invulnerables assert_eq!(CollatorSelection::invulnerables(), vec![1, 2, 3]); // and it got its deposit back diff --git a/cumulus/pallets/collator-selection/src/weights.rs b/cumulus/pallets/collator-selection/src/weights.rs index f8f86fb7dec2c08601eebe4d8b345a0f7c930b70..1c01ad6cd6fe8e8ed4bc02c3c2d6703eb2882df4 100644 --- a/cumulus/pallets/collator-selection/src/weights.rs +++ b/cumulus/pallets/collator-selection/src/weights.rs @@ -30,9 +30,11 @@ pub trait WeightInfo { fn add_invulnerable(_b: u32, _c: u32) -> Weight; fn remove_invulnerable(_b: u32) -> Weight; fn set_desired_candidates() -> Weight; - fn set_candidacy_bond() -> Weight; + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight; fn register_as_candidate(_c: u32) -> Weight; fn leave_intent(_c: u32) -> Weight; + fn update_bond(_c: u32) -> Weight; + fn take_candidate_slot(_c: u32) -> Weight; fn note_author() -> Weight; fn new_session(_c: u32, _r: u32) -> Weight; } @@ -49,7 +51,7 @@ impl WeightInfo for SubstrateWeight { fn set_desired_candidates() -> Weight { Weight::from_parts(16_363_000_u64, 0).saturating_add(T::DbWeight::get().writes(1_u64)) } - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { Weight::from_parts(16_840_000_u64, 0).saturating_add(T::DbWeight::get().writes(1_u64)) } fn register_as_candidate(c: u32) -> Weight { @@ -66,6 +68,20 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } + fn update_bond(c: u32) -> Weight { + Weight::from_parts(55_336_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(151_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + fn take_candidate_slot(c: u32) -> Weight { + Weight::from_parts(71_196_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(198_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } fn note_author() -> Weight { Weight::from_parts(71_461_000_u64, 0) .saturating_add(T::DbWeight::get().reads(3_u64)) @@ -136,7 +152,7 @@ impl WeightInfo for () { fn set_desired_candidates() -> Weight { Weight::from_parts(16_363_000_u64, 0).saturating_add(RocksDbWeight::get().writes(1_u64)) } - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { Weight::from_parts(16_840_000_u64, 0).saturating_add(RocksDbWeight::get().writes(1_u64)) } fn register_as_candidate(c: u32) -> Weight { @@ -158,6 +174,20 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } + fn update_bond(c: u32) -> Weight { + Weight::from_parts(55_336_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(151_000_u64, 0).saturating_mul(c 
as u64)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + fn take_candidate_slot(c: u32) -> Weight { + Weight::from_parts(71_196_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(198_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } fn new_session(r: u32, c: u32) -> Weight { Weight::from_parts(0_u64, 0) // Standard Error: 1_010_000 diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 43fb131aec2a7d3241b76190196b620c207f7b8a..bdcee0f5ff857a9323b4b3568d7b97bad4630dd6 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -7,6 +7,9 @@ repository.workspace = true description = "Migrates messages from the old DMP queue pallet." license = "Apache-2.0" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -23,7 +26,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-io = { path = "../../../substrate/primitives/io", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus cumulus-primitives-core = { path = "../../primitives/core", default-features = false } @@ -33,7 +36,7 @@ sp-core = { path = "../../../substrate/primitives/core" } sp-tracing = { path = "../../../substrate/primitives/tracing" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 5600c95a2a6058dba008a9a28cff7c53b02ed4cd..d24fdfe101e9e94b7d84008cc0ff909b630defd1 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Base pallet for cumulus-based parachains" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] bytes = { version = "1.4.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } @@ -16,34 +19,36 @@ trie-db = { version = "0.28.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false} -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} 
-sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false} -sp-version = { path = "../../../substrate/primitives/version", default-features = false} +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-version = { path = "../../../substrate/primitives/version", default-features = false } # Polkadot -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = [ "wasm-api" ]} +polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = ["wasm-api"] } polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } [dev-dependencies] assert_matches = "1.5" hex-literal = "0.4.1" lazy_static = "1.4" +trie-standardmap = "0.16.0" rand = "0.8.5" futures = "0.3.28" @@ -58,13 +63,14 @@ cumulus-test-client = { path = "../../test/client" } cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "bytes/std", "codec/std", "cumulus-pallet-parachain-system-proc-macro/std", "cumulus-primitives-core/std", "cumulus-primitives-parachain-inherent/std", + "cumulus-primitives-proof-size-hostfunction/std", "environmental/std", "frame-benchmarking/std", "frame-support/std", diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index 6accfa92c572c57193cbe975d731664b26d623aa..676f333e065076611c68842a327159d9cb699711 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -6,15 +6,18 @@ edition.workspace = true description = "Proc macros provided by the parachain-system pallet" license = "Apache-2.0" +[lints] +workspace = true + [lib] proc-macro = true [dependencies] -syn = "2.0.38" +syn = "2.0.41" proc-macro2 = "1.0.64" quote = "1.0.33" -proc-macro-crate = "1.3.1" +proc-macro-crate = 
"2.0.1" [features] -default = [ "std" ] +default = ["std"] std = [] diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index bac1ee28a7ca3abe17bea66ad3bac4830d62bdc1..ba8aff0e369d6774865c330786b62c8fd7657b64 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -27,7 +27,7 @@ //! //! Users must ensure that they register this pallet as an inherent provider. -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, MessageSendError, @@ -50,10 +50,9 @@ use scale_info::TypeInfo; use sp_runtime::{ traits::{Block as BlockT, BlockNumberProvider, Hash}, transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionSource, TransactionValidity, - ValidTransaction, + InvalidTransaction, TransactionSource, TransactionValidity, ValidTransaction, }, - BoundedSlice, DispatchError, FixedU128, RuntimeDebug, Saturating, + BoundedSlice, FixedU128, RuntimeDebug, Saturating, }; use sp_std::{cmp, collections::btree_map::BTreeMap, prelude::*}; use xcm::latest::XcmHash; @@ -169,20 +168,6 @@ impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases { } } -/// Information needed when a new runtime binary is submitted and needs to be authorized before -/// replacing the current runtime. -#[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] -#[scale_info(skip_type_params(T))] -struct CodeUpgradeAuthorization -where - T: Config, -{ - /// Hash of the new runtime binary. - code_hash: T::Hash, - /// Whether or not to carry out version checks. - check_version: bool, -} - /// The max length of a DMP message. pub type MaxDmpMessageLenOf = <::DmpQueue as HandleMessage>::MaxMessageLen; @@ -204,7 +189,7 @@ pub mod ump_constants { pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; + use frame_system::{pallet_prelude::*, WeightInfo as SystemWeightInfo}; #[pallet::pallet] #[pallet::storage_version(migration::STORAGE_VERSION)] @@ -677,16 +662,18 @@ pub mod pallet { /// /// This call requires Root origin. #[pallet::call_index(2)] - #[pallet::weight((1_000_000, DispatchClass::Operational))] + #[pallet::weight(::SystemWeightInfo::authorize_upgrade())] + #[allow(deprecated)] + #[deprecated( + note = "To be removed after June 2024. Migrate to `frame_system::authorize_upgrade`." + )] pub fn authorize_upgrade( origin: OriginFor, code_hash: T::Hash, check_version: bool, ) -> DispatchResult { ensure_root(origin)?; - AuthorizedUpgrade::::put(CodeUpgradeAuthorization { code_hash, check_version }); - - Self::deposit_event(Event::UpgradeAuthorized { code_hash }); + frame_system::Pallet::::do_authorize_upgrade(code_hash, check_version); Ok(()) } @@ -700,15 +687,17 @@ pub mod pallet { /// /// All origins are allowed. #[pallet::call_index(3)] - #[pallet::weight({1_000_000})] + #[pallet::weight(::SystemWeightInfo::apply_authorized_upgrade())] + #[allow(deprecated)] + #[deprecated( + note = "To be removed after June 2024. Migrate to `frame_system::apply_authorized_upgrade`." 
+ )] pub fn enact_authorized_upgrade( _: OriginFor, code: Vec, ) -> DispatchResultWithPostInfo { - Self::validate_authorized_upgrade(&code[..])?; - Self::schedule_code_upgrade(code)?; - AuthorizedUpgrade::::kill(); - Ok(Pays::No.into()) + let post = frame_system::Pallet::::do_apply_authorize_upgrade(code)?; + Ok(post) } } @@ -721,8 +710,6 @@ pub mod pallet { ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber }, /// The relay-chain aborted the upgrade process. ValidationFunctionDiscarded, - /// An upgrade has been authorized. - UpgradeAuthorized { code_hash: T::Hash }, /// Some downward messages have been received and will be processed. DownwardMessagesReceived { count: u32 }, /// Downward messages were processed using the given weight. @@ -928,10 +915,6 @@ pub mod pallet { #[pallet::storage] pub(super) type ReservedDmpWeightOverride = StorageValue<_, Weight>; - /// The next authorized upgrade, if there is one. - #[pallet::storage] - pub(super) type AuthorizedUpgrade = StorageValue<_, CodeUpgradeAuthorization>; - /// A custom head data that should be returned as result of `validate_block`. /// /// See `Pallet::set_custom_validation_head_data` for more information. @@ -982,7 +965,8 @@ pub mod pallet { fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::enact_authorized_upgrade { ref code } = call { - if let Ok(hash) = Self::validate_authorized_upgrade(code) { + if let Ok(hash) = frame_system::Pallet::::validate_authorized_upgrade(&code[..]) + { return Ok(ValidTransaction { priority: 100, requires: Vec::new(), @@ -1001,21 +985,6 @@ pub mod pallet { } impl Pallet { - fn validate_authorized_upgrade(code: &[u8]) -> Result { - let authorization = AuthorizedUpgrade::::get().ok_or(Error::::NothingAuthorized)?; - - // ensure that the actual hash matches the authorized hash - let actual_hash = T::Hashing::hash(code); - ensure!(actual_hash == authorization.code_hash, Error::::Unauthorized); - - // check versions if required as part of the authorization - if authorization.check_version { - frame_system::Pallet::::can_set_code(code)?; - } - - Ok(actual_hash) - } - /// Get the unincluded segment size after the given hash. /// /// If the unincluded segment doesn't contain the given hash, this returns the @@ -1563,8 +1532,8 @@ impl Pallet { } } +/// Type that implements `SetCode`. pub struct ParachainSetCode(sp_std::marker::PhantomData); - impl frame_system::SetCode for ParachainSetCode { fn set_code(code: Vec) -> DispatchResult { Pallet::::schedule_code_upgrade(code) @@ -1630,7 +1599,7 @@ impl Pallet { /// Get the relay chain block number which was used as an anchor for the last block in this /// chain. 
- pub fn last_relay_block_number(&self) -> RelayChainBlockNumber { + pub fn last_relay_block_number() -> RelayChainBlockNumber { LastRelayChainBlockNumber::::get() } } diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index ab1775b40a84451e150ad979cbf8b3f7888e8ccf..7528d3d9fe8d97a24602c4206f5489f1602bd957 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -1127,8 +1127,9 @@ fn upgrade_version_checks_should_work() { let new_code = vec![1, 2, 3, 4]; let new_code_hash = H256(sp_core::blake2_256(&new_code)); - let _authorize = - ParachainSystem::authorize_upgrade(RawOrigin::Root.into(), new_code_hash, true); + #[allow(deprecated)] + let _authorize = ParachainSystem::authorize_upgrade(RawOrigin::Root.into(), new_code_hash, true); + #[allow(deprecated)] let res = ParachainSystem::enact_authorized_upgrade(RawOrigin::None.into(), new_code); assert_eq!(expected.map_err(DispatchErrorWithPostInfo::from), res); diff --git a/cumulus/pallets/parachain-system/src/validate_block/mod.rs b/cumulus/pallets/parachain-system/src/validate_block/mod.rs index db149401638aabeb65f3242fc450e27f8edb93cd..763a4cffd77f92171c1f102cfc70485a02e5f27e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/mod.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/mod.rs @@ -26,6 +26,10 @@ mod tests; #[doc(hidden)] mod trie_cache; +#[cfg(any(test, not(feature = "std")))] +#[doc(hidden)] +mod trie_recorder; + #[cfg(not(feature = "std"))] #[doc(hidden)] pub use bytes; diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs new file mode 100644 index 0000000000000000000000000000000000000000..e73aef70aa491fc68aad4f9479222d9a076e7edc --- /dev/null +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -0,0 +1,286 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Provide a specialized trie-recorder and provider for use in validate-block. +//! +//! This file defines two main structs, [`SizeOnlyRecorder`] and +//! [`SizeOnlyRecorderProvider`]. They are used to track the current +//! proof-size without actually recording the accessed nodes themselves. + +use codec::Encode; + +use sp_std::{ + cell::{RefCell, RefMut}, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + rc::Rc, +}; +use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof}; +use trie_db::{Hasher, RecordedForKey, TrieAccess}; + +/// A trie recorder that only keeps track of the proof size. +/// +/// The internal size counting logic should align +/// with ['sp_trie::recorder::Recorder']. 
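+///
+/// Only the hashes of already seen nodes and a running total of their encoded sizes are
+/// kept, so no storage proof can be reconstructed from it (`drain_storage_proof` below
+/// returns `None`); its sole output is the proof-size estimate used while validating a
+/// block.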
+pub(crate) struct SizeOnlyRecorder<'a, H: Hasher> {
+	seen_nodes: RefMut<'a, BTreeSet<H::Out>>,
+	encoded_size: RefMut<'a, usize>,
+	recorded_keys: RefMut<'a, BTreeMap<Vec<u8>, RecordedForKey>>,
+}
+
+impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder<H::Out> for SizeOnlyRecorder<'a, H> {
+	fn record(&mut self, access: TrieAccess<'_, H::Out>) {
+		let mut encoded_size_update = 0;
+		match access {
+			TrieAccess::NodeOwned { hash, node_owned } =>
+				if self.seen_nodes.insert(hash) {
+					let node = node_owned.to_encoded::<NodeCodec<H>>();
+					encoded_size_update += node.encoded_size();
+				},
+			TrieAccess::EncodedNode { hash, encoded_node } =>
+				if self.seen_nodes.insert(hash) {
+					encoded_size_update += encoded_node.encoded_size();
+				},
+			TrieAccess::Value { hash, value, full_key } => {
+				if self.seen_nodes.insert(hash) {
+					encoded_size_update += value.encoded_size();
+				}
+				self.recorded_keys
+					.entry(full_key.into())
+					.and_modify(|e| *e = RecordedForKey::Value)
+					.or_insert_with(|| RecordedForKey::Value);
+			},
+			TrieAccess::Hash { full_key } => {
+				self.recorded_keys
+					.entry(full_key.into())
+					.or_insert_with(|| RecordedForKey::Hash);
+			},
+			TrieAccess::NonExisting { full_key } => {
+				self.recorded_keys
+					.entry(full_key.into())
+					.and_modify(|e| *e = RecordedForKey::Value)
+					.or_insert_with(|| RecordedForKey::Value);
+			},
+			TrieAccess::InlineValue { full_key } => {
+				self.recorded_keys
+					.entry(full_key.into())
+					.and_modify(|e| *e = RecordedForKey::Value)
+					.or_insert_with(|| RecordedForKey::Value);
+			},
+		};
+
+		*self.encoded_size += encoded_size_update;
+	}
+
+	fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey {
+		self.recorded_keys.get(key).copied().unwrap_or(RecordedForKey::None)
+	}
+}
+
+#[derive(Clone)]
+pub(crate) struct SizeOnlyRecorderProvider<H: Hasher> {
+	seen_nodes: Rc<RefCell<BTreeSet<H::Out>>>,
+	encoded_size: Rc<RefCell<usize>>,
+	recorded_keys: Rc<RefCell<BTreeMap<Vec<u8>, RecordedForKey>>>,
+}
+
+impl<H: Hasher> SizeOnlyRecorderProvider<H> {
+	pub fn new() -> Self {
+		Self {
+			seen_nodes: Default::default(),
+			encoded_size: Default::default(),
+			recorded_keys: Default::default(),
+		}
+	}
+}
+
+impl<H: Hasher> sp_trie::TrieRecorderProvider<H> for SizeOnlyRecorderProvider<H> {
+	type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a;
+
+	fn drain_storage_proof(self) -> Option<StorageProof> {
+		None
+	}
+
+	fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> {
+		SizeOnlyRecorder {
+			encoded_size: self.encoded_size.borrow_mut(),
+			seen_nodes: self.seen_nodes.borrow_mut(),
+			recorded_keys: self.recorded_keys.borrow_mut(),
+		}
+	}
+}
+
+impl<H: Hasher> ProofSizeProvider for SizeOnlyRecorderProvider<H> {
+	fn estimate_encoded_size(&self) -> usize {
+		*self.encoded_size.borrow()
+	}
+}
+
+// This is safe here since we are single-threaded in WASM
+unsafe impl<H: Hasher> Send for SizeOnlyRecorderProvider<H> {}
+unsafe impl<H: Hasher> Sync for SizeOnlyRecorderProvider<H> {}
+
+#[cfg(test)]
+mod tests {
+	use rand::Rng;
+	use sp_trie::{
+		cache::{CacheSize, SharedTrieCache},
+		MemoryDB, ProofSizeProvider, TrieRecorderProvider,
+	};
+	use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder};
+	use trie_standardmap::{Alphabet, StandardMap, ValueMode};
+
+	use super::*;
+
+	type Recorder = sp_trie::recorder::Recorder<sp_core::Blake2Hasher>;
+
+	fn create_trie() -> (
+		sp_trie::MemoryDB<sp_core::Blake2Hasher>,
+		TrieHash<sp_trie::LayoutV1<sp_core::Blake2Hasher>>,
+		Vec<(Vec<u8>, Vec<u8>)>,
+	) {
+		let mut db = MemoryDB::default();
+		let mut root = Default::default();
+
+		let mut seed = Default::default();
+		let test_data: Vec<(Vec<u8>, Vec<u8>)> = StandardMap {
+			alphabet: Alphabet::Low,
+			min_key: 16,
+			journal_key: 0,
+			value_mode: ValueMode::Random,
+			count: 1000,
+		}
+		.make_with(&mut seed)
+		.into_iter()
+		.map(|(k,
v)| { + // Double the length so we end up with some values of 2 bytes and some of 64 + let v = [v.clone(), v].concat(); + (k, v) + }) + .collect(); + + // Fill database with values + { + let mut trie = TrieDBMutBuilder::>::new( + &mut db, &mut root, + ) + .build(); + for (k, v) in &test_data { + trie.insert(k, v).expect("Inserts data"); + } + } + + (db, root, test_data) + } + + #[test] + fn recorder_equivalence_cache() { + let (db, root, test_data) = create_trie(); + + let mut rng = rand::thread_rng(); + for _ in 1..10 { + let reference_recorder = Recorder::default(); + let recorder_for_test: SizeOnlyRecorderProvider = + SizeOnlyRecorderProvider::new(); + let reference_cache: SharedTrieCache = + SharedTrieCache::new(CacheSize::new(1024 * 5)); + let cache_for_test: SharedTrieCache = + SharedTrieCache::new(CacheSize::new(1024 * 5)); + { + let local_cache = cache_for_test.local_cache(); + let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root); + let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); + let reference_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .with_cache(&mut trie_cache_for_reference) + .build(); + + let local_cache_for_test = reference_cache.local_cache(); + let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root); + let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); + let test_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut trie_recorder_under_test) + .with_cache(&mut trie_cache_for_test) + .build(); + + // Access random values from the test data + for _ in 0..100 { + let index: usize = rng.gen_range(0..test_data.len()); + test_trie.get(&test_data[index].0).unwrap().unwrap(); + reference_trie.get(&test_data[index].0).unwrap().unwrap(); + } + + // Check that we have the same nodes recorded for both recorders + for (key, _) in test_data.iter() { + let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key); + let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key); + assert_eq!(format!("{:?}", reference), format!("{:?}", test_value)); + } + } + + // Check that we have the same size recorded for both recorders + assert_eq!( + reference_recorder.estimate_encoded_size(), + recorder_for_test.estimate_encoded_size() + ); + } + } + + #[test] + fn recorder_equivalence_no_cache() { + let (db, root, test_data) = create_trie(); + + let mut rng = rand::thread_rng(); + for _ in 1..10 { + let reference_recorder = Recorder::default(); + let recorder_for_test: SizeOnlyRecorderProvider = + SizeOnlyRecorderProvider::new(); + { + let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); + let reference_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .build(); + + let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); + let test_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut trie_recorder_under_test) + .build(); + + for _ in 0..200 { + let index: usize = rng.gen_range(0..test_data.len()); + test_trie.get(&test_data[index].0).unwrap().unwrap(); + reference_trie.get(&test_data[index].0).unwrap().unwrap(); + } + + // Check that we have the same nodes recorded for both recorders + for (key, _) in test_data.iter() { + let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key); + let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key); + assert_eq!(format!("{:?}", reference), format!("{:?}", 
test_value)); + } + } + + // Check that we have the same size recorded for both recorders + assert_eq!( + reference_recorder.estimate_encoded_size(), + recorder_for_test.estimate_encoded_size() + ); + } + } +} diff --git a/cumulus/pallets/session-benchmarking/Cargo.toml b/cumulus/pallets/session-benchmarking/Cargo.toml index a28971d66d3f667892676d7a9d756a40f1d9f4d2..af2dc2300d74b1822c9e926c719d449c5f1f792a 100644 --- a/cumulus/pallets/session-benchmarking/Cargo.toml +++ b/cumulus/pallets/session-benchmarking/Cargo.toml @@ -9,20 +9,23 @@ repository.workspace = true description = "FRAME sessions pallet benchmarking" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] parity-scale-codec = { version = "3.6.4", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} -pallet-session = { path = "../../../substrate/frame/session", default-features = false} +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +pallet-session = { path = "../../../substrate/frame/session", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index e4ef72965c732135176a3c46df8481a65335f977..e1c94cbfde96ebe27793f713f92dc0e7b915711f 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -6,25 +6,28 @@ edition.workspace = true description = "Adds functionality to migrate from a Solo to a Parachain" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = 
"../../../substrate/primitives/std", default-features = false } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false} +cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-parachain-system/std", diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index c8e819979bdb1f9841632227e2ad2f5e7d3cbd63..9bbc281154ce3a7936ca3aa69dea615de43adbd5 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -6,22 +6,25 @@ version = "0.1.0" license = "Apache-2.0" description = "Pallet for stuff specific to parachains' usage of XCM" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index fba006e7986f990d08d70d5cc23d266b0e642dc4..50ec5cacb2e9d022e3c5c4f1bb361fdfdecf4572 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -6,18 +6,21 @@ edition.workspace = true description = "Pallet to queue outbound and inbound XCMP messages." 
license = "Apache-2.0" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ], default-features = false } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { version = "0.4.20", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } # Polkadot @@ -30,7 +33,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm cumulus-primitives-core = { path = "../../primitives/core", default-features = false } # Optional import for benchmarking -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } bounded-collections = { version = "0.1.4", default-features = false } # Bridges @@ -50,7 +53,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x cumulus-pallet-parachain-system = { path = "../parachain-system", features = ["parameterized-consensus-hook"] } [features] -default = [ "std" ] +default = ["std"] std = [ "bounded-collections/std", "bp-xcm-bridge-hub-router?/std", @@ -96,4 +99,4 @@ try-runtime = [ "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] -bridging = [ "bp-xcm-bridge-hub-router" ] +bridging = ["bp-xcm-bridge-hub-router"] diff --git a/cumulus/pallets/xcmp-queue/src/benchmarking.rs b/cumulus/pallets/xcmp-queue/src/benchmarking.rs index 81dfbc2bb71ce7196fb7adfc95732d7c56a23d7b..49e2cc8367348ad6a81c0017405f8c3148734be5 100644 --- a/cumulus/pallets/xcmp-queue/src/benchmarking.rs +++ b/cumulus/pallets/xcmp-queue/src/benchmarking.rs @@ -85,7 +85,7 @@ mod benchmarks { } assert!( - OutboundXcmpStatus::::get().iter().find(|p| p.recipient == para).is_none(), + OutboundXcmpStatus::::get().iter().all(|p| p.recipient != para), "No messages in the channel; therefore removed." 
); } diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index d687f83d8b3e36bb673bd8540117429880012cd7..71cd21d45f777c4f9c2a5b3d2b5e35c26ecfc44b 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -60,7 +60,7 @@ use cumulus_primitives_core::{ use frame_support::{ defensive, defensive_assert, traits::{EnqueueMessage, EnsureOrigin, Get, QueueFootprint, QueuePausedQuery}, - weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight, WeightMeter}, + weights::{Weight, WeightMeter}, BoundedVec, }; use pallet_message_queue::OnQueueChanged; @@ -255,7 +255,7 @@ pub mod pallet { return meter.consumed() } - migration::lazy_migrate_inbound_queue::(); + migration::v3::lazy_migrate_inbound_queue::(); meter.consumed() } @@ -387,36 +387,16 @@ pub struct QueueConfigData { /// The number of pages which the queue must be reduced to before it signals that /// message sending may recommence after it has been suspended. resume_threshold: u32, - /// UNUSED - The amount of remaining weight under which we stop processing messages. - #[deprecated(note = "Will be removed")] - threshold_weight: Weight, - /// UNUSED - The speed to which the available weight approaches the maximum weight. A lower - /// number results in a faster progression. A value of 1 makes the entire weight available - /// initially. - #[deprecated(note = "Will be removed")] - weight_restrict_decay: Weight, - /// UNUSED - The maximum amount of weight any individual message may consume. Messages above - /// this weight go into the overweight queue and may only be serviced explicitly. - #[deprecated(note = "Will be removed")] - xcmp_max_individual_weight: Weight, } impl Default for QueueConfigData { fn default() -> Self { // NOTE that these default values are only used on genesis. They should give a rough idea of // what to set these values to, but is in no way a requirement. - #![allow(deprecated)] Self { drop_threshold: 48, // 64KiB * 48 = 3MiB suspend_threshold: 32, // 64KiB * 32 = 2MiB resume_threshold: 8, // 64KiB * 8 = 512KiB - // unused: - threshold_weight: Weight::from_parts(100_000, 0), - weight_restrict_decay: Weight::from_parts(2, 0), - xcmp_max_individual_weight: Weight::from_parts( - 20u64 * WEIGHT_REF_TIME_PER_MILLIS, - DEFAULT_POV_SIZE, - ), } } } @@ -474,11 +454,21 @@ impl Pallet { ) -> Result { let encoded_fragment = fragment.encode(); + // Optimization note: `max_message_size` could potentially be stored in + // `OutboundXcmpMessages` once known; that way it's only accessed when a new page is needed. + let channel_info = T::ChannelInfo::get_channel_info(recipient).ok_or(MessageSendError::NoChannel)?; - let max_message_size = channel_info.max_message_size as usize; // Max message size refers to aggregates, or pages. Not to individual fragments. - if encoded_fragment.len() > max_message_size { + let max_message_size = channel_info.max_message_size as usize; + let format_size = format.encoded_size(); + // We check the encoded fragment length plus the format size agains the max message size + // because the format is concatenated if a new page is needed. 
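+		// For instance, with a `max_message_size` of 100 bytes and a 1-byte encoded format,
+		// a 100-byte fragment is rejected even though it would fit on its own, because
+		// appending it to a fresh page would require 101 bytes.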
+ let size_to_check = encoded_fragment + .len() + .checked_add(format_size) + .ok_or(MessageSendError::TooBig)?; + if size_to_check > max_message_size { return Err(MessageSendError::TooBig) } diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index 6d7f434b041a5d613efb7d6978f1e15643bbbd0c..6c86c3011d23807adfbde801ec6865b6731822df 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -16,7 +16,7 @@ //! A module that is responsible for migration of storage. -use crate::{Config, OverweightIndex, Pallet, ParaId, QueueConfig, DEFAULT_POV_SIZE}; +use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAULT_POV_SIZE}; use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ pallet_prelude::*, @@ -25,37 +25,17 @@ use frame_support::{ }; /// The current storage version. -pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); pub const LOG: &str = "runtime::xcmp-queue-migration"; -/// Migrates the pallet storage to the most recent version. -pub struct MigrationToV3(PhantomData); - -impl OnRuntimeUpgrade for MigrationToV3 { - fn on_runtime_upgrade() -> Weight { - let mut weight = T::DbWeight::get().reads(1); - - if StorageVersion::get::>() == 1 { - weight.saturating_accrue(migrate_to_v2::()); - StorageVersion::new(2).put::>(); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - - if StorageVersion::get::>() == 2 { - weight.saturating_accrue(migrate_to_v3::()); - StorageVersion::new(3).put::>(); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - - weight - } -} - mod v1 { use super::*; use codec::{Decode, Encode}; + #[frame_support::storage_alias] + pub(crate) type QueueConfig = StorageValue, QueueConfigData, ValueQuery>; + #[derive(Encode, Decode, Debug)] pub struct QueueConfigData { pub suspend_threshold: u32, @@ -80,6 +60,84 @@ mod v1 { } } +pub mod v2 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type QueueConfig = StorageValue, QueueConfigData, ValueQuery>; + + #[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct QueueConfigData { + pub suspend_threshold: u32, + pub drop_threshold: u32, + pub resume_threshold: u32, + pub threshold_weight: Weight, + pub weight_restrict_decay: Weight, + pub xcmp_max_individual_weight: Weight, + } + + impl Default for QueueConfigData { + fn default() -> Self { + Self { + suspend_threshold: 2, + drop_threshold: 5, + resume_threshold: 1, + threshold_weight: Weight::from_parts(100_000, 0), + weight_restrict_decay: Weight::from_parts(2, 0), + xcmp_max_individual_weight: Weight::from_parts( + 20u64 * WEIGHT_REF_TIME_PER_MILLIS, + DEFAULT_POV_SIZE, + ), + } + } + } + + /// Migrates `QueueConfigData` from v1 (using only reference time weights) to v2 (with + /// 2D weights). 
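+	///
+	/// The thresholds are carried over unchanged, while each former `u64` reference-time
+	/// value is wrapped into a two-dimensional `Weight` via `Weight::from_parts`, using
+	/// `DEFAULT_POV_SIZE` as the proof-size component of `xcmp_max_individual_weight`.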
+ pub struct UncheckedMigrationToV2(PhantomData); + + impl OnRuntimeUpgrade for UncheckedMigrationToV2 { + #[allow(deprecated)] + fn on_runtime_upgrade() -> Weight { + let translate = |pre: v1::QueueConfigData| -> v2::QueueConfigData { + v2::QueueConfigData { + suspend_threshold: pre.suspend_threshold, + drop_threshold: pre.drop_threshold, + resume_threshold: pre.resume_threshold, + threshold_weight: Weight::from_parts(pre.threshold_weight, 0), + weight_restrict_decay: Weight::from_parts(pre.weight_restrict_decay, 0), + xcmp_max_individual_weight: Weight::from_parts( + pre.xcmp_max_individual_weight, + DEFAULT_POV_SIZE, + ), + } + }; + + if v2::QueueConfig::::translate(|pre| pre.map(translate)).is_err() { + log::error!( + target: crate::LOG_TARGET, + "unexpected error when performing translation of the QueueConfig type \ + during storage upgrade to v2" + ); + } + + T::DbWeight::get().reads_writes(1, 1) + } + } + + /// [`UncheckedMigrationToV2`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 1. + #[allow(dead_code)] + pub type MigrationToV2 = frame_support::migrations::VersionedMigration< + 1, + 2, + UncheckedMigrationToV2, + Pallet, + ::DbWeight, + >; +} + pub mod v3 { use super::*; use crate::*; @@ -101,6 +159,10 @@ pub mod v3 { OptionQuery, >; + #[frame_support::storage_alias] + pub(crate) type QueueConfig = + StorageValue, v2::QueueConfigData, ValueQuery>; + #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, TypeInfo)] pub struct InboundChannelDetails { /// The `ParaId` of the parachain that this channel is connected with. @@ -121,98 +183,135 @@ pub mod v3 { Ok, Suspended, } -} -/// Migrates `QueueConfigData` from v1 (using only reference time weights) to v2 (with -/// 2D weights). -/// -/// NOTE: Only use this function if you know what you're doing. Default to using -/// `migrate_to_latest`. -#[allow(deprecated)] -pub fn migrate_to_v2() -> Weight { - let translate = |pre: v1::QueueConfigData| -> super::QueueConfigData { - super::QueueConfigData { - suspend_threshold: pre.suspend_threshold, - drop_threshold: pre.drop_threshold, - resume_threshold: pre.resume_threshold, - threshold_weight: Weight::from_parts(pre.threshold_weight, 0), - weight_restrict_decay: Weight::from_parts(pre.weight_restrict_decay, 0), - xcmp_max_individual_weight: Weight::from_parts( - pre.xcmp_max_individual_weight, - DEFAULT_POV_SIZE, - ), - } - }; + /// Migrates the pallet storage to v3. + pub struct UncheckedMigrationToV3(PhantomData); - if QueueConfig::::translate(|pre| pre.map(translate)).is_err() { - log::error!( - target: super::LOG_TARGET, - "unexpected error when performing translation of the QueueConfig type during storage upgrade to v2" - ); + impl OnRuntimeUpgrade for UncheckedMigrationToV3 { + fn on_runtime_upgrade() -> Weight { + #[frame_support::storage_alias] + type Overweight = + CountedStorageMap, Twox64Concat, OverweightIndex, ParaId>; + let overweight_messages = Overweight::::initialize_counter() as u64; + + T::DbWeight::get().reads_writes(overweight_messages, 1) + } } - T::DbWeight::get().reads_writes(1, 1) -} + /// [`UncheckedMigrationToV3`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 2. 
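+	///
+	/// The wrapper checks the pallet's on-chain `StorageVersion` before running the inner
+	/// migration and bumps it to 3 afterwards, so it is a no-op on any other version and
+	/// is safe to keep in the runtime's migration list across upgrades.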
+ pub type MigrationToV3 = frame_support::migrations::VersionedMigration< + 2, + 3, + UncheckedMigrationToV3, + Pallet, + ::DbWeight, + >; -pub fn migrate_to_v3() -> Weight { - #[frame_support::storage_alias] - type Overweight = - CountedStorageMap, Twox64Concat, OverweightIndex, ParaId>; - let overweight_messages = Overweight::::initialize_counter() as u64; + pub fn lazy_migrate_inbound_queue() { + let Some(mut states) = v3::InboundXcmpStatus::::get() else { + log::debug!(target: LOG, "Lazy migration finished: item gone"); + return + }; + let Some(ref mut next) = states.first_mut() else { + log::debug!(target: LOG, "Lazy migration finished: item empty"); + v3::InboundXcmpStatus::::kill(); + return + }; + log::debug!( + "Migrating inbound HRMP channel with sibling {:?}, msgs left {}.", + next.sender, + next.message_metadata.len() + ); + // We take the last element since the MQ is a FIFO and we want to keep the order. + let Some((block_number, format)) = next.message_metadata.pop() else { + states.remove(0); + v3::InboundXcmpStatus::::put(states); + return + }; + if format != XcmpMessageFormat::ConcatenatedVersionedXcm { + log::warn!(target: LOG, + "Dropping message with format {:?} (not ConcatenatedVersionedXcm)", + format + ); + v3::InboundXcmpMessages::::remove(&next.sender, &block_number); + v3::InboundXcmpStatus::::put(states); + return + } - T::DbWeight::get().reads_writes(overweight_messages, 1) -} + let Some(msg) = v3::InboundXcmpMessages::::take(&next.sender, &block_number) else { + defensive!("Storage corrupted: HRMP message missing:", (next.sender, block_number)); + v3::InboundXcmpStatus::::put(states); + return + }; -pub fn lazy_migrate_inbound_queue() { - let Some(mut states) = v3::InboundXcmpStatus::::get() else { - log::debug!(target: LOG, "Lazy migration finished: item gone"); - return - }; - let Some(ref mut next) = states.first_mut() else { - log::debug!(target: LOG, "Lazy migration finished: item empty"); - v3::InboundXcmpStatus::::kill(); - return - }; - log::debug!( - "Migrating inbound HRMP channel with sibling {:?}, msgs left {}.", - next.sender, - next.message_metadata.len() - ); - // We take the last element since the MQ is a FIFO and we want to keep the order. - let Some((block_number, format)) = next.message_metadata.pop() else { - states.remove(0); - v3::InboundXcmpStatus::::put(states); - return - }; - if format != XcmpMessageFormat::ConcatenatedVersionedXcm { - log::warn!(target: LOG, - "Dropping message with format {:?} (not ConcatenatedVersionedXcm)", - format - ); - v3::InboundXcmpMessages::::remove(&next.sender, &block_number); + let Ok(msg): Result, _> = msg.try_into() else { + log::error!(target: LOG, "Message dropped: too big"); + v3::InboundXcmpStatus::::put(states); + return + }; + + // Finally! We have a proper message. 
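+		// Note that only this one message is moved per invocation; the remaining metadata
+		// stays in `InboundXcmpStatus`, so the migration advances one message per block
+		// from `on_idle` and keeps the per-block work bounded.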
+ T::XcmpQueue::enqueue_message(msg.as_bounded_slice(), next.sender); + log::debug!(target: LOG, "Migrated HRMP message to MQ: {:?}", (next.sender, block_number)); v3::InboundXcmpStatus::::put(states); - return } +} - let Some(msg) = v3::InboundXcmpMessages::::take(&next.sender, &block_number) else { - defensive!("Storage corrupted: HRMP message missing:", (next.sender, block_number)); - v3::InboundXcmpStatus::::put(states); - return - }; +pub mod v4 { + use super::*; - let Ok(msg): Result, _> = msg.try_into() else { - log::error!(target: LOG, "Message dropped: too big"); - v3::InboundXcmpStatus::::put(states); - return - }; + /// Migrates `QueueConfigData` to v4, removing deprecated fields and bumping page + /// thresholds to at least the default values. + pub struct UncheckedMigrationToV4(PhantomData); + + impl OnRuntimeUpgrade for UncheckedMigrationToV4 { + fn on_runtime_upgrade() -> Weight { + let translate = |pre: v2::QueueConfigData| -> QueueConfigData { + let pre_default = v2::QueueConfigData::default(); + // If the previous values are the default ones, let's replace them with the new + // default. + if pre.suspend_threshold == pre_default.suspend_threshold && + pre.drop_threshold == pre_default.drop_threshold && + pre.resume_threshold == pre_default.resume_threshold + { + return QueueConfigData::default() + } + + // If the previous values are not the default ones, let's leave them as they are. + QueueConfigData { + suspend_threshold: pre.suspend_threshold, + drop_threshold: pre.drop_threshold, + resume_threshold: pre.resume_threshold, + } + }; + + if QueueConfig::::translate(|pre| pre.map(translate)).is_err() { + log::error!( + target: crate::LOG_TARGET, + "unexpected error when performing translation of the QueueConfig type \ + during storage upgrade to v4" + ); + } - // Finally! We have a proper message. - T::XcmpQueue::enqueue_message(msg.as_bounded_slice(), next.sender); - log::debug!(target: LOG, "Migrated HRMP message to MQ: {:?}", (next.sender, block_number)); - v3::InboundXcmpStatus::::put(states); + T::DbWeight::get().reads_writes(1, 1) + } + } + + /// [`UncheckedMigrationToV4`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 3. 
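+	///
+	/// A runtime would typically aggregate this with the pallet's other versioned
+	/// migrations in the tuple handed to `Executive`, e.g.
+	/// `(v2::MigrationToV2<Runtime>, v3::MigrationToV3<Runtime>, v4::MigrationToV4<Runtime>)`.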
+	pub type MigrationToV4<T> = frame_support::migrations::VersionedMigration<
+		3,
+		4,
+		UncheckedMigrationToV4<T>,
+		Pallet<T>,
+		<T as frame_system::Config>::DbWeight,
+	>;
 }
-#[cfg(test)]
+#[cfg(all(feature = "try-runtime", test))]
 mod tests {
 	use super::*;
 	use crate::mock::{new_test_ext, Test};
@@ -230,14 +329,20 @@ mod tests {
 		};
 		new_test_ext().execute_with(|| {
+			let storage_version = StorageVersion::new(1);
+			storage_version.put::<Pallet<Test>>();
+
 			frame_support::storage::unhashed::put_raw(
 				&crate::QueueConfig::<Test>::hashed_key(),
 				&v1.encode(),
 			);
-			migrate_to_v2::<Test>();
+			let bytes = v2::MigrationToV2::<Test>::pre_upgrade();
+			assert!(bytes.is_ok());
+			v2::MigrationToV2::<Test>::on_runtime_upgrade();
+			assert!(v2::MigrationToV2::<Test>::post_upgrade(bytes.unwrap()).is_ok());
-			let v2 = crate::QueueConfig::<Test>::get();
+			let v2 = v2::QueueConfig::<Test>::get();
 			assert_eq!(v1.suspend_threshold, v2.suspend_threshold);
 			assert_eq!(v1.drop_threshold, v2.drop_threshold);
@@ -247,4 +352,70 @@ mod tests {
 			assert_eq!(v1.xcmp_max_individual_weight, v2.xcmp_max_individual_weight.ref_time());
 		});
 	}
+
+	#[test]
+	#[allow(deprecated)]
+	fn test_migration_to_v4() {
+		new_test_ext().execute_with(|| {
+			let storage_version = StorageVersion::new(3);
+			storage_version.put::<Pallet<Test>>();
+
+			let v2 = v2::QueueConfigData {
+				drop_threshold: 5,
+				suspend_threshold: 2,
+				resume_threshold: 1,
+				..Default::default()
+			};
+
+			frame_support::storage::unhashed::put_raw(
+				&crate::QueueConfig::<Test>::hashed_key(),
+				&v2.encode(),
+			);
+
+			let bytes = v4::MigrationToV4::<Test>::pre_upgrade();
+			assert!(bytes.is_ok());
+			v4::MigrationToV4::<Test>::on_runtime_upgrade();
+			assert!(v4::MigrationToV4::<Test>::post_upgrade(bytes.unwrap()).is_ok());
+
+			let v4 = QueueConfig::<Test>::get();
+
+			assert_eq!(
+				v4,
+				QueueConfigData { suspend_threshold: 32, drop_threshold: 48, resume_threshold: 8 }
+			);
+		});
+
+		new_test_ext().execute_with(|| {
+			let storage_version = StorageVersion::new(3);
+			storage_version.put::<Pallet<Test>>();
+
+			let v2 = v2::QueueConfigData {
+				drop_threshold: 100,
+				suspend_threshold: 50,
+				resume_threshold: 40,
+				..Default::default()
+			};
+
+			frame_support::storage::unhashed::put_raw(
+				&crate::QueueConfig::<Test>::hashed_key(),
+				&v2.encode(),
+			);
+
+			let bytes = v4::MigrationToV4::<Test>::pre_upgrade();
+			assert!(bytes.is_ok());
+			v4::MigrationToV4::<Test>::on_runtime_upgrade();
+			assert!(v4::MigrationToV4::<Test>::post_upgrade(bytes.unwrap()).is_ok());
+
+			let v4 = QueueConfig::<Test>::get();
+
+			assert_eq!(
+				v4,
+				QueueConfigData {
+					suspend_threshold: 50,
+					drop_threshold: 100,
+					resume_threshold: 40
+				}
+			);
+		});
+	}
 }
diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs
index 7c3a3bd1bd02c7b897c2cafbb28ba93279fb69bd..a41be6fa9ca3098154504b30b1672d1ee2a8609d 100644
--- a/cumulus/pallets/xcmp-queue/src/mock.rs
+++ b/cumulus/pallets/xcmp-queue/src/mock.rs
@@ -19,7 +19,7 @@ use core::marker::PhantomData;
 use cumulus_pallet_parachain_system::AnyRelayNumber;
 use cumulus_primitives_core::{ChannelInfo, IsSystem, ParaId};
 use frame_support::{
-	parameter_types,
+	derive_impl, parameter_types,
 	traits::{ConstU32, Everything, Nothing, OriginTrait},
 	BoundedSlice,
 };
@@ -30,7 +30,9 @@ use sp_runtime::{
 	BuildStorage,
 };
 use xcm::prelude::*;
-use xcm_builder::{CurrencyAdapter, FixedWeightBounds, IsConcrete, NativeAsset, ParentIsPreset};
+#[allow(deprecated)]
+use xcm_builder::CurrencyAdapter;
+use xcm_builder::{FixedWeightBounds, IsConcrete, NativeAsset, ParentIsPreset};
 use xcm_executor::traits::ConvertOrigin;

 type Block = frame_system::mocking::MockBlock<Test>;
@@ -55,6 +57,7 @@ parameter_types!
{ type AccountId = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); @@ -129,6 +132,7 @@ parameter_types! { } /// Means for transacting assets on this chain. +#[allow(deprecated)] pub type LocalAssetTransactor = CurrencyAdapter< // Use this currency: Balances, diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index 30dba6ead3407cc0064d4eb61cd64003cb60bf1b..8e8f6e852e1e0ada76b58d33712fdd1324e37d93 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -22,8 +22,7 @@ use XcmpMessageFormat::*; use codec::Input; use cumulus_primitives_core::{ParaId, XcmpMessageHandler}; use frame_support::{ - assert_err, assert_noop, assert_ok, assert_storage_noop, hypothetically, - traits::{Footprint, Hooks}, + assert_err, assert_noop, assert_ok, assert_storage_noop, hypothetically, traits::Hooks, StorageNoopGuard, }; use mock::{new_test_ext, ParachainSystem, RuntimeOrigin as Origin, Test, XcmpQueue}; @@ -100,7 +99,7 @@ fn xcm_enqueueing_multiple_times_works() { } #[test] -#[cfg_attr(debug_assertions, should_panic = "Defensive failure")] +#[cfg_attr(debug_assertions, should_panic = "Could not enqueue XCMP messages.")] fn xcm_enqueueing_starts_dropping_on_overflow() { new_test_ext().execute_with(|| { let xcm = VersionedXcm::::from(Xcm::(vec![ClearOrigin])); @@ -112,12 +111,6 @@ fn xcm_enqueueing_starts_dropping_on_overflow() { repeat((1000.into(), 1, data.as_slice())).take(limit * 2), Weight::MAX, ); - assert_eq!(EnqueuedMessages::get().len(), limit); - // The drop threshold for pages is 48, the others numbers dont really matter: - assert_eq!( - ::XcmpQueue::footprint(1000.into()), - QueueFootprint { storage: Footprint { count: 256, size: 768 }, pages: 48 } - ); }) } @@ -155,7 +148,7 @@ fn xcm_enqueueing_broken_xcm_works() { .take(20) .collect::>(), ); - EnqueuedMessages::set(&vec![]); + EnqueuedMessages::take(); // But if we do it all in one page, then it only uses the first 10: XcmpQueue::handle_xcmp_messages( @@ -731,6 +724,50 @@ fn xcmp_queue_send_xcm_works() { }) } +#[test] +fn xcmp_queue_send_too_big_xcm_fails() { + new_test_ext().execute_with(|| { + let sibling_para_id = ParaId::from(12345); + let dest = (Parent, X1(Parachain(sibling_para_id.into()))).into(); + + let max_message_size = 100_u32; + + // open HRMP channel to the sibling_para_id with a set `max_message_size` + ParachainSystem::open_custom_outbound_hrmp_channel_for_benchmarks_or_tests( + sibling_para_id, + cumulus_primitives_core::AbridgedHrmpChannel { + max_message_size, + max_capacity: 10, + max_total_size: 10_000_000_u32, + msg_count: 0, + total_size: 0, + mqc_head: None, + }, + ); + + // Message is crafted to exceed `max_message_size` + let mut message = Xcm::builder_unsafe(); + for _ in 0..97 { + message = message.clear_origin(); + } + let message = message.build(); + let encoded_message_size = message.encode().len(); + let versioned_size = 1; // VersionedXcm enum is added by `send_xcm` and it add one additional byte + assert_eq!(encoded_message_size, max_message_size as usize - versioned_size); + + // check empty outbound queue + assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + + // Message is too big because after adding the VersionedXcm enum, it would reach + // `max_message_size` Then, adding the format, which is the worst case scenario in which a + // new page is needed, would 
get it over the limit + assert_eq!(send_xcm::(dest, message), Err(SendError::Transport("TooBig")),); + + // outbound queue is still empty + assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + }); +} + #[test] fn verify_fee_factor_increase_and_decrease() { use cumulus_primitives_core::AbridgedHrmpChannel; diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 73bbbb6d77146aee2f0d64dcc8d5963af5d4776c..4be848f4d2d78e19d011583e80bef04b4a2e234f 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -10,11 +10,14 @@ edition.workspace = true build = "build.rs" publish = false +[lints] +workspace = true + [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } log = "0.4.20" codec = { package = "parity-scale-codec", version = "3.0.0" } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } jsonrpsee = { version = "0.16.2", features = ["server"] } futures = "0.3.28" serde_json = "1.0.108" @@ -57,7 +60,7 @@ substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } # Polkadot polkadot-cli = { path = "../../../polkadot/cli", features = ["rococo-native"] } polkadot-primitives = { path = "../../../polkadot/primitives" } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus cumulus-client-cli = { path = "../../client/cli" } @@ -91,4 +94,3 @@ try-runtime = [ "polkadot-cli/try-runtime", "sp-runtime/try-runtime", ] - diff --git a/cumulus/parachain-template/node/src/cli.rs b/cumulus/parachain-template/node/src/cli.rs index 098f59b0f373669e6679d1255f2e763506c49991..73ef996b7504114b3578604a8d2c37661c9261fc 100644 --- a/cumulus/parachain-template/node/src/cli.rs +++ b/cumulus/parachain-template/node/src/cli.rs @@ -24,8 +24,11 @@ pub enum Subcommand { /// Remove the whole chain. PurgeChain(cumulus_client_cli::PurgeChainCmd), - /// Export the genesis state of the parachain. - ExportGenesisState(cumulus_client_cli::ExportGenesisStateCommand), + /// Export the genesis head data of the parachain. + /// + /// Head data is the encoded block header. + #[command(alias = "export-genesis-state")] + ExportGenesisHead(cumulus_client_cli::ExportGenesisHeadCommand), /// Export the genesis wasm of the parachain. 
ExportGenesisWasm(cumulus_client_cli::ExportGenesisWasmCommand), diff --git a/cumulus/parachain-template/node/src/command.rs b/cumulus/parachain-template/node/src/command.rs index 4dd8463f6be67cb4d8012b8dba1f32be341dfe24..6ddb68a359a786be617e384b16d7292c3db45a88 100644 --- a/cumulus/parachain-template/node/src/command.rs +++ b/cumulus/parachain-template/node/src/command.rs @@ -162,12 +162,12 @@ pub fn run() -> Result<()> { cmd.run(config, polkadot_config) }) }, - Some(Subcommand::ExportGenesisState(cmd)) => { + Some(Subcommand::ExportGenesisHead(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| { let partials = new_partial(&config)?; - cmd.run(&*config.chain_spec, &*partials.client) + cmd.run(partials.client) }) }, Some(Subcommand::ExportGenesisWasm(cmd)) => { diff --git a/cumulus/parachain-template/node/src/main.rs b/cumulus/parachain-template/node/src/main.rs index ba9f28b354f13a17b7e2b57eb85ad1f027a2ff43..12738a6793c039dc20cc4f8721ebd2ea0a7e69e9 100644 --- a/cumulus/parachain-template/node/src/main.rs +++ b/cumulus/parachain-template/node/src/main.rs @@ -3,11 +3,10 @@ #![warn(missing_docs)] mod chain_spec; -#[macro_use] -mod service; mod cli; mod command; mod rpc; +mod service; fn main() -> sc_cli::Result<()> { command::run() diff --git a/cumulus/parachain-template/node/src/rpc.rs b/cumulus/parachain-template/node/src/rpc.rs index b5ca484fa48471dd30414e8fb1d15fd4db1e2cd1..bb52b974f0ce61713904aec3783770dbc8f95aad 100644 --- a/cumulus/parachain-template/node/src/rpc.rs +++ b/cumulus/parachain-template/node/src/rpc.rs @@ -9,8 +9,7 @@ use std::sync::Arc; use parachain_template_runtime::{opaque::Block, AccountId, Balance, Nonce}; -use sc_client_api::AuxStore; -pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; +pub use sc_rpc::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; @@ -36,7 +35,6 @@ pub fn create_full( where C: ProvideRuntimeApi + HeaderBackend - + AuxStore + HeaderMetadata + Send + Sync diff --git a/cumulus/parachain-template/pallets/template/Cargo.toml b/cumulus/parachain-template/pallets/template/Cargo.toml index 925457839348217cd52ea0691c87161baeae0206..bd7f926d039385c16411a094c3fb2efbd0a0137e 100644 --- a/cumulus/parachain-template/pallets/template/Cargo.toml +++ b/cumulus/parachain-template/pallets/template/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true edition.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,20 +19,20 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-support = { path = "../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../substrate/frame/system", default-features = false} +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } [dev-dependencies] -serde = { version = "1.0.188" } +serde = { version = "1.0.193" } # Substrate -sp-core = { path = "../../../../substrate/primitives/core", default-features = 
false} -sp-io = { path = "../../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} +sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/cumulus/parachain-template/pallets/template/src/mock.rs b/cumulus/parachain-template/pallets/template/src/mock.rs index 8fae1019f42dbfaf5ae0a3a2aaac1cf8b048523a..411a16b116c8f94757c29a686022842e159e6924 100644 --- a/cumulus/parachain-template/pallets/template/src/mock.rs +++ b/cumulus/parachain-template/pallets/template/src/mock.rs @@ -1,4 +1,4 @@ -use frame_support::{parameter_types, traits::Everything}; +use frame_support::{derive_impl, parameter_types, traits::Everything}; use frame_system as system; use sp_core::H256; use sp_runtime::{ @@ -22,6 +22,7 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/cumulus/parachain-template/runtime/Cargo.toml index 01e250078737f9f47f12a822810b7690d0b1834d..3944ff4ca08e0b0f2f6185d2e0037def823ceb63 100644 --- a/cumulus/parachain-template/runtime/Cargo.toml +++ b/cumulus/parachain-template/runtime/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true edition.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -25,48 +28,48 @@ smallvec = "1.11.0" pallet-parachain-template = { path = "../pallets/template", default-features = false } # Substrate -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", 
default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false} -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -sp-api = { path = "../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../substrate/primitives/core", default-features = false} +pallet-session = { path = "../../../substrate/frame/session", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../substrate/primitives/session", 
default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } cumulus-pallet-dmp-queue = { path = "../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } @@ -76,7 +79,7 @@ parachains-common = { path = "../../parachains/common", default-features = false parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -183,4 +186,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index 7a064e227d4cf1e0471758cdac5303db6d716a73..1ef018a8ca34486c4b2861f834c420446d2ce614 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -16,7 +16,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, 
IdentifyAccount, Verify}, + traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; @@ -28,13 +28,11 @@ use sp_version::RuntimeVersion; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, - }, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, weights::{ constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, @@ -275,45 +273,27 @@ parameter_types! { pub const SS58Prefix: u16 = 42; } -// Configure FRAME pallets to include in runtime. - +/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from +/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`), +/// but overridden as needed. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -513,8 +493,7 @@ impl pallet_parachain_template::Config for Runtime { // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( - pub enum Runtime - { + pub struct Runtime { // System support stuff. 
System: frame_system = 0, ParachainSystem: cumulus_pallet_parachain_system = 1, diff --git a/cumulus/parachain-template/runtime/src/weights/mod.rs b/cumulus/parachain-template/runtime/src/weights/mod.rs index 30fa2c4060689ff98cc427c84f81866172845e52..b473d49e20e67329d893e1e565330cbe9290c64f 100644 --- a/cumulus/parachain-template/runtime/src/weights/mod.rs +++ b/cumulus/parachain-template/runtime/src/weights/mod.rs @@ -24,5 +24,4 @@ pub mod rocksdb_weights; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/cumulus/parachain-template/runtime/src/xcm_config.rs index 353f68d22e35085a4b6fb803171a7b5d3c680fc6..7d1a748819cebcaa738f2c2ec2a17d0fb3bb0e2f 100644 --- a/cumulus/parachain-template/runtime/src/xcm_config.rs +++ b/cumulus/parachain-template/runtime/src/xcm_config.rs @@ -12,13 +12,15 @@ use pallet_xcm::XcmPassthrough; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::impls::ToAuthor; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, - CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, - FixedWeightBounds, IsConcrete, NativeAsset, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WithComputedOrigin, WithUniqueTopic, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, + NativeAsset, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -42,6 +44,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting assets on this chain. +#[allow(deprecated)] pub type LocalAssetTransactor = CurrencyAdapter< // Use this currency: Balances, @@ -150,11 +153,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -180,8 +178,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 946bfc5983df9bced1750c827370e48fb8b25d12..fba74b17f9607f58bdb17d7a0b05c8b764a9c4e5 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -22,7 +22,10 @@ "/dns/statemine-boot-ng.dwellir.com/tcp/30343/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", "/dns/statemine-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", "/dns/statemine-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", - "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL" + "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", + "/dns/mine14.rotko.net/tcp/33524/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", + "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", + "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index c26506eb995aaa3c1fc4e60aa9228e6882755d96..685a00ddc7145ed650f7cb5496fe81cfb23f0ffb 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -22,7 +22,10 @@ "/dns/statemint-boot-ng.dwellir.com/tcp/30344/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", "/dns/statemint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", "/dns/statemint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", - "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx" + "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", + "/dns/mint14.rotko.net/tcp/33514/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", + "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", + "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index f0e71981e7a1cbeae8bcdd4511f54036544ac0f7..6f42b5f7d8bb4a76c7390b538a68b1bb0acc1613 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -20,7 +20,10 @@ "/dns/westmint-boot-ng.dwellir.com/tcp/30345/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", 
"/dns/westmint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", "/dns/westmint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", - "/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk" + "/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", + "/dns/wmint14.rotko.net/tcp/33534/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", + "/dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", + "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 9daa60fa2635950c5e0dafd40e398c3bf4edffcd..0ef81806cc5c08621f3c0a308425c72f836c3ef4 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -22,7 +22,10 @@ "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/30337/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", - "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW" + "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", + "/dns/kbr13.rotko.net/tcp/33553/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", + "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", + "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index d3e884284b4a9351f5ce7df5ba8988935cc885ce..130bdf31ef211ba6d353fa97944b2bf80320997d 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -18,7 +18,10 @@ "/dns/boot-node.helikon.io/tcp/8220/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp", "/dns/boot-node.helikon.io/tcp/8222/wss/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp", "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", - "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi" + "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", + "/dns/pbr13.rotko.net/tcp/33543/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", + "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", + "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index dde3d437f416fadac1b2769bddb8385e1759443e..018ab0ee6fd9810595c841237dd253b9463aed79 100644 --- 
a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -16,7 +16,10 @@ "/dns/boot-node.helikon.io/tcp/9220/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp", "/dns/boot-node.helikon.io/tcp/9222/wss/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp", "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", - "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke" + "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", + "/dns/wbr13.rotko.net/tcp/33563/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", + "/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", + "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index 003c6373429036eac3642d7eaeacce40d1af0f84..e9f690234e4381f54377c7fe7174f25834a973a5 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -22,7 +22,10 @@ "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/30341/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", - "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c" + "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", + "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index e06671faa04e2b80a9c8e49badffc7d38c670a6d..7385889f0ec7cf82b702a2607e1165f47564d0e8 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -20,7 +20,10 @@ "/dns/collectives-westend.bootnode.amforc.com/tcp/30340/p2p/12D3KooWERPzUhHau6o2XZRUi3tn7544rYiaHL418Nw5t8fYWP1F", "/dns/collectives-westend.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWERPzUhHau6o2XZRUi3tn7544rYiaHL418Nw5t8fYWP1F", "/dns/westend-collectives-boot-ng.dwellir.com/tcp/30340/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", - "/dns/westend-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m" + "/dns/westend-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", + "/dns/wch13.rotko.net/tcp/33593/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", + "/dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", + 
"/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-rococo.json b/cumulus/parachains/chain-specs/coretime-rococo.json new file mode 100644 index 0000000000000000000000000000000000000000..39506095bfe0983c182850084f2602a882ea0caa --- /dev/null +++ b/cumulus/parachains/chain-specs/coretime-rococo.json @@ -0,0 +1,70 @@ +{ + "name": "Rococo Coretime", + "id": "coretime-rococo", + "chainType": "Live", + "bootNodes": [ + "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30333/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy", + "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX" + ], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { + "ss58Format": 42, + "tokenDecimals": 12, + "tokenSymbol": "ROC" + }, + "relay_chain": "rococo", + "para_id": 1005, + "codeSubstitutes": {}, + "genesis": { + "raw": { + "top": { + "0x0d715f2646c8f85767b5d2764bb2782604a74d81251e398fd8a0a4d55023bb3f": "0xed030000", + "0x0d715f2646c8f85767b5d2764bb278264e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x15464cac3378d46f113cd5b7a4d71c84476f594316a7dfe49c1f352d95abdaf1": "0x00000000", + "0x15464cac3378d46f113cd5b7a4d71c844e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0x15464cac3378d46f113cd5b7a4d71c845579297f4dfb9609e7e4c2ebab9ce40a": "0x0802f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38b25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b", + "0x15464cac3378d46f113cd5b7a4d71c84579f5a43435b04a98d64da0cefe18505": "0x50cd2d03000000000000000000000000", + "0x26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96": "0x00000000829e74677a0a0600", + "0x26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746b4def25cfda6ef3a00000000": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9bb2c1ae8590211c475b041d595d99230b25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b": "0x0000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9d440d8395438d7269bad990f83715c2002f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38": "0x0000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8": "0x59933c636f726574696d652d726f636f636f", + "0x3a63": "0x", + "0x3a636f6465": 
"0x52bc537646db8e0528b52ffd0058f41c04ee820524125110306b291df7ec1bbc35021becd0370bad392219fb2238ceceb48c316bc12c7a11911586bf0e3140e7c0b16cc35ff0661d0ab1738ce914ce06c7dc9a10bd34368cb56fffdbfe2664efbdf7de524a29650a291376109b10d43fa5f9d446f3a95d693ec9a7de349fa457dca4c63399fdccaf20d3ab4faf994abecc75c2da671ea74b9f3365cd8fdcb79434dfcecda7ae4d49f3ede07c921d1b8dbebd6f749672a84fba7ce293a0549a30e9f3bb66c39e379aaadfd43a64ee09a30e353b94a0c41af44fc9ac6fef306c1dd331d83aa6532ce6c7faf3cb4b2b62d7a450f976fb5d9322e547eea773f389faf4703a951e7ed7ac08bdd18449afed530efa1a87d82e9f44e9eddd7dfd73ba9c33f74c4a205f7be328631e14777cfd7de3e3e6d3c864430851fa08fe2c80285d8874299aabc4b3164e909b4fa37ccebb7b481fa97f141b40a4ae01cddc374189eb83dd63891cf453db3721727d1494987ed8e4827dbe09d8ea1e4b24eda714f0d2a77728a4d2640844a4bd0422692f5d9c724af1750f35e0843e13d61ffaa813d60e35bbe9eb1ba33ff5848ddc8fdcb7f7e4014ea7e9ed5f63205297ff7506e2d71206b16b51ba7cd7a07cf9b1c64f1fc077ed49a09fa18f21a2f4ae3db9f3a3d283e07cda9c7afff46f863eb6dc72a80127049ab07eea4c0da012a875f4cb9083d953ff244871ff86e57f32a4fe514c25a77cd409ab0e447cea32dca09f7a873e7ac246f0a5cde3ec2d47373cd61fab53d1bfdea18faf430d6e1eeb5fe77cc3a3ffe6148fe3385210421847f8cd7bc23687a21c9e923df50d8f4d5ffd93e0c5fd14f77728a43e75a809352d9ea24fbde29e30e91dfaa0b94e9874a8e9df36669ba42f7c2da7d4240962d79ea0f951fce9b0f934824af36984fdf4e939e693fce9e27cea1a2dcd4f379a4ff3a7f7e4216363d34fefb00154aa681dfd50737adf98596ac7b78d999472ccd7f3ebf955ef1ba3f8d4bfda53bcd18475ae1346bdc32999f5d6a1e66731d73aa87f1dca3cce5e7ae3517ebbc4228ee0531feb5307e7533b7528daa10fe950b3cea7e9d4b9ee619d7ad73dfaa95bdc74089cb09eb051e9a7cbef9a0d80de68c2a6572cbf6fd02c257edb98512971685946ec5a163c2f694ffd73b5f78d51d27efac7e377fcd8593fb64f87a28dfb29eed6d1fe49b0711151aabd7f94b41fdba7cfef5a963f2f5ffa0c8348dad33c4adab77f1d0a11bf3ac54dead73c25b39e7ae371f6eddd3dc6ea2f20ded73ce57aea9de777fcd8593fb64f87a28dfb29eed6d1fe4db0b1c412a5cbefbea05d8653fed43b9c129fe6a609930e35a126d4849a50b3714f587bdf98594ad2be6dcca89c4a7c92264aef096b6ff06b1da2f4f6ee6ca89170e290f16cf460238d0d3436acd8b862438b0d2836a2d838c146161b586c3cc92dd89062a38a0d2b1bb40c45c622f371381e032e23abc960b2991c26c78091c82f6418b2982c439e01dfc958f90a2791a3e429380b5b81ed602cf017d92a43c956301adc4326010bc27e300fb9842c0543c159e041980bec02f6026b81bbc060f016784be6015781e5e038f90b2e02d3c1476037f80a6c04c621af8073c857b055f682a5649a57e159f8152e283f712c9c8ecff13ad88bc7713e6ec7e57820dcc5ef602edec75b70187c8d63e1295c0c5ec15ff0205c06fcc75dc08170141810a6024be133602dfe03d68387c06c1c0817023f81b3e035d8074c048602c3c17dfc8f43e17bdc07dc8403ca3a6035d889bc037e93abe42eee26a7c14ce41bf2991a6f6ac0a9b1a6469b1ab51a6c6afc50234d8d1e6aa8a9c1438d196a8ca941438d2e35acd4b0aa11a506141a65d080a2068d46568d2734c8a0d187461e1a56d0f88246201a55d0d843030b1a52d0b88286201a4bd0e043230a1a7f688841a38b170f2f2568107a2d4143091a4fbcf4d01844438b26169abc34add0d4a5c98526304d2d347d6982a12986a6ab26129aa6344969b26a22d36443530d4d34348d69c26a9aa1498626314d4f9a6e6842d374a6c94c539926159a526842a1894bd396262d4d599ab0345d69b2d254a589ca2b8c17a1d715af412f305e5abcbe7865f1c2e225e865c50bcf0bd0cb881713af3baf235e48bce4bc8a78e579a579e9f092e125e635c32b86570a2f185e2bbcbcbc547861bdbabcbebcb2bcb4bcb08840d620f5baf2aaf22ae16585890f13144c7a98986082c39486c90526304c6598b62c7db1548217094b602c65b184c592a0a52b96a05872624989a524969658c2b314c5929b252296de2cc95982b314674908a52e94c05022a4f4855209940a298da1248692174a8194b25012a48485d2154c4328d1512a42698e521da59ad20e4c4524b9412a23e987241e92cc20fd49ea21c94a920f496592aa245149ba923444121ba42e90b8400a03c90ba42f92d62001429a02290aa42c928840e283a403d20e486a90
7840c2018906242ca41a90d2208d41b2016906a41c904a40ba729d1066f1b042128e78100175551061e16a473b147971a4c4119ea324709ead96d4a6cb02a90c121924335d954e0e51a1253e358458f2b3a4a748cb05e2fa916390a4181db105d15de1a27556885bb8a288828e8ae8b428c2234231f2a1a5e7480dd1174460745a5a4c101152e2417cd2e9e9ac2c8d41c487480e911b222288de101541a404511c223c447088e6dc2a88aa20a262488f92159c105c10db9b8dce501e1a630c2df1f2e2d5c5c6a6c84a0d1fc2329b122f3b442c1079211a43b48258c5d11177869be576b9295c19ee975be692b9365cac4bc3ade18eb95e2e98cbe5be7055b82e5c2d37cc157359b830dc15ee139b7569b68c8bc26de16eb9666e0c368ab5b250885a2002933426092b6986241992c424c5900443d20b4961925c480293d442d297241692bc24ad90d4254985a414925048e292b425490b9396a2276219ac03d11434b2706d3942624b33c4a668900d12bcc661915110b3289a5364470c246a215a2162512447bca2a888223a3c442e38268ee474588457e21762174c3f0ce1e9ec74578ea6781b1b26d8b8723d21ca626990d19aa52dc418c41744179686b051829826ab40140327088fe1b2429455b4c595820b850d8990875087a13de10e2e2e442a1cd929ba53e4a6284e2b8b83a255e8684e1111ad12707c88521007717b5a651cd52932a2a84e119cd6182e2b57094c6d5a54145d1565a1e18591106109e1094c4e7446746e2e114b59d74d07a723e2beb9703a2c9707a62744508a4aa8914351151a5414d1ba37dd10446474755e3b7c3c7c52be34df942f878f04223444377c553c42de171e185e18df095b998dcc77e5d3f26dd9d61471d16dd18141c4860885a337446d383d476e8e8688bdb8745d1cc53922e205a6fbe2084ee7c58b8a170aaf2d4443d41842a9874d88d7109b115d1c233545515e718a4c10cd10897194a6688a7803d118e299221298a01449590aa3e8caab8557151c15dc141b139b1ed79416192e29dc1347743827926a99050e502788fbf3e282f363e38c38c5ab07af0d931c242db62236372d2d8e7a58aa6d70b63a4b768c7ab87198681d1b1a5bbcea74452c15518465288bd08c521613111b10578a1b6833b3d9e079e17571a7f0acf0aef0aaf0047981bc2dbc41db0d1b0e1b9a2d87edccb6c3a6c3a542dab0cd79b9d98e582a83a88ca53c448144323550a891420d158ab4b8578abab85490065d2cb28b5ce156617a6374c3518d0bc4e4a6e80dd74416e48252834bd116477798d6e0399e093588e07e30da82c88a1113a01dd008a617b83a1b1145543a402f2896da1c9539a2a10b84db80808e745862738b60a243830bf0cf521d6e0d0ee275e559dd1da23b4945e42d4683381f5e7c72961612a09fa138b90c77c2911547a7c86a1be2280a47e5a806a5326814c24b0cc931faa1884bd109344a70cf30a160e44497658bd3a530e444a7c21109e1195c73295a49f89b21385c95ad0d7ec2b4a786942333dc1c3108a2308aa0183521d2960a3151e9bab89fee8f53d171c1859c8d77b1dd212a01d39425325e518ea00c2931c44488a66bf3b27a3d51aac208881b44e682afb8b2363b3905a6393907e762a8091f62688a1a6ef00e1d0add9617142ecb26e715e82565e889d7141048291205c9456e0941469129482b0e54cda6462073c26489d422b3c82a1a903724c980071288800f087420e4011c3400c8061a2c00a6000b24940b7628c931000110c0a9024903f7e493dfd2b5a79b348a49910e0c09a148140843867c927e90246ac8104f878e2130ada1163e407244840ebc86bef4078e7820899228aa22514b8c3039c2242a49940637865830324489079200418244c902a88c442141a26401150c79d931b4021224458c20415264015d40a0fa409228242a745494078628a11a4205c2078e4485c06928856682e44811aaa80f8644457900c7100aadc403498e0c11c1922811a6861c09e1a99244814004453af0212e1d6524899224438a0c3912c2471949a2e4015449445062c4c8cf18daf2811223511fd418d2d24c4410253584091311440549a2e40813265112c043593a044742f82551524b945440081ec2d25154453e3882a44893215122448900010f3e88fac0078a7850c1d095662265240a0423569a095592a82551202439e24355921cf1c06b884a1b39c224c991255423440d110192104228326449d407488e88109332a20449ac69e88466c23464421719d234544283604811aa221f14a18a72c0d0d0d5088e20297284ea489223474324b4941125483a30248a4a0423512344110d4de9254a16000e4969265147a88e2c5102029508458a86ac3a89922354459688f0c1102a2548980c3912c2175922c21fb58c42d28122544944f0c0102a23514b8e7c90e4c890214bce99d9be2ea20aad082a5e7ccfbeaed1682f6774e9725038283204b9d5a22
2b0a828491487869244d148a441831a19514aa9d1d2d2921135a246467469b6b5b5dbded9d79b76de7bfbde9be7b4f75a5b6d9db5daa87d5bf6edd9ddb34e7b6f774b79af9c3ded6c79efb56de595f4ce3be76dd9b7db5a3ba7adb6da9ead69bb72ddf3d27be7b51f57a7e5ba4ac4715eabf62df2acf5b63ab7cedabb71d3ce9eb2ed9c745a3b6b5f6bafedee591dd077cec941dfbe72de69695f2ae5bdf7a3b7d533e4e6b5b75a6b996edfee1bd3ce7aafa5d3da7a27ecde396d9db3b6b5b576d7f376ad2d5bde9eb36fdf59edec0e626dd86ae5ecbe5576f79c7576b593ab2d8fab1d11b76d9bb5dc9cd7d6aecebeb4dada725acb7513b7ad1cd7d6f6bdb7d25ae5bdd5ca6aada47676dcedbeb7efedd9755b7bb97b27707174ce06f4e492bae7f48078b46ff716866dbbabab72f24a8febda498ece6e19864ca1b4ddb2a81ba94658370893e69cd376f77667f766bbafc7d9696fdb683b39ced2297bdadbd3da69e7bcf7369d52ce3befbcd47295ab5c37314774efec6e0dec90ccd576276d755d39e56dcde9b3657f586fc6f551279d734ed05edb5cf7e4babbbbc339edacddcd4ddbdd7baded9a769daeeeb6edc9596b611369360316d0dd7d67d26dd9ee39aded5cb1eece356dcf9eb42ba5ddd49b96d27bc339e5ac6f6b77cfee397bbbddd31e7d50ddb3a7b573f6b4f3a86f2b60da696d51d19cb6d6aeebe6388ee32c57eb6cb56aab55ebecba39398eab5cb5d6daca59db6dafbdb7ded9ddb55addb4b3b939bd4b2b476bdbda969b1cc7d96ab9aeb676adad04c8eb72755dd74d39a79db39bf35ee99a7376b39b977a575e6bdbde2eec393b4f4b6dd7d9b6daeea694f2524e4a04d8d7ebf5a2b356cb5dcbdd7b396b6fcf39c3eedb073080adb6de3abb47774bcf7a73d6b0762ecd696b9d349c35b4b6bb6ff76c2befa5d5729552aca4a4a4444464adadf7ce396d9d76ce29e7ec2965cf9e5dbbab94cdcde638afb6bc72b35a3aede46c259addddbd595b2b676b3029d57aafadb35a3be5acd6b69dd6da39674fa630bcf75e39e7b4d6d66927c7751cc77195e32c77adb556de7b6f01b87a398ede2b01800347ebde3befa5f75aeaa22d8eb376cedb7d6ddf0c783c71f5d28b010106000001d04b5d13547a6ddf0b76d4ba5d5b6bedaaadd536776f17de69e7ac76bb73d27bbb7bce6e7aefb4a1c6860fb4a8a708962801411408eed717443589423264842220a891d4177c400992211f4872e4031f50826409c98f4688fa20c99121498e1401c188073ca0245bf041d492a80f962819f2848100a28e2ca1a2600048901c91da11001c34603a60620e11470e0a2ad881f401922320181942854484221e88fac089004f33465012022a1a4f1808c0870470c1100144491541d281211f487204042645407001d8a080aac892a824511f583201d8028862c2246a490820506264c892a8264a8c442d195284c90746a2865c20f5848100423012c584c70c0f1c29c005186020800fa28628a10a41044b8cd85852848a090e24264b375a004a942c61923252a4034890285942815391a8254c2288921a42354254121a4d54511f581a6242c5e4c808511b9591224c921459423584ea080840a00426b6008e78206a4908f96849912824465e481f24890a21842243a23e506489122a26b22f00018d8e8a5ae281214b94501531a2040992224d7d4114088e5025116149cec0035422cc6892248a1ef50543922839c2644954932123448130842ac99125210c31c2e4080886508d10d5d42449148581806657556d557636e388f4ac6a56359b715c5555956c223dab9ab33aabaaaa9a49d98cca66b50b89cce4d7b9aaaa2a9db4aa1299c9aaaa594589cc64d56c5651225d359b51d94c56c999ac9a7d1d91b644aae4ac6a468954c92a49a46755b35955d56c7689f4acaa6a563d4aa4abaa2691ae9244baaaaaaa6a668974d56cce38223359258954c9aa2a3bdb887455d56c568954c92632933dbb334ba47b12e9d9ac1299c9d96c4689f46c3689cce44c12e99924f2004b4576b7040193221d78f4f00b48a5d13ef4faf43bf489e7870e3527f875eb70b9f5895beec2e3ace18fe9ad4c7d863fa68b0e4545fc0a32bde5d35b19bf82783fddfb99995a07dd44caa363d427369a30ea2e6fe1fed63182be8542bc079da97b50b7a19057eba00e66a6560b4bda77a881a4610d39d414298fa14cbd5d79e4febad474cfc7e9cdc14c08cff3bce932fc217d4e20d33da956eba0ee6a1d94f6b575d00d8895f6e4c3ee0bb3cbbb7574a1cb5be048fff3d6e754f75b215ed272810e355da0b7be10f42ec7973b879a50b37d8ed27b4a0ffb0e6d599cb09bf3db6c542567adb97df98c88f5bb8602114644fb5d43a1d6adc346188621e89f87611886de37c269f57ddff77ddff779dff8c8e8baaee37cf3aeebbacefb4607c6e65ffb79a3508ac6f267d687ad639b989bb0cde5e68de5157945acbe6d1e760f1fd3b7cdc1ee
ae1869da224545891396b6f4804415148c5445d991e47ad2716126bb902065958493952c2560588218e198f56043ca0c52dc0c565464eb011a96709882dd4c4d5dcdf015ba5e445a951ba8abbbe586a67e020585e886841b143d68f08186282eb10e440da04a15a3205bac24f1b485a888e431a12aeacc0a644aa082c0142c3123a278f8414915aca95ea1a905a5182d0448d97e8e6f2fb190fc9ccdf27c8dd461b713d85a6f7b1ebc8375b0e7c927b4e6504a29a50ca584e5962f11fc166bf7393912fb0ca7fb564beab8577efd641886fd0d798847b7923a9a456bc85b305ab43a9faff8ec07c33eb5b64a1d52870e241d9a897e2921d33dc980f349cb72bdf19ae6e79c13dbfb17dbc6f3fb8272fefef417366a3ef13e19f68af393d158ebfc79449a1773eef3e8ec9cb08fe2fd530088c7b0f1f89a78fc6cf11841ae134b1a1a80e04fd97c508a42dd6f90a7aba77df9536641d75b13734e5727c7f9245b14f8041e3f891d3cbecae8e031cc638bc983e79cd83e288e2d7ffe38ed9cf2bb9f3f5a8bc33c9d860676ba9de39ccaa3ff1585727e836ce74fd0ffcebf580a7407e77c92fd578826685ae4feee9fdf3b3afdda96f6602a09a9323fe49e20d71f35302d644ffd51f3c2f3a3d6a573dee6e8ec34a06b3ccab778c7f5f7fd1b76e2b730c57f2df15f15ccb76fff8229eb1f0b82f3c9389fe4fb395dbfc542f673e6bcaf17f11841eb1f8f9f028faff051e013d8be8f276effe83578f4f91f88ef7b1d75bdd7fcd6fc6f7e38ed9ff0df3c6f92912e97a86569adfd99a568ad2bc7df4e59b66fbfa09cefdec5f930248f48f33ba2cdeb88356f42747d4eab61a19833f105bea245fdf9e31d5ff2fdc708f2d8a22d8e156fbebf20497313c14b3cfe6b7c551c51eb5274d30d7654ffd82ed72ac47a91c9fd79fea821917b6c0f7263eb710f7e3f28ef37085e1c501c87e48ddcd9def5f63190358208fe5faff0be2540067f9c41801c7eeb47ad4b76c90cfe3824736823d688f3be7d1a51d30ad0e00b8517fc86499fd67afaeb6bda0b4d5b31ae58223e9968a2c9fd632fe5ba1120df6fe151eb92ed8f1dcbf56754ff80f7bff7fe821955e7139a3751ab79bb79bb7846350b5bb150cb783a85931c078a02a47433cbf43b9f01c5fa67fbee5baf97fbc5637fdf02b82fa8e6b79ffea236100f20257ff1c9a8dc27a3dd25b6ac58f336e2f6b5d298eca95f6dad3455a4318b63de759eb540310bd4e489429f6663adf74031afdab8807878fffd98e3c239404ab9befb67bdeaee2ebddb6f033509d5fc06b9fabbfbfded8162404b0dc3bde66d7e0452baf70229e5fbb3df7ebf7f77dbfef17ed71e85bacadddd355ff3f786e1f8e2a6648f6c406317f1cef7dbdfb00934a1f338eedbf703c5dada71c84720a5bc7d8df805e5d47ccb31108f66b149295feae0f16542ca07e291b3f9fdeec12fc85fa8f1f89fb7ecfa39f7c3a67ef9403cfac771f0f8bac16348413c5ee90145f13ecff3be1a0cc403281654f3f6afe80f43bac4d6db88dfd7d4c81f295496cf4d499fb048f6c81fef186edc5496dccdad9722971516499fb049f6d07fbdc2d07191bd44d93d201e4a597adcfb8f1e16e2ba391a0e88c7c86565fa6193f4a150b287feeb15363565fa403ca852eef71efcfe7af87b4b63799c52997e4bd47c8587c719548942794f1f88c7878178bc5e1e85f2460a05c40348c9721b108f9b194ea32892a2a792c4f25e5e492cc7923d14bb53ffbe209d973ffd856b79d3b263f98b4d86cb0d0389f3893affc2013f6f693cfedfcbabcf250f16b29f14e33c9bfdd696e2c329f0e8edfd8dfd65e39658c87eebddb1acfe5bdda419219ddf20fbb76d3e935732cb379d6f9e1f259698fe91433e6e789458d9b7dff038c31b6748ba49ba49bab5cd8f8e65b3d57b35cd67d379d7f15e3f5e1cd9f2b7f61defb45e6280e24dfce7880fba56e45173b760e62faefac77ffb5cd4f228b1e4eb7c29deb190bf4e0a29a659c8c740fab438f1e3cd3bafb9cfce8bf81f8fe1a3c0e37514d2db708af75a6cfe29b03f8a1318ccd9fceb9b984a56d4b2bb75ff99ade8ded2717995fbed17a4e3cf237adf9fd8bd8ea8e51d71fc3c6a331dc73e6b217be89bc02f3cde1c3c8638780c3388c7571e5bbc3e8f2d7e451e5bacf846cfca8ec3eeeeefa518c7ca8e6bcbdf7ba7b98585beb7bf79bd6ddbe67ddb776f18be341f4d93628668e63ec43bb785f4c1186b00f8f1f18f2f153f86103c0070c3640ffdf121b8574518be3431e08f6e952da6f46b981423c558e5fab0d76b0cb59e21ea3329466249317dc24f18b1bd75d99ce454d781ed5de414dbbdf7ba90d77bb76ddeeae190bbe89fede7fdeec7ee3d0f8f12e4f95cc5439e857aecf090e7e65af4cffd6d8e437e29c52ebafb2d2e752181bfb89f8c4bbbefba908606f4b5164dc7afa21047698bfea15f27c863cd5faf2d1794a64583479720df1a3ce4b972cd6117cde
23ebd7887d2fb2dfaa7ff764b90479a9f3f4a90b7ef44ef0beabeb1d0f62e461adc715f107d211a3cf6f659e065fa2efa87e2d102f793b1695695e3442dd75a39ef1641dddf9f627fe83d7d17fdf3d99dfa5abef439fc7d78a7fef6d345ffd09fafc371de61eedb057deeb7d75cf48f04998ab4666b73a5d845b398345addb22273b90c36f579b5524f1402bb5ae97b5e07c23a87ddae8617fcf6c0223f6af797e300dffbe92fba7a91e3709807fe75d1d0a0df2bf223cf95c2967b911f21e5b17a341f768ea37fbcaf2da7719c0ec761e1f5683ea4f9fa63ad78277cef29cdc3f0096a6cbcc85f785fdf8fbace619dc3b2f7a00b74798f73e3ddd3ef5ccfb9cb06d7e0f1ba8c26a4a1a10941ec38fc086c818ea3f33cfcf23ad08b7c05fdeb7d77c357dfff3ebc63bf5ffa4c30410e3dfce2eb95bda7ef4de0fd749ff1caf527c8f31d87e3f022c7e130c7a1b95eb0f166d24713b4af71b556fa9c28d4fa6aa5cf71b4b653ee691f653aabd7b5576b733ec31b27f75dd43ff5b971c88fc6e9e1d6b7002e7b3ff805b5fefbe92fbcab76f47d32faab62eb359befbaeefbb42eeaff629d28e4fd06b93dfcb07b1775b05cbf100b75efd518f5cff7357477d5e0a389bfeebd7eaf7ef7d227acdf45f5fb8e8e32f72df1fb826c5aefd97035ed35dd17e436cfb9db7c35789cdb27836b269dd77562b71765fffb05b5be3e15fb43917b17e9d64d8f528de3707f7dae5b40fdee6bf5f7e30585eee8e5ee7e830fc341bc639f7bfae1fcf9d5a7dcdf9fb58dbae7b86fa3fed132f7f593c1f5d176ad15b57cefbd77665b711b350b3a69ba4607291dc9b6558b1caefb9ed9b8cb6fbcf52e251e1be7475943f3e13b9652c7a596e505fda5ad5cfaebbb0ea4a14183a2d0f71b6487c05af77e503ed8570d01f85f5a8594108010b455ee97e37edbeeeeee2edb6b67595148c741f70b7ad63fde57b2a78e8a1fdb0af4b6ca524a97524a2aa594d629d8f9d166aff9c44f8d0214295ea3e0dfbf350a44dc5f1aef40f00f0077ffee9904c1b602db2adbd7f9a4ce03e04db40c8095eeeeeeeeb8ca1702adfc97fda50b0000bace913c082010b5ec791e04fda567d95588dd63b1e64591e65f0c3f8588f328c4d69f106f9e4774fd8e68f3cebd8ece05a1093cde9cd6ae72e7b4c542dbfb777f7110ec1e043b2ce46f1ff41af48ec0717f691639dfdbe784a1dbaf2d882373ff0dcdfdae9afb6d6ceeafa1115b2b407f28b6c4a915a0fd2dedaff697feb11eee2f17f497d956eedbe6fd2502b145d3c0fecae166f360dd4c3e859a371d6796de5ebbd65575337fe1d2a5e87df749d97501b596949fb76ebaaa16687363d3cdb283dfe9a87022bb08949686065ebdc312f2a4bbd675b19d957bbffdd8cdbaaadbcd727dffdefc6bdc7f9be15d0771578577c0ae8b4bd9cdbc9b65fb9ebcf16ec4b1b3a5914bb287be4d6ffe388e6fdedb70e492dfb86c96acf5601f088a5afebe0ff47057d575f15eb66af79e9769629ed630eec3b18b3cc88db9aeaae58f5de481ef9e07765735cedc450df33c267deef558a6df557ddfb21ed590c8e30c0d89bc15e5fab7abeaaaac87c7d95559dbcdba2af954d3391e4849cdd8844da4ab5918be5ef7ce249fb0993463523e99266c224da5993463d9af86e638ab9369c226d2549ab1993463b3abd9d09c49133691666cc666920fcd99d44c9a481366b92fec6e4ac399e4574373680ecdd94c9a493d9b49336968ce266c26cdd84c62b2b57c2291662dcff32412957293d64a980e89f4de2743daae7331e7b51b1bb756da25a1f0b356c2fcc44bebd2c4166e2f0fff2591b27c4da3c1a194fd58e8fbf0fb25b630099330b964a5f75b19db24920d7ffc90727d6ff30ef19067c74272fcf090e7d6b44d7e7bf815cadfbeb7782784217d1ec755f33c128f52dadf5ec2fa47fe865d8a5a76399e0897f3a8644c783639c99082040000400053170000300c080744a389a0474af70114800d63a2545c4e308e062391388a62188801180441100000008061188661404a92055b9a951d830fa3a83458c71bd5c0c43f435f519d108bc46d9abd239fe526cfd40ce0296c0856ae65d6010c251deb5ae1c949e65934ae09994c885e6fe000be4b21234fa64c185b4858b3f9d19041316aabe9cf1966d59035c63a3261e5b436262f014bd187d40ecf432910000f0fa635f86b10d71688b47f3f839388d20816f51c750a8f34b35f110109339699b585b8561358a2134323cf9d0ca2ddec8d0b3b68901850ff1e9d3ff052fb2c7716dd5bd53e53999eb053b3e7e77a1dd11de7edd9709015755b050a157b04139bf4799dcc5b0ef5ff113cd56287b543742a8ffa7440700eca93e43f881a9f2ed84a6023cb8d8f3f98a3958da91ea902ee59123b96863a0a07180f511f0c6dc90756ca8d0ab4d43a59b399075d55f4e95a4747f392b84fd8faa30533dbb7526f067029686c856f6b68bb27c67c95e4ef507a
b7aab8dc6e4449772db624ddb315582349fa8c658bf72427de864e34bd89d6054a9e4f4a4cd5994561c778444786148607886a443fdf390deec2675eba2ce034629da0bf3971641447c226c781b210c0a75c19a738169add0e70f9456382f203c5320141228e48fc5052086f4e10df40ca87ee62bdc8fb42e013ca0c1a7523846ac426a4207f3cec26086962925675cd4909d420fe4702f8be502d7e4d2f08928784c1d388000af7bbb3a362cad5d9c5288277cf1e858e40a942d0b64a04f7b62c14b7511df9f31237d4cc535c118c9f3429ded0fe18c7445b8e293d250e52f009f97b218343d612bbee55855142db44b64a95285635aadf9490426a9c75a0e2f97f383f359665249b60ff00f1646d16764c11fd79088406fc316ad0092b71cb939592cff647742fdbbf213bd2f97b848e9156e4571e4be41e9d31beec0115222121da03633e0bd4e01b34653315c720747bca355ad4445e22b6a12728ad4ba510bc6c9dc04cea397f9ca0ed7475d8fe828618c9243a6e9b3f7ccd09cd3cc797f13927479b727d32bbbc26908a960e2bbc6d7cedcb70884ebf23306df485256e68e1224e128db4b6ac95a966388b3a8e119c166ea2742804c700da89ca88332aca231b3cf233b927500ba46ea4c6e62551f86ad69eef0632c8186c122c82ac7f4c8307b145081af96eb710dff0df45a021bd644f633922ab7264570b22c9fc24979e954f3878958a0cde62ceaa28243c7ce0fe60bccaed6d6b5d8351a667e20eafbcab56b7cc00f79fb5cce57af57282156e6852b557d6bbad9d1c6694faa48b72751d3a44db8c87f355b4016e00cd82ebf4ee1832d8ccf5290bb1ec76d622bdad3656ba97220192e500d76e53ff85919f0e99ad250e4905d2704bb3f92b4d780e406a6014cc29088d87484408d12872bf53933f710f9fab58d3b9a9e2e204aa51aae68f6eef2ba38a18d65816b6ee24c319c947f8fd30b55cd2ff983819c1d9877860e7c9c918c9b72d89d206bbf82d19abf729dc5d1841710e05b0538180155a415743671006055e330450ebbfab81b8884603b730b40f4dc51744651c8054401afb0f244f89274e8fed38a1b3822a13c0f64a56d091144b53ac86e36f16e386f1fb461f9a8fc79874da8ddb6eac0a24dd0b54b293dc02600821c0a79508448e412284143c9f3ecc09dd162e0f60a45a99c9956e39e64e14014421c1f57f713fc0ad0a545ece43615325d64ae77024ff40d4e0a2a7d26d5c3465661f798eea55d7f35c34b0b5bb819b40b19b7ef64327df5002c34dda17e604618ea74eee309060151cbcac602c0d965e60f6a4cbea1593d42d519d9b9cdf9b55da3c6e159b933a371726b69955d9eb6a86d014d44076289ebe9217e91de2ab4ba45fc57c46de206dfbb80a92dbaf51a97ae71ba52e1aa45502b2d239fe9256f2e9df84d94374fc3f2d90beefaa183e69f3779c97855f55d85a87d1248e03186c015588c615a5201449a3c1d2f16507925db8e418c1a4238378cde5732894e9c188e5e17a4d153221f18393345fb10f18b7ae80a96ac11cb6f0fb7fedd5b6e95c6efc00d37f29482be6dae947e5b0afbac844909da935a12d4a55aa8bc3fce390e3fcec8aad74fb4397659aa867d57ba36aac2c5d655727358420941a79ffc05914bada80e0da82b16cd8f0532b303e6ffbeae1dc15d910009890be94d5f634caa6a6d39647b25680a6f66d0d8df1dc17bc21b4ee45306bef8769439e6817d17874d97545ea1008567770a865dc96528a8bae2174542a35c7d77070b745092f610c3664e4a132b3aba6785b7cb3687cbee8c7c95f6143c4daec954b9ed60547f91e366830ebcc6266e3440b3beac8ddddfa47e7314ba8de1e20e50c4d0563d3b8076a03189c89d88163de386296e0549b1d2829b70d82de97283c771bb6308553ec5a656c27fdb19bb43a707ec9f1646f4fad00fc36f06c5afc3607e321dbd82a92cb914db88dfcaf1f2fd0f5af73aa18582698508c3264485faf0260d321dfc7a385727c1d4666f0a1949cd54f23191a5867b7ddde89fad812e748a5237db5af739923ad124be84c41dc56e73ed3f7907aa145e6ed026578be43e80dd00e5a08bd019e035bff2ff931192285a270ea69f6cc8b18522dcca0b3730b072a1800608690bdd273a7ad592c15599b8ea2bd6e789c0dd7b29c5dc524d57ece21e572b5f86b283ec14250b933ba55554752c3457ae92cd20de1ab966b9179660929e6ac5a6e9480ed4ab7666d9394f895336923294f8a861af7cd5da63910800f57af40e5c71e12e012edecea662b53304458abde6f30f5120076a4dd5692a4242262c3a912cab6ab57759c48732f50b11e214cff83f2aab8acda1e2995db1373d46d2a0429c8f565b3c76ed351700f82e8fed141d9bac10819b0252c1a08c4a0ba33d4a2b4f6947df8cd9bb9ea0a17bf4fb3d7679fe06aee2036249eb4d6da24dd2b8378ccb0b0c313d2697b22cb09b8410d858c344f6236171
171e42e576fbc138e2970ff24a91c323d9970ccdb739f77c7458e8ded3511dc16c8433e2514852549e86dc558b173010a22aedb91faed1d3f0adb0b8e1ad2e6c0336231c74dffc8cb901732b717f56e7d378bd4944bdc4db863c874cba4d905601a921d4ef8b11a7896dc818c4239219a0308ae1c1320d91512a457d7c5ffca75d0b993c6ebf652a2ef94c8501732a586f1d5c577fe4c86506c0ef65fc6494d1bafa744e5cc4901e1d830a60fc3f984987e172ba1098572cdfa10df5c657502f5b8aa240688293e0ab1a0bdb214b9eec5f58fbe90d21df7afaa6a890c3768a69cce400e2c9f34104c65ff3524190343c5d6747f36a7281c9131b45b8aefd1bc774d517a65201c7495aa5e2059385016a38dc1cc380a5a643bafde53001fcd5a430c00609d8cd41ee4a0887278e1538b6a5cdb29dfc186e184669885622b35ab38065d89301a128a5c5094dc4bd751ad38d6692ab56a5483edd454a9b899b984eef2af350cf478dcf00d6abe952c8f218e1d1a09e01afe9cb35160d5f3d90f10c1826a5f2ab6b18b0da36f68aeb0ad876cd85f84b10e058dfc6cd8b2bbfe337495c3c5015c6b43a42a27da75fa5b5fc46210845257899f17d136b40f71090ef6ed8c5d40a1b6428d04a9d8125295482657126aa4312afb4ffcbd18d3bf82eafb32b3325066ebd143b10a5dfbbbd2ac24ace376f08e61cd8d8d0f66e7f43b078a95a66d456490e2892c5aa7d72ae41663c86e1a10b92790f2aec269836b908611a12859565c3622fa1fe4a5b3ae9bb33e12a614b416a29c8721146956fd428750ee87d18b6d0eb98b50703c402218f34384088756f8ea6288ef8820a97badf2608c16ee635dcc75beb60004da022df51525230abfb3e78824b1c36c0b66cf52ecf24f00699185e7084b6a021fde299c614cb3fcca6152009e28be5bd2421ba315cbf14a29db55bd812108c2ce9acd16095e601ae3b06759e689dd386885b061cf3a398f6955f1725e80619288b38cc6c1ec527186be118addde241b2565411e70e1f848073581d97332a2d18d71d4c85fb8b0fc55271d5f097e12311df5ba4e3e0864f456c63ee6bc31c5199784ca619b4c02e1b683854265147aecdd8c1c17da1f625141c95ef6dc081b03e87a16605a94a44e20138fdb36d436aad9c3a4243d6e0588e7ea3b18ccb189c1c20952caa4243f56969af0038b537299812b33e2f9276d1f6c6a314cf62ed8cc3588e77c7fb2e13513372ea028758dba01960df3ebea95f3c25f34137a171a29084810e72605a6d0c1088ff51187e049a6a1156ae6f5b3d10a96122fc73569aa68b4a204cea51b70ceeb1f576d16e03e6fa0f89bd78c160942d409243fc8a163879a49841460df02d2b9400a7450c1f3200af33906c507dc480405dd06d1baea94bb48feb1f63db485a6b417bc2b4d6ea06eeeb43283b6598ffeb056a8846ed75b91f3f8a6d0d07d7c49e09223ec834c3b663e81d58bc21be984dc19aa9d9b555c60a22f03c99f7d69887a660e69af6a55e361487b89ef569fa352050ddd78ea7d89a3ef03db907c8590ae04a9ca9cb0c29cb96f9369afec1df08538f26029764f2d66e6cfd9569a7b7ebe82856c96b281dad9166d488c4d0997973ade77bacf4fc0d6dd5b0310b499942e029c974e3823442fcd942bf0ccd6544117b133a1ed4d5ec2377f69bd605ff55fdaf066b2e9590bf0a488bd51e815905813e8e4c4b47b79ec136edf6600376f2e3e84e7fc53b6c91e08873b1dc1289d8f29f0d39331394c4043e043b1c74a9680d69d64bfa2fe528a4c48be980d641c200c8bcba341376f2c28bc7f5a23c827a8d98a6ed4a06f48f14feb83be50d3a0e273b14cee35e82a715ce0ce0d1098ab5394bd1663b113e430a6b725df8523235b13a00ac3e331afff1a809e20e488db47c3a279169838c8c9bf7916e5a9c9ed37b8398bd8cc33db26de6ddec47b224b67701527b2620cc70dbf13f1848b200774da4a518d2a0234b29827e3b8e59c9399b30a91d8303a522cc7fbd389293bf138353dd0a8fc5b76737b079d68a4d92dd0257ca7227c589fcdd88f2174752c8387ed2da98b9b517ce48e508d961ded8c102391ab63c28a44925db77fae4e8a9067a657ec29505b8b605a3cf62e5d4ceebdd8d3cb25abebfa941eee04feb8fa3dcd87901c78e7d2d63ad2286d771af295dd3f11c548f153b713a8caeb1e541620b6d7c4de02d739e35f17b7296aa5a91c129692f9c7ac6575f74683683eaa2caa4518252734b10bf71290e69830a441ad42d8580bd39b3012086bce6039f351030c4cff75b5a4aa8dacbc578af5a2034775caf36d0e47d7147d1c6bb4f9c774dcd3728208f7f3795b0bac80f9ba0ddd51f49928fe792abc70d187933959a7b92860c711bf4a5c04d6acb733f73e0a582d49d91dd9e66c97be1700099c4b319e35d907f0f941236176b51a2b616ad7562e9612eb12074b1d012c0aebaf45ae315f851b4475b900c3ca1e48cff51d
0dd425a16ff6d48c688f276e831da442d87083bea9479ef9c578a8dee5d1d4e2990828ff5d41f52f0677de8c05e349c31930a70a864dca3a5d3d6641c3c9f92c1530290426c532d516ae8132ba84ab2f0d70ac6bbac7062cdf46040766f7b44c5e79440ecfcdf8de69fe4c3bda5c50b10b11506d34b5d952573e53045509860d576c98726194520d35aeae24f50161c15137df78f585e50c327ae2a635ef915bddddeff081ddbeafe3bc9173247877a5b668a0945916d7cbc2030967aeddabefcc1cdbc3f2d8a83a3942568137c898c4a85c80de9423ce6094bf4a8a888cdc2b2698210f66240515c140945d61f512b8962169cb03edfdb02e68e88b2f13304d6e984187a85395974025884cd322389d74d0e4ad9362ebff1e8cf4739de9e40a8c50eb267c8f3a2108798da51994dc9ae901e0b633254a08194b5605d3b4906cd364a453a7b0a7505131e80c15045d749b5c684ccf193a075c714793f70435fb246df6e0c671409f30fbf8b85936b3610db36ce785de7db9c9525f6104efdbb3a0ad2fa874e18d5385236f12c4332e1a79d4decabb9d33aed812d4c746a368bd8d86332b86cf28329bcc11208f538b4352681901717db1a07ee6cf012bfac08dba7ab5dd68dc452fd9e65f2622e78333e1de4f6f38be9a07a0ef856801c2afa9e885b878d82b048d5e264752ae4a46b6b7fccef1bf26ef579dae15ca26842bde303ed32bc446b505cd2b0037e692b9abf72df86d447c1e724408a8aa9b538847b3b1ccb51a714d61f4ac056f4903cf18069d50bffc860f15d5116ba2dff5713a2c681177e46b33c89c40af346e4d79404d8921bb9ae46e7e87d9f7e9ba9b10ac98c7adea1cf17ae2d2adf46e5bc5b0cc3c7c275a9047a1cf8ba1bab6d5c65ae785a440ce5cb1f6771ed9920dbdd1af051bfeb53c71255b3082aa15bcc478d2f42f5ad66375b341548fc06ba1d171bb0c98dd05519f5ef5cfce838afab37497c87628d0f945ca91b87e10e4332cdd6c1a5fd92b51de0f7efd7e907b1478be801306308aeccc604a4155b664ab60183d2c31c0e2cf9eed4309dabb0ab287184332fb8f71577b1485cbd38eed237711c25da404384851570c07fcc037943b6181a503ff1c4ed3fb06ae017a9c6d5d42f5df0364f56d7fa19d782cccf5d981b594be73783de73ebf96e61eac7f7929b4550fc8cc8d6e1a246ec03895d522a55db84ee9c6aebd365fef7e2e35d456e899aff783faf5c455ea659d709a72c6ac5a4bdffe9066b0217d9a2bad3a7f415a4e926021ea2566ba86d89c5c8fcb41dcaa714680753b18a2c41893a36359df9a7dda218a7d04be48bc8f442202a268508d803494fb6fda9a38d746cda2b78de1455beb1efe410f5aad4c0a39aacc5654d3cc522954a98e45205f16e5c06785e1e841c98d87646b8d9826aac79da5a6f805c295b2d2eff7ffda93f343ac4f63f2549ed4b4fa5b65d3112312562c11287173d012270b7c48840a35f7cffcc76a1383710336d0b23df3f8ef7d9839d567fd719eb950b1e9e2f52b762019708dcd10d18515930c17a7f5083508651dd0d519925d868a4da440353ecbb970157c6c007309cc42b3481836e09d41ed406739737aab3e6aab29b42d9d4ad8ca0a094e75167b496756f3d2244c82428bb0d48b2668458799a3b2079132eba21631195c7892d521d4e5d80bcfb202c5ecd4d3eea3658556d734eb60084a4b2e442e8d1f55a5f0b44e7674455d66bbea0259b0bb4cf5a185f6c9c7990ff8564cdf96b162e5f33dd2d1a17c959547263e9b293cd97b0eb902a904a5790eb224568be5366bc5332fe9e5f46cdc0c1c761346fbd1dbb955558967134f4a00be14865d372bd1a13df591b3454a203c44b10eec649225ec91c7c417d6761b8a4367d48df0f5b5ecef25e0693178bc44cc5ea7e7fac80ae44b2e89858218473065fcc9c8bdad3056d62e4f49bd8966959a4ed7aafdaebe1cdf785a8322b7053de309c8da9ccd7c539fb800eed63590da900d4e2db922b2624af32750f52281512496651caf8c7f64494d533fd15ac24cffd42e47125e6d534cdc2b8c9917f05dbe75498893926f0b3772b9e517c5372dbf11959c6dd6b22603da6a05f7852dcb5cdee281da7510558ed2fce89e22b45bffe941fee8d6b5306181d237e8085ab8e6b314dac6e3e3d168dc22d5b97d42072e1984af87e463ad55baf340665a0805b322038dc8c6f76c0cc3d0de5e8d281d447e3f5fa2b90338fb7b9f59e2d9330cf524b1c391449a4b51bc981e9e39c6503ca71a316e3c5b86b64e7b65c2f03cd0e42ba076ab613147fbbb2dd2f27b422f5672d3e1947d31cc57e6d5640fadc06246195fea2d7a5b2a88241936a16bbf7770a1e3d1155f2d8ad656792f7ae59d3c8768bf57b4dcc5e47cb4b507d83c16487d3234c7f2c7d7cf6379de5a98997f3af0598b81d6b1b938a1ee0839d93b79f3d89ea4b01081c84f0d588d2652157cf1095c60dc4c66b829caadb4bf7
02059cf983594901e103639d261f90423b84120879a8530b31208f6d52e59a5a84ad122ea650619f9b639b160c5a0b5c21d9eb05749d969061483fe84e8bfe2582ebdc9b8b0c2554fc9df1ca75a0ce113e14adbce401aac63a0d6c6d1433b9d7fde5280de3d3e21d575183506e834c6e79ca2aa3330223785b6534db30a2ee1ee6ae324eb3152f20a565f40908b0af6d19dfef1d0756ad82f9e46aa3a44ffd636bbd475a2da66ee94f967e6bcd79c9d229d09b0531dcefac16d74c34204a8ecb279e4c8cdc4036f523b9714781ff64bf19d8115fea6fb0e1ffa628d6020511c610bf2841dc61a287687d09b732ef0993a8ac6cf14e2adb879745ca8a0b470910a5a03a15089219e3f5c06b2dbaea5f07900b36a13fff98dfeab9d4d9bfd4ebcc123eb3b1ce60248a4481616bc28bc04b7880d4e59c1d0b97bff5935cb6fd5b82df38eeaac2027809fb0a680a416183e9c95c742982a981f3543e12cba2e62a99d63e0b7d3cef62fc31683c93a21b9a6dfb97ff20d30358e4d707c4534d1420d3d929a5cbb0c7ba36aac2c5d42d1c50b65d851b88fa4f4fa7d7619ea7845074692ff6f82dd08d639d4721d3437a1337d4ef40db35a9f1c3382d1529f21403215f6f499fbf8faf3375dcf8c91695fed15fa2dcf8db234d49c6c50db3777a047f86521b3c74a56d9b529af0e95016183cca3478fb43b2a697235644a43a1f85b75e167af1d0208ab1167a7f691f655f68b6b3ee26838eb14bd2c4d9c868e209cfa959b957b4e66d2ef4734b4df7b0d1ff532e3e54e1857bc7c4f93a706696fe80c1a54a53d138fc8f1e817016931575bda830a486752c30549c05ad2df119565b13432979ff453d49967de88d199f870c8cc525d6cf2d13687f1de9d49dd72714933094f8a1c50fba29a9c07e3880b1524cb39ecdd36d4d51b22379a5845006b974d11c498be5a7859691bb4dced537ca421cac112f52dfcefca752c6df32a024b620bacf5102d1047882eab76e2d0bd18bc98175bd6a44d6347c0b54e9aae6cb409eb471433a18e78d7f8fa6335a13d551f9df9145e49d67c205366e61f4eea1332ee8c4b072234231da32e6c90701f69a5b09eaf59a7fd30adfcd6cd08fe7c98b2784facb84d47bb379a55c8173d3be159abcd093ab8f6f7dc36df4a79dca9c552830f095320ee84fd556a03b85e0a298e57f6813dda4ee4ab5dccc9066bd79529eb166f2c2b1a21e4cf45b1c54b6fd2dd86a225bd982525ee8f9676da662158d2cd34cef86ee0d6a3fb1b4b4edfa85d2eb82ef786bc47522e96eca51491cd63bf55717ec666c7f38a1d50503d903a092e99aa7e4537ba3ff7b11c5cd8b824e8f5a68f218703cae2de0e70bd2f2d15d86650323d96847904d5f79b89b2871de34e2a336270748b19471bcaf53f074d5614428d89da3495f28943d3a427164d054e6ee65cfa757d446ea01cb274a8f6c69dd9485a1d69374d6af08900decb78df9ba5355969aa169899c44ad35e468b1b0ca801012221970f03732aae852b3c2a38a8cc8f354a44a8cd5f073f5fca6e5603e65eceda0d372741cd6e7398124c9eeb898b95baf93eb59436b24d58f1a5642a7a2d348afc0bfe41373f4ced8cdb921b65d3d5cdaaeaaafcac1975af56d11404818c15af37c6c02c1cb48bb3220a2d8b391791dc55e14d06e56928f9675fc6f39ac5f903294997226a1ff5271c72e2d134c995384b5505f3cad0a976b397090351d64722e26ffcc67ffe4bf753942ba996aea9a331abcf05969a61f49394e0dac01987b77492a2fc91638c992d10b4242f7d38348c3d1a706be043cedc7b59eab6e88738ab9017efb6b4e44735cccd7c70ffbd6ba84fc3f0fb87e32056a5e600bb5663e6f526fe84fcc232c7d656e5d24f9bb476350882690bd9ee097f6f9d3bca8c2b4610c5cea129124726dc9c278a38a60f045037419bb9cfef1cb8ce436834c08b82ccd08db94a13fee34c5a5081b880386e09d2a28f23127a3ed0c67677b1f2836b2489257843c622535ccc95d46c4f7c2df817d8f120a7024375e53113345e627171a9d574c8e56db5f674d018c9fcf111c3e440fad5b68201f916126b615301994c805cc549c712fb5ed87968b83442c634390dc5d249f0d8de92e5e259986d813e88b2c4a0a67caa300d1289ce9d2a5ec9427b42d4456650c40734fc46c1eafc0addb91fd5844e31e545f1eb4131f765a1d0bab4e1c0f775cfe14c626d05d51800fc54089b493e796f7da6ac11ea4d7222b30406d3bac153d06426644cff0bf78a12ffa9c0b17c0b2f139fb6471ac72ab9f7ece8b2c304c7f6ee9695fd06afbe2e5df51a6f70b12ac11fea967f3e980cceded8b51cf01beec5576d5a7c21a132e8cbdcd426773a1d0f0c6bb8eccb19086b5d9e92c7f1f67a749df395544632184618a432c02ff96985d5cd16327988791432cd0c779099021dfbd2ce2d111b276dce544d7953583456410beb5ab537e9ccfda272fdb66268c805afe9bc
cffe4f0fc3944803c21c8b5c56d943aa0af9d8f6f48bbb9a34e598750fd563ba644dcf8496f70d3cf6e9cad4fdcf4a7d711e9fdd80e9d1ac2261c714d718dd63bcd073b2df5f4e1627b850d6af23373599698f15a1ccd44d0a39e24ee40cfb2ddc3df414d1bda34fc53736815fb6876e523fb35d66dd76149c3f79541cb7c8032b65a8d23067bc865a3b3a8a2c1fca59777842a846cd6ccc946e40eab27d2ddd24837910f28ef5f015d0ae5ebd2ca5ee1012c787ba6739c20673fb49c0c614c01aaf0a1075505cc1d28536f4e72db6702209174c0966cf6e3af95cddd070b845842a471baec1113502b71afd9fceacb96bc3ca18e572aecee1a18de702b81b138d43b7ea66c3d0f179db9a32e6e449205a0b169f1833b367cbc217e8669eca9751698d1159fcdae4e9d05ba2aedb4789c9188f652aa460307494902c5924794e67882b44769fe1fbd2e6b10cd0fd85044604254834c0b0622127aa9b637ae9d3ca61368a118884a958e97288231621a59fbe2d279cf5b793192717e0cb049b20863aabcdac887fe5e309925b5585b96b3f3c01279387aa0a71b153dca94e5e10e1a649cf689ee02d90da62cee981376cb707cb93a51cc824bb1ffefe009a8ebed36bb31c1d20cc75ec821f0031149636cae176210aa8e115ab8bf84d2956013f79bf73c3045fad930adfac2aefb10df633593145b88a5a6114f1046b8090ea471c95892d0215aa0f55b0f662b420a9ba1cbd6a8faf6eda391292e2edc51a4eda242ce65f49d9c8f622a93066c503aa6787d5e8b972cc744c90c56c5ac6a3261aaba77fd2fb41ae91871bd82251f4f5a8e3b1c8fbf771a93062e52525925d0a81bba7147b8e5f8eee17891597e66ef7fce243a3c5c327b3481eb97638233908cd6c2d117ef016eb6687272a64a46a4efe936157e1f2664087ade49d4e5bd4d1197861ce062e024d28a609221a13b021d183064db395bcbd885c856c26d48b2728bdf0e98a3e1b08b1249b690812a9caf60168592ea3967de301c4e8ed1548292096ba4638ae2562fadee853ed386845b362034983f2afbcaf11b32054444ded25471c75e634798f15b5e7addcba88c503536fd83eeaf66308416528b40e550940d06cb68d0d8c8853265a09e27ce276301b26d35eff83341d60824258bbf7cbbf98f06234d6242f094b6b3ab1c027d6092808750410bd85f2224cce5f92b6d5288a5182eb26aa6c50182c4978269e017c3d7c950efce76862e2e755860587b8694ba249fff050de85b35b0379dc995554c429d874520d60d788e9462ccdc8d3735a439a1442bdc34955ff31f34c4221e754689471f3cb298d9014a2afa336021d255ac29aae3a8ea9a913970b8d324510e5966b63ff316ba7599552906a40a5aeea43a01942600a068a441411d54daf15e907e750c16420629578eb19a9fa315ef65f4e59c335cca8cc1372b884834e4f503baf99180e2685ad835bbd3be3359e454dbf08e32d9871761b248bed867012cda1f605201caa38cd1ebac565082d216d1cbda29f21d1140862e3c6a52e05b58e3193a13d91f60e6308d42d5696964b4bbda0528b17a903e500bb220185f81c47c25a0d9a6be2d15a56304d735892bd2630ae38f31d3a6bedac23b54c611e38c35e0333d71e67c5626f7d5a6c03f400f2e6a5959a57ff15775c8708ec9afb661598a5c28ce35909931f22eac087e04f18f24e036e585ab97745fd97c84001a7f54565f7d3a58d905d141c5a6336c60828fda9f48da42f8e1c6959e5b8925860695a77eddbe3012a54699e73ad5a89013ad3de486e8da5855beb15e3c899d9fd4e220a3796afb6dcc221a823482d406e476bb339c50ae7133d7172b36fce00297459cdafb9891ba5108a97ac43d453c32a09f20a9fc7ddd2796d682e9a64103fc8c46476211c188f883e6a08217198994aeebb8a443661efdec80d76ca05780a8411198ce8f56cd708ccd3a7471087c215c31d5de3f2eca5a6ae521923fc43f45051c3017cd06834a84a268d2d0bc50b136ec1f4b1c9f934cee6d822ed67865ad57064fa5e6b5757a07101f8ea3e748cac18a6a9d6f881f7ce843a48df40fe85aec0a2d177559dc5d02a9a1dc431f33780e61376a1d5f314c99910ba619d1aa6f061dbaeb51f4210315db30616bf1d2177aca76a7ab0f092b5c67575392dd675cd1908ef0b28d5338f1227bd1951747245094a2e3175c5c62fe5c4be5823dfca2efe3f7142847ceaa2febe46fe2f42d3ed453bef20ecbccf6cacb109ef46b383014e9af3fbfd7a9b2d23b17b907c0c12c3d953b3bf946a5ae44f8a71c52c100c1ced1a49ae24054f6551da85136261c6dcd2f6122385de47fef3a33c9ac21ed48d5abb479e8a9813ab662f161e8d35fff45fbc0196219351c046b1cd36d11698af112e714c1b951dd0cdc605c63ab2b5c0017e22788e4f47c03e89c22f2d8d09549b5ad12822dd9cf081ec166c2e3291b261bf17d4395fc5d3b771c304321abb79e
1b28a8c31897ca2747401d6e040b0a87db71f521e3e3db09d87f3105b8c1969d0b389b2342854c9b8c4029f8f058aa477c5b17b392186087a57e55f420af41191817070eb5ba53eb4bc8e5703686bc2fe5c631c0047971d8e5f4266e27d83519045a46d713d21f5920a9ff25df33ed120da4c13983cf76b4656b6c513e9abcbe22d832bc19893a6efc8a2358e185fa74f11083459a6ba621b0bcfd7a8a5a289225da9e69d090e1e1370eb89690461d53a8e202e50225be982968d336ff6b51a02f626e3d7f7eaec4984207339fea6fd6fccff476e8bee49df6a8c2c3d684a68e8730bd0ff7891831207a3d811136027a2cfa0cc4bae4008404cb42c9d9a6205675f57ee07c5ca1ec1b8c15fb1449ff227a6f17ce4654489eccc2d2e2b06769410e033536a64f99bab6bc5a179dd73897cb48d40c8619b928f66d9d4e6f3a2d83cf063b7c24d0c0622ad9a214efae6e873fd67e7496800061dbe23021288356d5f1e678715192b6a0b73f77cc5428ec1a7017fdc8ee675807560e988a0fa84e1ab62ed63ae57097dc49c311f1d3ce3faf6be6c470439651b9e9bc87424244b5fe9b9283049daef0bd1636948be6812156af3d3852b192310bb9a85005d3bd490749446f13eb2aca920f2513b3718f9662d0a0925422754639e20240fa2982265b34ccdff98e58461573c2488d2d4021d46931de9688897e72836849657c2d1715d18c6cd005abb0e55635f35a2c87586e44045a2cdbc35aca62114f82086c209a8e6deb51590ee794c85f4e45c4fe9dabe1c9fc4552da0a7ef1d1f96d0f27611d1dfd04ce92eb0ae883e22dd032d310ffcb08a82645f43aa0fe2edc67341e5a4c500df322e91520be9ede0a93a7dcf665fb18f0de6199c29f5d52428619bf469a66ed8230e435d2e27f84028c6508a0876f7e9e2136c49e76d0223079d10c661b62143413f4543261903e3216aa7d8c79a80f695fbe90b48567e87535acc5085dea08c0e026eb9d62d55a8531a7abf51fa3c0d1d9d3284d188e929fdc7add8c40877a57b82e2140097e83571ab45c08867c8ce67fa0f6b516de25b830d365c76d04d6c0b35ee5e8eca8cde661027d22dc2f224f02d36bee038ccb43d9bb1e5753b0f03a122cd0a75670de002a8f6d27875d41ab81a470c1d3039528ba1dc5353aa8844073228ba4723849a65e9164e1099c35407f4b9be55000756c2731d3d391c6042a851784938b12a8091c45515fb566e5f77524842531b7c00110b1986ed2ea936f5c8068602a47acd23d55133ef1fb2d62391bf23d8f0292364b43d30a3a5b4f82a23cfc2bc7584525dae60756279dc0ae8637d95d3d53930dbf6636560d8110c992d385214b9b57ec4a57e4a41f86b7ef419e96a8c8f829f38bae842ca303013becc213babe35944c9f8426dec6ed6c64ddc6780dff3621537f0c5edd9eef8938b299a13480ff4670d8e571dec66e94fd2e364484fa84d6cf46d61ecfa85eae1b9ab04c6b0371cd1cd30f8ad74d1ef605dcdcb880a8564d4a96bc61385a9ec543cd83194af06dfe4a53534c38473e909ae79bb1e36b90c5d8ff55ddcd376c212bdba5621858eec88db2d096531e6ccf0ce446e4ebddb2674d84f7d0c0c4267a970ff53718bc850ca3d9f127a2cee02617ff3a4fd7658ecc4809b5238e9a55cba4c971a4e40276553d5c6862656925e92eadcf27e9ed80c1f63a2904051a76a5dfa56b027e1220a0054fc20afb7e136aab3189c83a061800705fa9f0b378b267dad7c7a036fcc14eca3bc5b1ea9c85e28d055b9acefff165183a1c024ca74d992c1e3d682d23ba91bfa5599dc6c147ba5e595964185a03f6ce4b21e2eca01d6bcd1ec3d8561c19765c8c30684e8da2ecafa70cb788baec408c80fc72072526949aff3a2375dcd124de5620cc74fb852a8d9a31f979b9f4fbe8ff6e1307b467104cb68e27393052c803422b98fdee99138699d677f022417b0137c54e5681d2c516195d8441466cebcff0cc9543271360666d50cd12af06960a1dc93e458d5fe5319d7b762e60ccf1e88e3c165a4d9215784186170e938adf81a8223c7218389b98bb8c2207050ae2913279a7f591c0caed8bc467b4d2ad744e2d08dea3ae8d53779e34940a05bb7ca26a94562e81bb71d05a6b27761a98e2382646f3e25b4041c8248d1fb38d8cab7437277c86db4153224d454575e67565472740a911edd0c98449dfc284b774e4240ceb1e4ba62763223ae22be6eb424f813d119245035995ba2b2f60ed5db5c121f5d74df3586368f104ed2926c5a9e55133ea39eb6ca472d292ee957a0677683c6a242b83313604b148f064239ac51b895acf0eab07c03630a98293478ee669f2f734635a926e925bcb08571ff49b90a798577dfe41378eb38b53947a7b4985abe1ec574f99e9a1644d949d64fa963705cb821ebc1312a0b20edf6de46c34619a2c5ba795bd812f7c8c3f1d075ea803c39626490ee2faa92cd58c13fd5b091c9db935533ca597f
ed6c55f5435eebbd844a95a6532210c802b2f2a5054ed4b1552b4d2ae00810f85861652d22578276ad04d87cef40f717ec522475b136efb22ea1a97709cfa8690ab8eb01f615c813f1e9afd4ea97f1461be9d091f57c3adb2ae33ea85b5782a7714aaa27a963f86547f27a3e9c180fe29bed45f6ce2d448d6a3457900641d91f961d09ae6337b0124d3e820d6f98ba51d9d680cbdd3d7e1b52160c0bc9ea25dce1b4fa0a53f3c94c05f70b19c8e5b086e4055c5d4209f7d3b02b15a52410d49a125698cf86d546d4c15244364a587d63d19eb3a589ae232d7b6a3bc8f494b523769f05c5027eefaa18c7ab14d9378a3d111178ef391b3b207a2626ceff64133395f6f1c61f49f55c8a2b1a1a12541d411af6f2a730285a327ec325cb1c49b41f21a0a4f79c711495fa45e54dde78c56f10628687cc6ee285c471aac9bf5a333f73d86e02d5d44aa9d639798f742f201c84717d7dbe570bd0f961119e71092a466279e9efa6201d59f06b581265b0ab84b38ff356ab2865a46bb3e9dfaa418d891d3184fa97e2eb32b38a09f5756cde15dd0412b3bad53f704755836e4b00db2ad7fe8a9a80add4129376db8ec8b240f51a5b57aef64c644cd064afaaddbe943120729a97da7817cb51bd170745f6406b1daef86519a647d97cc4f685592211ef639639b550688a79b4c520ac5eb15ba5a2918cb0c4897c1aa3b1def1f6390a1eeaa09d51029a55cd35be44455d0faedee916f4c4f61fb46fbcee11706b4c3383c654e5eb61cfa5e29e09f1952aab5e73870262ea8e59659ea90fd31ffbe71dc6a28ff0fa72ebbad420b3f94388293dda10a98cf47c35b4f790337246efa71bc2885b35e5b8ef840e66b07428c9d148baa4e926adfacf1999081ff426fa9257dd1035557a704b00aa79be73f0de22acaa58f9474288a1dc53f242c8445602252c496a267da085e4b0a13c371e19685b126620a8a1d67450eae68e7774045d79e5a7f94b2073eb6ecda7081165eb686cb088acc100eebbb8af37f614adb19050c4b6222716822a9f3c09e5b4a1a713cca28992edc4513d12557b77c50fec71b6248b5cd8329a003b95a6c8282fba0ed4e112f9184cbd436e0560b19094b82d34af7336cd91c9943a086d52f6887b94d876bc671b7243bea3b4ce4ff1dda94c62437cc7a51189d2c28fe4d8aa2aed85df869ca4dbe2ef6940d81078525c6b50575397ca82d867daddde03fc9d858e8c9b9febad45216e081dbe3b9fc0d759a16389d8f055537ed4f6f787559d323a38c533472566fbf4cb779c1480d00a538699cc303b66e49cfb5ce22fd8b7c511521a2b3133eaa6ff1616e90354f9c0ef7b0348302879ea95e0e159577df61a2b1b28015d7dec0b11079405a1c1d1ac4fc069831eccf6414372fc6c9c7ad391b0c8e4d6d67485e93b83cbd115c8867936c045419c8de80b1ad3337320f8b53eae493917049e7cbf84b08553e1d77f6a213a91f984ea8ebf2c4c44fb3519d4d7a26d428c2776654b9a046d603ac2442b45838ca05ca556930b807795068aa8b940af1eb2296cafe43b97f9db87569c509022d76c4eb26c2bc9912e76a54dd12d70c34436b1792b0c691dc476cf26d8d55a70de4af9904423499b45e6884d77234b5af15d647db54930bcbb98fb93b8376e3b519602c4ddb0a76bee300ffb122204020cf108327fe4177cb5d260cd8a5f26c724d00d8d80e1c936cfdecfd936315ef7fa9cf53fd9ddbe22743c6d747fb9ff38e6f4721c788a31e552886410242fa8b7486dba109c58d77d8a8f2d362d2862089a6ef8c4338ef21107a6f5bc0df98830def0296b6976c9692909f8a2d8ec9725136144fd32c9d8c647c66c23f79ba6a9867f0866621885455729ef161e8b474304451a23d317898eb56f1566833a6430fcd74b491ee0ae95b866229d1b6102936c0d087272398bfce44d2367842173d422a5f7d67e551495039c31e4484f6f46295a4b568ee30b9378cd458db077bdcf50b117e18ea3a4f7c78e23a23f2f6f14ad2b23652cb76bf0b4284301d5d913401a8d554959bb3eaaa54ca48502f212518834c8baec8a5254b2ddae3e272e2846737f684a8afb856e7593c96802eb82de7619092d2a1e11ed01cf72bab9c4091c78e48994113feebabd60d6d011b788c14cc53309e436bf9c2cfb748e520b12c6099375c2228f5bdf7e1ebae4d4698bb92e9fcc55f30dec59412e8c915504fbd076141621a18fe236b655e46b4153a9bcaf891de0a8db7aa9556104869e4547842c00a655b448e1eb632dad4bf33e693f904c9aed8da7e218c80e3a1ebb084be13b48bf47584034ebedcbec8a134a41628d84ccdcf8a9f99fe588463583cef2f8ea0a340d91026105f98bb700620454a2037a032b945830da11a70de59446a24da13de081f64b75bb9622029bc10c61738c7036a067effa2b71eb0c66664c791628480a31120f203c743b050428f82bdc6537e5a4b859ea99608aa57296a61e481b46d1d
336a32d894476f7ce2403b4081d093709d25ffedc1c867883f061aed8c55538f3fb1bf6abc52ef063aef846af6c10e3c8f246af4819210ef96e381c12c618df0d1c4f462735b7dfe294cea7f5ad5677507b432f57d72bd427b519cd803d8f0ce7f13032f415b5289678f355c96f74bb2293971fa2a1a77f421f45738e44f126927fa2912c92c4db8f087fc40c7ffb112784f37ebc0fdfbad0c2d684ab2cfd8b5e913242e82b195fc96c74bba2538e47fa11fd0bdff12f7c2513fd0b5f45dff12f7c25e35fc4e85fe478a7221ee93dbdf0766f8efcbe5e91e8b6cc23bdb7bedecff3795c6fdde8dccfb7e617f132d0db779f1ba4703f5b3f45201c71f32882beeda93df949fc44224aaf9793d27b37119df83a9527b52cbfad65fe47b994fbb792affc8b8e2f202f09a33a1b30f640321e8f87917b9571eff8686ebd23bd772cb9d1face9db2dfb09f7f696e27cbeb4bb0fcf7e37d481ffcc19f8f76c314d7e2ed83ebcf2d3eec87d96217877d60bf1cbbf4c73c23c5db0b8ed2ba221f857ce410b81ab94379f311b251da19ede8ece8ece8ece8ece80491b373e9c8d9f2ee6f3f9e4e103941e8ec04a1b313444e103a3b3a4168a3477adff2ac51696774af48f4bfa3b3a3b3a3b3a3b3a303ca420907bc7103c78dd20d1c37e27c5f210c6e8f9825bee1a6c79143355ea89be0cf9ff5dd629fb7343af10677b01ffad3e26171e7a96d10d32ced63d1b7ac83d447f6867cd4386ebcd8b682f041748c7c52e2edbda849194406116384f8bd4ba2833c195f103e7aef7890fd47e341cca3b2af767e05c918638cef8388cf5edf1fe36ff6e3831b751f23063df6f7c7cbf51bdb3e1fffbd8ccd0bfff0c67670790f33865d28de7eb3784aac07c52eef63c63ef8e5611a3b46783d8f2ae5d5b2972daf3e2bed4b6d13655af2549b2fff6a2515fb12399fdec7b46aa193e857cd613ca66b7f1ffb56d32eed861a0886ffb4f792257c8745089f11ef05730c0d7546151cf2645c4867ec3fe76f11fff088f1e8dc3fddd3f9d6b4fe7a4c734ebaf006ad663d758e3ecc0ebb54ece4538a3718b3855de2bb1e11bbc88fd9b9fb9cf3fae83dbc39d42efc96db8f0fd80cb14b3f8435f2837eb2e1a437013f61bfc16cfd66f3b491c4ec243a4827d131ae93e8209d448c2ec2a779b361c3475edcf744a20ed2495cfe91417dd00b22b986ec8e781193ebb3710612792a1f92e7cc12cb64fc84f868621f5fda9758f4347ea328efdce9e4e9cfcddfca3e2729c2ce91e43bd2e548529edc3fd19c2365efeecbec43d99b649f8f5f32b51ba6b814ff88995ada0d535cc71828bef53053ec9261fa307b68b1f55bdd89f9835db2a71976991f338d17de5e1e36818833acb67559966519dc9e1077fa9049499f5a1ee9fd94b3e7c41bdc7cc7d928b5999a4f889be457d1f220b6239342a09b32e839421cdb01e5046aceb4c67640783af9f7ff4e275888477a7ff219de44282f1f0ee18da26093135cf2f541cfa424fbeb23de6ce496e7849872874f67f7d184786f82374a658637d1b3912374527c4b4339d6c8d1f9a87e7cd05fedf30df3367aaf077a90e63438428056c2f1debd7e14086e79f109c94bf40a7c89788897632cf19014168d1542081dca7f10c20abffa9b9fa7d6d4b6fb02a47ec9fa1fee724d6c4e3bdf5af57e8b63d7f5b3821ec31b8e6c610cf37bfdc430bc59fcc32bbe7169377ac5a2136ffd39a601a3315624b3a55f2f40ae1a86fdd8af48e8b7f8875a8b3d6afff39866b18ffd60d761b735494d5293fcc0be83f405488cf1b96cf3e18cf1e994cfbafebaaeebbafe87bbd477945efff95cf4f32d8ed5ff50a7d794f1dddfc76254468a514b3f4fdf3da7ffbe6a56a7572e8be22db7633d9d007a01122d4fb42bbb6377ac8ed5c95e80788ccdb13cd985d9db9cccaeb2bad91d0f02f220d96f2f06a4dd6afd16eb7673cdead7faf5afc8ea581dab6375ac0ead819be035e9c75a6be14773d9ac8f6f8465ede7d3f361b4d6dc60ce7ec391e98d1bbd62df7efa3f9f8ffd1fee82bdcb2d2f08643f26dfe217c34afee20d47b619de2efce01f9e4df036827bf106c23ffc73a375c8878f69377ac5d64be2cd9af186bd2d71747ab201b23ba263854e14b96390dd912e667205dd77f07d8abbafb5be009911c6d75cb6ebe505e177c7497f4678d1b7e186292eeb2336bf56d00b905a27c8c1acd68710041f54718b67da6604e82106e1cbfa0d5ef930565006c270bb5976f1368297fbee36a87efc8f5673c5db65e1d0cf9aa457423e6a19b1b5013f3dd3e00e63f418a365c92735cd659ff3bd9496252d4b5a25f92d4fba945287fce72ffe86233bc556fe452195212b9350a1bf0099724e9f2f4066ceb43ceeee6e57734ebbe373ce49af9ffedc7a1beadf9ba1adf5ebad587d5babb5b6665f4ddef1764ddc430faa260ffa10c841ef6f5202c2db07fb1065eba563983aedc24174ac0ec8ad2a728640f9f5693959
e440d1854cbe91dd919c9df75e80c86749fa6f88972dcce325d3aff005886559efa1e612df6537279c0f2b1ce2e57eebdd3bcbb22cfa967d5bdffa0d8435b683cbbc4013f49f0fe8f3f9dc0799643f3f3e9a78b3397be7256f52925d10761a86ed5631b66a9290cddc825165873507be00711509ac61d49a33ff4a3d927f7ea3d67bea9d3ae691fc3db5596b314895fd3be69d4adee95fbcd97cb30c6f305fbcd10f76146f3eac0d9fbf66fdcff556b5eaf560929aa426a94954d76710e770b8afce392c5f80b887f04beee1cb52731922b706b11e325a38b24bb9c9bfbede30c595db455d1eb6e0fbdf681d13bb487ca35722c45bffc3377ac56b432bb4904ab29af0fd0fcfad69e1261720bb235ac8099271c8ee8816b6c8da765f8050f79ebecda1ee654b73190248a6efb2d336cb935ffc1ee2e59dec36987ff8d6d8e1cdeee4f712db4177d077980dd87f641e696eb4fe7b97994fdfc21bbdf35ddb44ee78f3204e9327e867f616deee7debb3772a798a614ed9d32fc1d6671afdab5149b23076ec28867d3833f27c8b370ff2b41ba6b815fb10e579fdf6922f0cdf0a92dfc77f6f755a07c52efdb347ff94d8faa8e688b77e88ad4eaf3cecd8e5d1603ea1501ddc7ef8f6d1608d6e074786b97e2cd4023243fcf15846b5e856caf16d376f22ecb7d1e76a20cd3d1fa54ca92ce352eb316dda1aff0a419e7ff37c4ba33e1222c36f4b00194aaab95c3af181163ea0a50c31bc1c3a518a51d619638cf77ae9798286c8f2a1552b2441963bacff0bbb874464f9b0c70565745009b14390db881ccaf25d8b437fe9913e31639c1f35eba136bfad85bfddd37da4777d8cf2add5eaa579ae5a75a3f8a02132ec1133ed5c633b7f9f0b5f4e255a78ab3992805e391291e195a19c33461931842e786a61741ebf9d3644d31ba7354304b370e1374e86183a6aaf774319483927a68fd259138a216a6f10208cbd31768fdbc9d0ad32cc2ec7fbc6dae6c9b05719764e868df36edee97a88bda67b24193cf59ef5a8a7def4d4939eea9bbc89bc86020f75e1ce79a4f79918a25ed91bea1e983a12d33d102622414ae8a20ba566a594524a193fcf87934229e5fb4b4c44cc63bc8610367648b868aa490d2e7c87ed100048139d1842860f69244c4c4d25a2139046c20820470c6dee910d0e21e5042ea82095e14503052c4a1813244d866f82850b5dc64859033485b455e8e81076a44080534eecea8e28a33b9190a8867d08c6140f0d1a8d48c4384d72102f95116cd03246ca1a20319c7b4ed08b678b118918a7099108634b4185149126c3cb0bad637e7c491d8bf5f12dc772f9b066b8952aebca9c4678bee494d9f2613f602f84c1c767b90902b160b454a33e7a193e25a8e47934197e853bb23b4680490dee4976081f148fc81bf284bc2019be23327c37c34fcfc67bf0fbcdc7a391677836f6d98844d6b3a1cfa694b767935f4fde6e0f4a96f8c598d8e005a1f6d1b8108a2c411f8de4913313c2890548ad77f7c0adf5d10b6577d85ea1773bf8714e05be8f10a6945e67b9d3885e6aaf732050de46314b1c9a4708c590bdee5ed450c8bda3fc76092784f4e3a4787e8d76ca372184aa88edf091ab62832fff613d608cd1053e27e48c6cd4c8f0ca319265c87189727c8ecf15463558f10511e028e75081050d7cd003237c09ff49fcf03be38c2e027ffbbcbd254496d562968559bcc5bfbede89f9007dc8408e97062d6a516a519cd2c37fbeffc444c42c31c410a653f65e45b890666b6d28844365001902e024143ac2de9048243a4182c8de508e12224f5822c71842809efb9b7e94143684109a71e17593bc618a0b7f8b0fdfd280305ffec47ac48ff05da614420a217689f8d227684854a3ddfae0b4f081163eb0aac671395a6c4e0c6906a7852c7f73629424c1e55c5af8400b97d3383238026930bfe913dd3ae10aeebcf70af151463927fe110570c61967c858b2fa1d893762071dcb7c13c6f3c4cea9cc774f35e746f2496adf8c11ff78594a87c99721862b78efed8eeb1f271c85441e1fca21608e3b426e0a49fbd7b1c4173dd2836f43f68644ef6443867c64a3e6c2f772c4d0a9c00c4d90a373a309276e79f1e167187f402d8f4915ee26a99034315208928a1a202c406a254dab7066a8a4c9d0f2f81835f441907f236427d247c38511d7283dd2a313e3f1b2c454648284126c9a21dee0cb8fa5f811f301f38104db95e1d769a126f2d1cb2e105f1a12d508e1d084155f84a0c8de188223910a46ef343289e28620b5220368e1032d44ce4605b0527aaf8fec2b20435286ef9c766b70a71f448e45ea90e1d31b84dafbc3fb22de13408e02c89bc38938d1c2ae674a8c00a7f29e2e71fbb7507e2ad45e1db2982a9e803a64d8a92538c801c1dff9c7e8ef76c0777fdd32f424b59920b9f05b1ef4ba5d57c48778f371d96408670a2e84d71459fe765d91df3bc6a394e3470a117484d1c765e383eb620
948d0941e1fb86cb2cb49eadd207e7f48d41d23debebbbb5bbe778762876a3c6884f531542324aa61694088efe8777c1b6e88e2c689ed80315a0cb18cc86da939378a3047ec8038863b6ab843e9b109f910fa2846e978f318638c43189123d486e81c9f5b1f3df83ee20d46cda6e1c687edff7b7a7df4dee2e2f6b73c77184229f10fe88303ce38e38c1c537a7cc444c00c614eb25364a344f2e6c3fa884ddf82c247d029b4a0c833fec3ae57e443cd9ac954052d1ff40aa4224807719dc4911c72c821871c72c8c1539ef21857c2e990993a64e8741d321f6b85ff9e0e21b975a4a08e19c0d9be044fb91220faa876fd7b957ebe3a8c87763dad7eb41d5cfebc7d874900c2ffe0cfc3ffe0ad7e7ffce0141e5cfefc0e2e7f70caf52e9f977fe11ba470537a7c5efe07531fbde4166f4cbbbee5f5d7976c694e7ba9ed486c6282a4626a4ecdb16c3ac6e6fa2769204d5291b89a443543243f6b434369725093d42439b0992e7b507d168103879450420971c0f853eb09dce895c63610e0da6befe113d75f7423f49eb84eed2d4d519d8dedb51037f543e8b008bbe3a369777c3463778570739cfc5ef44ef6e53bd19a477a0f49106f3e44d535f951eb15ffa849bcb5cdc9f0bb35a8d32bf0bde3224d939f0827bfef9a771239e9bda826bf8744ae5f3f0109010c0c0c0c0a8947599dc601087efdd288ed85c8f58714faa81d7c38964b2e6c84cc39244f21746207979835cea6c1dd7a662a11adda1ddf61755a9dd71e6353c58d4d1537ce660938a75dcd19ccd814a989c2e64689db3d8bd444616375ecce9c333451cc4051c4ead89ce8315348de7ac666d2396d329ddf3333a219ab63d353e1d6411adffc288c41d71d99bbb54ceabad08f755ab6c68e767eb528ae495ce5447cc6554e64c6c118039761ac3b42aefc7695cfa46e0f51351137d17fdf330dc5f53549afd09a43b737a4659a484d42b1cc11365814bf6d042fdb6d99777a16d3f854961797742c6d7d6deca033d1b671844ddcedf3c35c6ecd650883eb627dccdb67eb5bfcdd342b6663bf38cce286d9c22d2f8634518d90a8869bdcc1c8048c11cae0cadf42cfd68030b83d3fd67f5fc291a3f376a357f0f672240b47e4edf34362467e6f0301377aa5df6115c34b833e821bfde111bb579bcae414b1a2c84d912a8ad81489a2480df65bd7e40df471c27f26d2fa7aff7a99234f13bccda7ef54f2aec4650efbb0b966782b5d17842f591dde4a1b76e10dc6ecd8c5fafad1c22eb28df48ab58c748d1128a2357cf43e779c6f79c8facef0ad88b71f44c0ec521f521c3e7a5f63a4c6488d911a2335468acc1cd996ac112335bdca5132e28a382335ce488d2be28cd4f4ca883352b5928f4a1987cea51bed9a36e272409e7d515fbc8155bcac437ea51cf9611ceedbbec41b8e587fcaf710ce874138219cf01dd6f42de0288a811881728fd4518b323e8298d2f7cca8cec63653ef7409712e93a1c1e048fe2da473d029d82a54c97b25a4310a38eafa5453e1546cbac6a994dea98f34696b001468b27f636d6b95eb30ba011703b6131d624aa8eb371af941fbfdf49d1ae5be5697dd54f25103707418a6776a55e9f3c95573548ca1a8fa0fc288dcd431ed827681333917e3a63e42aa5310e6912a8ca9c9fe25575cedf3a70820417a8a1842a461401df0e12377c16a1ee1a65207549be3b9416040be27b2f7bd0ea293d822889c08f8c8bb9b871d2fb6e04e1837f160596865292d9beb65a1d65d702aed8526f99b81a6c79879a994908f95ebc3ecd3446de3581c646bdac6b150e14edd0547f2f716e309d9bb078707c5e26e8da491e4f48e9be677c88a3b3fbe7c6a599645572ba922d22937f5aa738043f6f7921bf9c34e657f9b84bbb5cab1c0fe2bbb235e704176a7fafd16b6a3a9897d9a95210cd9a779aec18abb4d18647f113886ed90b445b083cb3cb8de028eba30102350d7b7aa55dd4e2555df6ab5897a854ba5ec46efd437af57efd4336d937d5501475db8558fe46f85bbd1ecee4fe9677bc933451d01211b2bee169a22fbd71a77cfb9e798c331d53a2cc7f2c9ee4353b897ca4dce24c7b11c206688901c85b809beca47fe7789fbf21665fa3bca449938c4472d1365a24c94792f64f39e50b3e82db27bcfc29164f76f0bcf622707463929a4f4795339a925ad73d4baaa85d63e6f4bad7357b518ce077e3ecffb637359e7acc31ca8649df621f28122bb07cd10c9eeafb67991ec3ea329812525cfbbc47bc830a3870c3fc261786eaab0e2a68a2c706c5a46480f699916d2443ae53b6164f76fb543068f103872e880c35551aaedfe1e845142299fb76c7f0fc218e584733eef09a39473526a5df0ba9ef735a9655d57ad168318f6bcb1ab5a8b619f0fe8c27b9ff7b5fdb10d02d9bed77696d92e31812626cfdba4c43a173a39b10e05c53a1a2228123d6f51564bac0934b1ad8568c8f609b6c3896d0d05dba1f37534200d1acf
9b06fda4f4c88e4fe827a507cdfd23c88ee323f790c6a97827c089ea6c6c2834d9bf3aa9da0260aa60b891bf0dced4d25f836389df9f3953ff752caff356572bc712ff0c65c0820539d0422a0aae0a4fe83c61092e6a8e60813f988a37bc5442c6c4a7f1a0bec48d4a01e27afb160331810ceafa1794acfe75d29475bd77613b206ca23e1961c45206ca3c99d6f1f2fd8cab177162166ea275655914d7157cb61491b8cfbd4c3c638c3db10b575cc9b48e58865ba15e883252f5282a14226e9a9b2823b9719315386e82ef5fb370d3db6286d2296aa1c9fd35b93ddae4fe08df0c261f8c114af824b4537c648c54a24930bde294e8936c128430ad43cac83e4530c05c188f4f96320e0b99ec5acb09183785c7ccfd56c6b2e788c43d01e3d66c5b1ccb27ef30ca8debca47fe99142ac702dfffc2712c4e385349e558a4090737f2cf52d71515874aa4e4a6ea84bbae902aa9722a54c8db6d420859049bb3a92ba71280dcf8ca38157f2d752d9553f1c6178e5389c90fc61885806ccdac7b2747b95f42f6df6c64d7a26557e24e75e548fe29e7590f49c0f8c8652bbff7394b499a777a91a2c80b6a6e7450d3849a7782400c8a8084174081b38317a05e92c08425546348e9e0c00b94bf9c79a720b8b082119832c8388204e5ff308984bb49551108e59b2d7ab29f567b54ad43e25a866b7c86ecbfc927b2579efc7eaa2b47a13e39b2e046e126f9fd9b549986c8dbe226309c29c58dfc453e527db2bf57e980bde2bfb92ab2ff28aee8fbed23613cc599a40adc4b1520c91b68128c9b2ee04631ef74611e1ec9ff9b67fb782153ec1e2986089ae573c04f3e3c5309e12689332156865062d1e2f1a1104ec53f6a1a702afe2760dc1af2566b08c3edc9eec816b2c89da5cc2665e410377512dce48e7c1124fbd73050f52577ff0e9d1bbfe4949b3a937927d0fb67aa77c2deff83f1187dbe6af641daf518eaa339546b3cb8fe5d5fafebfb3e293d5e96df59e22d080c67338fe4ffb44c8896c93c927fd532d523f95b2da379a403e4c659ca47fe252cb8f5af7fb5c647fe488d6b143eba3e4a3922f7a73cccc309671892e5d7959f5c1a6cfc2b8f1936c64d31fb5b2148b267a921ae3aa2ae8e9836303030302820b8235b9841de4479278cbcd920d9f36653d9f36665b2ab7a081286d6430b70b41e5a90858f5cce64dcb495dcb44519216e6a24643f4296ca1e85f71084c7472e4359e3a617e3f5fe39dea9c619f94a9e8c30c207e5833626ae753c5c8aab0806195ac1ed971fe37b73be52eb9031bdb27aa72c15c1d0a76c06a51f9c8a3fd4b25369712afe32a67550c0adb84635c9246fce264bc29db1aedec9a11eb5ac988a4288e47e3993fb670d59916371d93dfc4dcbeeb7928ef79b02b2fb0d87ec3eae5ac7c332a65736cf5b5c654fe1ec66d024ff6d94ad7cd92069ac6544e2c218fb9d8f640d56dc0d45083531a9776ac966d0271e4df28fc9fed18c2f647f8bf1f8e4c6350ca7e2150d3cba9291fdbd495b05433d23d79553f1cfc8b8efe15657d9bfd21471534ce57e37b31e92c0f191cb306f5295fda52a0d79e21aa2704db273021878724c6951c88baadc5fdd65d129237cdeae7eb6971c659c4a3f0a0df508b4a0baccbd01da2a01a525a01404f7522628bd7713658733f04f2faadf9f7331f680d9c5bdc02e41778a7b9753783815cf9dafdbf1f0f5fcf2ba04ef14e53b94d49c239520bf7f0e15b546396c02f4792731320174791e0361ebe877efade3b995edbdbb0f27c8efa1b6794fc0b3cbf39807ddf5d1730f9d730e4324207408fda10b8c30420891581bfa3df8e07b35de7bf0bb0b08b31e92b8f191cb5b7f7c18dffa08867c04af8fe06633ace12691bda108cde0fe2fb9825c3b167ff7d577f080f8e617d84438bcdd0c9fdca49492fa2862617bff1ef4913fc07af9d49de8cfa9b90bca2e6d69ee915cfddaf25bc2129be87d06dc627f7c8f98fac8b31f58ec4cf1b77e6a8391ebd20fdd0f98fbbb0b5894309e26be80248c2a3cc163047bdb3d3463052778c9117086406aa3186c44264aa8817404a185500b48ad3383c7ded00ac280852ce6dc66be7e93796e8e05fbb9bd3cefcf6fc702faf9ee583e3fe79c325ecd3995f820ad9d4afc8fe64e253ea641a712df6acea9d4f8577c2bd22232a453c629e78c51461863453f2ae5f821270b2ea5a11f5dbc8d4aa09be383b71c98fd1fde29844d44a2128c9d736e9a6fcda71214bfb98ce3ac261677e6b8c97adfc2e4fd1de79d6a787f47e2a692f7bfb01da1905583e628ba7aa7b9a220ba2a31c9e6cadab9b27365e7cae51a4a6a10bd89fd8dae4a6aa8415475eed62a987caddf1c153b2cfc5034873ad11c2aa3a1916026063da8d65cbeaad576fd316d731b94b780a3507e94fd044573a8c6e243cd879b28293f9ca0a0a09c340a0a0662040a05a2a09ca0a0a09cac5622fa62e22124b58272e8506b84b27bde107e527ac0d
c0f64a2c58d5289bbd9ec62afb877bfce592cdabbdb865670fd9da891b4b76bef88c4755ce994b15ed80ebb034629e7a4d4bd3572d705aa193a956e69775c41b89bcb22bbaf367f43eeab16eeaba3cef27e9fed25ef0851f707dddddddf4790658286eb78b357b5d8a7e7677bc9d44720ea993a956959d28af059de36a5079687e8331cc73e01030d1e1a61a0f45e91e87f445331227153bf7b0d4e769becef4f813c2b7001072820266f84c73841060a08f1b75be29feeacc87d1f7039b93eed55c051278f124209a1a03c0d1494a7f1212d0508d1875e14a281e205745da77df4e6517aaf48f43f1acdaf2764dced77c47fb7bd9968711fa4d6c2f4e6c6b1dcec4f23e5589c8dfd1637d1528030f913937f419568294094fc494948eb9e13cd61425a0a10281f7a14dc3d29f4438ff2275a0a1034fee469603fc129f451b0ab661e89620a38eae44fb087b0cbd01b6c0b375118fb14c74d2218a2cfc8d7ffa0f9be3fbd79a793f7a759b809f4fe16b623cb4e3447bd9977a294be193a53af26821fd7722befe94c7dd1bd95ce5cb8c55d3ed1202af4572382e6ebeb6f6f26fb5f20cdb6f39393d0d6325bf764ec5bc6b1d0f72d07cba958260e656935bf8889600797af37790b38cae4b196b999b664be49f4e1a8eb4af9a1c4a1ae872626dfe24e83d49a681065a201a1020e55f2d5ad89e6a867f29d7960828118812ac1ee911ea637f4e63d9ef9729dc60d6cc8c16a14b506be5aa5a8358daa9a7da8f1a0d6dcc89dabab513e386b4d956262515f37d71460cb6bd5158978ad5a47a9576090e85395a22e71615cc52ec41e29224ff442bc91fde34dc90e925be3589c9b1ea3c738bba514326fae295c53e88a445e5d37ef5461dcb59af3b21718d7ea9ac275536b4a6c80712c4e092ee23021c72b5871f34e14c8b942e602277ba9b54f2ed929c9c295bf95e6a3f4515a6bbae710156582d62830cda1a466651bb4e0ef22cf98e6504b5cf8be519776c411483c0a7b244aa8eb3fffd16a136e01fe52ab2a279c8a3fa63987c3566fe09c02b68a83ec1f803ed599579fe81101f20eb2ff85619aa3ac9602847dec310ca4012950020ccafe0baa5aed5aa560f85aa5f4dbc7be6a29405c3c38a51fc3ee91647e48697ce10af36a4c8917b7c2d494e85cf8dbb5cafe2556b85bada935efb79516001137cfdf4a727f89863b5d3726ce64e9987f842339085011c4ddb879a74829be6ee2cd75136f6eb2ab80435d57adb9695cf34e150606a68ae0c7d5b1b2e590fd2bcc16303031d973386a6a44d04cffd2527ea8ff82320205536b72f8c8dfd21e4d01e2a1aeefcc838a4b8fe41f6d62156e92b5a666ce276b6a6a4d29889a77ca51aa29d5e0d49ad2bd89c1bd71d3566bb2156ef3166fb27f8dec957e1b72a81b6008d551e13d67e5769ffc2a84300747724705280718778b32b945ef7994b2bef7e614891c4ea4427536a2840f621bbd626394001bad43875ef1bf09e8138a0e365e9d02c2faac8c283adc0e8571b7e8628c3d2f46291fec8631f6c49e39e183daf7b48e87b51bcc90fdce0b83bbc59eb84594c9c945a2a8361618d5d9d868d8d42954d184fe645964382f3b37cab2b87137b567dae074c4efafe14cf0a50a24cccc8890331a37f2f7d1c89d5ced79b5fedf7c4ca27cb024ca765045ada3a2a4a8544ca4542ad53b65342a26687e00d58c1e189d1524c04304aa199de13c522646c63346d6856c8b47f27756c8321ec7e2f4130240d4323156097850460993d174eda96978375a47b5e9151e6a1afa24816ae350cb7224d05be4d69175915521f3a21ec9fe9de1a4e1ba5e79efb2ccd02bd4b22c7841657c40201044394143e8e4e4a446ca2cc3b52125254533cd2085add42a0e15f58a23158b36a3f6f4f4bca669af26aaf2e66ee24cdedccd0d4ddedc0d01e600329c5526863bd5322c0ab2326c4c094cf6a767b8251ff5d818c00006e05433f454cf507baaab677093e8a14ded90fa9e946dc2a9f87f8a15626bd0247f1b839485b133f6091b93b2327dc6ddc1a9a832caea91fc79284a4f761a1a3034aa6a53a9a84fa857542e60778d13f20ea51cd9bfe6b00e27c3a93e549bc7c3ab363e3853c644ad2295ad3ea3a9c9c450311146364693b22eb891e36c8b5902939921cbc9feb48c27e3c9fe4d5170dfa9f8dbf0e0fabdeeeeee0763377c8ee62012e910d92086d2239552a5179bc81d9dc84d8425ca073355b60388bf57b21dbc2c6322b5039566639c4a194ec55be056aa19ac0bb2ff8679f4645738951d190e19b7e4a38c86871eee0ac785eb22fbbbe86037ec07e183df9a67184bef6534aa999a6ce98043299b41967224ff52b519e3369e62dc0d65d54343e3583ad7329cb328b02d703606d93f01756565a5c64dfe501489a20a5771900bc061a5d6a3875ef1875a047cf48393dcc15cd72b3770c00107ff1c2c7676cec142ad52c1c33bd523b50ad9ffa6ba9b6ac373339ce75c
810b9178335ba9c35de821ce480260db99e7a95eb285acac2b9fcdb0df38e509572f79ad26f6f1406ed52897a10b380514162a3758c534d1014d626b6601f62a09e05a465babf67cc40f40a53eb129beb96a6c7425a63c123e3039100803bb4311e26fa10c57ffe393ec73521b900c71901990101b1e020d11d164459aa50b37bac294e420f6e8c10481032093554f9d886a839deaaf152aeb04b39588a1c8557ac0d24e2231fd7714426af7d914cdcc853a863c2fad4dae5ad4cca5dde7ef6bd0d295563c6561b75ab92deaa1e854be72085a8e83489fd8970d615293fdb10804c76ae2aef8cf1838f179cece0bb4cec7be6463791ec176864d040e3da5bee5a809e88c2a5bc20222a80f3b9d621f0ec4eb612f749a90416bbcb383e3992588cb052733345c07ea046c41beea34e5dc01aa2600b34bb080616926deef79209df0e8b25292673d3cf381e338d8cf13f05620f64015df347669df53b6a9691fd65f156f1148e7fedb63a9d0052d8d354c62fd755f078aed8e9bef45a7dfe5d398544486aeca5fb9a2b9dd142b7f95e9529b1e5983cc708c8e31ffefcd7a968523fc5c1d48d6409fc8821d3e032e190a133c1be3843df4e33afe8580c2161c578f708dc7900677bfecfaa943e1a82b53d3e8c71ddf71603eeb7da882a0aca5f12376dc589e23b746bf7400ccb8ab64ac2818bf9d7e79bb686775d0cbf65d5e9f21c4a9855c0760329cde4865b69b4c654a5646595a9f5e05ec7a43ec48c08812e1f448d02a9d210f9d280794801689dc118f5413e22f9d5d453835f8d4c23a932534ba99accf6d1dc8fcc85d3ef77fe872548ebff46416bddf47c58ee9067e38a125696de21b2d01be1b71711c4a2b99db9ab380e1c6009927b499fb9b217018edf70bc292943074b80d1324fd7bdbee6d9e50572a531adc9db9848efb65e195bc9e6434daa24d0097948ab7682d931027663114240f53e620f08707d4ef6ae016cd125441e1963ceed056ea9e851082a159ee8cd59b8bc89c594e778821c6554f6a87269c5eaa99620b9788b075c6acbaa2d6811676157945d97decfa8eecc83da50555f7d3ce9de12de30a1b6089fb6de334ab80e0b19e8874f709b55cf8a709c1c6c2e8a8e0dab753d3ba978279daa90700bbf54e394230a4ae4e985f263b0549c7985d018ec00c02a6af81b2b8cabb35fe484f89174b8f2c482cb10f4308ae4ea354e3512cdd9c70115c77b1016f1422158ab9cfdcce479e11f476d28cc9c2653fa1d73d8e18da3e0b04685a9d4ca015eeb2ceab0482ed57bc2a99d1202fa8e819d5f31db5be2c0777514ff982cd663264d0ed7e36cad0ac7305c3b5b906f1e68a31096306e6842936079857e4725469a34aa78789f69d0ef88c5432c6a9c9065a352429d1976108aff0b6463c8c8a85e088aeab34192ba9bae12324e7373a080c86ed7278cea0ac20662b8f3622e40245b9e8138660b4cc90c23e9e9ef27d3c44fefc60e1197092e0e2ab1196cf2d6d768ee4a373ba7c36b71d9dc7f19bb5d19476f37e813e647f16d0d9020e554b65675368d993bccdcc985df1f015b051a27de89e30b744fc13b8142e0ed8987f077d9d031b92e9847fa3e2f375f81cf3e54981b3fc60996b0ba4888e970109fe6c5a8b9bb67548e649349dc46840a5a13574ec08d8cf3b918cf2b2bd0f266ad7680da98e7422520f2d7d420bbfccbd06095886504aecedeed385ddd265ece151c4c110746872d8a7669b5e926f0f4b347331c60ba136c71ca9061b26599194bc51fe2870862c088ed43421d95195c673eb6fea3e5f0cbb7b99cf85e0e124ebd0f12ba531114ac693c0ece818447374a97dccf0c4def7cbcb5b30f751a0343136c41001c9f5770764b10c13a4d7953478f19fccb7bf79d665d60413bb44cb846f7ac19d51597f019d0bce1c641df4331e32956bb3da477028da0cabc35737fc9a7e15c056ece088841013a13771c0fe6b3d2cd0461c22724a04f9e3689436cfff06c1fec3c8a0d769b6ea65cf4327766fa742c9dc862ec8ec7a853d48e6a5739bf9591ba14f137be47d1e3ec43fc127224856d386ed5eb3fac47c845029da1b0b2ef7ec0ca36063d998da3760a8ac2342ec4a44b223055fdd6f93b0c52645afafa0e924089342408c4c05cad469fcb4ccd39f975a74f2540bcb746cbc9cab000b590da84cf879c87b8444fe50add3bd46b77774d949d055dd23cff55d6bcdb55c0f5b9eb5651feab019100eece720e15b09108f4403a27604d63047609e46e7dfc9bb26fe6c3f0cba38e048fcdd255c5102b9bfe19d54a6e2fd050ca9d50b4bcb7ffef5871690da0838baaa93cbca7963318f1529a54d7b149b3235707480aaec154aff57a20b9be817136d78f41782a2d3d514a3ae732dcf381c1b84003a9e2ef6347d854dff4142c7c3afd545cf57442c1a7c314c67cf0aa46a4cc765a8d8408ffe4f5e75876258b52e6d564d1481a7ebc031e902adf539d0b4eab1240086065a9
d3d058e505b53d8f0260b0edc6bf62ed73fbf2c404f4daf119cee635a1883c7956609af556812ee75c372e53efc836f5f0c7c361ccce7020ac7219c68b58bd6412f70fd6e23ce95119530a8c114e3afd67dda7df57e4dc725bffaa2f75b41c2cc0d49aeb70f347ee76387412a67749940baa2d21a882f2e957e181231a1d1b76a2e2310e6a29bd6d34e9d102e84229c15b8f9e56207608cdb16ee9a94fbbc06103b02b6bb4e7299e48682276dbffd759ceaafad56a4a3b76f500a30313116eb1162939d7e1626ae38175aee74162e4ce57e4e55b89d7a910a9f819a52e407087ed9d6b7867ca2cfe5727847cc1c566962eb3b06548bf9ed7732ae74f49ba106396740baaf7e87cb84c5a600c100f77da17054681178e1777c14904d1c850dda2b146e880baa57abead63263d7f63261eb11311ad287f5e37a57ede313aa82f97236f942ddb3821b9ca0b7353ec3359b0bb3613f397395d7062897c34e030a336dbf1a75787ee906915d093fcf846c6de784c81a9b9dc9426249bec062683bcdf039612d928f266d01354cc4830c5a44a96e101e7401ef8944de86a3355dc8ff4ded5b66773c200803a94a25eb34c43872088b33c3b762431727ea3d4fd370a8f7442badfcf0e66760e9bcc2354056b5493bb15bc16cbc174dab919089d53f1785cc20d5b0b9963e14caec08d384609eef91172508da6d5338d005e095155296c5981ce746ebce9ed0731f77899589b98461426c5504feaac908b2f890ba36f325c0b571b547dda63c1e81c9f0310d62432f4d376abff10e4c2caa7ab795ebe5a3add97a8cee61f8ac91eb1dcd1af4ec384732409f32e8344c0f33e87a9019755a9bf035b7e7b3f5e58c5efc9829c384e374a7e504f32017107c1c6ee4e65ddacb742e4368a45b04dc31ced1900825d0af7848f5bb9dcbf5739fbd8c7a493d0d3c4068c18711b12981f6592064be2cb259ac5b54c9afb1353bd901c169d15d8d240a5af7a1248691d888444d58c6a48f6b4e626a55165cfbe147cc1a0d5dbf4525736e8e4eedd576ccfcbc6ea1f44f63f58046695cbb2b8b4d1e64bd702e816fdf18b91b58bba5657789d5e074649d99365c13f9039d8bb630b190eb2e3e934d01417656ab77ab077465b6a28871f73004cd26924ca954367e0d369faf99424e7299a76737875bf617011f0070420dde699b38c34cb656a57bb980d7f1e7fa378bcaaf1c2c4ec30848b389a6dfbd0ed99b17ff5ddf4c9679471855945181f26b922617555a2a2a7c2e375652e39a7f77bba78525429327401ca0fba5c7aac941ce677a0ca759547c6b606afc5bf7de7cba66c61d1212e78b0466ed0727fcfc0cb74fbcaf6442c4e84de6e605e5b39ec8fb64ce0dcd15fa67b97aecb6a22bf154f168cfb59ea6ee26b0da9ba6005ba23e3fd434336ac6609ee6b27460704c8b3f2ca3d4b0d76d2d02ac77818b0ad9379225da7838102dfee53005dbfd720e4953062eaf44f5ea541f32eb858861c227a832966beab2b457477a83cb5e1ae57d604177a5ff46579d67705c1a8eb6ce47ab42ba074480e42a155960c759b789a9316a7f948142aa092156ff4109baa23b62eaa2bdcbabd7456480d379fb3f2ffe73e0bf0592fe3823303dcf3e22f26af52489fa0755c58fb8b81fc4480d28bab747afa0c41e1f28f8637e84c477926b615689f0be84d17a26772feaad17322bb356e905dde165dd7bf636de18472916c013f0c41d03e3fb83475af4f75ff805f3666f7c4b5059f19cfde5b470262aee344ed42d88306b612bd57826dece576d068fbb3fdfa0ea302879d4cdef37b32683e65644ae7931ec44c36d7c5277a09340dcdb3095dbabb7e3d52170d0a8da6051fbb2e2b3b0c3ba1fe8b44b99855982642f0fcf7360be4e149e90ac83670131809cfa465194323be843fcfa58ade59553c9572d9429ecf0e52983d1c2da8939121fb1f696f356567553214adbec86ca6193a0626788694c6d11655664518930cf69afbf3ae9760380c4f446add21a10a2a9fb6461524653d8aade0860946b6935c993a46b4710da8dfee5b2ed51a71b1a47fc8271d9f7c12299c7aa3379f9a0276adffa43065af4d5f0e69cff337a6beb45315db71cb1942a22f9f2a8a3eb509382663f82ee02a0f21c171596e0375abf7724ad22062611d0126d8dc128fed5b6a0de068f216b5e12ef4b868002d0cbf19826362905584bd9b8d49e88519e6759fdb1bcca6a2e00d2f263b0acbd3a1eff4613bb8b480e31911e0dfea513e73bb70389222182f49f984651188a002ae8bb2ce0adeb89b0b25e3d9162a1aef1bab19227042609d3ce98ccfa693d3da17bb8b05ebdeb79d8e1f774231e2e05b4f6eb4595fcb68527fa0ec9e7bda28012c58cb5a737e2759390f9a72adf6ec903bd43f3f05c5540a9625cd5ab977a1f2eb4474f7a0f7fabeb8178b773c04efb30c4b06387ba075a9147af9579c606e1c1e681681a300f3ef6b671a42d890a7a8269ab325375
23354fdcacc36fe7b6b6b7a7551d613d7839f5f31fd4cd9d836b989a20d558d22bb36f35adc78a47fd9cdcb5e520b7d2f7c33bf49acfb4a9e1a42644e74523ebfb70c74157780df24324272a3250e91a726d6ce4ba8f1ab9f6d10850f3e86ff10388b9303a2ab4fb724b8dcdc722fa909e6be5608aead85778ea8ec7956a36364e6cbe321197c14112c0589a3901600e72baa6ab7a08baf3edf9492fbda71e29ac603c9cd214abe2eab342841b23f0a8721affaf9aff2b1f58f1df28b2d73782d2702284aebc8dee3a88264196df3040a996a6c010cba141fda028011bb469425d3fafa10f60c0b2ac264d05605b0edde106cd5ccb639cc47b3ff703684aff11b872d414affd0a85a1120b31c59130c578a3809deb15aafb64bd10c010e976d276e8a8e37e3ff2c656b46a1fbda8cebf5af300fd27612d4f53e2594aa16ec5d887333d81ba1b8d45b9c6cdfd827bf477ec62df935f0174a863a1cd2f5ccec30e28f5cdb1f9c9548f18592de0732f00b7d60b78802e5b0c053483deee745dc6b8e186ca1091360b6cf96d3bd817879950f0aef7f280bb2ebc57f9b4d0e0c494a7292b5ad27f1074944d6c9e2ae44c303a3b8f41b51af4372fb6c357032488e9cecb06efe9b70b00b31235d331d63eda942b9400dba12965ecb93821281aaf35e90541dd3e411db005c5a106aa8ce1745a022e5bf5733b598dc2f4c7e5117bf1126e8d2d5fd8b938f86e8898ea3089b5020dcdc0b638677de5f9504908dcdde7f6795492634356ac599c9e7142e32c1893a4ab3a00f3f6f2cab88fb85b6aa02ee4dc7548914e1768a5801b6c9400ccf469069c2f5550d08dc7c8dd4e9e509af70d6577942f68f9e86c10744657f034fd07e7a929a4a9809007d5566afcf11122f506539ee4d0822c6705c328a48ccb16adf4c8531044231d8c3dd1c4bcc50e20a064af4efc709690aef1e570d363cde195c12d36101cae724667e20b3c3570b03d91134b090e083129b9815455d8e4046f39b1481dd290e5084feb7a4ed5499fe7af46e196b2a883dcca20c43c8119b8588ff223ec1c3e99d6667583cf09da9959830bdbd23a6149d0028606bd17b4ba6718502bb49b5e419ae42c2af83774b9cab987c9caf252734b14a4d51e1356903e0727a8abe67b9b9aca92bc10e1712926fa0da96b0aede2f1fa22eda345505657e669492a4ae8872c0d15712db39fb6f1b21538aae45b163c87de1377cf4f815325ff66c92bb4529ca2b9f0db3ef717753fed43645a39cf1205b8df38445742a45111d3b2fbdf16215b4a799628ce7d717a882f49af63afe69b19685a8dabed85aad82d18f4292ec0b98c220b8d380dc84b93d539b6aa022345011e171a44170543d2c4a4a030c5215d84cc966e2016321f56406cc0fc7363323160ca5277c04465706f784ebc9a78730a6cc9823a9b6cacf4accbe1c11dce0c871abeb45cb61aaeea44a5dbe68d93553fa03cac0353f59e7076d5458e2f711ec59c8108b2981129ca56aa210e5eba45bfe622f5bc26d7a99df324588563708b8c19b3769eeb1a98513a0aaa7fc132193753e8b759dcd32dda8ca1fe2464b7f85f077fc2724678b8403abfe33c1334be51430c403465596d02e989545a4f631e41a982ca0bd4edb3015fb2a517384bd96aa4836d5681ac8c0afe75a43b40997249078213a538ee214b748742255b47793f3041b8e28555b2b134b86f2ec8a8e9dab3cffaecf50e2feb1cc259467ba7238f6ca69b9fae9864042525da858010f3edcd86f9833d855245455ddd8965f1be4f41b71ce2f0ad5e678592589ce65c76df27f63daa62700379c7514b4553bf04b425ff50fba150cdc07cc7afdc4d8132f154bf20e83456c2f6ebc1295e3bc83e3326092fd945051a0863f8c97352d9cb6bb7c788bbae282cd849b703609e1fcbaa6cf7b05bdec504128092b1ac13064001664b12eac896212230e7efea7dd1b15770d911c90fd5bc98ec23b8db28b8a1e325d5dddc8e0cf9b4e1b3ef2e22378f6df9d1f3f0f16035ecbbd83f929ae75201005762b7febf4973736a8782a3d6e586f44c875260932ee65fd3480a1e04f13b01dd419e9da5635185d1529337d58bc97f0540b3082e807612c027b9ef41e1e774bc409943d66abb966621c4535d42619bff11a364d3e6c02013d075b8c2e3ee546204227a3768bafebcbb3d196fb44da9fda64c53d0088167cbb7fdb30dd138afb812d218430f386d8859447c6f6eeb2470139b4326b31d66bd49fbf7ab3d63c5e61bbcacfdf6e03fa26aae699bf9bee06ce7a824eff7c8a80450286abfccdc9e54e000ccd9bb878286f454ca907413937569308689a541718078a2a0f899dcd2da7e69c27a4641c016a082d6ccdee9226aad0974259d207e20fc30553c1b0eb3d31a1ecc9d0afb83c4690d24cbd641b1aca44ec5a836f5f1e0688e57c015a7784730f63660f4eafa832c29d435466828a6f192fb0b94f34bb980e347ad7ec26245e974e875057
c226c7ce7737571f7bb2d46a694f12c555cb7c552f371beb666d8b419832d11c9f8261d9de261639c8affdea3467e360eb01c849d86a35fdc98a4cab963df26d8a55ef476ec4f50452d14ec20640a5e1c5cb67147ca32ed8d6ce627b7fde0afd6137ee601aa4e96ba4be7578183d04b075569bf2bcca4e45babda2aad519aac039fc09e68705950d2361421f6c5ae18d1ed3abdbb566d98b7399fb1988edb137b86d3e5ef4c50c2896a62bd4faa4bd58f0596ce23bcb9cd28d880a9da1805d2172f7994ec6257edf40bb82a110cead3f036514255ec9653f2e4b94d890a830d40f2fd678044bd509a40d8e6cee8daed5f50d8a27d4baf097cb84a122f6840a303b01ae5916d54de53482bd6399a8e4e70057b1546c1d0db534be522cefc32910c0456ff300a6443b9a5e1d9935f5e26f790f5e0a4d33dba63b0595279572b52babe388a2112200efe74139e90ba94695fbdee610556174ad4c34a3ce36688b738fa9d7590043fd519ffafda8d6a03cf7253382035fe43490adb65a27090ca3f0ed02f2d7d9738ab1cd0c2e259a7a3b5a0080cc479560673e3ecb3707e1677dfb8ff0863c61cd6d9181652deedb16c7e2a544f7daec55e48ab1a44755f516df4abf64da053c3ab731e289786af4414b6da340c5c4cafa55a41aa3f3a98c9228c662bcc733e30744cf849e85b5883a89dd4f8f0551f7f8980f3ce323d97a1cde70601b2742db7bccefff20fba31382d37994754a2818f44a4bb2de0d27080f20122669df3cc3cbc90c4d69fe451436c72aa402a3e0358e0670a4a90d087c84b6e5fd030f16c53f3c48a645393e7429a3c18ce9c41e5ae49a8d72c9caf9986b2ba916c79de7bb31d24ce9c0786f1ee19fc2ff0cc208bfba44557d0292c15dd48f9ebb0c5238e9c3f79433fdc24427c8097d788bbe39e4c11e42edc9a4c6069fefabee93a9ebe272c016826b9321f198c22c297c3739dc8a9ea669c2d0f1e295109f159e2211ed0b51d4a1e397bc430f10f76e8cb35e1bd2ca2c0c7d40ffca0883888cac6bde52039569e5cb294e245d449b780a5194b6f2ef1be0f031a1e898de7f8569d5424364a8c243386aa840cf241afd98ffab159a462c27bc3580b68ce1577690e32a2bcb710d2988c09fba816066d21dda80235809a71fd8f7e09514378d28c31ab4372b4cd5eebbfc35fc9438048dcc7a2e5246e5b96d9ea4ee1080f1448b392eeba4b78f87560ba7ef7a45db783f5b7707a27f338e9d4f8bee66c679cf8de87234bcf855a7c96c006d4cf03d41aedd08f73b11014f37f855aa42650ec3c837617115b6e517c96761afaff0d5e656ff04a8d7078abf2036dcf28006298a9fa79a4d507708403ecc29339ed8c6ad733d3e99c84ae43f15b78fb19fa90b86c45f6bde6fb5e665e5be013c9c957992722e40d32d5f22f51bf521d7ecd1f9ed8081193e52b5d9ed627b5c3d9ea1033ede0c27530876c2ae5d238a31a097a380dbd5a702aa510c8a09270ac5c14aad79c1026706dbc08b82746299f236ce0576719d61c6db948c39444b328aea424f66880625f1569d668c4d0afa99f9ee75cc0a00c470431fd48c8b00c73906b26508e75e1536f6c0d47951b2bc3482ff38e314a994a6d1514d1518f9361b1eeb65f62a7fd732d120749e47bf21fde9810e390436c25d9174fd005aa6bf3b71613474e074d6c7b7fc6f8eb28b8c7e03d4a178c1ce4bf0ae5493eb2f64b3d412394450e28c4f28c1bd7bd900546e9f545076a014231ac3bb38466725bd7fcc4540b11a459c778e682030994a3f0ed9cb8ee06e34c643a0d219d233625f0538722724e70caf9a042791f1d68b251799467c6691792b18ed169e00d6e2d41c1a7d14d93671bc60ba5cc5cbfd91d6386b1360764e312554b282a3081f65331ab24ba30848e146bc379570d667be3531d7ea3a512e666c5fe66f1c920b441507aba75253aa9a113f72adbf1f74aac49dd8b357ccc5beecb3c1209a1e33cca3fbb34b092983644343dd93da02df194fc6524ee1bc76f5be5800f317dd9556abae917e3a7d457927e09b69c02fc157510175c75fe6253dbb170dd5642f5e7f65dd76ef7359f97d39bfae2c29f69df5ca1988310bdb3458fc23359107cdf92afe6ce3a1062173e0e6db87b36c70e0ffa05e5c8347aa03fae599c372c402083543d671baef9053f2544ee4fed5c6b062ca235e7360539473060ba5a14d717099a936c0a472226e76a38adcadc91b015e469b25c6427f66c2f22dbc9df520ccf7df5e38c51af74f3acf5e1ebd57dad22ed3d66656c837b9f356462a0d09a85eeb75fbd1fbbc3333a1807daa0ea56d0d1e1ef4cc1a29ff0ae750d8412160fe95fe86865c9d849647f1afe6190e54ba5e2713ac78a722bc11ba976cd3da72c2374e2cedc3231dde1c03444c895561e9a080286ec2c005a82841881ffc043a78d33befa9936eff2a85b848e0bf93443993598438588293a77e3e6cae179c108e2a12b49b187868648a736
48e87aa804cf0c061d06a0b425b19d0617d34d900120a1ab40c49938ebf7666d92749871f71a6190496c5cc5044ecb893199be4ce0c1458b267a4aefff1d5bbc40d04260baaa7b53c852afe065abb9d170ac0029d927fef697f507c26660e5c50adaf552850486af6529eaa12920eb33489971602356b5fc3dd4f325dff5d760c3a8a1efc54deaf31abda99e96e223da1a9bb69a9fba2ce6f58aa95bfd77458ab0ecbf536fddfeac7a6c62cecefacbd1b475640d1d66aa99cd5686d3864a9dded98724358fe84c6642faad6f16c5c93773fc2e0f3674ac7804d5474b1c7c4dda6b1d4d2d30fad9bb58293a162383a5e82d09f04f751a7bb6aa75dd5f11972a58bd76fba46bc0296fd0b14d1bfef1b8da2e1642330b99daffb1ee592c70647004ac863575558db2e34a7ed24fb856d25bebb95d2bd0db5afa3fec44a6c25efd743585ce6edea054f24b181fd1a0f087cbb028ab0b0c1c0a542acc248306110750cacc04f0a47bba0e4bcda9c53c07d286461626d642cd5c7240ba7ab50db1fc7ac909cd19a3e51fb1e8c3d7a7c5e87ff9977d295925336f3197b24a8edf23be3c6f4ca55521112bcbbf1b7438ec27b7eccb85f76fedab6839f50402ac92a27209223d0cbdcbc1eb1b9387a83c6c6b81a70b9a9548cd18aeccab33f2cc8a1aa4d3d10a1973d4ca11de630faf161d5445c8a66672d2f0a66e4234ac5a48f89aa5e8128b323df85a699d20b9954a28008f7f6b84ccbf7adc4cc5a3643b5800ac2d3a3a594dd7b659001cec527a2112ca0aeef79262012c3ba75f40bf029c658d08acf9b6d7ce8994d4f446e4056b758f6640052a3844538770d84c9f125877313f4dae0d2222448f7eae4c4304510ae94545d188b94280f70fb9b14cf32a892401106b80a17fb1d37a89d6a4d77f0f70379394203e41b5abccfe6829cb66ee2127652d08ca88bd0ab836cd385dc0623703472242cb6c1a647f5ff7caf4a06673df93811e81a1d9574ca5092feef56d21b1b715f68951147bda370a65e4444475be6b6ef63db522c9b7ce435a326133c24fcf4ed5a3e1d3d9e88905d101a58f5fdbf98f113e0f43d82efd65485d7bf1e92c06aba9fd26254f367c8e956f219091da42c73498a1718c4c6914881e0004d208c73934876150e535845ec64b2dcd292acfc45512194fe9e0e0a4247402db336bf6a03812ee2b929e4583a1b7b765bbe0ce4faced919c957a8b5b1e91901e0ccf939c985e50af95525c5205bb83fbb124726054a6e684b86b6ec5002209b27ce337e5f4fae75402168f719b6b51e5f480a98900e0ea51e0331163a9ec0015126c217a6575c522b3b14eec642572e249790c539bc7ba2ef9d6358ad6baab2d02dd7f351e17f138b72644d3177053782e7e8c5d955cb6755c93cdaac5a8083e0b832c2bcafd932a96c8f2fb100de0e51f3e5284e1bd56ffd0a58b7f6a5988e407368a73fc01a13bdd081501d7b1cc775dc829dca0a15d037bf4c4019d0cd312c36a2f1dc4a1777b865b65cd0ab407530100c9144ffa44d4bace352c5d5c631f501cc363515bd621efd9d8718a81656f52416a19bdee5a2a5e3911f4511495aa3f22742167f00b302034aa3932c8bc1b86444a81791fdaf93abeef1709e1fac3612471c893b29d0e681a750bbdd49e9513bdd1dd4d216a90d6373500cba78c1d5300e74b328406b1eab68c8fd86e4f4ba335917b71da8e28a08aa7b035ebdf843694113f5b7bc487d6f8461828892cf4987166451ff6de02b2fed1dbef7e86a037abd35dbf064bf64a77a57676029750404563a281f583e6e67ced191a5789ce7c611de493d421fc5ece3f610aff3e92ecd53a9817469277fdaf0acf005597a46b782ca55d5d1be76a0cbcaa4ad348ebe5d2c074d04cafb26ce229e66d3cbef17a677313de2503f59fed0c3c3f60c85a154a8653c63b98b52bce24e0b56a4a2f93e71d685b8570b15359a23e958b567fd754ad8b265da0f88eb06e52a7e358ec09292d6b483c6e10308e90b53344cf781a6261bfc2755050d38dcc1c79958184ce41d5292e6f584a613caaf1e6585f330b42700dbbf26736e99a445899edcd33137d6ebbcf72054f3b040b6ba579cd52b45a5bf91f6eabaf180803606e502078e7f4b6a51708e1192303c09cc8d872efa97c17b21723cd5764b26fd6510d57a0a7cc7d86b6b82aa23ad742f666557ca1277d21befe911e10396de47db8e39905a36a51971d2147d4348b30f8c9ade74521167a1e43ccca56b6468230238ef09da30e34cbfbb0c95c350abb3b57e171e712ce050c6a6c6c6733b2e7a78eef6e4149ae7b1baa8d396830b36953f73ab420a5e0b04c064249b0ebc6aa9aa5ab01c00aa2d5cf74d59a41e71444edf3b36a0d23ea19f388542a793b42564e6ef3381263ddae08a636fbb768fd11da2dd19ee8e885476d22de01772154cf9b060b608a91b0ca73361d68cbdfdb314e810c2172c9127f935b444ba4625e9e9ddd7a89227f8698544
b03ae70e0481cda59f46cce2d2ff6a359e3963091ca79551c2d6d4f315d77cd203d46e57aa641e31e91c1dac070e589ef77bf094031b528061c425a87c6875dabb0225d126c3719b9a35cf1be1b3a0c5d7ed1f9152fb8580395b4a52fe660c38d2c61eb18ac0a03484a65c711cab46a271c965fc87c996e5e16348e80d423bb2b8823ab954cfeee1c44cdd01c84ca3e4705508af0ebfe3c0285d08f82a6414291fb21287ecbbd8a9152eb75f04a59a07f23269182f5e90a306fa0f44feb9cbdfdaf92eb6d90e730b2e6fb49b8199f61959e4989df35456feb31a732988663f4328b53eee5c99869c6ce4ab69af4d2fe47169ff07a7de3502b600a064b209d65bbf986a526640f948c4f4ad462be9294bdec90476660079e4b52a7c2ffc4fdceeff1b9d4aa12887be89896d9ca46b5468502418c63034ff222f122021adea3269b4cf08003fa3141c434e940fb1ee8ea2ea69f942a4538baca3f75171888af3bc5696df9710061f5ceacef7251746046c4304b6e92cef19c6e2517c5061fcb9142c4a6dc288ee730739b9d4ec31983bc84a4e0203617e7e2db003d131dca7dfd8f59537638a0d1d600df866f8a7af827c41aa453c48e6d0e33da75c69ebd4a05ca84811716f3a59ae976d880ee5ff38e98361532f06143ccdcdd7efe17160a574885830da67a6e77732bcedd9331f97c70969a8c01348fc9936030564a64ffed72af7bb3e6f9ee006909ef98d3fdb7ca0601711614ac35f0ae6d3a648ce1d118714a87557090984575e599aafbbb430be565ce747c9ba440dde9ec52c8b7ea8cf87090df00c1ad7b0e7e4db2a8fbc1d0a1665d5701f42477916a0743f4916226955cef0eb0b2f1d574a2821ce920e3a0010a3ee8cbeb9d0cc47f09c06fb2593fe957831c384b5406644a27fa560c09d6f12d3580e3f7b22eab085b80e704bf5a49f843c060c3d0d81c5e8240735307455a54a57642e18e98a450404c7a77e335741eee359d398274b141b7f2e7bfa7eb609194c58e341183d4b257eb2e4969d7c356b7caed6f08f0aa56171eae012678a900ddb7a769b1cc2f4aeb6a8fb1ca18c37c5601ff0182edd29fb570abf2b00d9be0041ace92c9562aa0dce70ad86857dcc701e0433053a87b100d5658e3e34e1fbca0627755330f019b6c9f02090660a50e24e3e68a522f79435ded827228dddaa500edd4fc9ff848cd905ad57e3e2f2b1cec1c511fd7cfa366b682ff1989382ec26eab72f95ee70f9a01deb92bde46f887175ca7dcfa7d1d82cc799464b34e94263942b12fa64eb41519f18ed378c5e5b94b80e20692e10f4aa83bd2b35eb6c712370b00d3dbc71423317728a07a7acc2eb0b911a307e33cfe55624a170bdde80eccc7eee2a648cbf8b78d787a1625404c66c18a4904b115cbe233c486c80baa0c17115c3b450ed8be75e2f5d7226fc9a8a18cc16a2a4a2261ee0dd024ea4bca453b2922c57bf657379d0f531328ab8b367f8f197d7e9d7452204bdd70c46b24f9f920b7d20be6364c6d70f9c689e825c080891c24365e44b03bea24b6adddc90999f1908efc8cb2c755492ba1008b2b4aa0523995156371b11c597f21aa9688090d604935181d82bc66f329c515b874060cd5dd7ec0e9c685d58d5b6a3e37b7d6e5090c538ffb1185a74d926c7cb14ab1794bc93d9a439ea522db598bde96b322202f1edd6ab1d0b130588f54c180bc6d4fcd3a682311fa8c520e2356b200498c5c31aea0501a5c01f9a4aa40afa4b7f3f6c6d385758a64b26e428d8d117c07caa511127f9bd4f22f198bc46d242a08f7abeba7d197d8db573dc22542424c358e843db44f79a284477d869e78f853a621796141ff0b780e88fdfb679dc4b873cbe0a484d62ca135e3faa535c0185c9c08f65413c445e9171f0967a8a83272056793d81a891f786506dfe1e9d229b922c5aaad948abe267d74183a510c746871ae142d725db0b8bf1d45b09198e78a6b364bb97ddab2cfd49dad03ed906994a7a407548198f879fb4ae045da9bcd79a493f46adeffc801b60f345bea921376f8ec29490b7a2af94cac4674b622be6996ce27dd9432bbe85809cbd6f9ad23cfaba7501fcef1091ef3669176c365360f4fc9d1a015269236207f88ad3661041a72d7980cd3d3fac5b37229bf2734523283e28b89ba4a55984218b313ea75c20fb3421547fecb7ef7f96629a13edbb9d55d755ff8e6a46815f7d92205d6a94f740b30d836f52a15baf529af1f72d9b03a5593ae045b00d0edc179732bd60ba3dfea40eefb0612a19cdfca72405f4a15cdfdcd190b6517ae6abee7abf0ea2111548f52f17c310728b12069b318e065d8e3fd537307934cae9bccbed9b0b4fe0d969a0c25a9a4c9eec13f9d3b61f5a205ed8f2e667a710f4d8f10a8e40048cfd6ce65330a2307282c2b18ef3d51f6f9c7c7659e9a217aeab97c1721871b801e8f971a4e15330bfe6da5a1ef0419540e73d0bc95dd8d581e8785ddf0b50ebe9f410337c0
baacb4d85ede9b0b4033abd1605c2c841bf2248ee154c405677e57a7a5bba810777885a1d148ae3f03a7d64c304d681f34b5a48c81f829e326840c5ba465b86d82ff99dc35362e5c22db9940690cb327a071ae2293c6a2945522f09c67a4c1cda4543fabb3256f9f9e2babe76d8edf46cd346319c80b821cb1f282f27c25d9bfd8d1eabd3ac2d8abfb4b621f34f32e0a86ee194984e25b1ac185d448386fef7a036cf7d58eb3cefbbdc0e0acb38bc8f35e314fb2e2b4ce62cb89d8994c4617d83ce215f6f62622b505af3530a72bc5fb243a9e1af40ebec3a2b7ba22c583d5b46c8921260f87857397b889c6df1ccca7ce90588d899fb83285231ea417bbc8d57fe18ae1ccf09c7d766c2a614f6fb2a90fdc8fd931c095419f756ae5bdc3146b9e612d4a9de6ad56b33d813cc2d10b51bd35df39d6caac81a879d30feffa6ca5262c6aae0cdd23df707c48d4d866ee9309281051b3583dbe655f1f30884a18106740a513fa81894049c1e327eecaf8eda4365acf6d328f40f53e818cffd0e712bf40eb5ea325149aae4d8522a0d3beea61221580333ae0f2243798708a85ab0be2ad052d8656e512e2479d16e081ba3caca4cacdbf5b1a008eefadc8306079c06189340a8f2afa25db0245a2247b3f42922a49f4870efa920d6c3bcc7b40dd40f661fe994f3828e3bc79cff397ec1ce3620d9f15cafb47cb20ab34b592dca37f68483811ff96c479ab55d40711e33de1a441f080f5da745da545199fc5864efb5e3b01728844029f0e1a33abf3b1a137caf01e823f0e2a0caf76563e0c29ae9181e9e7e2bdea7cdfc6f249ba810b93fa7f21b40e870ad61a4ce2f9250e02983eb0fc689ad4ff23fed5136ba0a08c8099e5812b0f414cacdb9ab88ab83c02d99d78b6de08de6691b52f3f53e31923fa0256f484200abb62e4c219f165afd9e855a17018b39efd295a42da5a4803ba2db5cd6ca1f735ab38864e738424b3125ca7a1246d749a57cdc591da39cdf5e76535c81c825f6732555f94118b69cd465438513d826946824b5919b40b0ca38d090702e3f97c04f53cde4ed8d2b65a73585d97bb2acf09e604211373e0684327b310717e4e0867e8b4b04377a0817bb3b406da4358bf46cce6e067c2979e22203259edd198b99713bcaf2e91b91926d60d2743c809c320aaee28c7473e14191f078cc4792469d9507a7b233253df34e140ba2f00682bd367014aeb3aaed9f579ad478f5cf1d0bcc56042cccb7f05e5ae102ad1b5a112dbeedac25d7990ff144dca705617f39610ecdc0deee596388e09343673fe8376dc9b13a3f3e08cbccc6c65813d5aef6b1f168d222a92dd9de41c9b79bfdfe97278ec59c510e70811790f5ebf70bf39da4e06362ccab28c25e5cdc4f7803df1f7ec5cd18c9f6361bf1188a9818c359407c1c168a4a976f87e83dd1a0728a7462faa9d978d315b602fab3d8d6f0376abfea10d3b04b5ea9ddbf0723708c6f71bef337aff4fd216d385679674d450288986e1496349fb4e2912402c6926c2ad64deb2860685467006fe5166e3a7cc4ab07b64dd87453742a8725c4642a6da9ddb695706e9bd17dcaee7cf1672c7f53480c968277081db18a8408c1bd0f0e77a89e6466ea15fc622cfae8803592b172174f80723a0bbb6bea91c3d4ce235595db86c22087c17cefbfe98cefbbe0bd7128e7e8680c9509d351f98125e8f29649b9f7fff1380626f547c2be4fdd4a112d1c5c77183fccab02f3fbba584443fd38a6b9ddec655ff199440e5b5c264080a0280c05885f47c005b6e12e7993b8663bb7ad96fd702181f7387758b89fc373e1bd09b33f5ddfdaddf4eb74ccc883d46b7f025d1e0b12b0c70acc6249ff5927db448d599af64dd80cb97e352821e9d764b0e63a352277d7f314fb108f1f20bb8057ed09bc54dcbef6f9d2a24348a646ebb089dbd3794b6d70d70322e175dd8dda9e4541f2c7bf45db834943c3d183ed3a8cbcbc9693c502b2b9adf6504c91938e06a128ffb8a20eac97e19c4e13e4990ae55a337e16e3f45524a42296463e196f116e476f28c1bc6909cc1686e49e408c97ebde69a095e7538e576583fd9b9ba2f39756efb0514466273b00df3f565c59e3e489d999018d30e79f3b5deb2c46280ed61050d4067f7ee414e398a722a6ce0c74ece563db3ff18f2e04038377a76ac028e04f5c2f6254dbcb758e0ac8cc00527b2afda7ad49560ed11b185fce5cfa264af0093ef62d4e7ccccfef27c83fecbde52131ac1a234bf25d841600b61181d9cf827f9808624f0e6780f14d8b9fb264325089c6f37f904291c97ac367d50a3cfacf016b947a44c0619e7c16787b3d274985fbf71fc151a6ebcc0ed6497922c2732a20a373d18dc98316c5e9cc819907c2cc97c806468b0c2f61104a73623477885d4d481c1bc84a12c1230bff69fb5ae602f30cdfed5b4045c96e2836916f2991035d2f0c61c9f3187fd0239ca903b824982e0846e7e18dc42ca11ae91004
f59504518f85f12988dc31cee1a4d281f8acf8cbe864b064c6885d79eb9c9071a411f70eef02df3e6c3aaf1049aa345cc8f2301940b12a9f3f454e8a165ffd1527dc28ac4667307e65865d83dcc7087d3360304faf53084933c3f8a4dd408d3053def4813c7b68d0c44732f174743421231223bd965409d4f427cdea8212fd24fb84c0028135872f99ee0f046a488772bd247048d74b8470808c1c53211519d5ff24b0ecc1e6e919bdbf2531bcd739a30ee990318ec5781fdb8632320cdaed72567db064c8105476a0a1a4c6e7c8d40e0aaa980a47fb445c8564177af569562352a8cf1dc8762dfd5302cc0225f5ff6c5ab79090b655d8c5516e6cb7892701809e357cadb8a29bf1864819847307a36f1d87c7cd3adc71fa58869887a608dfdfb70164004b1c3c45507071d6d2728bf09c5273bd80b7d318bbeb0a2aee5e7b4829eca8c9e8cc8d6e5826e0a22afcb342a079a7047cd370769a7da5d44525537a6b5144108a7107edad0fe04a101f69aa49094b227611c6bf00711ed702ca1be09e27d54141e055383430ad0934d4a1aa0d77db051c099564a2e11a62ae0f3d57b1052886fb684b77375745b632ed29be33e7b62d936a0e0c6f7d53645b0658a5bf62b9414567e5e613279c7f79eaaaff070edd2409ebe03920081e055147a71b7fe52b05f79ca986704d0bf3b50d345275641368cc8e844984b27562b1e2e92e59632bb9ecba5b6990efc913082165427eacb899cd82725fd68ae13fa20caa1648404d7e97bd7709db889418fd660eaaa2e7232a713daa8e92035d8e88640467c05883e19e88b1e7bc90c73fb044baa584a1364fb811843215c224802f0999510feb20fe1baf12f2a62cf4ebd57d2ced2191000d93f6bcc6aacea6008ed6041781b48909186188100d65b40e208c2dca3d2c86b36483b8636393b19b6a5ea1343c1925c08beff82df568890fc561d94eabc0453f33c4bf87a0cf3d66cdc250079465bcfa5e4bb8f75e02382cee1c2cf747e00af478667030ea46ac578d74009527239d023c37bfcba7a5d54d75881e590f82694266d6c7599a808c68c9ef4f3b23607aa881ed616428720bab80c22d52676e1ee062ee8d5597450c69c83ddf6412650a4b89edbc466353d3d68ba3a858d26a6329eca99b5feb94cf4e36ff87acfacfa7f3ae3df40b4ab6b0fecc846a98ea023a891c5184178387677764605421347385c42ff83c8bf692dcad928d8f4f09bee8dfec1ac2a7066a5c61d1a9254aa35b294e2563f637ef3d4b6950eff90e2c366129f74885e4dbc1c7ac03a1ae323f892467928f51d2d17b43d6c63142eaf3fb317b97aac773e1fa05c9108e2fbec4f32e89ca95a119e3bb04343d7522bc37736f425f6fe2057ee5115d857c1fde002bc5c67ec60ed56cbe58a6fcd40f581f297bcbbbb2cce93990e3b4376b0a817300d185ec2a776c971130a47d088e59b39c04fd7afc577a3f8bc78cf743c48b99316189300d50b4810aa51341de5c1ccedef99029a235c2e40c494610ccda0131350fd35c77b34aa2eef5441f4cf5cd62ff575a777cbec7bcc8264f8110347d91e4da0b698e42427153f568efbc441a38f788ab5823f5f9e22496cf667c1cacb90d7bdf75f030a1b88e7e70f766c8c158c0fb40d7d23275cd19d5b6080937e216181ffc570eb0d1f83eeeb123342a2b6940ecb2263a79996b1e16f371d29eea281f7db8fc9c76e15f35fb9ec59c745a784751d4ab415295023e94a27c1b6677ea62818174284202905bea4f809c20282875be190497b5c535ea2eefc7b42d8e8a406d20f00ae0111b30ba37e0786c11ae2b43924af551ac5bf64fd3cbc8424a9f4aa0ff734fd4200c003d048a7d0f7c7e1ca63be7d1563e1f650c228a743ab157cbe3df9491ba80c0be65fc06738c933ac5dd226b8c7b3fd544e53468a6e36789d72e308f6e2a4b838f548291f463e621d081fd2641a348e15591af2cc9138ac9a42ab76b91685066db801c26014ab84460290a344786d78166714cd2166cf93f20e8f60a4ac3cf46bc14683426db87a69eb19f4fb070c0ae00a757cdc7b177c41ea2b8a3610a99891408a3323c4acafa6ad3d1ca6acb129986da06e19cb97add4c76b6a14f01a3961ac4841b0d7ce1cf089f5e5d0185bb0fbb8048c22c8ddbe351e2630e286027f5b8e364cfc262133b40af643393aebfcb6d3dbff11e92f311b8e7e2ffe1d38c00daa59f70b4323f5fc865972c221ec0befdf0d08a7498ae3f09cc20db46943b8aca9f21b28e7eb9ec274381e08201039dfde2aabfc10ebeb4282d7079cf5a6e56e013141bf2646df7813fbff886c1135f6c116058da42123605c1938f17a53120f57f340a4ecd6eb12c1652e94df6d125f622ac3d28a5c9223d9b0e1b5a27e00d0a6c6016aa31f7cdf371521f585b079b768253ab6a7aefc4369831a479f997c6bc335d6c91ed6c108393745474b1eb0c7bf6edf671dc5ad5ac40c5d8642134aafbffabef516ea2880
d5322b7f9794ff22e6f2df3786da09bb14fb036c63bc4706c736a2f894a2c61163e15e205222255dd5fe55913ff19580ff80c516824be8c043e91d84b8d784aa998b7c0424307b857cc0cdaf804607dd65d59db9706f5722a0d563a7ca4a1d5dff6463c88a7be1a8de5ed69c293a927fbc1a36a7fc2a8e0ee9bf7bb834e0cb39be9d9034b1b2fce97e4a3c5ff88cc627fe908ac623d09974048d8cf28cef442e1f00c7bba98356e482fc8b29224504c426f20f4468e6a4f84f5b921954e640e538423b8dc113c270c8239e4140d10333748f0c505676d08302edea3b20f4372020e9a7e63ae766401f030a96878dc2a61cfba80354fec39698229ff28ee5d6e23eae400ec4d54e9f86434f6a80fa6fce88a622e6d5ea0d0b4eef8efe9923df0f04b4210498be80b8619792d2490e077466a6224ce1072bc8c39cd40e178a558f50557fb8785b7f887a9626f2fbde23851ed4aa3621d5d50746df352ae75491eaa597d2991e847c4fbde576c6011fc042ed4f05689b397828b041719c61673200ad04f09e662fee809b5fa0a388a0b4a8083681f3497ace429bd42dc5fe5b16d397e305907a9416f8ee1a3ba832b7307d09b08ea08f4e6c2965262dfd0b04827c586ffe2bc44d424dec9ccc6e1aa4d94524b30ad01abb54aac11b1592868643b133f622e5450b578e484762ce0494565e500e1225e51a5b19640232058651631b0e740eae09856790aa4db67116e920ada4b2a21ee1b1a42166b42ff3eb131d50c9a4e4e4040baf4026478441eef45158ec434402cca4bb24336418454bc1c6dbdf224cf1490992c82760c23f68d9670f9492773fb0c81f3d9af7c7287f8dbccde25aeae2dca07cc8a7493b3e59008b4067f80cffc6e8e20e364466c6c48b1b9c4b454c97842bfa5db1ec31c96012fba0c9ae46ea1cc3c01f255e62355802f346645e1a576091db650d0f81d2c8a91302a7c57eeaf7da1ddeb951712771970c3e79db2480020efb4d1aee8358291f60877b0415425aa07218829db3a63f1664682340f9d606bb8da7283584fab572d0d671865cd0384e2023b44803cd305d8321356c849f2ce195f97e45380328f8896856a5f7608437c462f4bb654f4af8b7e8a1206ab643e8d112e67bf54972ea04ea4d1f49aa6536a8472b0b8ef7cd71c0b63be0f4b68ed863daa1a1b986306912d0e04185d9afb107b1b6be1be80791eab0f42694ab025e2adf1f0c0c8daa0a9dd6867aa0a605a0907e3d5e712d7ddb92e1ee8d2bd4179b490343010f0c37a3f4f82f6bee81a0d702c5221a974b46103cf5326fc564f245b57c9a06746cea4ef72cdb99ebc3efa62ef718a915d846007747365f1e753f288bc45f6963d7d5d22afb9f07e98a5848c7383fd8b6005a1d6e07444c348511835ca6de25ca22ded8b596789dd7a15058a55750f82636cc1662be4f793a826806b62e01afad793808aa96e2abbf3e072fedbef9e15ab64bed3bf4dd1957aed1d5a2ce450818009714bbf3754cd1ddc5a22268f52f686e614d47db841e7f68f98758620e933880c760adadb8cc037aeadbc48a341937deae9e0286ed502ff241dd85b82c74821e244ccee170573889db36bc92704f41f3e9e0800e166f0fb593c47ca7037afefa985820d23a4a0705cfd5782493f82f01ed07c6bf0f68a8f536c16b7d303934bccd2ba773c013d734404dc4d9bc886637103800ba331be0ddf0673d47fd7b86a774c8783b3cc886f642b2c7303e6bbf2464efbdb7dc5bca94520a0f096e09b709af7d2844ee17f92921e451f96daad0505579fee42ce2161b6f313f55833931d8bee94d59d2ccb22df33efb1c20d792254d908754a1b3840657aadee1e7ffc1f7f77ce1911f10e3fd7ef44ecfe7a8dc060a1a0295d03e5df987a4c03ab5b7a8439ec0820747434822c509960b614059a3a98b2a82e08115f6d4fe85a2f64755e760a5f6cf2455afe626b55a8d93035daeae520e4dd4adae520e453554d55dd2b67438b32d63235938778376bf0994917ba8280deed790a10c84ab4ffce95c8df8e7bf653b15d6c83efbffeeeeee4eba065d2c4644c1f28469b6cf6231a2a8f673585c1066b5acceaa11c740b758f22f8edd2b1f3381253f253fdde3607c96f43057a2625d1ee64a284b7e24f0aa8409e45f9e92c992df9d0ab153813b153689253fa07e6fb9c5d3489909482209259458d6f45554820609aa5282fa4e338b0609cadd818ddb25c65d7f7541361976cd13f7289b0f7b7cd853f76785fc69ba3b6edb4d406808231ad00022cb942dbe60b5d77d21179170341ed79ea1d277ef08576b6c3c2552a90fec9df8341e57d905d1302827bb3917ec7ebb67249c17632743d59ec1eebde3be7086aa3dffa67d31a668a0803d1a8c4834a7c1a86a30fe85c47eb84975c3af5c3db2226b27bf8a7b7a6a154bb84476e7c0a0b1dddf3bc6f84b96b0525175af4429b2b2440a777472e1a249cdc2c550dd2c4c647eea66f122a67217744304b
a55fec73508038a7d38db872bcb50339f5573a19a69afc180554d732852b873cfdecb3e2eead13efe3111df2b66f1abd8bbbb3143b75e0fe4334cc9de4112314b88a8abb48449ef5057298999ba6d1774e38476922f75959270a921b3a0f22f96ba4a4986aa77413744b4df7d6968f663250d32b7b6d030564ee9b48f7cae93a3aa9cf6596ed33ef7545443754f3faec118e9a3a8b7952e8d7be41a649447532a2fe775c01e3fbc1f4f4883fc2da05ca3cbdafc1961a299243ac718e38d13206e7ca0ab394b693e02a54196420428554a1562662ab6d58aa01181c64d667375a477e46fa853a5102d0a699f75ff5c68507e1785da482ef66ae8fb24cffa7e7f6d9ee6fdd7c6c66ddebfe6b7c19a3fa2c2f10f0559fdfd93a1c19a8f0dd63c50afb02db772c177e4c73f71a72e68fea90bca9cdde36103ff0d0cbfe77d38bf39478277e40524b173e10be5dff1fdd793f028d49f4effb30bfa0f07857ad4e9f437247c38a837c28f3dea777ca6facec3f19a7783f3b84a942a512e8ee36244c50db56d31d2ede4454a4f1f3dfe53fc6d8bf169fc11baa018e2ebf86deef0b641146ac70effed3a94ffc9bba011fc47f84d725f67428ea72fbb13747cfcd805e9f8af0bc279ad7381adf89cfef4b19570c3e4bcf6b13b61841f61ab79f66c7e8747f3db91f0400ed424cc47cd11bc5483f27370bcd7013dafa741f939bc9f06e5ab3ca006e5e3f04a6850fe0d6f480a2a3428c4c653e2fb9aef5760a17d68fefb70b78dbe4eed2143e577e7dcdb9a98ebb8f38dfa9fb66ddbbaaddb62f0a7f1b641fa9f37341b01f4933c6b0aea67fbd0adebba6d3b5154f71bfbe8de3dfece0b372feb7eebb28b6b9fed62a0e16477828c5d5017f41d5b79175ef227c46577c236d8bd7f27276f9ffcc9b09520ee8b0dce504895afc92ef586074efb42e35f9c3442abd0d86e84a0f16b6297a87541fd63fc1e2cc3300cc37e6b67de4d10b47f622ac89edb9e731acc7e9f3d55835996cde76e7ed0b21aaee0205e58cf967d49968cc51ef2eb515529dc34a1dec395dc40454ae9de31c6e8524a295b4a29a5945246295f4a29a594f2b52e687c19e5ef5284b2add606767accabf928509e2b0ae85a710495ba66ea5af18469dbbaa09adfbaa0133bdfd064bf38d5f86fdbc65d1066f3ed711764f3e1609b734135df435228d2ed956e7e7b5cbb5ffb23aa8abdd605611f4e7f287c3234a87d6c507b89d36fc4fffa3e3d7741a7dfb25985394e89284a25405da51dccb87b47645623db9d00e4996704060c06ac629f91fead5d90fbe4bc399f3dff19e45fe6694d84fb57754131f06fbfe5e0b83b81e639768fbee675ed276f3e90d778d80339c80de9d550ea933c2bf54da11fbf84f6e161034a1ff5cc943e7da06e9f5842efc412ba2668f6e15e098fa86a17e41f0e7f28c8cacf9f0c60f79151b4fbf8d49b0d6a8ff252dc47f793fbc9352535e4aa4540fb967d68efdef6a5fab71732fb6bb20b8a617bef4e88dbfb76413166d96f3d7630a1e15ed1be84212914699f66366a3055a3c6443dc626940ddbeaec20a2fd21773304e59f0d5660db1b867c54dffd4e9a16f454fd4ff8661ab68e47a27bffe51ebdb95791b54bc9843baa64ce0bd51199237bfcd15b1adfc16f1d8a98435ae5ec171af41d73c5a805a85fe815b3b4f8ea05576d2f5c8d31cdef9687acc59398a877fcc139867214551287d102ee126b79a177fcc3fd54356a66687fc6cf953dbe1e2856cfbc192be6c58b53f9b351bfe0abddf8ad2dc3444a2a50633c85d81601f42b5730759586bad4f83d759586a0ac7af44efc9efeedec2103fa75af2fa45d107f8f5e312bfe8f6d8b3cbb131f0153013562a9f1fdebb13cf1352da88351a7c16f30166d33e59faac1f9dafcad81621f72373b64cde30aeafe338b6d3536686c9071541ced81aab6ed6d8bcf1ee3d3546418b8366fd95667ebee96967666c952fa4d34438474762985007d3f0de248ea36c28cd01ceea8fb5aea76475513b39146ab86fba5c67b289932b58aba54b40125cb863f2227bba0f8d725658e2466e3cb5e6183490df7cb081cae9865feab65cbee547b11e3befd3730a5f27e098313fe96727f2be7a02e156c48a9b1863f3efb40fb3acb58eafe4ffb2c156c0cd57d21eeee530a3dd346952b0448fe34b82f3ff01b5cba030d5521277fc70271ff0be7bae48dd66eacfb1d0053f77737269531b34b6b14edaf991aeae8aceffe25fddb87afd49d9ca2cc42a01f72f57108f343992b3388820a6bbf88afbe8a2f986c88a28b285f58bb4c5a75b19c29427de78f5b246871a3490da6a2eabfe9a031a2a1aafabe74b240a2168faa1857364bdfa3fa97d03e4b451927d57f7baa0ba91e7752ce7f434e16f90ddbeacc289b6afcf942acf832ccf5d8ab70cd30853f15b09fdf0cd887c3f367b8aec75e887565cbb44cdefb4e4c8141afc7fe67815c20015dd4f8415822a3c60d6385e589ff03bb96
493249253d2bd598168ec8c0434335fec68ff1267694cd01d163238ec2040db9294ad59e9b5cfb75aa0a5ffb9ef6d1b417d23ed86bff030ab75ea3400b84abf6cc44fb0ef42ab2b470a7e83e5c2bc2cd62b51553353255d3b4544d0bbb2f9c93fb68d5b68fa82e8f6e075bb65cc1a5a9cedfb6dd8c28a1825d624da1f16ca0f99a2f525d865daa27a0f9adf1661062d114a1f968de3d14b46aa4b99ec6b341a8955444a464a8061a5aac1b8458cb4283b38506e70395f0a3c1f9db05fda46aa0da6f56d0f91b5369d03f4a048a71a58f7927faa17e6b8e3b13b8437d1ca5c1f9db3af7097be7a10582fd7c56d23ea88fa96c8c05fd48a5acd8c746cb337fbb01e51ef5d8cf8f0d62d94b2f063ef1d0500c1ca5cee7bc1049f4196ee0f0857bdc92a1c1f9a92568c84665ea7c36f2e994aa546f74c2cef9f35f689febe792690a53e77c16da67af405154e7ffe85564cd2f32bd9d3a9384d479a5ce493466ce5f9df919e1a6acb934b655a93ca7073cf0a08c1a5f0ae7f6c21989747777776e2f3a3f29777777e7e6c2dddddbdddd23737177777e775671e1eeeece5c17eeeeceefcecd5eb8bbbb3bb717777777ee2edcdd9ddf9d3b3a1744558c312489b608a15172e5082164a21041e440868b0d9389510269c31cd9262f57d1d212d775556eb55a5d24d7600a1b48249225916fb55a5d62eb4035802d98f0a2d28196b46028ad404bc201126cb65a2d249873cb9962e7b40f92b8499129750021b88c913ac2ed164c6cd104c7e446ed04151fdc888b6dd2b660c2cb36f5dc30b145133d48ba5bad560b49338139b8f8800ac8652211154bb80c171dd41fffd6e62641e4ce638ccc2fd4cc396c91fda577920410359800a2260ae2a451ab65c9e0ffd19d9d3907db5689e261dcb6ee6e3943a1befe8afdf1a394ed1f14439fc0b994d5b677b5bde34fd987ffd6d49a0ad41f9031ba4a45e9757d1faecfcf7515c11e01150015a8691241f0e852da48413f757d613c92f262837481704d5def7f7d8a260c4195ef434353695cba3815874828ebee7724f555f735367fc72e19bd976e161caad490e3b2c839d8361ee84644c3597558db86da6e2e5a6a1428432d6ff22f56aa3869d2647acc12172951940c31186e4ab24284932db364ba2cd59e1e62f8f9eb8296b848891295c421304d49562a11eea40c992e4b46529a492b0903e64a1217311166cad465ea525171291168ecee82f8fbd1e0fe0e478940c3c8421e3dfaa987aff5c64215d45314db6e2c8a449829c3d4858a911339133161b45c81d245dd32f385a90a156e529968cc91e53244e71283f21779d94f42cc528a70d67e9d7ed00a27c4d47e6e07dd49b61d5828876199b671d8e518b769d9e4e22ecb7360fe99b2bbafad2878f58f5e4f83aa2a941fa8dbb3887cc28fe5d9e48c1ea3c7cd5923568dc4c7917f3dd7040d636d1dafbbbbbf41eeeece89f2fa504ad9d794f343296355c99f0d5212ba70a9195e3fbffb49e84223ed9fdce21009638d37357e581b71bac03d47f5e114b93e47cd2a50b398b8bebfdf74f7a791427dab6f8c32935ed72b5e7f43af8fdee9eaaa77ff60df7f1bf4df08f8676449e8010d69dd52ed133915776a36b8fe513992fa924a2d55ae1b2a466a4439859e5cec8e357a2534c84bbe54815aa7fb062ee3e454fb85f1f7fde77a741ad98f721b095d6814344cbdff460fe76525c2ad5695f2eb32b6dd207bb18f209d2dcfca0643720ac9c5448f7095638c47b8c84168a93146fbe517ca0faf0fe79c59967de6f126a5bcae8b72dddc7dfa4ef48c6c9fd5ed5488ad56acbb6d5ca91761f466839ba2eedc87557ab3c1e813a3fc70e28f5077d4d5f178b4dd2041ca28636c4e5519a37f57dd77a6ed54f7da9d8d517ef4e64af6717d31c66ffe008381570e351f578782a330828a69180a29b86a6c5e671ab65d9fcd51b141f937451be41a9ccd9e455c90fbe5322d7644aeeedb05396e062dfbe210e882a923dc8291be3017e49044cab197f1f13327b2c7b078798c6d50fe7ead9b3da8611ffd920a2167aa6e7a40f963f69763d903fc63c699945be63fb1cfbc90c3dec3c85d043a145010ce3a3b221d0a28a8f1a591f85132684efbec82fecb72bb66e61a5fa0f6abc5df3c8d7df45f9d095de537d947ff54e30b109ad050fddb056316a7632da357d97683f8f7ee02d8c7fe7401e55f21403a48f30ffbf8d160cff7cf5dc5efef63f47a449f7e15b45077f401b747796133db346e01fefb4cb7ae47fb60ffc341b4df07c24138880ae5d1ee2ae71ce39ecefa9001083150fdc37458d0bc5f1ff2af6f039997b3a07cd911e91a5db5a0fc98d381eabf3bf2378f8874212e28bf3b22b4c679c967ffafb38e6814c520950317802ac67c01050852b2c85264854dd1171afe22643426871c8c6e68410f470ee8418d27607ce0a1054578f2829b274160ec8590122d88b852449736c04879a2d4bd60a9db9e3c41b7a2324e4
56d50ba6de16a708a72a4ba8d14943fd2dddd3d920ab2eb2346adc6e7d5b592ef5fe84ffdd9a9efec4b6f7dc77990653768d907d4209750a6d35284195a478bf0a265452ce0b26e2b1a422ba2075bf84306e6a3c3132724a81848b10827fc8a0cf309c19e04e820440e197802072eb0b22508295414d1e28a6082ea628b23bc8851030b76802288228648512f6ec6d0128086a44006a67ad58dba4a3ad8a05ac2aaf09225a08aaccad46a82f044db2a29d503bab87245e989245dc0bc608af60228281757acb1822f921c11851345b03afb7a7e66cffc7e3c912f4802f3f9a932351f32b094979aa202cab6ebeedf1637c6ef90fd657b11b5dbee36afb715db2e3b211d08f3b2fc8b9f03b223f2d5887cff8b6b8cd74ffd3dfd4213e2e7554872b7a089b56cb392311c65bf7bb216e5dfa53ec35d5ed8f49e9e8a621ef8ffd3d333c4771a688522bbd4c4cd6574625722395582004627c98643edaa275beec121031a7295b1c935e01a4c51da68d0abae12932ab5c7571c9521563f176beb80970de8d6905bc5fe745294db9eca3f7a7e8424802a3169824b71595d2526467049d28cd72103627b4192eb054c364987690e268eba4a4c72a8b40d6ccbf661fcd820cdf2eccff81b235d9e65192cc7dccd2d89a2749730042971bd0a1e9fd8f657a73d465f8f59d67ca1a12ca2f2f3cb227e59c4b25156f78c1aeed4fd70a776770b49a206afeb9b4105ac95498ed4a407474918f124ca0d3ea071f9b0c6c5e4450e6028a1c4c5074b6838239b0c317b01438723b86881a1048d229adc209be02075b059bef8f003d20e86304689b78c275c6ad0c51136cc000a2d98bde460bb1205109c3041c50e3fa862f68046eac84ff56f245e89d440b1c287233fad832974a05269ea4ec124a926d5c93baa0a675740bdc3015f6481dc787e2e7a1eb34052cfdf490bc4e6f99d6881d43cbf372d101cdf1e56e35d6fe3d57ccab3f91b2ff537bc9b5779375e07d51fc763de8d9a44f5975b1ad57f7257aa7fe643f5d7ba30aaffc675a7dfe6875b1223641f6e69e45cc1d93eec7c684f07f7611786e4dacbd17d783a6a4f757a1c5553b265aa9f55f5460be48609372a0ed4879a92ea8fc34bb596877e2897d8182d0faae525a1395a0b24bed300f98d1bafc8f2f0a73c2e5a1e7e1b8fc72c0f7f8dd7494ee44dcbc34f935a0ae3510d51adea8ff2c2140d75b3a84f420e42ed2942e7a13a18953f5b523d944cd55fb616c8356b887d384466ed9fb5bfb0c66f7d2d5bcbc37f54397ad16879f875788e254e8b2de45f8efc3c83b8e5b909831a9154c3c9b551c388fd40b9c6c7c2fb718370696a430824b04421630345d061298c0fae50ea81656689073f2485d9a9affdd388219eb4513b35c6092d464668910312ac90a2608a9729625441854c0f58de2bba40b8fa0f69a3ee152f506c9c97c270a0ce6056031aeae8f48a59fba4a976edee21cabca8f19613289a7a60e24596858993ea3f89e852374b5d252274a81b6a43751f9e4e1f76a857f5873d493b0bd20f379a0f352de573f3382a1c7f63a5a2599bdfcf1648cdef75fd0c2f8e39743c4de5d0f1f18a2f548e2157737cdb20906b5868b84b455e689f1b2aacc042075c681f950f8e6f9c0f91c43a3f65c27cffd4957d324bbd9cf3813a9b389e91f9eebfcd9fbf0d7eea81bc2e92581d07a76512d485222b7891eb73446c4e9c4fc7174ef96513dc512be3f06e780ca541265ecae3214f85e5e9aff128e0a5f6531decf29c3ace8b2f9535b06ba68a34caa82d46ed2fd260ffa98c270311373009e96054ebaa8be54cd245b4e46405463e0081e54cd285238b1d90f1c3162520ae18c152134a9e687103911a526cdb3e24b8aa91dd5f1edbb60fb8dbe1b63a32c81548fd392df82a6777f827f34d5d2c356855ff421ae6d4b00ffe8813e34adbf5556a8195ca4db58d805023a73251abd56ab1a6ac52104b6af8a332104d372a3b77a9fcb15b689f30e542fb7096d60e95f93bc06cf0095e9d83ece56d698c4962496f591b77f1795c4169d569f0bf9c06997ff927b7a495274dea2a3d194a405da5222dff64492546e98996ec48152f1d9e74f19b897ee57290c4929827434c78c01a2149185cb8000c19666a60f553557f9934a5ea516d21b3a4a585972224add005066d5c3146124d647418a3e9c0c6698d9e9f9e1f3b28d961872170139b3f7e84645c66d7d8420a1fd2a832c61a4a0480a2c60e9a58b1030d865020c60c20783006131ee820440e170f41081e6a9391c40c6670268b18fc90868eba4a4e8250a5982b8d6d75b655c680ea1fd2d42aaafbd81ddf6e0cd159c3786388620ba4fdc3ae364960fb2bb23cfe292edca1aafba83dcbb32b03ec8e7f8e1c353535f4119eaafb581e4783b6e0ef827f9115764c0cfa39b65aad5611ab7f6fe608950150b90b6a6e79f8b1ede6d85384241b4c5e1d5b09cbd3e1e57efc085171a91c01d45582c202273c702a
a2a0435250d247ec1096f8103c444bb1031c6e90e45984692b4b1c898fe0b204aba12aebfe8efc5428ceb05135860306d4ddf3a35454a961aa1accb2e0e0bdba4a596090c58bea4b12dbc61218300ddd831a384c9185c75852d2c35eb1e4861fa8c481daec0a4893d74f093e210e462bf7119d2ae407bf3741635d25275dfc58519fcc899738698fde61d5e65095932da80a8789131c50b924046b1afbeb015663c510a30326ae5841e5075fa45177969e9061e58a275b8a300024564003262e4c86cec009a305126ce4e0822396c8e203304e400185510ec494c10a2999ba3850238b163348599c1081b5bf2e5460d3802832cab22c7ba13a6736e76c1762c3a9c00c4c86114bfbec9170c5000e7f01c6b06628c018d6f6d9735d6f064141aada51fa41e9872c4010d55502a25579ea2afd2044a534bbeaa9e20406550c11032dc8b0fab94301ab5b509c1fce90adf34fd83a5fa8aec7e30aaaf4030f756b0e637fcc13aaec21f17af94b2f62ed2ed81c95eaea14a517e65a3635672aca26e6b54777a6220eba56b0758c93cbb48deb3647a5e2b653a728dd3454bc5cc90877e9f5f2c42f945a87827f94357edca89211349bd6054913b0207a1688dc1c95eaea14a518e7158bdf60ef34d7cb13ffeaf8ee557ff6df26b702bbc1de067ba3f7ee7216b7678c7dcf037495783053430e8d902af110a6ae120f4c75ffaaabc4c3126ba0676b4fe5adef5f3f397af57417b4660a8d51520683d53821c3c87e663552e27cedbb5e9b9f65bf55d3b26c6251faa047e7cfe86a62aa6674c69eeeb21ed713be5483bebece33323db64ed3fbf1b0c1bc1ec37eebe42e05a1eb931dcf98349bdcc7c9456e7219563411a6cec77e662bbb20ae9d933a83f29ac13efbb09ff3b766f3b16ffe11454b304154fbbf8b8a0bdd2b3f7a255bad9f1e5487e6d054d3a1f6893a6296e43bfd5c522a292749a7472413cdd49543155bd9a4ee4731bd8a4bbed349becac26461929292923231da50662613938de126d39cdf3638695c526171292ef5b2115d251e92aafcaf494361ea26d92026c986a46ac599a5ea524757045daa59986c0ca52ad5771626290be3abb8d43bfdf2ff552a4a6397282682d112bf74092353ede727287f5ca04eaa21e52e3072494747040de35218c94412b54f18976a4b0e5d51b2f46385198aae701826465dba0c11490e4649944264a7ca8f9e1c8a0b848bb48f7c6cb9f4e815f5cc933a2a2a6dc0a492ded9209286f5215b4ad090b6e450eff473210d652b7c4fb6641499619c1c6ad90a2593fa556eb9814bdd2d3758a96116a6f613a0ee961b886aac5d2739c95b8db38b0076e6cc99346aa40db26cf54eff9c94aa54ffb4368f25280e0962c51e0654c5be56edc7ba08f8631f09b84a1c146afc8cc8979f8cd25bcbc8dbf24494e3d4aafd44d4586364ac45846c52c389b5c034d81f37351885865c93a62f956bd20cd626dfe91f3341ffc267c52f54c9ca430621d7c449afba31ce35e15235e4a6906b525d7663c0e739e3b2ea8ec751bde7aeb0dd715fde58799b5d1077290899f075f68ac1f44e7f73f2e32633b89e5a1816d8e4aa12dccb2ea83b0b08b1388cfbb6efb24fd5e0f6a906b70f43a76e1fa6eaf625b4cff63aed73c2f7d8b6ffd13e277c0aed137ffba9db37b6cd8d0729dbf6dcea6cdc0d042bacb801164c6cd9c2e50a2bacb80116433451d57ed33eccb40f69c0aa7d98a3d2df16a8636cd4fcb2efec63a606b5a6b240fab5e72aed937d0ce637f76ca04f1323fda6a1f4a31f3d49bd4b5266d2a0a6bdd0a086a441ad030d6a2e34a871bf6da1614fd5b62e32185e2ef4b7bee4252f7949f6eb259b106b7fdcb43cda6f57d09ea6b9bd7b3170945462e07af24266aada635e88444a83da73dadfc08186dcf4dcd43e61b320cbb375bb09304b6381deceee2c024203d47e6e75801001b57fd3bef79d1f5036283f20e47fd0bd9c069b7d3419b3af2b11550617ca0508d41f15c4b949fbe01091df2354238ef455f81307858f045c71885cd78783fdd6eb6bfc14da0787361857cb8a4f5539aaff16088bc7ae1ab03bf1552a1b9bad865b059264d0dfa4a54b0b8a198711ff08a66a770f773791a6df1addd3ffd3ff6092108c8eecaa38ba2e2507d9ea8969812c15b15ed717befcb81b21d01c752348552823bf4152f5a01b21d0f8fb1ca4abfe3ae0290c47854e5a7196045c6383385526d1f3f3a0bba77a7e1c95e45a145ddc3ee3106fd96a7190a8a5065e816088efc435a38735e385d63649b9d68ce6d66a91ff3f643908f387cb834ae7cb0800f9f7575b2d25442d254b5aad56abd5e296efc44fa1c890068b2e36381aeabe46030d1ce444438c9f23ab11bc6226bc43a557237ca48c54434dc7873935e4229c0ff94ace87cca5c6e71df8e308df909c6f7b875b38df0abd13bfed9d15da47f5f159e020393efe061688086e3c8eefef3edc2fe49fae477db860604f3fd4e6d37
cb83dc8bee6434e7b9b0f578ced534c22e0fee6c31d233e0478b59b0207c9f1c51fd23eaa6f5d4b7855f62e88c71554ababc50626f5c90281a13342fee988eb5150e233120b04069a24b2af51427b9b28dba796e0fee6230c2210aa38be6b79f86f7cf16fbc0c5b1eff1aef5a1e7f1a4f2e8f3ff5e2f2f8a33c5f1e5f1e7fcee3e1c73c96938db868fa4ac7c7a73473ce3957071371938f9934189949bdd11a129b10684eddbfb6a05fc3df2b36a0b2454b1643b09284c5b346921ec0c82245134bb0fa1fec6e28367e7f1c143e23df4ff515f6fd5949110ee2d51fe5a4c8b91ba8cac3e9f554c73cb0fab3d7959dddddddddddddfdfa8a442fa141dfd6a0fbe15e896bd0a4177c273e07457290adf1590b2a0c47e19f1f3253b73de6b5d0ea9df8f1e36ea02d1ee220497825c48aac78eabec949d9171ff33ae0b5d0607cee091af66c96517eb23bad398d9cdcaeec460814d5e33fbe427d2c72faed88b82041c04142be3e2e5a9ef85819b41ff5b2ab807f7f385805fa511f0e26c46223df8934aff29df853a6a8409fe69b81fe46d49702140ac33e163e1c14b09f3f3f2333760f98df0a1fced61e4980f1bd8b40c83ee25f1f08d847fcee1e20bf23ec23befc1e5f9d9ea3f0125cd4c345614f0f17853d5c14f67051d873fab8c877e24b31f4ab9093246480858b1958a810b162192c8a00429429a460228a9612e9f5b01157e1229aa4b5d646442dd5e630ad310cc3304c7b0ed3bcec4608b4a6aee47eb4daffe3548693010d7bea9c5dec55f7b387ba4ff2acee9b7255eca3a98a7dc67d482bd73bd877de464d70cc8baf79d71f49d50be30efbbdfe5a5fa5686a0cfa15a87f487f4fff4f3f6a69ebcb4afc4dc717899108fcf91e15e8ab56a17df833d27fade0abed221c2455e3a3b6a99d51d559dc0d3446e0fa7e1a8c2fcfd0f0aafc917df003e4f7b7118cdf5f91065d558b78569ba629fe5c202b64bbbac0eec4fff931800176763c2f478e1a439a1a9fb733680ad7b7ad71c50bfd1abf055fed192b4d679cd9c249131e58f15df09550113e183346a9280a229cc18a5f24fe0a63ae32dbea84a8a21a7ff3f77c90c282c657f949cb0259d518698c74bb5418b4a28a9ab6d51162c9df9f4586d677de7b7c077b2a6798a2c2f5d837c3f5ad4b29bf1448d9fdf1fbebf1e1a0f019011bacbbd4cd411853fd8ba8d0a07f6ba8412c68c84b15477b2156cdfbf31207e1aa3f6bc141380eb2bd77ef4e16c85251fdb98caf6e70b4afc810cda75e8865f3e1fbdf782df84e83fe9df74283fe29afc6e326cfc6534dd59c5f0a6c1e9b8197282dd6b2388aeff84fe97e59537088749f7df7b195affb8c7095062bf7040d998883705fa884cd5780cd88c196162b052af85a366ff3a960caf636efcf5b38c8f6e110d9befb6dfb60f033b5fb70b817aa1d1618f6d90bbd70a57d9034c885836c567c3fa213b00fffcbe3306f7acd3ef86dbc5423809f9b42436e19e99e7bff8df3b0cd08120fd9c8f5dc6f6702f6d7ffe57527cc201b3128b73a6c694c97f609978deaefe192a95fdac79988fb5add5696ff764c24af506308387061c6066a70f105006620b1060ea4d8c1cb16ae879399cca37f7dd7755d1bf75d87a4761f08b87a7d46b89f3f43aac6f91bf71bcef6d777dfe4041c04fb7088b89f3953b32fe42a02195aadcd1341f7dbef0876d4eb3b6f48f6d7e60dd9da6d0b7bed3d4cd5ebb7cea30961e033f5d4d5eb436eabd787da07eaf5e10776794254bdbedb6c1ea83b6f2b13b54f935ed97cb177b677ce0bb9baa13ef32ae7a19e7adcc443bd7741287f390613add4f8620a286200850c2a1bc60da01c210db92006ad1370903d7da1f6f485f6d1dedf055fd9bc7f0bbea2b1f1683c9ceb8558dce43bd7576448284cf5c7c1429755f33fbee33f45059b292ad07c0dcd67f3f1a8015d96cd2fabc6c6bb7e5953b69f01a70af59afdb26cf0336758538c5c0f80c1cf7c3844b29f1e8de785aa701659dff97eb81eb4ef870e34dc2f2cb4cf45247b18fccc99ba7d215fdf3dd6a5411f727df7656528337dbcd4a07f1b34d47e7b8ca87de65f42d8c77e40d8c7fefc16c03eb6a7c17d168a08d590aeae141af4e7a5c5c1d214349ad821c9961a58fe1cc6574252a6d062064b68818529072c6fc1c90f3b8012041f744982e5cf4cbe2ac11840f4b024c60c88ccb07ccca96f96d46e14981c464818c5314784596a419706835a252492babb97e8b0e8314d4308910d0105161db7c56ac9938468c12c0a828a2db81dc0209a81106b00a10a8b7f2004145f74c0e4891a5fd060a3624385326a63139bcd13338c46757f1fdbbff6e57e749b27babbdbe609cadfdd981448484848484848374829241ba41a241aa41d8f44834491504827a40ee93da40e8943da903424ef43d29032a489f4f1409a4818120f1d240c490700480008c0cd9018ee4224ecf823128e4638ca39c239d27194e34875f4de91ea08c7
d18da39ba3d491cd91f71dd91cd51cd11cd123d4d1c7e30875743aea8eb8231e3a47dcd176a41de900e0483bca8e001080a3ec280002381200c86d916fa8788c3d70e984c6ed515ed8ccb48deb4e284a536393bab981439543074ece0824704d45ad689483a323870ac70d12768ce0444572a9e9c64dcaa68666c7e774529113d150d4a97b0f8789ae319dd471db35a6f66bdea78389b0a4315a363f1e39bee6a249c443475567535d1d6166d454fbb1ce841b35b331a2fc616684a3f2b001dda51b756d8c00c0ed6f01e0b253eb66896c2d10ff8d461818eec9d6d0b63a618ea5968ccfdf1a2d1012ea925061e033351ec15457e9082506a8ab64449718b8b540b0f71b9eea6fbc1b9ff26edec64b7d8d67f39757f3ed5d8f79fddb9d70a362aae7a07da8942c9775490b992a1a010000001315002028100a884462c168304f7459fa14800c8aac4462481949a324484110c33090418418658c01001860880c150d7500bc2829045770dcb203c4c8542f1d3ef89961411794a4e0d4937febe59952e7d4794c643a4b12a8c4be5f21128c24474de1ecb4b9ce79acd28ab490be4965322991d603b7b70ee3431f8c0d5c88f462a24d61dbc3ec972a837baa3c6b862e88a0a0d63c37d9aa118edaa075d07505232474ce41422ee6aa790cac07dadea7817c68fd2fea38b3502e10db5e852cf087bbc95d959ac7c6808d5100ba56e525a40989383f92bde8b874c0568e42e95c551ae3cb4e81df1bb1d18ecb9503d174446e20e83ae3ee4ed60a183438383a326ebb72a96353de6565ed81151daf2d34b61ab456a653291fa534f0a443be084e9331f63cf648d46096ba457c2b8de7ede5bc4c3b1163d348d100630e43cd1441f45c946021d5fc38d86da44ab9044b959e8d1cea68838beea1598465919b81495db6049f38e4e674e1e7e48ee6f44f7ed6b0c629340a8ccadb441f010550e2242940ea3f4ccb555e32e3a96e310d3e00873a4cfef9a79cbfc0a9967d75eba7b48fa7713c492389e607f3e489a1a7b56639b474f5b27168bc393f5bc3d4726e72b2e703296972ae33569d4c039eba21a63f85728e4bde2435b2b098726d6e13a6b30d932d531580d23a3543a5d73cb6fa3611444974f77267de19ce4423802cf4e4659fb0ee586acf8bd03ce6e6a7c4b24597c1349273a8a079c41c3c54f28d33bb8f6b4ada9bf1747a21ff221e3e3ad800ca28968e5615de1479009bfdce1bf5949ef9c299d6512787b0a95b2567ce5170854701336a061b49bf4a700be1c769ed9aa1482dc2a7d0a17ebbd0703301efc55980879ac8457f4e14a0748e7bf3a444e84944d51386b027281a69f7c94b1c813c92c68055d8db16d0ca0339b222e3827365f98a69797be332d24bdb084149f3d7292bfbf9dd55c7d1745aeca55c8ac317c74004b4943d2f1a622bc8e5adfbab8b4eeeeda2d8281d806efaafa16c65af7826807c99bc985b265ce5ca045c4413c9ea8feb42c5bc45620675ea097eb9d29955ff8416018e767f96d256f1b3a5a304fa1645e808f4cad7d3d916e477611426b9110b326dd8c6050935eb7f0760e3cc85b8b9b765159ab1ffdc70525532ca21522381c614b0e7f9d4e9a62a8dae3ff76cf0d3b65eed69a3158514694adad424c73929962cb5ea650e07acecd6b4ffb1ec855f8027b29bc30867b9c246187105f17d6afd12ad1322c4093d25f89c919d3b1a24fa3737a837b54b09ec9a3600008b602ab9051fb43c1f73f391cf8d5953e2f727d64564f31b9cd67a0bc70bbbb4537cbd7cce94e2397b667fe09fb6f728bc7cfd13f82760c133747711507a93c3b4c57db846382cfe5d553e6c9c1400befe39d3c1af2d68777d51ba223e2bc76ac6936364de86daf618cb5d68d2b4aeffc310cafe31b69d551a418cd9e33802c52542226f8eb9b490a2490364be57e84155589fa8f6d31db16662fd2555f1831d395e7c1c8be1af17c62fffb476f166b7e2a31d81d9477fb6a27f3a7934231ea52e0af3f4a34a77448720ce5493785409fd71a25fb9c7f0054098ae9e215c5d95a07bb89b417517d1c8c15afb90222527eae9eb5aaffe695b0cce5a4bed0238628c064ccf927d28b8323940b72547a09be28d161c7cb6e1bb7a68bd0fcde7c00e10f5868532f09fa26fec828818021ddacc0bbc5509b906845a8ca45572e9cd6e08b962709fcf2668fa61939495ed68411b6c0f3ccd01e8eb008be68f0809fa6c3db6906f771c9cba4c83df5c6c509f0fcb33caa45867d1da96bb53a353674776509f9b0449ea8728cb7091aa9f9c78594384b0ffc70468aa71fde2257c92fc4f2bf05d920f8cacfdcb944595eb5d9a20992a69944351637d9f9e1479de7003b2f19f380ea6aac04fa4858c0aa2d5986eeb4918fecad925a6076e7726b94787065409c622d59fd7eb980aeacf1fa2e7e7f465b3219429a6f2ce5dc3213c4ac6d260e6c8cf09a659e4f894f134d8fb907198d2c9c2dab1285de9ef58879b3
0d8994d5d0b95e40747b40cb0b45058fbaa3a77933cd8d71523369c39562629e90d88b117c4bd3d09b638d2cb43718720bb355d112f2d214bb429d4a30bd9540e10b8a606e050c28f874acc8a668e56aa3248d00a3bd10c8018c807d9baee1063b6603ea0223559f8a6742e01199ab4d118e460fbe33502bc333fe660732051e8d3da1b7e7b169d5c4219fb884452d8ce8ccfa223440b0a8beea908f3672774c6967082750b5ec93faa36b0c94d73f0a5baef051ece636b184db6e50551c241b59b175f56ec54bb5d38f34752ad5a5b79df8f6f2903e98ca15690191e4579f3608671f0dae9aeb91a6e369a04d2d0e0a96bbdc6b48b7aad50d7b2ce250bfe85e9bc92abe74adeeb8e6f6a7f90b28d7ad35a5e3a37ca642aede4f62e621ac6ee232c93ee1f3e73c513974b51e58c138f31b3d76d57b82ac7aa3a86cc773e00b2cb58581f186550bbc31e7cddf27a754ba19d2b75a00705914afcec268553d1f99079844872ea1bd61096c2f23a24649dc2ced9093c2db0d56395998beae7b459aa12a7ce0c18e347a52dde7490320bac2b1ac2e35aebb2e896c41ccd4b9ebf5a84b74278ef174a401f4733712a99511815d994068b7e122f78a89c143d00c89da105005ce5c4c057a05a81fb378ef935e592bf161bd979a71c138b3c23028e973a729aef8ecf8aae230d27e33c10e847e1548abe0805f7a795163c1d5f68ec8a4b28cb17de3d711ac75b3462a4b5802a27ba782a913592d7aa33705ad190b97d04c8e355f2d878f5412fb2370836904fbb9f877de800f43c367d949c520efbaa4c0a3ef055d6aeafce23810eb3e68654253ec75ac7d5c6e3c63667656e5ed7d2d117183eba737d47a1d38c9c79ce8fdc40c2805f2eb4c590ebff0de37bee3f527c876e759c70b0000c405e2f4b3885042c9f783ec3c6c0b1c47655d98d1de0e61fb82a7458337695f0387912c8191cc3a3b1370d381e9d5a200469c7514446d47a7c91980ad07159e7650e8729d5748f3c609b538fd5f2d1dab640c457cb788f3523cf45f3af7455353978d259659fbe80caa6ca951ade74208c21bf28465e80c6b703d8d4256d9041e0d1ed65547c793f521b46dcc8caf6a2ef71bb7da1ba1995d3b03fdff7f8269b756507406d2e3e70de4ba3a6e98db7277027f4de8a35f116cbaee4f1e504d2b1f9f24624e4bc948a54c405ee1064ebb363518a94743a23fab07f14f5b9891a841c8df7a6dd32a62c15c45e85b12e09425608eeade127ed86bddcbb0daff16f013af3e23cd230d9cc84108b9698463e76793fea3f13214a004e1ad658f0ad7c9d21d6817a7d004df93a5e293913c1c1157c281e156ced69bd1064c78287fb86bc00062a464845f7f0322de4ebf29c9a64f29d13725e46e5c85113b17372ef63e545ce5ee0267cdf6d23e1e6e17549b46c6f153ae0f91c12b8f098bab8a7db77255f6ba51363278559ae957199e8c373a680ed9d94a82aaed4b848e6c3b0c35732fec7fa0646b8bdfb715536fb8dd413c34f987f66a4bd5d113a8d371f1f396f88d341ca40e056db81d41b422e0ac44fb85491ebb1a70da5f4986f503161f1a5e25f14fae3fed0f75058482cf45b644a34c4bed084ee94a161c93d8faa758d2ad4df5dbfe2d1d0dda5cba06f14cff2684ef0f3b6ec3c80177d4ec09eb62815b03ce283d345b448cdee7f437825586215a08ed17bb1b6d0e69d7b1721f015493c0f19407a474f1c841d9dced1cd95f565feb2f056467b65de71e039113ad5cf27289a76ca00085346a896513d220042c4b8ceb76655f628af256e38e376a48e38e56272c37d4d008babd80c1d8578981065c8c35193c0fb66471be2d92535adfb62728389013d21f24415cf7243f0e49b41d7d2efe9f39e93a9d2e850e2402ef8d38c1c5169319a1d97edc8b8b195398a09174f04e260383c6f1be6668183bd96ca432a7bc5a880446a48888a6c7c8b19d6d1c2be53166c47e1e584a89b335588dcca67d7d732c0ea66f23324aca15f4ea7a15eb3e8fa762fd01347d06ea6e6c0ede175db6cccbb714756e9c82e9db74c2660581ee684cc43c6f9f19a08153fb7c1b096d3a7f9435af9f355df1b8e4e8a3c8f7de0d56b1099cbcb0d8a5a4541b215f6c142b50c34c30f02b9a8593b1deb27cedf5cab274102f26201af9ec4bfc078789232b5c1801418d7408155c680f948e77e467a28a735c026dcbbde7e8d0465d74ce88d7733ad1eec92408f69fa45cfa2fd9ce99f4d7cf6159f944a2be21b0661eaba6aa2dc820f80f3e37b002cd7555e01765cd08186b9a365797e2fadcc05479f50a5e2915ac1920a4969e82c04178a9625b7d8aeffa4c5a820d18ee083123b33bcc1e3f0ee04ce3cf863f1b288d6eebef5211dbb94d8489f9c591d2dcad9fe969232b19a9e3033caa49f503c20ab951a5032657784346151ed88379322fa16dca78e669b0eb5e0f2f627b873139a2680acbfd50fbcd3e378a4934e46b515a5524daed7a56ba644
d368813a8bbe7a293b5d76ffcf79b2d194889f20e999025deabccfd7ab988db1a6ccdf7fea7f355701ce49ff4f1b6c1cbf6ec3c9ba06a6bde1fee6d3463f0a004be38010c03bde82c17245961f39f018efbeec7ee0ef9def863c7429e4436db655dabdb98eff4112e5c37a9a8d34e7c27ff4d482b3eaeca4d5c682e412eb2e687a76212dd67a7244e3b215623624a0aea03682102f3679819b174a687d2598d081219426284447816c51026a5d6807ff909f80c5e0d848493c4f0a995c341b5b7271049dc6f0e0c5defd03b764e9cbe1f1ad3baae4d2e3fad9988fb15010a21f7a438539770eedabdaf6f1b1a5e61100079a3655f38bac3d7c1cc929fa0410b8dcc431ba1e54fbcf06a8f6ba9c982c10724d8853cacb899bf9a14f0e755b97cee30d7511201ec205c99e623c7d34405adfb76067f85a93d77734141aa904df3baf040711957a9ff97ef5faedbdb6856a597819fffef7fe3eef64121bcebc8a1075beeeadc46cb782216dd23933f1edfeed446cb5822d653eeeed20177b2968f9f2b61b69d332f414ea789535d828f2756ec0b986c5af878126d797311b4949efceb06e115ac93a75a1581ff0435060a0d759de6e84bf6decc5384b9b867e8b72c0a84d7a0d3271fde955d1939277f7f6f1362cc04697afef96d8f6f927ceee70b616b171252cdaf9fbb78e62c97a4a70ab77bb781baf61a690cf6f07cbc55a0a7dfedd6d47d8b404cc841eaeb20e7b6ade1d3b202e81b86b6151ce7733ec46e8727d192c5b87eeb4d5befa01a48d40b82801b480fcf96797c0e204003d4823bb2b2c5388bf09f77980278b4b80465d95d0488729f628183b987554f7f1a471f9790153947bae448aa7dfd59892dc6f11b590fd5ec6a365c30c1af2f045d7c88dc71252807584087baaabe6205ce1e1febbbef377cf7bb1bfa7c1f9b009472ac8f9798ac23fbc12f447dc9977c3adeffb7eff2ee7f7343932dfd4190862fe3c5e3c9790430ebb130656876befaa7783f93efbe276aab1e73dd44dbab5022e85360796c6aa70b857ef92f8ce238d3dd3a1f6743812b92610bdbbc715fddd8e0c1a3575103ff8f51018ce759470d626d4573f86adcda25e4561ff68522062548121fc5b34ff6545c121db5206eeb32112b032f4e5b56642414f591d7a6c3843a6c259a7a07d705ce247dae6bc4bcd555031efacf6e1badf2f6f22d3debb4ce24dc0122f71ce827ba432d6ac7063f6e43ab311601e5175afa53464cc841f0fd96304cb4f183c7aac3ff6e9ce24b394abe70b4a18f0cd20225e2ab260253cd4727f45125ed8d983a9665efbbe0d4fada0c41c682ded6fd330fccffac3ad974271f2b1b6347f8da644b085d6424f1839fa13990949085d4a207241494528e4903caa10a17ec2104a0367b8eb6e71a121260e2c44b9d5f10bf4abc0847a1af76549e783abe20b7d665d23a376a10149451d45aa60ffe633b7016e6c2413e603e3b71922515630b6112f00cd8860cc4eef58d57ec27548e300b0b517d1a97d62a1f7205daff0fe5360b68f9f064ecedc8dfedef03fa2842b5346624c56165ef71cd1fae17f1edc4a065d2d2711db7075de6bef1bb68ae7f50dd92df2eccd0c316fa035cba203d804b16a4127e93eb6c34b21dd2e27b4ae28ab7b987fa56671244c6cc1ba91dfcaa3e888e215c5d7561804cd9a2f6f74cbbe1f38139a1a79948c4257f11f64355163b70b013119320fe6203f482499e1594e93d527d71d2d5736cf0e063d513634a3096b328aa08ae245445232852dfb4cb4070080b970963ab5c14c9d117d64ff82b364938a443e5963a28823c2c20a39d9fedb743c89bc811ccf377f114d2d563c167c8e4d55a5f3315226159b615389b8e77f912c05d74b201d484f6a312b6c80011a7cd3b5bd4a34584f3de9c4c74ee5695e73b62f4822b4e0e012492154c8ad4a6ba52771d3a28d21577a733615aad850a0cebe128005a1e4ef2053c7a756cd6d0a67b5b5f2dd936a64852d19ef54c6374636e3d9becb6b5760425ebab5017d37b0a7cb13a512f72a7a5ade1a4f350994d815d2d7b5c4bf3cbd5407f8516d33ea87422e9d3cf4810fe325bb32c7518cd33d7ad1a142483d76f4610b9842087ebc8203640abd20734c06ea8511edfb2165a8abbadbfbf9083661868df56bc78910a462c1f6995a4c60d1578abcc34263e6c242c3e27c1d8529ddfbaa03cfb45a12e432df83b0c61c2c09331532bd2fb254783b1ab6246b1dfed37d724b3a7bf94c1ebdea428eb95a381d73a08bc484ddc3909fb35a714e7d093f22773dc358da4e9442b5b90d9e1806c785306893be2806dc462206162da1013293a65c7d3931614945f4955b37676df1d36385c5cd8d1787f11a39e34023ae104ffa9ddd8031860478ba56a0423822afe31d9e410601c952115a63bb02afe37e97708047ce8aaa9a0a7ca16b426e14d986e01ca0a65884a98d8574d43bf5304d68d3eb6b9af9b72a09ca96a057496cbc6dd81a1
a6c804a056f2d5f7e71f939cc8112f2cf206c88c621f91cf34792cd3a4fb644c7e476e62c064859d04f834f43d4a844440dd07db0065db88b1be70de19f5fc7230910e5baa6a3c3c0492f373659572929c5cac248acd6dc8f3472b6d0be040725108cc6ad3eeb1d25fc19791973ddf956ef062275bca0c053ee348e3e52d7b5808f5cd8ec0b431da9f1e1adbf3866f9ff862c3f788a29dfa3aefefa3017c8fc9869efaee546334ad6b73daf284f2c2abc7a76955faf035c6d6824700aea6b96f539d48a691f423c8b1936799a7c7d3eafcc8047d5c853ab40207170192fdfca9d95c593a913b6446e7acb4c951675fa9d9283da0186a36a9fd11371d313a6c9c89349ade37ba66b6ce581e37dd1a2bccbf3c543e85346932974a88afcca30645680eb5ce6f020252fee7bb7d286f22cbdc9746e7e6aa14cbb7a71017c7722b86071d14921a282596e42b05b9a734a7ab0f56a6f370fe46384a2444f957a498bd51586c0faec01e6e27729e1eb7b2e7e9d2a5f633dd77daafcd2d2336380b9086b8f9f653ff03157d1b9cd453a8d9284b2ccb6c3af29683f5b50b7b63d4b05b4f4382bee1a809b4e3246cd39633c30acaacab07948375cb8aa1f42120a0b7850d4cbb0acbfc053375e2a3488365a7f9d6b2c1b2afaece5d361b32c1454d878ab1bdf6603a803e46223791b92766d056908385d5ccd1971de2ba965a9e33516a49bbd5fe472c5c180563d46f7da14ccd946409e35f3b21c29f82e491a156a4560dbfca1a74cc3212e6df5cdbeab1c9c6b40db98249c04135ac5a042037b562d0096b90a5c58554dd51be286b4a883a7947156802720d788bd003725190968d999951815968c640f65ec7a9bd673352c757807f0a92ac7a148476b530e3e25b5ca1135dc82a5ba847bb8fc5903925a62f1a55e2a353b57843e989e1dd297efed38a175c1f2ebfb95f6905684d82ce0eff24a1506ed6a5dcb4ce9f74187fc494d974f3045ccc4e744f0e259e088a4763931037d61de64f9d3019e259a2dd12610b5c92991ea09026df91145600586e0cc36b55e1a3301c4b6a1870883ae03c713e6697e2aa112efa843070df34e038f5a67be4d2b05c690db0627a9779011ee997c358d11ac52896bd46fa369a099880f3349d3ba6e58a9644e0447fe4af7d7324a80d1914e4d2b5d44bdd52fc15e2da95e103e9894f41961a655472142f518d513e4869ac8b725f0876d47b60739c084269d762e85af22e764d8db1dfc0c8abe2647460f8d1f31fb4978274430b1bc7eb0a8ec7834d22f4cbc1f6bf4591f237eb2b867f6e73afd7a5b3261f103826d4850710b2979e5f95675629e179649d24d33cbe92ac8da18c3b5fff7a2d8ad20ea641d75221e2b99ac596e37a76d7fee1fd2ed76303fb5a58e8cb6e3eee61c19549bce8d70586be21b8f2f32c44bae795979248a14c6ea10bd1510df298f227631a4956c8c0c690bb46a86c99f251341c728d7f2c65ddc61bd8fc3dedc812a732778acd0515b7ca3cbdb13271cbd1f5833ff9d55e1dbdc9d25b288f64218f9a819f27daa2036e17d8efc7e88bb09619c9978a21de472255c58a2bd10686372b3f8ae73975964a144f43ab0b88e9ac585f32bf9297fc1277678fa3e90677c937445b17949da91c18347171fcb00896433711124114cde89c4429f3e5cb0a51e1217a0624b148ee83acd21eeccc37be93da9b6544a832ecf36af1d93ad63f9b9ea5275980dac41fd255830f114d60ac8b2dcc055612aba40e15762d8dcf1d88f15a7d194a57440ebe78ba531f8cd578238a2b0fa464a0c3a87e258fa54dfd48409111d6129b077cdd5975228c532a681907320c46e7a00ebc8187e0d7f873957c35de0ff10bfc983aa0c8d76af681b9fdd443211bd11723f12d3b7f415d3b6b5f423bfcfe26f22508231f503bdeba8cd17fa410be08ae1410f42a3168c823953ea4e655f531e3fe262e9e14005fcc2e459cbd171e40216f6ff14c39b061ed90e92a81623286bb3a637bfd9ec5a688e6494eae38caf8443fd62c7deeb87e56a53fd99228c0347cf72d360b070f1e5abf7d5c6435245607eb941d86e0c54e6fe7885d1b9f5836d513a220e12e8f8fcc2aff20f07596a4156b9e81c5a4692eea0de260ceaa647743c89b881a2308f761af3d28f1b73317829eab7ea7e169231d88e03f6cdfa559e7c5c7b3d8d38f6a0682f4ffe7f733deab8ea2f09ecfe787a35d356a45e81276f9851792f55ec9bee8dcf54658b99fd12a52be5cb6c99511b3068667b00829686322b4f380ef2045d6383db1e519c919fd20a8acb138cb2cc1352e78e048b3ed3c5540e7ac6941ec7241442e3cf143332e5844c4da6945c6d32d85908ace89f99c0f2676280dec5f4c5616c9d7ea1bfdcc44914a922cc166612bde34e894dec0750b36f9450cf3c0c4febc2c32e8c9775ddf162f3380b36627915796f9275bada588630cec82a9b598fcd5a0773f03b00c0e82487552ea6935d41a2a7ce2d07
5ec6da0c4a1826d8c89175988f0cddf85ae3e0a58bde1f349c1c7117f701fc191a8d6e4f51d181fa4120bc3c8b37eba33cef80c1fcba04f8f4ea0023f3a51e48be4d9cdc936a758e7b5c47e8a88b8db60fae75bef7cc2564529f88b9311924fa976acb768f8c532c7fecd2e788a2a84e522483a2cfa42c272d4604e10123143347992d34fe31ccf6575ee2fa0896348c417c7dc7def7649504897f1108f53c2cb4f6e3db44cb3d7f40d076593dbd94d673b2a8ed99c10c49019d28f478f45953f0645936ab1351acf0d0101ae4cf7ac2cd0c2d42e702381f92d63b3319fab5d23ab102303e6767ea9351f6bfb48cd09be121ab77662f73cc816a74917bfbc7a116d188d8f003658db8559461189f5e921e61c1cf058610b5e3868f4966bb1c825422f41e78d36dcf5f98b5df4d0ec6b295037d72dd290b2ac49d61cdbe1a9f4855fd52b88928974431c2b94c06b2b40f160415c4713dfa42c890c148c361275bf410ce56fc15078e0de08c2054e8ecf8a828818d53372c148455e6c413e6b2296ee116077200413c933686c116255d916f2921ad962376407462f9db0adfe7cf8e1645e7d00c7661b005051a834419e3cbe6cc38635400f900179439a72bc5a11089cc16886fe90f227507543b23375bc77fc74f2df2e7cd0663697730e074d37aa4733b67126934a681e840c31f6ecd9cbb1c5e9e513ea99c43e6165270f0757b12237b1a1b0385d8dc3a158dc2f55074ae814e75d3553898562414584fe448466331482f76beea91af7d9232f890838db6a17b49879615df9e435a877ac58d985e88f0be621debbe926892a2200d9460f22632a5236266ae29f7dc7ca9a88b0ddae906c82e7436a580b6b82bee88332503aa6e0f12d46b8edf358a6693f898d82a205a4f6b5e55411c0fff401059308579f133f789fe022769b4ee07f3988f3d14545be2d751f927f2501da6a518dc24ee6f56eaae55d0cd9bca479b43579fa442d6c75ef22cdda0669eb8eecfa364ef0a398b6f472a24871ae5c5e91f7fa1ec7d08ba236ad5f4ecc399f205c958517e94bbf39059a11008be638b1db3cc14f63406093dda804852549c8923905d6199ec320f1a176e5a574baa793c8679c7dde414a57f406a01f71d5a0253b2c5c4dfaff066e6043573466bc1c4a0ec6c9be6ce265120eca3643f52ac4c22f0a00212066c374147ec25f3323a849be19c2b81b1be2b8c25c247570d9c00d06651825b715fb8eb8ee44d18099cc547c2fdc77156320d101c83b0689b5c1869d618a4dac2d30e19994938c7b8c315109248665c93ac559a60c88131dde186c814f6f631c6d4ac7bc054d411b8161cb45f9fb8d4d860d5a19d931af44fcb3ef6f49a9ef6a76808b90b4453008d31271147436f2277d7602074b78630311f328c5b9ea643487ea315bae2d70798a4c14a9ebd54a5b57bc88974fc85118d7cb8998b4d910737f1aeb46118291d40b0bcc2c2428d52e3d941326880f955e7c365e993eda6e42031ba254026d82b11025464f6065f195093b1e6bfe3d01aa0eac5cf2ca465ec6064a8f808bffc3981f688f30845f6cf801cb7e9384ab4140618d645f3cd6462042451d4cde3355368f10c5dd7c7980d8431548173e7201d128a160cd28f1cce816531c206375306852b17b7754c01bbc5fee34954330123180bfdf104c96ca02390ef17b9c8906eeb2747f23bfeff329e5123f2125cf8c442a4fc6c2e26c196e009e51a765932fbcbaefe9004df112923a1f646fd9dfa8cfefc6dc8865fcb520f8713064a1883bdc935ec2f0dc4ab6e798c5a22e5c3cbdf50714c092287ecb6f0519b82f1211e105d05567db9d92dd304c4ce97a8e3456818a88ec1e349d1a59bc204774c3fd34a5af6e270360ac2b61591183e1f91e63e05a0ef60fc7f1596386ae16c8e29d403f5655604c242a234d936c74a29c8e43425c97ac29b08827dac10240464f8ef602286dd5f16718301e18027022e78f676b28212a806d7c30dca90f77d2c7d792124dd2783e35bd6e2b5da40d179b75bd1cf4f701a2ebd1a1035e4b1a6a931515c283406d5453ba93426a7b1a185bc2bb52436928c8790930b8194e4c7905595f7447e1a13ec26228bca93286a22d956f4907b23de951dca751c50644c730fbb31a332a4074de04e42235f410eda07052476cec7e136ce1c32ba443e9d46191cac3ff4f1d85e46fca272ac5c1226d195cfec4ce1300016c1c3ad1d9e0adfa9836fe1eaee98a6ec8199ad41e7e3382ac75eec21265522a472702cb357cfbbfbc2c35ec9d1e67369c9080e53f9f8528de2af1f88c618ba6150eb9d5a4630ad4ce892a16f25e9e5e449174d671ccc83e317beb0daeab48fce570c04b122e2d6a06b2483cf841db6e07dcab2b42db017ab0d33b749b3a98893f670573ace89df450791cfed4f8d9b09aa2b18e95b71ff01de4c4fc9475ad352cc13b7687063ea93aeddcd0240db3e07371016e6c0fea642f55542c3384d46c9703ec
9cc709a48f6a5056069d1c32237a5bb44dbe7bb43da6f79713a3bcd56c26c845827949b56dfd95d1fa8efe27eb8b8ea1d133e4e02ec42ff6a5082f9c8025e85fbd83f79ae77b09078a721a4da10c0d4b49a24a0da4a2ab6020b316e0c88ef59c1564887aa01b17530578c3f028e00837d511c81324df8f29cd596c3480d56a75736ac1144e7adb93cba15c5ad4e1ea738aac78ac2bea3852c0d9977c7515b4171e37aa62b24cc50d9162f2ce8b8428c1c0dc6c39e3c5db15858fb4de21c2475704ce91c97934309580b4ada067ff41766c5964d618eebf43a2eac6831c59ed0a445a1aaab3e3a49db7e7b767552aeaf974d271c610b1b90837384d8259fc899c2d1d1ec46d3db1c52f897fdccf2bc1488badfb112caee661e8dbab891c28912a04d16c1017272ea1eefb7c1c7fc6ee4da4524e811068a652dc8843b92e00b0ef48a791a628c3842e3b1ecd27fdd09b0932da7dbe5ad5354eadab7b01417281f2f183a76cc706e56ca07f89945a951565281fd763fa6862277c443adb19572a0d4e92bbcb48830a9f05ca98e5d2442e9b637afbd45dc13dea679034cfbf1ae2f9a7ea21be8df8f173d11ba89dc6874c3575c6ae3e98bbddbf9747bd0dc481e4f128c09536ec5a7ae04136e79bce284a6c3c92b97e01a23de777caf1d4243512da891ca1ca23a0116cd2275426c7442b59e2b5c913a370568ab6a3279c050a3db34c0efd33556f380ef1debc4d5ecea354464396482ff1e57f3c19e856a2a1f7d685106644995bce26b23d0c1024f5f14829536e0b274c0692b9504eb5aa37e1201a3109a268436ce2634c97efff9ae51414b9cf2909e28c5cb42c2f2cfec9cb9a01866f105d77451991d08b2f7c7a4369c799486dc832af4e041837e563f76a447f1ae511058f743247a194ea9a9a66200579880f8f3d0c9544ab745510aea47b4a7cfd03a7ae910cfca010d82599d6ed742844f8e8d7288361f57d9eb847747cd37cbcd27de54ae3952d257b2dc0fe14e383385abb0688b0b08e2c4b07dc36131e0792ac971c5f9afd404cd9fa082831653e2a15ee41a5e35a8c879405f333eca6560c0924caa802825eba80fa43e5cbf00caa6df0fb8788aa45cd544b0c3641160ef0cfa04c6840bf4994a1825d0f1191202433b5cfdabc266ef387b79dcf323f446e8b9671337c0a2380f38af3cdf6899063d66bd75fdbcd245b07fe5cbc2b6c033deb99ae80e73c415c584d4c48b2a5ca89254c938ff238a5d182a481e167c269c380001e2d899f25c99cf960c53acb4a93343e5820f4077af8290d685b5454193f01be8b61a34efc747a8b4e8074417dbbc3bf95a4a222fa2012e1de1f8ba1d285b380a1a63365d12bedf260011ec955356782b8cce2ee768f37fcfd883228cfe1da1a9d4413fe1a06bb8ae61742981520820240736dd4f899dea452697e11edd69220abaa009e6b25d0f64df55171d07fe890b9252f9e8293fe28fff565b9ee4d7ed3a43471a710ab39cd6b669fa0a3270a95b0e118d04156e00a851fcb3a002d5ee549e15c446e56d155675d1822734d75f9d18dd6981a25f320a721279d0e861dd1149985217bb05a2b39565d3fe80857d338ef18185bd48e81914edd27194abd8b13b5660fbf777946ce5a69586699d631c14bbd5d7762ac0fac4dd5adc08c9dec6d205261e1e62d2fcfbe570dfc4f4696d89323a0d02673ca5ceb7e8b7ab0decaf25e95ae8a55d49c4dae1536e87417cd75dd869e318ae76b053c65a2e736ec5ad05de704fcaea5aef7acef6088ac7050a38159200bc329920a86264f17326fabd8e6b6c88089be9fe5f621186f0569a9e513a472c317be7b69e9bc92a6e565d80ed11de9114c8fe261d2981996c7132368d0df01bf1bbbe0cec6c595ae92b9de729e9c4b30c7dd2d533ef5d846dc0dbcf8a984be7530f17c56d3d28e9880b4a048c99087ae80eeb3b26bc289b01246b9f28881c485a19c88133f99c5a1cf3917c4f3429a315828814748d5e2b349b459e2ca8c6e2972a190edf824e80ec215e3614d84377487bc2df9bcbe7ac3790effcd3a11f8eabd299d5c3c37d13daf1f61e6187053c23e1a90f624720e1e24776691141576944655bfebed203522f942829de54f750a7e946f9ce7f5df4a1e973550ae1989cac8a54b579a906897245f9f34113449ad929ec9fa2a89ef005b943aec83b533563939446d800c2a5410a4d110fd06a7aab426e5de671cf90adc430b9c2a263ba31f9b646872ba321193a1d785c2067d50db404fe5b1bab5fe97d9a482a35c4d8b07313b4298e49847ee244b92446e41a5482d6b7f6f2e160879d9d0cb035dfce13894cb42b01cafc7f709c4086d2012b5b4f4a2f95c218dbbbd6ec1547a7c09f14b265c431e2f2acd3d198b67f86861d5a59bf8f9ceb12723da048558bd37a71aff9237f724329d09f1bef8a4bbb6d6ccfeb4129e5a5fb7211aec94a904a5f095d93da4a2528a5a1f63eebb245a9e29606bff21e7a0ed706
193d81ef52a4244d5056d095162928e85d4ff2087ad143180fdd399dd460dbf4407cfce8af7ac96ce250aa07d18345aa204ded2247c498c67ef294cfa255d9a6d1f0721d78c203021cd40a0a4e60e80380d70832bf3d494c5be8389ce3dece6ff4f8b973896f22189164cc12ba719bc47ff1ca5cd9bd5f002b48da4447dbdf98870a6f2cece3e1c36540ec1ac796f1c97b7102ce1685fc5d8aa224fa548a1eb20435280b3bf025f88d839d46367ce877103202b1feba3cc10a7648ec27bbef8302f1096cec1065fce1c1a12bf56624afecb83125be910dbdfcd3a78712d8b4c07cd33cda4ff0d013088903e482064ae2851425e0c313f6fe552548aaf0bd42a98ea7652c3fcb2bfa17bd6c2cf5a2897e8c2b495ae24329ef1b84435c2c33ff85c8da8ff85d7c27b47633a1f17b9097c1613577478bb9383d8fd3d388d22ebb4ea4d9ff5a19575e5bbb28d30ff261e8161cdbabe222fe20877bb47b03a808f4a080076c7c7a508950598dcae66821abdd522067c31dee67939ecb6edec243c2703fcc034c9871cbb07ad33d3feb93ee1b96dbab1b2ae26bfc3f64a54d9bbf7fbfabefbb2004499e422fe936f7a7bfdafa0a7ef2000787237aa4944827f6612554da503874d6d8ab7c343271f57dc9d68dc769bbf6b144f0c5d8acd34b2e68254b5e4e67eae19b129e711be9904a8f067761072b0c68b5ac9e1148cc81eaacf13077248adbbbdaf455d45632ca348bd426f1ef5dd44b28f242fc116e119300e245faf954178442f2ec7b145b1cd52fffd861479e60445651bdfea4816d4732a057d6f81372719005396f0994854bfcb99beb036fa8a6c3b874fe8f538842a07c53f10b5e2acedb83074834edeb4ccda33cc206e410fcfaa172b4461d99de68cbf6c130da672f870c12fbeff971d0433861c8c3b2beba832573c62830289338381cb7d3ae40e89410475444237e20be9723cc11fa0b61213b6a5054c1547722fab315ede83cf748c698913894c4fdad0774d4008971d50f4411159b723e972385aac29d397d9e59e3a653774fdde9baa5842990eff511a8ef157d981f9ac05f886d1e4a405ee31a0c0110d6d4215e6a2f44205e73e7f877894fda050108d7d8215e6b3fc403228ad6f3d3cbba9734ae74a98df900a97a04d61bb8fa7a59a58633e233256134e83c43f35000d21ad12484825aebd741fef825d31ac48158b3aef8ed1a8f340de1a0d7ba43bcd016843448b275677796d702c92f946b967658a139de2397af151e73c40b28fad68c12b442e7581c415aec1637d86c4b20c68a56595088f6655f140e09ef18d75676fe3034104377f81c38c5f8a26d59f637e607e6e93ca77f4ce692a4ebcd98e4f1731aab804125eb68c8c17318abb0ad75dd21342315196a95f27e6f15a4ad6e4dce0e36ac09358ea301557b710dfe9e895ad62121c5df4a0c5ce6314133c9ced4e8023cbffcd05412748028ccbc5e72efdad42a27067a9e3570ec76dabb48b8a24535e8d4728207ef806c66bd301d6fe751ba70a3dca27fdf60b56e8c03ce8a8b01ee54c390f2cb213ee9ef50f81c8c6a8c28befe0600d7efd24c96a76d0f07b2e7cf2635219ed200d0f5759970d4faa8bd6a7f6363eeeaec193a8835a9c349f5078d25c041d93c8958fc7a3db221f64a030607a3318667c703af9d17a044b14b00ff08be8a7e8ac95052f9b0aaf99968534d210d0dcbd2276e7f890358895d6a81968dd49de24e8c885faac7c038182a1374a104106efd8b98c91dcaac80e7d8131ff68aa91b0836cffd39ba2b7c7652c37d2411296913436a2e815e5d0fdcd21eea1d3011f218fdc28119743b391643166785eb1b67a761316621d3e8b18ce59f7062398d4bc8d2715b1d2291a32105f85def5ec0dde50b7fb742cbbddd4274fd9148731759846b432fa7955eceffb0056720941df74c279c4f23a5b1725b8405e499089501325d54470b20b13f87bfcd11139e744f6a87967a9b1f6b3f1d218282adb81bb46d1f5bf33bd1761430075201e2293d8f1c8ad21ed1a22e313322347f41736a3b298ee837898798ed53d0bc63b74a2fd6b350ab016ff69831f06f1c7deb42a6b48e49499d10de5bf4775a6f1ea85eb609ce0745c008b80cc26ffa5cd59d4dee526321ba4d3d918d05c7f1793b940b223d715b9646bfdee62869396eefded10180e6c30d85daf6d0244ff3862d6d071eb960ece3d419c75e021fa15527dccab6bfba707e188c89e45009a91ef9c1cec23421f4d491f2837672ec40a51c030f7c0083bc7cfe0b55589ecfeb7e0831c6fda661ff0319d3a5981fc8825bf0a389e61b7a76c00b40f5fa5f4927510ef7e3535a60fe2f64527bca6293290a28c2c13d16c91516113549cce0d097d08102afc365b8110cc0816b7d89a4db12ea7a08bdfe33d402ffaabae6b635a5ee41a221ee6c783c57d6a29c37b7cb51f7287a30e356950ddde3212dc92b5ab9227a5711929d26b885c4db511c495dd95468273b31c
63ab5c9f35b880fb0358a7dac3f14a462b77ae65816e1b65e9f7ca0812709631275a78ee52ee527f5d6b383320adb66c29e7614be38050b6bf1f035f7509c345b7fad0af6ab319ffaf3e0cdaf8635f8d3c67f9911ab5dfa6e307bb2b9df31ddd19abb68e4182054374d6cac108fff5611efbfd62e47abd2948502d1ee23c50ea3b031fc5c15f7e0251b6f0ce2cf6d024bbfd40a83c543a1dc09558501ac71842d8b1573ced9997e1fe094d03c9c9b1a59458b967ae838a7c1060ff539839dab1304e00d5f84e74c7881200ba0ebe8d42c45b3bd10cc44224eb95f420514f02f9a44046c7e57f9707dc5e8a767bde0dbf9d498eb9cd678518965913d86c3659c8b64f55e4b0dd86aecadb16498a4e2bb17de4c2ffc16c8b6b4836a6d0f499f5d773ca7a46c867f86042212a27b357ec972aad4f60db000b957d0bb960efc274be644d5df222f50a051baaae9d7a84f9b224189de071ed374b42243839331396aa21e15a035aa48c8341e01a755020cd251427fb4fbb0714a105667fa76b1d1d0f6d53781e390b1dff41d3378d79ba09a1e71db8587903f8d41ee443812932a683bfd1b004fe6313a75e87238a8b8fb0cb2be234167d0827559982ff0e6fec69de316a0abb64b4a213cc079e81b9b7257d496d56ea4525cf6ae2ae14dd6c9a68843cd62810dfd267633c6f6a776519db96c52b540cb81c48384da553ca8fc158445fdb9df28ac2ce3206662f47a814048c0dfda92a3000db59bce168d290c249b6e6c3f2f3646078e87768d4c71ea24ea4363435fe7841449bf0988cc0c2c8e9b6c0a709399603709a343f2d9bf6e58b7e35c6e1d798cfb42872eadda0516032752cd12343d13ed5be7ecbd4c421712c5922463328010e944f011a3bf2c5f5b6513b3b9c202f38705641fa6a591f3a0cf7f0a3a576c46f00e2d21c183f7d8ba0bfd68290056072cc0b5f7d859d4d7650e75ed87d285f9a16b2384b5296e271b881c8a4c1f52f6b4fe42dd0e65f026efc8e90ba3d7a1c06fb2722885199114dd93f4fa6998ca76cb061d9fd2fbfa6b76012ad40636f271bf34b1efe9b4574027366cf9d16d4970fb5252d97e636303e394388c91c4376d04010f65741504d536fc1704135926bd38b812f8a4bb8b702b3031fe10eda80b55bbb201845ee4ccd3404ca37aeee619230e7617c71075b1724bcc13b82fefe1f28b50d37f2b0a10638d61bfb1c1011ef557851c83f9786a3c4fa930f4daebae17305b959a9cc6f0cdb97f9b3175e3618214d76c09ef7042cbdcd1fae26ae6426bb193dd852b1e2e31e7f67f3ee63545f3f94c7fbd5b132deb1edcf4349efd07556ab2f9c5f33860ceca872580cb08485cefe6667201ed2db703f193766770bb628927f1346ae0e08a3b1a95ac4c712e188c25ce0f1f3556cfc9e2176a4fbecf8fe1ba637964da46e41b1207caaab553c8213bbdb2fb2115ff31298ed5e7d3a682702710441c40a4627f93d1b096765af164a957e9e2a2294c25d37ea998151d2ea14ad9cccce53fb7df340fd4711ba5028c3e2c5091c9a16b5549db1d236decb82223716960bf0499a49bd34f3f182364535262cae59f5776a4296bcec12a2d900e409fa90e6f261e7124db1182fea5e75c698fae56df2c6a779d009343fb5d1c1e1b80dc4788e7f56a1ab312133a6c9cdc02f89c0c6658963c232b28d1d31a5c30cbdaa49e6a3109429696761fd72c6f913f155892b4aa8b8518a8c7528ea1df190159f45d34c1170210fb7a853e363ac2563f0318359876df5dadf979d25f5118eb834da7ad3e0bc35f9aa9580d01636f5e019d7f278fe8aaf0e28ff8944e55543d20c32739dbd1ce017b8f03d810961a6cea70ec4ea1d6b829c2ade62b695609cd03a1c88d0fb174c343bcd35be48cdc75ae7ffce62b0cb4af6b001c2b2797d18cfcbc76d56e34d5cf0f2936140c5a3e5174f9254d815214a8c2efe866a5f77e3242640fec490845fc9f54e3a7986e0a1f6d43ba411a1c09cfc4eb43800f854007eaac6d82d05896b97843cff3ae1d84a07105ef4a59034ecd067de935873011a20e8fe79d1576b17613294c06bf36e5af497808e283f33f7aa9bb8c5649f237e99714d71fd35ba632f155f1196ac21fc36451d2e5d0da6bfa5d45e4626e76f910d0763ed9d7276520ae927716c786fa34edae44e1a3740a6ca9b3201a2287454b0321084a6533ac0ea1eb3166b93b69619da5c17e21b9407c06c2393c33129ff65cd4af4be3479d83b6a1700d0efc31d00e9a57cfe0fddcd06a05f5aeb0daa2cd8390cf1e6e30eeb36bd4eb966cfa624ed2aea698b71a525fe53a64d4bbdd84b597470f936ee2dcadca7031b1ba55d6f1c5f71dd081b8b27b209026ab924481397ff9e83bf48a83e9440306613e044058a8b2bd796187a253a54e8fe8f77007d89868c3d743e9ff2335ed804745073414c567ac56d44c320b612b68ed760ca3f4f2be1ae6ae5ee8554a186e5448e048f019c803f401b274571a
2d5ec919112d17e09f8b136d00c41ee3a8444d8aac6988f481458454ed15c2dccf7a82f001ff6372c21f2b930f41f25d0ed47d35962e1fa874ac128005af9d8f97279ab6be85dfa9ab20808fd74257efbdba1af52531aca3ba1e59d32cefc724246e599dea7c8ccbcad45654b526d7ab984ffe4c76b1b1ee7f035d427c8b3f654611e71ab743b5ab6e90b9e21f3671c7142328872332d82303c512be98b206cbe0df650497ba0c1e914e9a4c16e69c9840df6fbf7aceae7e6d6a9deeafb53a4d1e6333ebdb494043522d41cb8880bfd56398da03a52af96c1905117eee15d7c1aef4d9811f4ab18c69a3661a11cd8dcba6e452f2eb118b754a1515ec85d0c4a41b3e06ddee2f3ad2b566f3eb75dfced1856d1a43371f0bb2e124c5da94551654caf91895228c8b05cec427f637dc3d86988cefb1e1da4e08f8367a775a304f66adf781830fbd775e89bfa2401485af3108c2e401a39bc3d76b7ab7f5c1ec520fd03f098fb8dc056acecdb4d12248269b2a7195aabea182f44d335515c8375e2ddc5b7c0d066de300af4212f08af9fc0d88af1c7ca1f1fa77be24207bb80a136c26113d79d690243578b8d1415659c1fb6738e52ffb5499f2ab527140850afb06022138aa0dab142dcbd2c7ed74f6e2a77e5e0e8377f7bd3e7c81946e9bed82626c757ba85b536d859d46b046336ecb3c2a117abe207883f53c978f321aa469170cecae3f80ac928b831bb035be77a083cabffd135df035963df9f926c4834db2611262c9e5f110f70842d0b5affb2c34a4636a8eac3bb395227162156e168ff734ab84153860d9039b191677650d76ee347ef31a3ca3de04d67a40fd5eab590784133f5bf3198d99f81ef60acaf9102310e516b41a439cff7aeb800e8872077438d70b02d26a423451444dd0a09f27d4e53a01bc8cc2816d7d8e6b9e2e0a02aafe26302f05ab210e727520a8960a551ebfd707fd3d63c1938532b094738e26267c789e7fc078abac74a24fd783ab1cfac45483208e4bcb0a9c5efeed27a051d0d0d1f2d355e3b5ef10eaf05ee05f341a1a23f57109feb3c69acc2c6fd4e6458687763deb82775f928aad80b4949b26a371b497bc6ddeaaa24c9ae7d0dec0971cc4d85ceb9f83f61e7f94eb8ea017afffdc96777fb43979ce484f15fd61964dfbaeda24eaa6166bb979283756786c768ef87130131d8788a7eef777b05f92fd3ae599b6b4c0e634631da0415855976e0c1dc557e7e24d615c385585c96d6544a0d494067186b80061eea13ae8612520354be4050c6fb99c4ef70abcc236fe3c8447be1d50538de8b3e94c8a7983e3155c1540b1384e84774029884769a83b4d65ea3a20abe28a20552b44f72bd44e029b45b8251af12f544c44e8354cf1e36d561fc582bcce8d76d965041da12a847f560a332125f56a5de351824d5adebfc784d8c7aef02d7fce8411c82eca86ffccbac7e624bef9d5389643b2af4eb4061e3f760d2df92016d021b63fecaab360e10020ffd8c2afdd1b91b6ea472e317cf435e3fc6dbfa6e6165e6f93eb1451270cee98d0ebc5844c63d8977220cba00d984c24bb64b47eab41121cc76c9f5872e1c92fa930dc64e2e647986e9364fc1d87abc5c8307e123b5b3d9978b6750a9d4597e7dd006ba69097ef76ed8d171ec555f405b366617f95d79762f221fea45d1c2fe60ee43355294ad29f4251e420ad1184df5e62c81ac189b009bc570bdc4e905aa360843ccfe23d5f84c9d4903b4ba8207e635fbbadcb24cd34820cf57ba06544de5274d914f3ba1c6755e72e955125954ba42af9843d9b524854916d41b5c06928d7fc537e0ae12666d38c1c2bf8b7c4e524b2449e356282ce713ecec8aae7251bd809a1378d9ad58eb79796cb0371502cd829438ea492747b35d5d043a49166c184010e2808e25cdfe295a7c802b9aa59a3c7799a42783bcf46a007dd34c58f99e72e15e9da562305469566708007d6cbc6a726330d3e4946077689f21206022d45fc7c8d63e82503d9e0846effa3da1db86bbce78bdc4a3b0baf2c9d739848f0b7d0208aed8a27f93678da443ee7993351a991e297d5289ffabe90cbfb169bd76108d89893989c8917d797a9204dbed97cd6387737de8e62b32b9497f1f689839bbbe772fdba2d169fb85cff9f6372089b5a9d1c22181e37d643e13cf32453b92ddec8cfe5254361d7433870338b0e06745c041587e8d55cdb59575f0a6773e81e0251470e5d4166bd152dab16b5ebd0195c56afb7286c3f9bc827e2d1d88a5aca4cf967b10d800893f341ac2393c26eaf51e3e80b3b4933e592fcb8fd14cb6689930af1351ae862a111e931a333d459f9030c63259d117665be625ac61d0820db802b0c3a4eebf49a60070b4ee66a57b4dba7470e38cf2b318184a7acc7861543fe1f9092bffe78595839b46615b965231dcf35b850daca02ed7fea203d529eb54d95bb8404fa07a097fd39d942467973e543ba45f125b33c4e592f87faba0e8
8a58879b70370affab55adb4a0be3233cc00a76cdde240ef2aa2851c9c1207510588b08780b05760d849d7dc3810051520df21aaed6ae1714021239275851a052f5c5908e0183a665025c8a740dbce56bec23f4a3a2f370015c0ce3dd0619e1fbbe9dfab828bfefaaa261a83c066b14eeb81eb78749031ce0e66078e307bb6e77549a57613fd489cb07309901db6aefcfcd2fc300e872e07b85e3668d89b03a1614c84cbaffe285db5f04ba8b5405dbda0609e0786e134de77f16c481b2e825269093f4da95db1b7d74573f2116b001ddb0b2d94a342b608fd719c06b950505bf104d755ce44361c41fa655514a55d53b89a93bfe9929c2d4bd902a494730da2553eb5ed1d6c0187d8fe81b3ee2aec18babb1df811da610f8216f5a6147c1c9cd5074b79e60340b461323543107da2c9ba6dcd72e70a523cbbdec4561520d0a4349218bc32c7561cc4fa2ec23f1953460c63ea33dbd0c338df65307f52781c6f06c4794d575ccda2d29f36b2019b73c7bdf4b145d7758f53bb29c3fa52bf60c4bfe2e924c20c125e8ea7bcedef2a9618e90fa722be3bbd6e568a7cfef39ecb8ca7af73adac2265934677b029c9e2e5e7a03be01e410c89138f2381b419d026750820d81020b390d7fc81e3d53140d87e7641916446dc106e6c0696d7624f625cfdd8a4729b6f566c1620bf90025e01862d1a3286e6aef76376f57186321f3c3ba3a4f44ec892ffee888b34fb82e1e489ce05962ee64110e1a028ebeb51e8dd57a6edc0ac4d118986421fd5c8603bd594fb47d667d97f1d432f70441c06ff75070dab152f4a247ab242e5435504291c909e0f545d03f6cd659f92d472dc90729ec69f8aa27088477587b8524afa9b6bfb97447346c324ecea4ccb2f0f7791970afea8adc7a08dc412f02e56ab9a44415339cfcc885fde72fe9025894a148609db9b6b9b94b28bdef1b3aaf1000b160d0acf86890e406cb930f0abbdd54811e796ee85a315746495b52d5c0141772ef8da1867fe46339d35674116ce1d2c258205125b311cfd1fc27b5f0e9873877eb01498cbf3c90680a7b855147e7055ee5f21913698a12ed9792ba0972be7125aea59a2e52989f1925ff0600bcecd7c0e63159a60c0244eab2d32160b493c84c976d37c980b23c97ea243d3fee8027a63278df871eed1e005c3321c501b713cd37215d1f8a8648fc7d2854fb44788041434be68ed4f07560aa500f099eb3f32d0499d2fa8fd1f2f40361d61c3a88a4a6923ad6d601927a2fc19636b138f45ee96e3433ed6574fa17e4d61323751b237151496ae1d583fd97bd8bde5bfd3a30eb5598d1e58e8d16e290ed1999a01ec696a8e8c42e95bc91ffd28d51cf483d80d45ae47d51b1266a45a67825351cdd9da0652948f71fddebeac502ce2fe5b115025d5f7ea29e5659992346a6a2f35e4e1a56d80e58b73cab326ab21fe985dfb4361435b0eb1d96cc4cc051755300e1e92059351416a09e6b5a610c6a555750f07c8e791336b359a0d36dbb2d011fe116b9e5f2ddc5ccc615d742a0951722185817e389111003e7bc038a565a782d7e5c1c8aa4ec3c39a64ebbfd014068edd89d935e3d443d323e5e84290e13d824f4d2e186988737535f1328ac66416c6bf66c6c8a653458c9ee3b41d9e2fc4f8ecd3b358d1bbd0221f1ea0d608793f02bea8da2d5ad101e20fd6ca89e07387a501adbb420e9734b57797fee0c7b2413e088c80c03b7c483d90156554d1d9455af936804348b0077b700926cbe26321f4995e28312445aab0017972dc9b7cc21c1e50d3946a3e4dddd034e3de34ce9306795150b226a6f4340ab795a6301962630b92341247ad8cecc9c82869136e865a059345a981a0fa80e88fad201c04670e2e5826380d3c96b0f63d40af834b5a6260e49730b00a5140e613741f0943f5761ee40850df91151cb531651318843e8d7dca30fd909d003911a6a1776a60ba7c17eff05683650c1f3aa55e81b13bf6d9183e5c78c8c31760107b89fe3547eb85e98349e715c1f661972d80fb855f150087b5d552b214d4886b52bdf7d901c042c0c330aa26492b6d8654e24f3c7e6615b17df7f0cb6fc44b3b9b826b4c6c67a87c249b714618809e682a704a763e4c508531b0d4a0aff5ebc3e193d847334b690e5d562e662fedc68cbd18d46ade6ac1d037c074c9838937d290e399d4dfa805283241b1fba295fc70b1f13e7fe0efa64d2d66b37013437e7171b02b945cdf1ff3ac2aa5bd5771bc84875542911914de423144abea40dca6520dc8d700ec792e22009ca89be915b01a16f19321398d28342629ae6ee194ce3610fb2bad78595270b17cbab0e1952983783d680b0e99cc2ed7cf273a7f8dcae27fb5d4a9d2b7131cdad41af16616913443986cd3da85afc09d98718a4c3c2dc761e6b498b1e4ef253c82768e46801c07adc9a1255546746e3d9fdaea0f4c5d5fb1e096479c159ef9cf2acd614445999e29f032f3025104e9ee6492be4aeb7
d5ac7180a53bc199b2ca46647316120ace88f6084abbf128ae9245e914b33865249013daff59fbcdf1aae94a8f0633c49e844e0859c1509bc3f4fd10757f680579f1e81fe50ea04960aad197d18524139fc3190233e78783437f91bc15c1fdfad10a0e4a426dbfc7c90c0c93a135e075dd8a5c3927dadb61aa74e21bfd8eb5e5e90938362711c7bbc5a94a26a31c026c53d2aaa3ebf225818ad2b2ace5b9ee815dbbca32382705a01fd7e6af827e6020ed9e58c6c7df739f6f18e05a2df32ae5d499659bae65e70b2b8b58f60c6bd4623a9496fccc30100e71ef24e101ea96cd636f817d1e20ce61b74e6d4c1fb1a57b387087e3e7a87f58c58d93b26cac77f55d301afa1e9a87d3824695ac5e1ac5f7ec68196dcb098e2f05421a8a665fd54142bd6bac85cf5094525e0b912fc6f087acf2225003b550faaf93904f81b79779a4bdbb4680f5d8d6d84b23088588f4acf2d5b2cd494924a85d9083962dfdbe557a6d025cd516ce3ecc9ed8d27fb620f2ec3a3c94be130a5434929c08275d9b09affb92c6c5686405097f2b59618f2e62ccaa818f9a8234ad0a185911000e200b10644aca8c1eabfd8e7dd4e34c79af22476136ffdd7214a1af37500a24b80d262917facf13346b800a81874befe67cb476d77fcdefe7d77f11c04ed915fb2380fbded2841fe75ce4d80b6e96fbac837b78adf5b5b516f44e7912b6d65bfbced642aaed1f2c06f574b246362a9dabb74b267ab42387e46b3c007dc2d717311eccd74e44703ddb53ec1263503ea07d8680a5d96da978e58216826e033365c1c6f1d5e63ea088f468b2eff8fed167ea9d890b400fde19b163adbcd25fdec0abce004beb8604afb864cd5d876c58ea1da39feb460bc4d1b48baa31a3c95dd49e0e086e0088dc2ac2c22c03d88fcdbbbea3664c271b22efcaf701d313474dde8e7906fe56757e0776a515053e0de95b511efdbe52410fabc255b19949ed11cd356f216a96f1aa2a44afaa034285f77de1de8419f7cde33b208f8b186ee0b077134ea23c518ff260cef8ede43dc88352755a4486108c52d077bba2a30ded664e27c30aac67ffe8712ab92b9a520a083416ae1d6d07f5bd61c661275c00862a6aa1536a87d1ad84587ad941fb9d376587c59cd1e03b98d188d922b6c3f821238a3f5a2530f5ae6b475a5a1bfbdc4475990205a2621624180321a4d40e90da00603d175e0661c2647640cb96a88856ec0dbca18008cb865b2e428ae2f415ff8a54ab864fe5781e54f252eba89fb808f67206b6d00ed8d8e924313cbc45d58a1dad025bd83bde5003749266a2eca983330df596d0e084fd2f01ea6ccf3fcdd7048b18bd89ace256c166cabd771e897d9e0c785c08ef2b86fde2bcc207b7295a52411bafca5219e209eb81c1c9297b32464e10d8e5a62af99b4be1c037ee48aabc142048de8c07f1b60d927bb25bcec79c92acde29a1a1a77dba13865709b1196fdb04e198c5397e6852c1d1efbc03f9428e9d4286352dcbe41f585393daa077cc3f64cb86a36b9f31f28ec7081b2780e04f11763c89bc4fd4a8032d3e4f3983c311bb43a7c66732da724eb349baeb9480370f8a17cfe44d4891cc78d489878737ad5b78c8685488347c5c00296c2532cdc2fd9ee9069407582050e65fbe7d71d73c6ab4e0dd6c2d7d3d878f65868d4b9acc6280bfc4e4bd48e051d9a4d512a22e9a39ebc98af0678ee130e88ab0d9a2df17fcb1d565856d093ff523911068c1a101f5d0cfe5116b1dc871f0c40db6e57118c6c3c8481615a1ec54b2f2b7ba2bb134cf2d5137d1ca4fc8b34c2c58ae55cd47982b440f21b71b19dfc079d019f89e2b3e5359ff10720648e88ff844a6f6c98cf3cbcc6584e9e4539ed642bdce5a62b0866ea47ea2a070ac632df7a559c03c9638f6ae96424e0e7a2d386e3aa1234eb34cc484b062ff3f85fb9ab7f190dd60f1ef23a6c09a3b47e0d6c86f572d9bfb77515733d0ff9ab713913a7ce1c8540b1b8ec400c044d379fd422d293b24cb2fbb5b5c03b602b9eabd6ef5ad690a252b810abb65cbfd0362cbb8b22a7330a72ddeb8a7577b1b5af65010af21a9c0cfed7827a9ce07c39ae6e1408b194e2dd4175a2d17ecd5159060bf23043b7343d710762a9ca6821638250ed60f0806ae6353baf426ba76b8af2bc4479106d25861cb3e3d4f07510563dd6cb338feab820d1e659f3083f8fca729ca6b0c34c7595933442b6eb6d50bc85e2382f3d905a5fb9221cdd07a48475bb0c796377b1221e8e5ad33416d363c0e811748582a0c2ee81618565e5028b6e4d21a8bda04541e171cdaf1d2bcc5391131c192e154603f81e10dde072808dac46817227d3600d576173e4925bac32a63db2a9557263edf96f7bee0bc522a42cb1d420ff173f63c4fce9a454f5f8348ba1f758a377e1f6b3d78077d83fbc695a9a26995bc389c111036abbf9e8189eed54c9eb58d9dfb23151e5e71b21ed66b12b3aa83409d02dedde4f8e9618657f
686f0e3aa7b75b318a84285304225131988fa7f20a7f54bd8c8cc8d611b1b6bc2740d51a02ce2142e9f3452bfdf42bc9700022f4fc73b62319776ca6cf5ed50951e8bc247d1c81372487c68fb02688aa4f50a96adc2fd43ffb2d4b4b6a58709a5b7edfdd4f01688ca067d820fad9326dd40704bbe641823ed90a3bba1b320a4a00acb2a80b34fe7c838e79299b9c761a4e0dc7b869db97ed7a90a80641770c000c122777d849cf4e8efb12875dcf9bd1373f30232b1cfdce5a685d886d1b5a55df313a34322e6f63afd4f54db7c8524c29a210401caca35d2b251ba6b15c90792f74180275392044aa94144437c891eff7514e191f90f937035a8fe18c9b0d3804adfa82969e53599c01125b40aafc633de54ab8836f2ed68ab5def9360a3acda8173d8cf70731982755cf4dfceef1b3f5d5033b445e5047773707cd0d8a023c86654b29bcdb5601d320500ddf20e8f38c0f0145cc2f4cd03b887913c5ceb575bece57d9b9b7d1fc4b5957885f8c052e1e4da1a01657824549c0b974217f59e10ce2e9cebd1d6ffd661157a587111a6da9051b24584a8f02ad02eac234ffc4c609a550d6f7ade1b3d0c8ba43eaf7b6b90b56a7605c2169593e87b06ce685e1c90b664becdc126f3eaf7a4096e03090e74792a4e6fd8d36da9bdab05960b1c7233096b429869660c4f1350cbb8182e9a55c81d060c97e4f1c31dcd12c7e28158b3320fe260b8b012b8b102e6e6331d4c0677c20d8246900cdaf041b4455813b9624db611f538e8dc44bb460bff530e773cf8e0227b6439d9e7a6f10403ab1940f7bc0c572fe01c08a60a1926339a8c5e23920de27e756cc9b1359206ec7648641b41d50abbe7ae3d7bbabe705d26ea271c7bd3efb4ab88f1bf2b8bb9c4664759909ea1fe52da416f0fce4a528372c24dd79a48f0746431ca50d6162ee90bea2883b174e90053f37ca79cbe2f4d3f79e45e2b8ce9b8b1c7725f61353af48e5debd15022c2a6cf185a5fa818ac588b77a50dd1b60e99abb93e50607fca9f544ca8969e3ced34f090bee8a008a8acd9bd886905ae7a17d4f94faa2fd62dffe21c7bda631c29fcfe44a89ec4046a06fe67eabc3214359e8b368af3abafb80621175c3a627c0a912e25e8de50a62f91da38cad7aa1e0f35f241cce7963ed362f3682b68c7cf7c017ade6b387abf70e929ba4cb7578a41e7000d67e0dbbe18742cb6129e557b3a407225441854d27508a50edc6004ea7c7fb457204cbb4a07d5692488b70924810a29e30a2d1c6083d60163a349b349c2cd9280b8073a501e17bf6e26df1b9a5c950302d1955ceeebe7831d05576b67ccbf11ce5b18b70d7b217f7e390416553642f6265beebd654a29051f0b280bb00b0ddbcaa24fa4e01191b43a62051593d207840a1d92778214e611522a1f6470814362aaf321854d88a2694709b42d485c1bf2082edb19165c8b93366351567ad2f9d121bcc8984fa823f6792204aa4d530edb3adf5747e6ec1e9e5870cd3aab11510eeb9f959ae13815e8d34fbf39a94e0a4d5ca61301ca8c7365b186f16fd73e7a953f27092ae717c4eda038778c41bf378ce87786b7416fea3ec940668d0c563815d4a6ee930c4c20d1d510ce85ca20d80cbaf5c06368770507dac5d0686458a10eaad57d9201054e43a71d2e50ef0ddf42bb3494c468332857f7298614d431e646d0aeee530c59ec6c40539377604fce16ba93ea5459f7698832eae822c21b79ab1eab3e9feaf9573ded6390566fe33e0617aabe2ea80b45f53ad5c720ceacdef73148a47a2bca9a36adeadd78381f83b11553bdd63ab978e4a91c426955a2eaed7c0cae14d55b7d9ea7ea26c76d9adc4ac4ec33bc361ca7e438517e108af3740f7797741ebb2375b8d67420297338e7892e9093c451dd78f4739b15b35645ba99871491f247ba38bf1bea1ece8d0aa7fb7efcc76566d9e2e474a47fdb26864a8dc7b789a1fca374edbabb3b9094cee3ee2e7b7c459a798890d4b49e2a5f3c74943f5d44f28bdab8a49452ac727b5e3c3d5c3c76745a3dace521fbb2333c1c965da4b979c84d53f7cf46b23429b7f97152d3a4c72b447aab45a9ea54bec5d739cfe97d2e17a5bcbcbb05e56f630eb522dddc3c847bb9bddc406933593afd64bb7b38dfaea47c77779737ceee1b145d77f71c570a39d5a5fbb3df546d76943df9377277e98a65f363ac2eb501592b5e9876e3819beaef79df4a761d4f26171800029b5033e966d76923e8b8db32d05921fe2c7542e8a6caebba9706aa3fabba706eef7437fe3c65e7792b253d55ab1e9fbafa40ef820daa3f773f375578d385a0b65c08aaf38a5adf70b5b988736ff586deec901c0e547ff67861a0f67be3da8afe2c5ddbbea09a8685993dcffb56abcf8b857169bf5dbf54f03a95e7e347ce0f1f4088c462437cf4ac9650f1fe9b9e171886168c8cf444886f306281cfecf2a291174fcf4a09f501c02a8828f200c08c99270ceb0f032ce3224dd55a46
729902c030d298a5dacfa38a818b7a0480911806963de170cb82b24f8fd57f4b0940fc7683945e3975df7b41d5756e88b0af7b5c8c94041bc0c73a5046da1856f6c4e70db58df480a9fda3cf98200a58581b59213150fed18890112d28bf37008eebe15a2da1e24d80adba8bc76a89076e6810215f7311fe9d382b4ac70c79ecac9678e0860a29c037fb8757307774b6001cb73aad1d22a5901124a86644933cc43e2f8c1831626419b0b2f93dc327f3c7e5a26e98a56219c9cc0cb761484fb0a017ece8f02cb74b2da77609bd499cba2c9bbba9923bdaa9bab9fd10b95529822937b931ab3cd7ca58bfdf8fe28fb1ca5a819f2729f5be55d775df276b2d13978b52292593aeeb5641feee22f01dcb2b4a968daaee7f94a7eee374bf9d6a04d5eedab058c655f63c496fbedda059552c9b0e5b99f7ebb3ef81abe27f52f9bde7cbd07dc8f6bbdaad6e22a8db90bce1aa73098db3d5f58d4273beee129a46a12d9d6ff60ff33bbbbba7e3554adef93a0c2a5f89c7b71b54053057d67561ac613d5a50fbc757ed1f739a76e1362cc643fac17cad9c9c29ae7f244d337556cf667f9f2a33096a9018901c95eb68c44907673e4a16561d370dbf0c5778694c652d95bdf0152d5ba7df91a32ae7820d4b30d100e64b1433ee82061db2b0020d30d6cc46eab45e56cdc84d95b90c8144edbac90d8142e57e5b1de1b42d48d5acd07eba4a8980ea42b450bd9f8fb0994ad4f156077ffdb4d9ebcfbdfb6be6b0c5ab23f27bfafbbfe7aa85b3dfe059b94aaebcb2bfcf0e2623bca8f2e937871a362668c9224595cf5a1d912b1204217f85012431cae4000596252666b12aeb2842ea4b174757b6b062265f5cf97256a9f281fa67b3e8a2856902ede1208cea00628cf0885c5501a28b8f2abf6303081bcad0018e304f2cacac3023c71465e800870e5d44668639778a3b6a3fb7c5a94b123fe4a08413194feac0e28a2a389459c1e90e2821b68d013fa4d1c33fe850c12a5fce355250e28da42c689821c36ca444456491010a247628e34c1c33c90eb3038a87afc77b2a7f10ae5226675c97a6e8af7f98563925962ac71cad6a1327922592f93380a52bf2a36778dac788fc56b576c9f7b8c8f6fc6ee3e5e659e90387f1100f5a942b44fe77448d54f94f2eb03226145898fc558364ab4f6065b11f2f98b82c83854913d471a7a8d208c906495920493648caf6bb42946c90140e74c00649993fba966c9094ce020d36488acae7011b24c50300043648ca17800d3648caea77858c02c86083a4b0fec70ae12a2b84046c021b9db042e47720a300720a5f095490c0ce8c4e483a8191bee59c8baacb0f9eb4e8ad41fd8df40fbf873dafbd0d570d7c2de1105c2d4a354ffa678c0101b528b9a8632d0269445a94405ce4b21f01a8f273ced0918be48f98cb9ec4808cf84814fbc715848bf4b83648fb69e3f2d26212a864f3426db3b25d59c5266424908bb8284a8b4f7888b4a945669092bcfe4ee0b1d8eb6c6cb8af6b6361fecb45b4dff7dce022daf39a96addefb248739cb80b4428ca09f097464a42f89f673cf4a2ef3c2977cef59efcf2fac8ca71686b4411cbee437f19ef55f38c4d4e47b2f1c621ab3789ef75e5882cd53a10efb2f1cf29e8643df23b11cf38d95b151c37805d59fb5e2d0d00f086792cffad5fb5be0c42a7cfabdf7a5504214121781818b3417d1c2d712e69ea372e6a0231b316be9239809ef5440bff79e8d8e36a87b7f2ec1feb014abb1a2bf2a34f21c16b2f31cabf1d40ad1c2a09c344346eae7c2282d6e109630c443dc7bff241bc46a5688bfbf9295f11c0bf30702bae002180c047b3845a0675ec85368282bf9f31252920662a98e444682bc84c443fc85ba8bfcfb684acdc7858fbf0cd2a21017e1ea1e2591eefe398368e8483f13d09148b542472222901f3eaa7c11f88e3ad217cf08c2ce0a91cfa353432416fb699990e860ac0e1aa44519a6fa1391af255c3168d9ce8f2ae58bf26392488b52eb822609855a94efa9a1e387d4878bb478887cc92d4cbe7ca1e6eac2e4fb6c135a839ae9e686e8ef225e74caa0acb4001db669e721756ad8df2de77bf852126e8b1a5195475e43b38399aa6d9f8324f80a71919caabdcd91f6495cd67fb49a365fe70ced85dabbd7420d346ed3c2106cccac7183541d47f2d191cb807ac965ada3a98944bba93a1ff506e3f64e5af48da76422ab31ea1fa4feb982c65ba8e337bbe8f0431d7b4d1d5b86eabe75fff876c9214d652aaad751cd78d030ff273adffa11e8c8e5e235de56a4949f4fe56aa545fffe9bd7a07a64fb54a1d6403e021969ea6875e3b5c097f63db8efbc1b2eaea7bfab08eccc28e75d4d74bef51a18019d6fbdf7a140b57ffdce9c67ae85af08ec3ccf73c0e35f2115d7f3789e900a5313d7f3782a39bff33b4bd3a23fabe1a91697ccd7727ee79db89e0738c4d4854d5aad1fca799d67a510e66bef025d39a00553d09d3169ffea5
e5d28a549cebb24b0337174f1f851649abfb39d0d7485acd4a23f0f90911ce6bf03aec372429e9a3a9a3ae22987f97bd0a27ff0840e2ac3d71259e5f62d7067131c41d07e0481fb1184f9dd8f20a87e04c1fb1184ef4710563f8260f320b0de6b1d5e29c109c79b6d813b4bd33f6a68f5a75c3668a3781ab23a95c769401860594ec84d0ef3771710500ecb72c2268e6ab1c9659ca661febca67a93515ba9fe32f09416540bb90c8b41838e6c46cc4d47441b270d1b9e57f50fb9888fe69a8ff86b50f67777f72eedb1ab79b88dd49a476d9b8f994159af62490189bd5e564543a323a019eb519833ef5188cdbe5fbd4d686402df3163928fc2ec3e88d7d10891da6520e1c51a3ebcd0841b5333ff024aad84a5c5fe5ee281a57fb6ceba8d5aec3eeaa25672231f614fd84db55f4bc21178c544270d8f073432c4ac85057d81437d9443095c989510f36f252ed2d5fee6818b3ce1221e3a8087f427e121fd68d00fb808cb58c961fdcd49d43e7a81ec050c31a2663ce04b31f37885a4ab875dc443fa83165bc9654d02e623974169583f270183920ab57fcd919686464649aab9b4d20dd42472b5056c48a9c9e60d4aab724ec5524d40dd271ca02a154922154835c8ea08122a9e4e5034f3cea01f11b4fb5d55a0fbef07ba52a20277e655e848b7b0fb7a852ad210a3ee8ae3f6e3f6fe502a94968359e65397cd108396a285411c2b0515697021e38e33c2d4e0031524a0410d0d74a49975bfa81579ebb25d0ae5ea7663179717bc0d86900263a4c41a3f70618138aa58e3e9e8045674c1e58ac66587d509ebd46d4e31385cefcaa9fb34c518ae1db6a0f10bbe784286267ae8d2c5060920b9418232505f98a14414ab303eda68723306644455f7690a2c4298a1fb0c46972ffc46172e5d86f0a82e3a4824de69aae25881bba1cc0d676e40d3b0adae26c0a8336936ff6b996bce3927ad818986ad14f38a86750d621ab6fd4d5c0183531353a0304b5a6a220924a826984a30b3e1a8892c28cc6868e2ca0a50b32b510ddb3a4e3adb66cc54520fed857657623d759f6ab041c6e08d5aed6a0dd2e23eac0aa9fc637f5803d0c98e01b5b8ef75d7a014b9fee89d83d259b20c25e0b398c3f6b584ab4f8bfb6005405de92b485fd183c8988873026529a5949c8302ca3827f87095ebcd45d1ee717731efe29c20d7dd69d8b8e9464c395db6f46de5e65bb85a1a6e97da74f5a8984c871141b76df3b05b366b5fff70d709f68ada6f22a8fe5b75ceb990578826b9965c5e254115b8ee436cdb4b6e32d98a703d2744f7dd385ff3e97dda06fedaabbcb790caaf7fb8dfc9ad96ac968c60941a8e5173b35c33c125caa53a1baaf2afb85074993e41a08a8a8a4e98f5d396dab3fbb7bbc0507bd526d410a801c715287a2093c597d90f3c7001038731a05a988d5c2493fc559edaaa5ccf00eae853b9a7272af792fbe942e59e72ef92e287cafd4b2155b9d75647381d5cede4949986e6abbab004f6c18d2c73fec4c9828e1f03ead8aadb2f98babddca2c052b728d454d7d4a46c55f9bbf20931553a0151251455aa7ce204511615c923bdd450130c2f3de30419b59bd2d9bd58d486daa2767f120d9eac14a742100f71e20814e838ab13550e30e9bb1d9e4a057a375b41f278e564be8a03ee630eeb9ebb67a3230cfa028b85765df7495a36d1a8dd6ca37673d0a8aedb5d8d9565f5b2b6b0a07cf43beb38e0a874dd5f6083b6ef9e93f88acdd20631a0768f0196cd85754fd358376af7cb74d4ee3730b642bae77625016db60a4587756f06dd444f05766108b448dbc271cf3daf8e60c01be4fd4ed66fe1936da2fd167ee030ee896623ed9a43ea1fa06f90b9b4c8791cf75ec8454d6b837c04c84cb4c8bdc702d9852f9628026aa0ca3dc581e2f0c61aa99906ca24e498511a2827d430eee7a4bd4e1a885498fab7507c97bd38d88688665bf80ee33a2e68f755f51ec855af97bb25eba5a4b22a9c4450a219d1ac5f8adc6f358acbb4e7fec9c649c9fa7daf54ef81af2f94b2819316b9ef1e49d5eaae96f87b61ac45eec711d45685bb5e535f1d794f4937be9434915c29e97077044bb84aafaf02e5cfdf55120c36888d5608f7dc1f59199fb030eeb99e1e1d9d15f73406eabfeac4a0a3ace3acdc6cb1b9392975b99e73eebd72ff33ee817e9de33e089fb4c8fdac1c7b593c7f6fa70aedee9d2a94a5703f65d64d6883bc35b039da200654ef1f09d4619044684a7082465ae4800ad3fc2fa41cacfe5b852e87f9bb27116af101fdaf25f2b564d6ee67edc20eac13af947455f5aa3084d712957c55e8d385a2061c8801934680ff766487389046a42590cf49a9cbf5b40a1921b24efc23119bcd0e332df68fbfcbb6dd0d7cedc71c7cad4b884e979c4cb47fc1fd97a641ffc824fdd3328e01c9a765463e950211557e9058031991c2e962a454e5336959cfe46bbd5d4e58aaf23f795454e5144afc8e35d58d26d4d151
dd176a3781b73e4b5d94b96f49d923f744950b47aedcef7763683f839e2a945be8bec7dcdbcd9d31b5629adb264e3ae89f7d6a428629568cdcdd6515b158abe7a2966d1a483ffb8402a3eabfeea4041875fc2a272971451d190b2f71136b25f749092b95a9a8ce8499ea52db059b74d0a2f6792a1ebff393dbfc8d3a23e8486b0ca8c5dfef554786f6c71ae63f27a5b116352e281fd11828f781515a84f281072d6a24784e3a68d1df0b9b682b66ca109a34363739ad772ae9911596136a7393d3a22e63a41b2a3b089bb4e868a4131b57b78536e9a0457f964dffd0f71b9cc701e5af1c64231b0af252cc651c2bfab64ad02b0bdc0ae4b29db974ad423aee99d54b39bf70a10c097038a3862d3db4317322a6a82954e842055d802862e6cf45f3b9676a1ee08b09cf749e68e67d201bf1115bd199202b311690975ad4b279d5422ec28202c5dea05038ad13e5795d187444f2bfbd7c0719fc5ae44dd4c097ffcf1a7c799881ac4055c3826af0069d4c94a1180bafeda05cf7898b9acabf1680e1a246dd6faff2cf56c5cbbfa78edc5a214c3c57f0931a77dc855f2847c9b146e577f526586d7c3a47528c540fa76815e8081d265e1974ed5921ece14b897cf973d5aa9450be320050db874e955fe56b2b73f1fe00b5f85303508394d35b5a3ee915ca57cbe8b270552677e6eee6eefe15c2f288d4348d47a7ae833915a7bee072b2e06077f952e22f5f7ae8e3573d8462a44e28466a6bdb0ab92bfb460b6d6d8ef2bd93edb94a73d0376742867c838e5f654d8a56a74813bcb94b6d24b05c06d7ee7618ffeac88b89f63c55c5614866aefcf25bcad6d7faa826fd1b0eba46babfb645270f89f6d27f32b7edcf7039395fbafcf99a8d59033f579ff2969ac6e04c93600bd49fcd90d2e6049433836e9bf44d6a7584d254b5a6fa924f54c116a8875488217643afb79bd3c2d155ddbdb9c38d5b8bb7afc9ddf96678ae6d76aff27edfb541ab9f605d87ed0575bfc7f6260049f50757a16b7b6ca07a6bc556b71f17c24f9d40bdee5392918f6b8bdf1103e2f2a532af99a2a2221466cc4feb37b6b4a8a34f4f1d8324d142b5c09f264144f5771955df42a6fa8b5c84881500a83ab7197eea45f7c90db3c55367c43cf6fc1c21854413471451fbfb81b2c8c1a778840af2a32f785254efd57dee89b649f7f377c326f3b970c87f86af1d5aa26e2eb85de91914c86554baf7577d2c89cb78a6626ad2bd3f95d7bef6aad786af086c3fbf893655efcd99e7aafffac4652f4ff51fb8ece585af0870efcf01f73ea9303599df852f8f8af73be37e679ceb1da67a0f74f140bf56d5b7cea0be62c2fd7ab3b35efb53b8be56df84fbee59cf7582af7d2a4cde53e99e7b2a4cacb089fcf954b8efdec1d786af55d8a47b2e7cedafc3665ed864be7c2a327c6d186b51f540b3f9ad3474672f8fe3e477cfc1a4e2fb63acaab270bf5331741554a7ee931170d4311606fdd0e035d403030aca9f28abee931151f4184634814173ea3e198183144620adc0882a8c024adb588252315a0a7a03f504fda47030d4438133d1cd8830589a0edaaafb5404d29722aa90a1b48d1d284e1867d0ad8b1ce8cc420b55517105135c8820a2890820a476683b88482202692795aaa30bcca4934e6a333d15c73f3995ffc7eac836e7475b6387507cea920b2cbc92e0e263a94a0b3c4134f5c46696375ae811038a1101dae2f412030697114b3cb199258a470c284604a8c97b4c2c54d82962caa5840c3a4d4d4d5c78d02c5962687141da89cd2c35b49013038a110102620a08353a348b1516707ef0d29a23869b20d4e4acb102fd610ac707338e6c8480baf1a10b961635d487302aac84506333635394e09b747eb488a2866d9d451c05ee2358a505c0eadb392be35cc685511ad6ffc3138be645652f51ec250a4b0d95bfad24865be7ae67fb88739c71c2183462cc10a305bd790acd58ba61a79c512c7d417bd47d32c3855a861c493b8b426a678f6bcae55c9893ce2f6aac704ddd97335ed8664e98995fbcb0a21d715f9cc04172d9be60b1c19dcc685f7240a1e7479b986a424dc3b68e937e514186279caa902306272487edb429010d3828c181830b405057dda72b7298b972267602a94959bafe632c02b962539577a5b458c27543eb0c1ad5cb30a2fa87ab23eee2c2c31d5cb87044868e1366c8c20b23b6a431450f33740b63b4dcfc70668902547f708e51a307196590f9d511a76470a8cd53bbbb5b879d15600c33d5fd27195baa3f902d5400bbbbfbe787343206162b8e38759101264b1859cc60514111a3336a58010a6840410a9a053260f068f9737b6802c480a1fabfc690d13133050f6f11042b270c999cb9991b08d0c325c5df61b801abfdb3dfe5ca226ac5430ce34bf5df591d71971813748a18a0ba87e142f55f30bea8feadd511777d204b1c44a851e14c1b44ec0a8ed802451930696011c56d450c1754d5448bdc1d8c15a
abbbb7fcea3f1b57aca7ca180da3fe91736d47ed7176fd47e9c6f00194c5682d80205193f780b4b20518413603071b10216377bb33a22f94b99188e748c28098c151bd628e3c5cb0eee6a0107a83b947458010a335f27ad821669765c4a38ad1e9f205e2c95a952fba9ebc5d8910662630b02d4314817395c50bb8b3029002205751ce998ab0537c030e3cc172a08e247131c4d184141061c6866fe25e06289f5b57a7c64b57f52d7479dd1a21b56ccc8a10225a0e082341b694d0ac344840b4ee0f0c4ccfba83a1938aabbaafb947bab2332ea350621f205991ec8d4c1451217696a7f5498315e6a03418d69a2077fad8fb6ba908a313e54ff0da3facf305a6469f1d7fa688b031293c5271d689031c7520d66829822c232459634aeac4963c20cb3a8eeee146a6fd1456d56edeeee9ea55d6d714325b24518313654d71e7173c3cf8f2eb288820b2d6ad062cb1622adeebe52d5a1d0a8ae53dddddd49149a3aaed4ba9cb1ecc294a9a34f951de68ada61c64061c9a2088b386ac75a2d104ce0c41a682831cda005014ca061a38a982c9ee862e6c461fc8d426d2e58a8fd187437127414ab6bfae0b2a20586a9fa4bc72285ea5868f1ad41c72f2aa783d47e2bb4b082cc1542d47ed7155080c9a1f65402ea686ab2ebeda350fb6da29a2cacdf06ca43ed5f0ec500a17193fb54dcabbaff9aa02323b1be5f9b52979ba7cfc4f48a00eb6fbe096bfba1755813d6d37088153e23ed8cf54cc61069f8608309518a8a66b15f4d3191516209a4a2a2d9ce98465055df25abf7fef3a07cf513ea95e77550b70feaf653847c058a303406a9dc7bddb7b9e61fb875b58114c16bf7ddea48174e368242aaa16ee473860730a65068224a05a2a6324b6f48f1c2152fced8406d5becef672546e2a4a62f07d93f212c91d5e54bca9f46a1294913284f5544f546551155c73d03a4aee41ee048da0adefa782bc3dfdee35e8221f81df3e523e197a1913ac351fe263f274249da28a10898229c664cfedaff8f873eb55f33837e1d6342fd49fa6340fdae7ed70705f534707a29b346d1ac9f6a52359ca0a56846fb5dfd3bc7c4c6b9bc44fba3dce5702ca1ca176af91ab864081750dc51f79fd458c202691c52d5c502a9aeaa3ab8ed5654485016d74ddae2583f27c7eab8d0f9ac8fc24f4209c63ea236d209e8b8665ac965fb4514097c4853c417345499710096901ac206388c500189197f2375d2134a5dae7f518cad190e870b132df2e384ada344c746ea239adc9b386f1a41d6ce0b13759dea3e51b1a6565154f7c9cb5458f7c9cb15f58907fd331fe7f9a1f40f0e9aad72a1072d72131ff1ccc565ad671c2f38610be4c0343c9ca04ae6e3847c448bdcd4223ff752b271214eed6f46e1331b74c20645b58c95be67eff947e64024fdcaa02b17f2152b84bf899bbaf23b712a36e8284294da8521b0de9ff5ac1f59891ba7d09f73ce3971401116e7274e3882ac341ca3d410b89f63943a6fdea38b239fc58d3464fdae8ed8841fb056a0075fd0973ff79abf216df940df606a638ba8291aa6c690a2c50d6b788021081c5ce04021d7c7041d8d2041fd3f292854c3f8e7a4502dd228fc24701ab42ce7f97914d1118a913aa54586128e33a93d88d222178ef12ef3a061fc68d2ac61662b957f5c2fbed27e574f5a34e3eb723455050a2b5dba3c992075e4850b1a23320c156503a178d04f5ae41783f2b3197e96e267288e728d0eda4d5ba6d858010638a4887a48e1083b705843e98e2a515c968120b44cd144111d3a6099f193e0b2fda205cd1865d8307534c38cff09ff074d84d32e31bcf519250f5881cfa0c76b3660575f21e96af39c7c1baedac81512af3b3f41ed3950be0de8d5ebc2a01b4339f9a0c58d3a961b3151372143f93add6c4a373755946e36252c4b6ab434f777b329dddcec174a379b12962526b868f16f083a245748365f21f1bfb9d1bea09f0b31d0ed875aecea2444e99f9d6f4fe5bd6aa702db163e692327af7d825319a93438479a7427f6d23ffbb4c4154f3c688f3f5f1d69a30f3e68b1a3186aabf7ca9f822f25f4257da2ca3b4b212b690ce504f550373fbada632b2fa2aa85233bb3b3342dd4deaba67d27866aef840a138fe7099dbc38e0f9d7bfc221a2194fe87a07bd7abedcbef35e775ce12482f6bc8fe7117ecf327ceb75beab06ee84730b3a7211cff7c79c88425d819e21dc909af1842e21176a096444a961fd33271c274e58a4e41d4563a0db4331b3bc418924422d762fe0294dd1163ef1a0c5fe80bce9cae5127ac25af192129052e5690ec4ef55931f648298fd08df6dc65957e1483f2029b28eeb34eed45493d7f384430c471960d4cc5ea1cb61dd6fa47f66c85f4786d2ea6ed0fe6cef2d43a1ae2315ada15a8ce23258d024422d362b29d12fe85733fd2cd5cf50fd3f3888fdae3878fd088704e0c28c277c87f1fc2b1cda2f504320793ce0ce7c80434367c85c419af17c0f
32fb8d18216af604c1321bb1162308d0e44725b7c761809c73ae09a4e29a8dd7ddee9b49f73039ce71ec286f3ed651fdeee898ce39f7dc73cfd9cb39f79eabc1cb1572fadf430825665fe75c4f606e9be3a8175d78f909ccddff6200616b0f2184d039079d73cfbdd0c194dcc2c4e12503c92d0c1c6af250e601225242901fc1a970a5e0a8146e72d012709ffe2529f84f0fa57ca8a704ffe954ff3ca17970772725e1232fca05f7dfaf6d6addd7aaa89cb34bb364539a25ec7e8fbf09e1039c07e1db0311711ffbd7b73fc17d6cfa4bbb68775515d5ea4de7ceaba954a1822a7d1176ddc7223c9017f11401e0ebfcea921fc25584fdbc8a44787b1501e0ff2ae2f90f44f8eb2a22fa14001e041ee76a73ee5636822c67be32c8dcca683fe26d3f6800c07526e67673fe5e3930609e8b837552c5052e490109c25ca374414c0892e30dba474e18e310888891086d860dca18214ba552690516924a2f964aa5126a04149ca552a9a48204ca504a4ba52f957a4ae8c1ae36c37dc49737e806db72032564372108eaa7532f7e6dd7e5b9b3c378d0c0eda3c34c18fe1c3fc339a6da9a10179a517ec1b39356939b10175e9819fe93983d47f79d631205648839690ca8a14c0369f9a9516d6270ceb9e75e7369936c4d4adc9abc4eb6d22965bbf29e90c4f653febca46c784a68a4d19133e7a42f29a5f84ff8da1a2b6acd9f5509cb5a525a076eb96d81a54b36390a6247c1973c92f0b0defacbfae93e9635315a84db476abfb5c54c601aa51fab8f71c6fae857dfaed3b595c6d9e49c5fcdaf2a4cb606312b097f5845e4ba32f3deeb7eddddefb9a34cf67ae7c32180ab2cefbb31c258d129e17bfd9aa3a4956f732c4e42e5a340e75502fff5e42c47e2d0139ff59f4e4d0bbefa1e9d12beaaaa5505e3c36f179fa353c6086184efc465f80d3ef70060651da8dc5ac2ba56cd807f06f7a6b5284e2babcbbd62329fa2309f7e8b52da79e9cb8b7e9552ce5ae9b42c5ba9bcacaff3f2dc37e879b4780ed4e4fe9b351ce4eae775badf726d55d68ff33bc7d994d479addcc2e8a08a3c2fcfb7c6921651bc91a7b372d3228a33ac3cdb06cb39bf5617c5b8546bb576d65a71ecbc3ccf39bfe64ccf3657f62da5b4ac652b087f54d89c560a41e4b64f2fc06f6794e99c6bcfe1a1d85ae96b7b4e5eb4a8962e1e8c7d298c0ed0e4362d7b6d452dcbb22495f206952ef97e4b2f4b977c7cb55cc24e5ad6b2f82bb4f86aa58bd28131bda81b84b1e71902cb5bb261d2105d6b2f7e9d3a2d7b2f7eb9854caef22b8672542ce2d9fafeecad8bbf4efbd5b2158e652d7bf3f46f706eacb5f75a6b6faccdc54f6f90acf3dee0cce692fdcaded852a96459cb969e7e0907a7f4a552a9c23221932d6501933320dd1eacd2b296ed06e4b4c7031c62c0598e8e4308a1bb4ef13929a17b4ed68f5f3f5ae9baad93b0eaef5cdd243b7263b556d6dbba734e4a27a58c5fddd7ae3a5c21f26f3c6ecaee4b30fd604676dfc33d51eb79418e707e27c899cd540d46bd1fbae4f7de1818a3c9fe396e8176d2829fbb46026edfae961d19b59faaa1e2c807a429bc8d2fd945d01e6cb0cdd15c6bcd4c8b51ca28e36bad7dc3a6a31ed6d8683fddc4f3d649b933116832440189c31c667294138194db9b5b2b9857ba6421b97e7dffaeeac4f3bcd6b610faafbe6bed87c5b2fbee9dc8fecba4b1173a4f249ee757f38772561f2a1172b1ac7a60f2fbea7d563f5bfbd155a5d89195314756cae4f7f6ebfdeaaa7f2f244272f5b362181092cd7fb7c8b4b3ed67f7ef9369af6cbeabf592df9a10264d84e40ac380905c7d85b950fd933967fb41e7f7673f1bd6515b6e8fbe6bb61f677266e7503e12a24c6e6f56368c3123ca18338672ce371db8fbd776bb6fb8d21ac2c6e362c6cc008aa320c4f05469427e3f216821b24402eef3f2abe2e53d13f26b5e5c396b66e4f7fcbd8cd0f4ef7bdccfcb0226f5bec77330a6fcbca5e01b7919c61b70bb29bc2c605e1630a97653f032495118a5de63f7e3de9160ca39a59b02efc82d8c1b3ef41a6ec0e090da48c3d5e09cdcc2b4f1450bda30eaa932f4723bea418a17a6c2e7d3dc796bfdbad4e63eeeeeeeeed4fa05a7a44fc6ca45d8aab5175a9950ec6624b603e3450f5d88f4a0450d9c3c195b47e120872b6450497119c303520ea923ae10e161cccf2bc3dc8e781022a7817a473cd8903b375283e1021e1dc47054bc64b7830cd9ede005ca479ec98fccf41ea33aa8b536cd910e4970e1851496bbbb37520e6164f7745e529c28e20533480aa38c2a398c11c51a5dcc88218d944b51831a93708642b99f888396547f4f10f7e3a9fe23eea7a9c04806548652fd2ab89fd8bfc2d0502a872ad9fd733f8fca7cb1646c1b4c22566a70438d0f65905144cf0cc0205a028e3526300223e58ffa2326c8fe41b490818e1c93fb414113eaa5dadf902a867c848315d9fd731f2079da6004181cd59004c6910d52d
c70c30d654c9122fbd19439b27fd700a3463023861659093660b2e3398e680023fbbf13d91f754414c7510d5c64cc4375df5ffd72004c3dd7dc31ccbd64e4a8ea8bf39104cee015f7f31a00e5c0de5f41e13ecde6acfa922b23f7d38408314968dce8c10d3552fdd518f7430030a6a8a28e2dd6b8f2a5025342321327b63fe04c7ac76fd3bdfbb42882d7627cb089cd45f2a7fb911899ef09f228d984f4dfa0be00cfa48e02453372dee3c839e77a785081c0cd1dc4f24c365dd651f40b2d3290f9377858ea4ef0df7fc64ce01a04ce2c4dc29d1b6e3be7b4eded1b696f5f28d32b7b50ec557dbbeeeb2594734eb9e83e763f17e3c94beec31808311143982d44626ed0831509a082a8872f5c70c50f6ca4fc818edc0bc8f9f07f579aeff10fe2ffa8ea95993a73729381063ae7734a021f06221f8f22c3af5535e7334dbce4e5a29b2529e01b0ce57c40f7f083c0962a7d16242f71377456744e4a8696595118c634a594dadc5c787ac1f31de7ebd757d959595bad20c9ce4a4a1a5855b1b2b3b2b6f2c2021e4752bcd859555955558542f5a7222f5b6ff38544fde5b4e4e6abefc9b005f8622b38aaba9194c2cdd5d5d5472e201ac1cc1354f4d0c5161c709182ef01f773a40a3568145106143f4c49c15f017e0bf05b6b37859bc718949315299b2f61505a0b36566094b229c27909b8cffcfa958ba419be445b1999a9aaea815c75595ab1e028f845d83ece85d7a8c1584f711f9028c93eac0f2de602ccb5624d4e15c59a380afe0aee537dacb0262a6159e23ef05380dfee03dcdb7cbb0f78a5b7c1a0e850a102a354097f93011a18d2507cd97ce9baf9eac4d0220395d766928228a56e43ff34abef243a27fd06e33efed3c6a0d254aa402dfe03bfbbb4177803bf8dfcc753f06745e1b3008f62e892e137a194d20a3bc151f0e6aaf68a30386b22d7cd2dec88a3aa07bcc7dff7012e059bc718145b45cae6fba670f3250c0a02b038324adddc5ca5b7b9f0bf1b54b9471fc88f5447dc875618bd5e4be1aba5b29664c4bf912a07e18d1c6c9045c82d0c0e6234951e3d1c9c3b5b3093fe471f634267b6beb985654d9e64fa1d03391f85c894ca79a299caf92851669829194a6986864c2919a49aa1a154ce4799019592c1d53194caf91982a47230fa56707ff52df31021860b889698b24317299c2933d02229082cd0384a097189d282285bc8a8a101d2fc01055de4308329c2a821d5dfaab7425fb2fd1aed37c8c4ff848466f74d647ec165d76b7236bf7239728c5846b1229cf75b44f4291cec84e4e5f833bf1dbc91fb2795dc62764094bb5fb84026943120737ca1ec1789952336343434e43ea2f0426789359139622f38c7fe06099b264a982b3618734588d78310af872ca2142992c55043a15e9a9600d3ef68ed45f36acdfa7b112102a413a79db6738d9eab94ae3d084d3d4c73da4ade2013da8ff8d5d56abdce5aaf5ab1208e8a2f5b3bab88c7f3c0809c8fe83bae13dc477c9317397e250567cd8888b7856531a109694d8d19b965592b90d72fdaa91745623acb5ad6ad5da246354be48aadbc727e5ea5dfb9a22c817564bb453b0fe477da10b9365fa9ddb93a957315edbce922fa94e9775c5166f8940923fad40e76c4f9007254b474ae238e2a61408e8a44b250dc03d699b40eec72db22065772b5c96d8b18d090e5571c355af2bb463a17ed7cdf1d64f9eee6b685183768776fd0ca36902b76c451f116f17cf3143502aae658d6b23937d65acbb2d6eac84a776a4bedb85aaab473b5d4cd95d95c92f36a594a296b0d52b2f24cce9a0c92b063661031e52062cab14517470d28d2c514cd47cbadf99865e63739ebd7f61be4c4524ae9fdaafa3adbace84b1b249f4aec9f75f7d137d2b2b4d65a17acbf7fbf5e9dedfd79d9971745c2a63d4a87556fcbd65ad7d64aa36583ac9d6dadb5a61e95b5bafaea16514394e1b5d85402f3dc224678e4fad5c65ef9d6beb5d6455dde243c72ade69cb3d1a6b47b565789bbd662f4089ced7c39796ab5fd39eed372e91ae17193f0c8137336d58d8eadaea6947edf206a59cb525cedecf0e86167962e3c8870cf3908e9103303924f45e0b6059a2ad965ec28d8e81c380362dfdfd484bc6ecf5945f5b1f3e152159bb1479e8028674d06cf3dd9c351a61e28d4bfc98471939e2c3fa7479403cbcf724c580f47b9c9019eb29db2150f38b333e5b48c34379cb1218832390b21fb3fe71ca6453a35e0ce5d5d334927a57bdd9d45f75ebff817c8e36c513a29657c192356b55cc10a9cf577407e33853bef24b8c0419068e85dbbc3de171ae0e69e73a20ce4400e87e038012e65422ac0fdad933cef175abbd0b9b11b2970fb4ecade42df7f058500c2073d4e1ef030edd8d1c9c1b9b1c125ebda5ad129db0fd87eb41fed47fb51e41ec8bd612368217727c35dbbf7c7c4752f31a543f598b0c32617dfc70bf6c32be87dc5ee674e0ca138cabf871cd543ce
87abd0038bee033c75310f381f5254703f20681ffe3164b7217b99326b3ee0812529c02c396b22a696ea3a63d41ed23edfb30f762cf759cccd73cb5e76d97b88e7f6f11daa87c4770f361ffd1d82ec87976b401acbe4fb748872949b588f7c99679c583bc7b09d5d022a9b90dd9b80721f450f5d0390430620bf4a2f5a9207905a480365855ed23e3d8df2f7d6a8cc0024e401f410200d480b9a84ec190905c8fed908d949683fbc85cf48c8ce424b7243778dda82e0f63ebebef1f57bb7d8317647dbaee4ac94fd16e191638c58d681ec988d1adb5ad73a9290bbc4f98843acfaad7a25eb56dd5d7d9d734e5a5d9dbdf4f40a9a5f5d46da12f7a3013473ce8b2241dd96aceb60ac2c55d2b9bfa0b991550d98041cdfa1709fcca7e4f83173281c4c7cefe23f9d8aef59e2bb16f799d23e9d43a3e2471c5aeb70359546c56f34f84df8f157acda013b97f72cae4507dc9f79961c5fde68c0fdf497d6bea0695fd0c4b7e96ae597aec616535a67beb1d54b39e7aca0503bff549a7fb2e69feeacb24aa96323eb8cb24e1eaece2a9f012cd7742e0f1ed5af112d7d8b781b54d8b021571d9b9f53522a6945652dcdcbe5be46da1c594aecf25c4bf54b8f7375ae743ee773dc4707cb84b2cef75b37482706d0429cafcf96aefa555ae99a9b3de7ece67125fa3a574ece654b41a5b6b1ed866eaec4a816dc5f732cae6a9df46bec1c2b78be7ccb446239f809bcc3ca1b247bde20b9632b7cb0e7bbac4ce808195fecd8317603c0cab3ba7596a4cdf73512a4a64c43b5dc551c15bf34858afd7763df96ac6bf3f46d6e6c4a255a6dec955925f4abaf6e5075b14ca85a7b4b3f2f23eda2bf44caef5b5f76e79293868aa5abb306dce73e1af7a409a11f23ee1c1a48fc7bc5a73372c92d370eed071a2b397e5c22c7af58663a22c342e488a6a7b41f15159cf59729dd84742e599f5debdad89a654ccefc0a6784bb87cb67af9ff99009a794afaa0193803354f6f12aae5c91400fe9f79f4085e54ba6efd3fda0a6cb454218838ff410f82e7b76b86bf6e51aaa3520f45b0392f53f6cc9090ddc980b9fdd1899db35424276d2d6d49161d9fd74c149afc92ebc1033fc27333b5140a658db668050e680293f3165779178ae50b11d0dc99053c0b90136238123272c79a327622847553a60f719ca0607cf7ddebb289757553a3bc2ea630fa19fb52c723bca1a16b9154066970d8a766e2d381f6326122a484f0f0af526a2ec05f57bef5d123a84b05f5ef05b703f3d2736e5337dd4439c4b03f10fd2d38342b96ea23e43e47df4be691176b9f619f7e3a84c79c6f48f42f5f404c9990be292683f6ec2024fe18409e3e7a8b81d1a10ff79b91e1acadfb9f41027d340fc87ecef649c4b132622b27f8f294243a5fdc8c151d1713867c2cb557194bfe74a259c16cae7f9aa0a722fed87ff1816bb0ffc7983a0a9878c0fa59498e3a17df86329e3b6789223c199b692fdfd0b104ea687381e1a88bf1363927045b4005e74cd0eed873b2a38e0ccbb64f7772f9694ee1db6318c0ebce4cc7191fd9d8d4397dfd965c36f42babd43c2ea38f2814b6e473e54c95990232b67f2d1152fc5789d82c0ed06e5a000bf3682bb6b703bbaf246ce82e4173a0ba01ca8f4ccf7543d0a99b2190800000013154000200c0a864402a15030a2cca3ef14000f8b9c466c3e19890321887118841031c61002002000c00080999999b100eb93099ad8df342822813b146085d03e541cf43e0efaaf4e432f6976d233e937da0dfe79a724b0e69c7d7ae8089c52deb7e9a8040a8f8a7b039e91a6f76858a0d4949f9e87a53060722559e244d1d5466cbf21396de51286f75a1fb65dd8807274180b8bc832a387fbc6f4f8d024db5fbd71306dd8ef7cf11cbad1e982a2e35e9274ecaf8483eea33a7d020c65d37debe41d0fe5650b435fa5e1e080bc10c8118a326e9a86f45c646e674f507adf7baede0cd8120efda7eefb4feaf7b96e82ac2fc7dcaababc57bdb8be6a8ea99bf87bbda815466855d15c8891dbf7b38809d5438ee7fb0ba468a52f22181c9de9ffc056f7b081ed8455feccf4e867d5b4ea3ea8f787dc03c79ffb02e69ca96ab0b259cd8568fd96c72102664e4bb7b58fe40c1e059912608c5fa257717bbf58bc08d3a2bd4d3da77dfa5cc4b1f904dd1be361bde9154e94054f4eba5b02a95a16778a3dce899ae6a0c4f4e2a99f92a13b509d50a291cf23dcff6c9a61941354ae711eb72847a6dca1205f5840da39b111f8aacaa4bf1ee8ed0005d05c10665f660fb589c9e738d08b7747562b910cbf32acec895290a213a4284aa4750536a14805b8f487e5d3e1fb49ee74027fe8794cf4d135ba67412d1093ea2c2e7d972a2da011aab951f96792aad42b5162009a8c9b9d00d298d1b60f93284149d67620b547fe765de42ea83b40352ad362eba48a68b3db38191c25b4f595be3a9b13b15b0e3b0398bacd76a8c32e77292896f00011fc5
c3695b18fd000b5f95d63de94b44632b6b7fe5f25fc9425e1275d68870172a2b7cd6ce3ef75b1dec8470c9ac31afbf526cfb1defe354f2b6594fc9ffc4cacd4660ee82183a1fb68f37ec351dc8c432c38d60b50531207781c9d6bb079e038318b45defba793e2d5a6f396bd78b7a6e32066e1a6573cc1573f1614fe72b19d0cd2f9a59399ec9143a7d7c1673da643d8a595ae012c2263f99579473d4d2757e78035ce21cee61c97d18b32e934552b90cbc68a3ca3e82cf169cb76a517370c9f5c0a15c9958b9ad98228533abbf7ebd4c30c38e78f76093995407d5df701adcbb1c38c7c7acca25fee2add9f1f9d06f38daea9df37ad11965e14a32cd2ac308da671974d6fc66ee3adb145e02a020a413ae95dc445a8da1c04821d3878ce1837c6ff8617db600056159d8b0081d6678ce9f5c15f5a3c404082bd6ea5a9df3e244987811109a0bf7dfe103380d6343429a8209ac0e1bfa45fd7f29c8f87790e8df8acc4046cdb0e038fd664c823e427da8dea1a826e12bc66ab3743fa6a87f3ba3a6143dd403f4cabb10fde2ee714e0d0506d350a12dd1e55d5aee0b0eb74e129d0153c8c42b76c8a0bac9964e3c9db319e50474d05104259d60bb829cfca89e8eeaedb05ef422035b277d82dcc6e938e294dc4646938a06e01c96aba04e8076324eae1990c3802a2283b33e12b160ede27948fae0642a05c138be68896d92066786301d46941c31d8e6479eb53c5f5d88c171c93d90e25d82998fe3e33c8a3b6938b3b1f089fb0ddebfe9a1cdfb1d6f19e7d821dd0161942546ac237ed1b809775fbbeb2458d0fd8c8e1806f43e5ba254ea460cbcde85ee0611ee0be4869651f24625e98813aa565770640ca869fb5db1e62d52890a7c62014b4616548e1a5fc8f55a31a58d4de4df6799e26b45ecbf3a0a1bdf341bd3fb3ebbe7ba73e4b75314f6925126931eb433d36a4266c7e26c1427243e3a180c91d0217b1a4efcffb480ce6c12568162202fafd66827bb0a93afd116741a9bbd43b78d69ff6a52d5d56e871690fefc0d8941dd91f6bbedf509895870325ed82500ac898c962c3fcfa1beedf44b281c39c4e5a6e977b147e729ecefbad418a9b00c1e9f13d1e297c6b4efb4543f85d792762dd2f66007634eea5fa650d512062de9971dfdf13dd25d6afea87a58945ed893ed02b20e98b4ffecb0a0e0de4c8e9d5342989e95c4b1563c29eba23a97e725773328cae224881b35ad4553f9ae7607b909d5e2b85e60a2b16d4e1c997b69d9b0529ed6bcde50aba2b98663919d3da8d3ec2c24359e4ae71c69842281fbd99073f9ec45871b222c0d1a410aad9d6866d92438c4e31b3b2fef6508c3a4e8ca16c06cd75966ac22fa1c0c3472ee143fee5fab70f472e3d328dc5cb117a1def71253770e1bff9436837939bfd0ae41d681254042b8ca949d062629e98d879cdfdce1452bd62538f4815f3b344a0132b9e1d3666d55f14fb12b077929b0781715bdbdcce9aa82e406efd36f08ced24f63cd40df5a087dd996a02ce00845c2c89f571601bf31496f93ee6b5c2139a6e32a63377b8a19b8f2d0c78a9990bc4c812f9b4cdeebe198632b3eb0f6fb0f62b82a7ccbda4161c3b23e98c710cac99fe00a41d4305bc8437bc8514af12694881ec27deea6559cfa3e029d45ea829d1574067c75abb80ac13cdaee3d35d23a880d3abcfd3aeb402cf244095067304c0ec5f90e9ee32ccbcfd0c0e30fef1a0906402ba0b23f1059345b4ed73446fd0e104c69478c614224c25cb4927ec2bade28fd44c11f633488aed934e5e0971c97497163d075bca181c5b9c0557933238da6e46731b0a2442edf635fc57ca27218d73c8882de0d15b7c0f266da6eeb2f2b250c5235ccd5f60754d26c2883abd99d44b2e78b59afdfb7e54c37a7fb8093d0df0b8aa3a85e058ffab6154dc1d10f15c7c67e6c6de3df32ab848ece303fa6d9d779cec4097ffe35a55fa14d1b9e2565fc53bc6eaac5433b4b8c49c7b9f292da84f69e37ab4165a1f0ee2d625add0eef38f025709a9873e2d32e845bcdf077c51dc4607676175017d45642ed9bb24a053479a7d6204e3f0e16055c1dbfe80e93aa377f3f37c84efa1ab1f47be2e3db69712a0e036e6956f22a9c3e8df50d73199e103f7d77c59a7640c8067905c090da3dc6b949eb7e3bd42755c7bf49caaec59907086136b4afcb620c436d6d433b87d19c7b5ac981ab159ed48f3f6256788a77ce0cf80f0b108d19d396cc74bb1aa4996d6121306996556d814c086e1f112498f47bc2dc8623c4ad9d15accf3383566f42b801d2bc736398f1fcde7bacf13aacc29c34a0a696ed2256363f07641e0b50387dbe95399665f90548c539a63f60514718da1e971bed2f512705d33eb572a609f9bc4aa71e15d9e30158738ed2882abcf2cf032a023d6454bd5186fcc36e341b48f45350fb549a0a14abc05df11afeea4ff82988c272e504a68215773658c3db126459ece0d5be6acbd771f6f59
deb4172e2eb21d3f488f4feee303493210cfdc909e1f5f29326783a205f763632ae46fc9fecc7f25bf068eaa0f13773bbb3e232411cb52e43e4810300c34d70ee4762c4d2458d1fc34455b1679c8ef0413ace23a55ec80083e3da3e2122bf1bc8a1b9525355e4a8ca1e5e06d86bf215d8ae2907c6a0226c744c4834be9a4a35d40496648e08fcb1648f0dc7ad9ce1d6e95ec0fd9dcf7fcbb33529a91c0a96b1b009e54e1e271982667bb24b2a8c616484fbb6e61075a183e50e030f62ead5b4b7b9d651826c7ec7f25c5a44615cd5149c10ce2d823c567fd2d85194a57cab2d5138394c29d402fbe835ae3761e53e03ce2d0e18d290d43b3134ab968f54703b6cb439aa488e0c28380805c78cb95c30c5a5c2ee34ac13d5eb28d9342d351a4c210af177952db8d88e271805c882e91fe7af9596ebc7daa9313f284e383561f84a778e852832ca5c4303ed4ebc2a505b067e5c54f9a1a28431a96beb08d6887df17c6821df6f79fd4ddd7db2fd482a768489af1c2081d4498e6c9dce92e60536806e04d6851511801c0f1534ff5d1d9ac0974904dcc06da927ffcdfa7aa4c4dbc376f8560d2b1b36565a8a2f7512f9e397e38015f4325acfe06df0a96f80d7f43e40a75ec2776333ba2329282c9d1a43ec5df2b1140c54f78a739bd360850b5181f99003af269bc82db110bb3cd5721351d50d3400c108f010cb4ba8e1750d2f51365b25a181aa3094db846d7c6828924c43fc9936df476f033170870bf5cee062fb12e60f0db1af42eb1734aef2736331113ca70b23118d6ed95d8955b02a686533d0664272b6ed48eea4893f4b69902e03f2995442512837d8fcb10885285e052470a591b930b4b3b01950f110127c49ba4f11e51312e20c6a90d5c09bc66ae79581f3ad92832dbdd9225c0b161333b2a94f125f0542bc088f24d24c66ad6c4e4f55042ac6d383226861b834c32298becb4d70bea485ee4bcdaf3aa29621924f91fddf17b20b28fc827a223cba9be411e673ec408d9ae21d1afe12e0282376ab32adad2edb0e2f7c84601855cd9b6172a25aabf0125e889fd21b39ea37d423d6e3a8706336ca3d6c32e00baceafff5a6408d884bad10f31372e5cce5525006c4a0a381de44c51948f6c37e1ebbd2f2de73e2ce091ea28ded571d1bc98f3bbf69ca4feab89d46724407a441e4a92b5d6cb8a4f26b5c3ae4b985a6f1e2c42863bdd47ef642a2ccaeefb5cddd14be16079b580b5d1919e1176ba40b3fc188873542bda7e6d4c0935f56cd46c5943360a7785e6e5006057e4e6282ebdff2fda73ef86021934dbd844408777e9b488901cad5f3c96709b8857a7f38cfecd10cdfcde10c937aac6c5ad61a8af5fae06387629222cb7c86e6f171596f12dee59bc8d10b792bd73da55acd46e99eb8ea335d27a54b2b4e714e0f4ae4cbc5ed9a6ae6e808a5aba38ef90e9b6093f4282b23cbe1e9e659e298c1d2946817eceb28878a11063e04a2a7aa6001145183d4d3c02b75843b2c512c26016604a4cf13396eae0a9d41ec89886f563304bae9e3cd90b072d22b2fbfddbea077a190ac571219b70ad31f28e420cef8fd1d44e1eb82cddd820ed9c61d2c7c971df077cce2c39a31e488859f285f1a8eb6ba011ad356332df3365665824bcf14a7ba494362cd88298af387a0fc910d0d9bf83ee45ab249caf6af89bfd583651919083a4838fa6ebcfd0ce4209352556f6d4bbcfd2a44d3672a4ec6ab6b705c91a5c27a846b57f1cea1311f28ff191b672391bb1660857aa1055abc96be140065a3535c90996689c6fda68a30808041f6c880a479c527ed3ed8764902da0f83810c18b26ae5ec5aa463135df02b321fe8a8ea0229e93333908e44fd5e6730fa057285f7ba11ba554874b28029d37132edf87cd8416b8395dd179afd6eebe6342bb4a1a17bbb8b4b614e45a0e926f00fd4a8fac8630caca097ab297188400a65326da19c5ed82c349cd7094c887431310a516c0013362470970faaed667059366ef927eb01e9029c7dfdb88915ee138264abd1773c8e2703bbf2bb769781fd2138cafeb844f3822b5d1a9c1a791ebaa7df323c7b51bd4998d0185ded8629bd26eaf6f64ecb02cd1c2f01327d8c53b01021422168362a8ee1a901c6b11d24f697b57ec3015268a931912300847b6a24c430dc6431dda8d195a8b6103d31d6596bbbb2a2e3541b8f3a314d069061e0193665f2a72b52ee299155ed3c9821ecdef1e85473078700e6986929bc170bc53aa7e0d494898ddbe83aeb041147b56ff4284c7a3c2717e17e2987422e0f7ce4de17e553086f5d4d67200a647e8e39ab996443ab27a962d34fd4fbf835e3d5bb85ac9b350461aab361ae3bada2b8d2532022e210d1ccb417c8b371cf139341f3907bbbee8885d5f8a86689a449bb422b855c496b2d5b609fbc27e72b5726e004bef616259058de2f00faf2ee9f7cedeb8ccad25994423143f29f1f351a72b98416375290913298a9a46b1b9d1fc0
86e0c36c30b5615f80667e0d13e928cef22bb2e05c2ce374b5d39111885c80e1847dcdc6e46824fed8f7b36211a025b9d53dea31707c64c7c757c6314c22ba50151f714b11dd44cfebca32050d2167f846b416585e736f25f61822411e77985a94056b51eeedca45ff565ba34403ac2ab47aa1cf0e9a4e9470f822833c6a5114dd0742f4f6519590ad2a0d0b3b7f7bf8124262ff3cdcfd6d6128a96ae9183ad976985d927f4aa0fb672ebd4cbe85a30ce4e884ca5b4ac5e32e9467a46054a9e39ae9eae6ee7c2929644e38c4447c091dd7b4916af22036ac4bf0b4aec71041c1d943da9959f1a87a482cb10b1df7fa41af9e24df82d4c2945a8242ecc8df703e3adb0d7914caf542b8133a42a65bf681675302415f4301c476b74b41f652288e8eb0ef6a36586ed4fffd462bb5a64eaf7942125b53fb66b16a4e37315fa3942613dd9d06f48eb8ff094cba2c2c63a518974bccf1e8c6a8d422b8ab857aca53b8a0319dfde4084a3b4aa45c78e6b2f84fc1c570581a3bc568ea9dea2ca73d160662231115bae84a14bcc0752ab445158836abb6bb5f0414583b58743648d622ba1691d25ced2a202c61ce3803f205e7e37fdb018dbc67889011cf5cf1008d512a768428b51b8795b011fdc100b5a87a75fd36b00fcd0a65cffce072429413f8031b36b935a0bf96022e0464866e66b391638d52c9a950625d0140c630c5fd35f0ea5a1b142197c465958a5da2b8c8b50dd439c908fbec2a977080f1ae1207f5299482b49c4df5116551afe65c1aa0e2d786031d5803e271dc528f1b301630256388e612d055e4c164e1180cae074417472a35073befcf317f3b6ab9d5323e0c7093ebfddcdb2ca023b1911235a1f9acd225745a513e128fc069f04ebc29158df96869ce06d3fa87d40388bca49769dfa0ea2b79d044cc8823821becebdd9313b480921172e531efc51123a5cdaae10cdae3c427ac71abe2f33fdb2d8a0c5f88e9ef878f6dbb3a118cfcad5d8e3c0ea460ff55038407c0def9e62897acba1ceb668065a4b9ef3cabf14072fe8bf6caedf463b042018938eff3c40e89093b969d91ce9d2c6e1d295879085d33d33c8c771040ef0370b79b4fd568231d4b3aa3e9fbf1a79b4d6d39435b12226f55675518c8cbb6eade5122f59d6f88cdad8aa24891e6342c02c21f3cfed35b3416ef9a09626ce2da90edce57308aa26be367bd054aa6fedc3fd2923d1fbbd18413f327eab1a8782649020b3ab7c91cd1355d81d2809554bcd4b9458a05c468b7712a62875f1f414ef5f4096bf63cdb2845563034d1064427b3d82611e52e836bb8e98ec32ae893a8eaa7eaad5a30e630062aaefeb376c11356d111b802c7094e5078f27c4c454cac98215eee7d8558c7de7c59f17331b46eedb0f4c964084d72079893d40b5575f8ba7faa89e20aa56b3479ccd03bdbed9b7ccbe81aacb62d02ff7cdd849a9154149624a770816ffd0974e79f8b547bdbd09dfa9d2a1715469900b9e88d5fe050b1972e4c30952080d54c2848b1c2ab59ba649ea0a7e635ed472f9fc4ccc20321cec36721b775282196f30575ce58f8f63109de486cce5091f661843c03a6a195f488bcdea4cf6b8071064d082405c21430139672f30e2c5c8a395412708ecf0631f50ea13572537f564a06ef2eaa7d501bde9f63a0931322f0437db24d7228172f25997a17754c8a3ec503fad603b23c194d06031188a345c4f22967bf9ba018aa0f507c0c222aeb86b3964f612d6175c592df135aba581cef640a4b90ccccb448dc60a228abe2696a8750a3afa55ce92aaae88139b0f5744e09306e68313824071a626b4dba3113f66e10d11787ba58363f5687b0c682224ec2ba78e5430d82c0e1d1da438d9598dabf500d1bca95e274842ec9e3cb59a61fb97609890c872a222c4213221a8c43d40427febcc6e9a9267078b5112b86368c2eab6d9a81f80e5e3a8e6a20dde50159c269fb8673f5634a552438c50e732b522bb732e03b142d2291357e926fcc07712db029dd732f95064cd2661056743605403a3fb4b47851509efcaae3820a176f81992c56040d2283bad59dd532c13ef6443268d8eaf04b332805e22916ce1c36aae275170897d76f1447ff5e86b519e999fc6a7ae991028e7ecc0063a109c9d17b3a1e74265388b89dae0278b754f92437b9f3b7927df13d7c0ac9fd7af125cf8adbf9f1127371e3cce129efe33994236fcc94e38b706c5f129fb23ff5f8223458be59f82f0f862783ed8d1663d6b2603daf0b78f83876806098ee8d081556635423947314980428f0d1a57a9ea8e356ab0d76876b94363b9d5c39176bfff9e7376a8715a9eda69c7000c85d215962d3c09aa18bc6b9187ffee6c38bc81ed709b94d04be9c0f3e19c0f77892bdd6d0a54b9bdd85779cd5688f835fb8366bb74b5bc26f46425f5a61000ce7e863e734baf3b1672cc6d2930860ca31dacfebc4cbc956fdcb541134fc86f1c2946b6fd4ddbbc83b3528141
53b5c299b8d14f42a5986abe568ca5e44681a22c88729db446ed2ecdea8f583f313036f8dd6db226c98a48124661795d27331de6435e691beaf5a7041aa3fce33f46e7c7209933c1a220c72e3125ba6622699554615ee8684f6b000ed4598268bb510348655cc5f59435b02ff36f944c14570db86da117c575080fe5291d2554658a19f419567076fbe1f3d8e684adab3f96961c7936c46c0404e7bf8489d30f50bef8db241985bc7c217213c755ab3485b4b48113d75ccc2b1147c83459759c051656a88571367b9dbba09bc21a131c6712c633f16134a5f64928e1e1cbff9ef03d90b8ae5357b9ea20fee9224e48a12521820b6262437af28bee581f122b57c68685c88d8c8d9f3d109569e7163f841932c4e276c48d214caba756444d2ba93a64f9dbcba0fd93b0b5a8955bb884b4d1ccd3b55d590ea7b8d98096e76dd15842dd917eb59ed0db4bf8d2cfa7a5d7a986b9c63dfe7ac755d655976b8650f14fbb05112d4227c56a0407a1ce138d3c842a23f0b0a37ecb8ca032b4ac8194419769c37be9844e7eda928d73b4de44682f34656e69adbc73da10463419c0a1bf7e51691e0bb0167137c8bd55440bf78dc7bb9b6426fe0b59f2278381667ea69eace2982bec3b8a03449311f4cd04089d3392d781f0d89880e91635e8c3ce27422459a63ecdb81d14e7c46c51aab4d56c50645dd2add89da99535b53f79fc5038bb938565c51a525c67172032876ae6c09620c3286d1686e002e7ad398a7b5c6bc2dd60e68955e3a975ed677f03830750756d77486b29b8fc7544a8b72e79ea9ec44f852eeab1fc815cc3d73ddd11f178131e3e76b75baa8ec6d12e6de3bf9f2a2bd7b2e86fe3bd0ef900d53863cdb8812b47f59e79fe4679317a87ea4a46196bc2810a0a7a19b3423ca3182d5d021acb28120e287a33fa2b4bbb005f460ce5ddb866f19f71875034e5ffc641034568d4c5c4a021c9defa08f1331207518cb303be2fb12d2b8eb142e5e65bc5003cf353a1545ffaec440ea444b2330814eafff67922cea74ead8359e90e7a346f39be67877bec404745cd609b54fb98aeb0a293d2be81f98f5264ac166c916330a302a0dd67d949dcce00946203d3ef099b9421b9d9b077bc4f05955a2e8d86561afc67956754127072705bb43696f6b18d90cef9c0e46229329ca2338793c8349ee7e347ef2349c51ea7b27a6c9b137c4219459af899142820fb225dd401ec8d06919c37eb7ebffcf46205a3ced7c851f28aeca8e5fc316cca3ab2813ee04e5ed43805271f8e86ab3cc87ae72baec45aeb016bcf9946c2834feb1a667f3f5aa3a6e0097d17376816703d5edfb59ad2b9f1c084c75e7db6a60b1722559061a72cdb9f3c0e91357f6079ffc07da530854ba3fdef9ff1d0ce288bb748af3d25550606609bf0db182524292eb780588d6efde0b17bd3b4fd6175eb2d8ee1b712e4a92197490f855791cd207e7fbc12c4059690f22a63399f42fda7269ec3e5aea75c2e6456601f6ab9027a748b9c9432f54497c2cb15f45da02f46ccf212f34669da9fd2871dc2dc86f7997c694ac0dc9c664c2c5eae213e9c3a637bb3c4e41e35ef1fe682fb3a7779c4017b36768e418a1f27d26deddfa703336c3bb60925d2e67fc4f956a9382cebb28d4de183bfd105ffb3dbc19515ea13f594a767cfc3c9655b0c42fbeda5ff2e20f401bcc94807eb3370764cb0e8a17a61193b50ffb0e4b7b10ea2154bdcd172e1a3f772b642603eb90537f745aecd9519bfc484cc2f0fe1d095e7fd25b8a99abe73d4b2a52b487437f49f16d453a7922ab8ced8502a62a9b190a6bcd32e950c4e9619e737082d05420b5eae88f6732320e973ed642148f4ad9a0942c4f16b4f06a24cec08d2b5d39d1380fd44136f6341dcd101d491e89bcede3519cd06060aee39efec05285b65f9119effac905ab8f5f058c0750eb5032e4d6fce215a579403bba4261b45888adf0e17e3004c5cddf62e282f5e20ffda147ddb62783444a94d67bc9e929ab32720d8bddd46bd3550ed7118db927d5507e4bfd73d35cf0f64b1a1812e3871d0fd716741fcc51d31ceffeadcd6a8e6dec9a7c9644744590dbaf4d7070f406d8f397be12177529fa75cc68b3b375af8c31cc9fa005a2c619e6f6ace3835e6fbff8d220ea0139a7af0886b230560384c4e76b5e0ffc7300cc3cb43dfa4539d8c0ede6bb54b1a1ddbdd1235509b58d9a8181e9f6dac0e2d8130f611eaf66fc04ad1dac6986d957612521375e449ca68e0aa770951e531ead52c11d328fce74862e500c4012870320fd27915bba44cc1e8901c35681b50749df185159e7a5e9faf4069dcf64d2cab19b22f932472efa2cf9fb34e13724718791869fac2ca32e93f7caef89618b673c057ea78374ba39db1b5d5b6f42778406ddbf5edc713a1d86a0444b73afffd12007fae0027068d8ba100576281fe78ce55c54d7b0d66a3e234afc6338e2b718f03f2a12ca00e3941068ca59e1303177cac8
26675a8d694213f9a7e04eb867c7c1d26243768ab27c7b5325f043dfd455349462086c80541058fda29e3c7362f90b8830dfbedfbb2d382c111f9d2ba6ae30a1dae6fd41e68d8fd2f613225b9442614fea0fdf52e87c2aa67d006c898b35f5802205c3bc64b76b85d0b13f727208e7cf1a3686b4146c614ea0fef85ecd7fd9966eda7d705ef6db63f9af9523505de79abfcae46841f8348277c05dfad726081f6e0184bb7468cf789a8f2e34ecf88c94ab21c9059f47eb083ce40cb643b0d30ca5268f39f17e11ef8c58c2976f805379ff7b849b44c950c37ea9553cf3dbe59775ce10fb59982dd789b3b6f7418f0cde3e6100f7d27085290dd0bf0d42846f2e05205343dd19fe5853f90753d4b14820731481aee7d3de828d6481e5c7e1d8971cb3cdce5444ceb7c119dec96e9f62cfb355028f501730b10b788ba9e4672e6ca55beb3bce7d60902c64e9559bc705174aeaef6e2424d3c8cf9627b40f03f8eb4682e19565dd02f05fbda560dcba9192e880f258ffa623a16988d9ae3e12de72e4a27216a6dd1fe8d915aaa577b09060c0877ce198dc084e4fc218398e27b453be9dd3272710e7398cc22971bc7a67a15fdfde156e56a0026a3cabf6f6926a6f4f147c3acf4f35b7aaef6ae31759b9d4d57ce1eb7d15c32fc7eb5c7797df97d0d2fb112a5b64b22818784b40569b3e344c55efe4e7bbc26c738e207569d542daf27f77d51ce74fef545db85880903ab604e1432f91206fe7415f6697842e9796150e38a2b66c7de76b666e44f0ec4bedfc7213ad9bf8f101c9e04ee7e88889281a1995fe3914546d229c0c0358e01d465942f0e3cb09abe6ff0b6f53f2745053a8711423ca66424aa0040eab50ad75f7901d9a388d924a48ec2d7fafc35286798ab1da9a277fa19bca507ab75b91d4d01c4031a242787b38e3d3966fbaeeb67db09fbe63899ba5cc41860b853b54830cc40c8653dc5ba716a43dca0a7f3f3ffc314e6ee4b8144e49d7d18fba9d58667cc90da1776e85d46c4d462e79e1911858b3dedbecd7c5e0cfca5c607fd6e8c9e1a3ff381eaa6a3a00f04657807e50063493d69569c530ba80325f598f03bc46c6b0fa8d4707dfa2b905302daa151a535df67c03824c461154c93c81393aa41c3b88c8d76c0901d04a91cf273bd5bb6db0b8856fc602c0162ddc28143858d86b178d8ab600c233c639bd775aff866a88f93c330116090aa8abf824a10d777cb47d9d4a7ffd29d30837a19af5185639f817d5c8f323340e1080b102032b84dcb16aeb8d3caa8cebe5c4516fb7b398f769eaa5e3843b1f6c0c8415389e387ea8ccda7736b4cd8f0397645ee2464ebc571da0b474072c9a3591c41b22fc7830f1739f1b0cf04ed6804f9195d9752c9ffeffac1ad5e0893a4970ea9289b95c0d20e204c644b2818c870f22b659386bc815a7cc30e1bf2e6b28193a84996424600405da01274b0bfd23e5a56c3e46f701a9ff4aea411c9092dc4e2298638dab8c914855312ff1a7076b5c40c4c30b965ad0ae62d4e9249758b0c389746f54e3ac970e964373432cb13ca0e34a6a5e6c973474f32e69a6295a9e906d96b8bbed046ae48aa1c9134b87a0c8979e8279dd7d3df479fd2b37c817c4e22b5c848c8a58927aa34ac302376d5483b93aee5257e394588da47d676cea2b8ed40a01ba84252016014e099034dd0ae61cced1184a668f98253656d2558e667f4a1c92cef6eb6ab6878b4a308c6cd6b79ae45c511f63ef275e3f30cfd1c7442723249e8ab9d879c0ac3c2e124560527c0928f7c23af4b32f17048ac73530d7570feba7b13465c511d9cf7036661b235a1fa9293665560b48c73917c0d1893abfbcc46b87b55655b789b5bd4729515e4f62ece9994612af0ccfdb9dd30154e08d31b58cd23044acfa541013c1bf2cb295c026be4ba9b4b1a0cd17723202c941c22b7e16f6482b0c3bd4922c04c7e9fc81033a3a30580875f04932684e2cad9612fd3fce87e2900cc4dbf319523c8b9e7e9ad9a9444d45de0f4fd0b5901433cca14985ba4811031568d8cc0ef1215fb62d4085b7f7d8a3dbea22aff338c98f96ebbfcd7cc65814ebb1bc9f55ce5d3a04fc80fedf52fdd20bd7047b1ecc8a3f354dcfd2f55ef2b38e4d2fca02f8afddbc135714c3a9fb3db3c95512c1f4066ab0aa2e070bd39a0c6cce2bee9103998f7e434b840721f2f0abe6aab0334bdaab01cd2267640b1781128c7ccce833969c1982b81a898af4e1a7a42c9158291d43079ab44ab7adc1da8207a720f139dd0e6920b64937fa9b14f00179840a14f40172d93da1331dbca80cb8baca6539a5d192c62f10d77fa1ef206eefbee219b5a08209eaf45ea011f98e84fdde0a16fe68733de65c0398ca7826e4c041da2ecf1b57820888bd7bf5878f5d0151449c8f0cc454e04a1af0c6f6afa0ebb65665b2fc4664d5d0949f06bfb0589db8846d885ac0281500ad6b3640966a8129c5842e4d6dc87732fc09a260d2061ea7e
4fe749991f8896ef35990ddbda0774db097d4e32098155f048a80c384fa907000e9abf6b02c862c7e251cca3fae02169ba8e1636b56be0d587c3d385819b5a71400ee3a3164536332fc5a3c37d7c3f79fc6ba4fd5fe1589d09bd5efa2177852f6acba389f9719436a6a30e187e0ecbd3511e21ab8133edcbfe36d2c4e8ab419260c2979e98846f65ed7fec941354bc198e5332b72a36cd228487958bce9854e492735ee44e7059405d4b998d2c31fb655f40eacc5a08a4810a0a6b1dac79e06ef92f266bd8fb0f14c4a9d3176942c0858fb94f2d9717ef909d906c21d701e259dcb0b61cca3f56812a6ff76e0db1e1c1392bcc52f06db8ff2fbd9762b4ae48bc9f8cf17bfa8260d912db23b9c1ccd734c75e9ca74664c99a3e29339f912aaecb5e476cbdabf23f99366c1f96ab6222e04c16d9cd9c70a6875def44c799726847c59aca766942b7169bb7e3b04ad007bd51897c755544a8aa4103964f0d51e14b4b90ac66e2d7aaf7731343131630842e0c57206c70077059a78384e5d80b25b33aa8600ad6cf1e1477c0bc8caf5082ee22810693d9993c0fdd28e867c754b0d522e8edfd715335e1e455fff1c315f4e2050d72a5a5142e0e3c3e808f15a47f0b706d152092a770b865a0cc0e5512815e0fb2f695de25229845a7fdff2ef49b7a91d5ff97ff3c8f3be126bc2d1d7bc01dc758aa65f93ef8869992174181fce5986a6e4eda376a8e21194072612af55085e60d64b4cb6a97207de0b1829c9e248406e4de710fe624527b6b735843869e3f168bb076ff5bb5e541e2cb86b862de2588cbc9fb268c79638367cb5d7ef38146c0bec2f848fab81596eef79f76d1d77b5b9418f94d5277b00c8196e70f24c81ce622a7c7273b3f8142d09c2702666302dce418fffa0de02003e6e2f1318fa8ac1d4d058aeb12040d0dc01e13d5c290f00b9857c5e43e4df08db63c15ad32e6f3d9dba47f1e3a13bac44288700b385e41dfe47748b8b85148cee13c8f442ae95f1f05c0ee1e77a7f8c1a2f88d33c62d7e773885fcc468331473e0d05ec6f485b3a91c22ca9886ab846347747bdd7265c96bf30280b75f3ba8b852cdf30efc4bf917e74864406255f772a7aacf6bdde76de9822c4cf3e23e592470d87f9ed53c28f1d17a9ddb34ae740bdb33856ba2de90f4b39b88b01c0ee7ac00632caaa5897912d4fd7496b6d77a08c532a57831e0763a359944d159a75c737b0c2095a8720946e1ae5a6670f83ed7c61bca8b385320b00f0bdc116f31c4a626fe9d5c9b9ac14126c80b3260ed9bb3078274a545a564d6ca5ccddb06ef207df272637e65a634194a61d5145079f81dd921565f64a1fe9f870f0e6cc31b184fcc80f410218405a430024761dc55300200d0183549dc8c384e5690b4e3b54384de36c328ae477076797ec9ca3c3b52cfb845ee125e582f5d66456d5ed89b20e4c22cc7186877c42355a059bd1e1a4d2f822cdd8073ea1a7242855580163b9c249e0107e020471d361512256c79a92ab3e68c8dfac87928b4402a2fef282767b4c3d627e7a3c71bc41cf423c04c271a11636abca2f59df13a5733313292bae427d7aef286fa5815b391955e12e39f948ec87d6db47c6cb59bbf9dc1c62425a103b876019bf3b43c65277481d41e1bd1b6ec5797d2c7e12943a934707923862b5cbda6de2f0011165bada5c6b4cb9bc8f8c81df96e9eb48653c5833351aa5e9bf36c12406224507683961e82a13c71767388d7a7b4a9f029e507fa8d3202f27bf03b99bc53081901bd7abc35e9d0c51614017f3282a5e120cd767aa6e469102b21e799487467dabf4398f5c1836e56cb37cee4f966514fef7862012a97b677aabefe1212be0aabfaf4b507652e3779da91255b6b6752f9425f68efa44b18c8e499091c2f294b7362bf59542528f19073c0a37d91ccf78bd3073d3f5a48df566bd6246bb5db9058560475b5f0278035f7ede52c97a257addeb55c272813a8eace93434ef77b9742c0fa55e1d2b1a4497e882f75205a227f5d0f837a14bff7acd5073f548fd82841267226ff59131d48c8112990a1af3626f540f29ac7dfe418ffbe7e81f8fc2872bfba92a5082d3fd42ff858b0e0882947f20bd02995c2e9e682da84a510f9faed69539c011c0bcddf72249dfa9d4ecd6f8462e96a1b84f4427f5dd247b8a8cfd2ec538cebcd2b2a98259038e624c444025df9a8f70d3051cf31d8c3e491d8a17b0388d696a254151b85cff8fb2674bada21ca4b838d58a59dd6e1c5ffb8a8bec90969c2b23a644a228a051bc713132f51cbf54be88e0542b90e11770710dc5d43d8d8fa57d24ddfd72f38c11f7f8849096a0a3543fe55f7603deb9da62fd582324ff636268dfd31f3f1bc9f4b4058903b3ef57d891818942238ef9aba6102bbca6539b3355d2c356d3ee24039fcf9d3f0cffab9c99b3cc965ca8f872681c0e7c7df31ceb4861e191d9674602f161d5ac8fefcedfd0e018acaa8758125777c43fb1a912
a802b273d6374ed7c748ec6c2b93bdad52a24e5b477f488c63766ec21a448255bac2b32df5dcd90926ab1d8ea3c3138e442a985b871ccf6f4c1316e7a3e2add18482030ca12ada9f2fc5c93908efd184306ea80d4f1f90268ee7c8c040993ec7f0b923915fee0c2c109623d0fe15ae9c03d68f46668107b3258b576f4638228e76dc5d9816ea4a82bd13293e78e35ee8bc3af64d08ca9bbf3dcb15feeca151db8abe72afa1807e217293c77fa728471eed25476e49fec89c4964e1043c3f3dfa08957da7a1fec35e814bb8caeb6cd08823eaf579a30daa1b2e1fbde22808705770828614abb71a9f6f344003be10cfd6b1e1eff8023a8871e67f99d12cc02d9d5c01d1c8bd8a20baa28064cc8521230be0dd56a6de6fc7f989102698018dce72dfc094540a9b67370de4c03fa99b23aec3723aa543801de1af66d7a9046c0a136c2edc1d9a15ccc3411eb87b699e2c7a58de65867002862eacb756a9c85b8bb7111b0fa8287c6523b73befd39bc984208ffec1c0060086c445f093528d982d4e39c8629fa9bd5f56969c70a2eea675ffc896c94c3edcf6df46091ac455e23d650fcad542531469f9ad1fd048ae3a098b6e2ea95bf14e648fcd001aafdb42d8ba399c60186ee5b363529331e0ec78fdc2e050c13da11a882885780ba684c617403e9769a58a0531e53330e545124478a2335cc41637b2a2141a6e7b7f4927802521833eacac69d9898fe094eccbd9935004ed413516432b45144aa7922801404961642aa83790f0a52e0a1c1385dc3c616162826229801db923e6ef0c387f7e42080fe5dad4298bff1e24035e35836d8d9e8800f32e49634f137583e698284e5144ea9b28eaf332d57891a503e758c44bbd8d3fcc5db20d2bd391cdbd22e999921530586a76b5f18863bade5478cf969c0490d4c2096295fc1cc88cc2937c829443c186116865f20e2657b21f5234e812bca0e5085153cc2973cbf902347a3f00a14c40d5569913e46bb72f348250802284b3205c8515deabc987c426b56b4f2e240de01fbc05206fadcfe8173e4fce62bba4b6855c64861a5881fd02d1956c4204e3607c1acf78c21034fa26297404da0fd66059a68af9a40784d673adc57910183583a9ed843b08227e36a9ddfb58a71fe4d89a66b44e39333b1f33455aa597c712ca0b4724e9897141aa3efc0cf1be1c49120683c371b2de67610b5b33bc6d089ce038c0148c06c954f174a9305037ded836b6a92c4a19e5ee8370f99069c9695711819c28118d3d8352e2332858f0de380f655970b22e9e721f94eb92f1388ca0b75475d1ce901a3ce20183233a1c14829f11e8655710071a03be39008c5fb8128e56c1647a3158671e4058a4ce4d55406282501e84124af4778d7de0290c1e380e87a9308bdeed86390ab81d572e28f03c62575f3ae23fb8966a7de1cc31c678b2d3ad1576acac926cf1e70fed1d219451d4caf0f8aea0491ce9db010f889b77f063860577be79fe19bc570e0e3a5e4a0a09127d27884f783ed1a39fd429c3ee9b8c1bac8049bd73391f975c6b907de123a1e2fd869462e97322b677093855da24c0c44c236a11c89df1ce4025edb5991c81c3ddc72d058eccddef12a58541c06ae1239f3a3200779d2caf9a23010744a1b1d0c37481c2680060e4309a17ea575fcd2897a5e381e4e4e81b154e0bf3daa97c8af6421072e99134638974a4853156e1e635a875a8529cd3940c2123482c590f28c4786ca0629e9ce44ba0fdf51c2f308ee36a1ddba2d6bf675016bb373b3b8ed2464672d2ba6dc85820822e65f615e44f700cbfa3c1f5c8b08d65ea49d27a3f577a4b09ed2d11b7dc636bf83b8409154946e49fb62656fe4d7271e739e643d3f7ade4dfbd8df1c885f6217c658104598f649713fd4b442e75d00ff558dda4a4fb04e0663c208e0ecbe045849cfd2b384be7ebeffc83ccd464529d517890ab87935169cb5cbc0c8ac7354fa5a2a5b8971e59150d94a989f35089c45e69c71c77ed0d64adffcba25272ef8270b77bd227fd203d86a4bb17d00353f83c9791609ae5c6e06e615ceb351541f98ed4bec84bf679801b21c2cfbcd6f916fc70f6f96c86d78b8a8c55b7d38e0a4182f3d25a0995fd584b5238874f0a1c99ceeef902769beb50957a88cb51bed0634865e0660fd8f52cf9ee3e85b98d46badbb1b4eec50dacfa96266036c7d931d96fed39a6ad4b161f9b5531305b072975b265ec8b23843093ae1bb92207d738657d70030436856206cb68428b1da3bce526c4b41a02f8f7c56a22ceca05ecf54befc2c5dbd5ae6ebef75ff31177e71dfd73922a6470060b99424b4a639981086f5036d0e481a8295fc5ccf8c578153743fc09fbed072a301d9b76e77d7c26a226d47ff0d261b889c6d80ccbd0e6b65a83d757b165f02854e561a7f97c042ab81eb29ac4cf910503805dc91dbb4601b3eaf243594503edc1c71abdc6123e5bd077114f1bacbe078731a18fd53afb95471db819f
6f7c2eb7fe38b59040c3780ccda54d125f0a269236952dc0fd3b99b912e2de33636bc38d809e3438d4288a8c6cca5d6d8e7da350d01829de3d1cf032ed3ffc76ebfe2c2a877ab1ce67b827668480a3129832e19fd9f05678c75a29a1ec51a223d518d1700144112d294b5a16ade6eb1c5ba7f829393f32706ca6ae733029b1e9ac67096e026ed8d5bf105514865e2752684c5062a7f79dfb0d750a390dc8b6619fa4c977699f4befdcee9375d32ce85e64339d26c0bfb9d32d3fd8a53705c6cfcf8c825fdc2c79e8e62bee12f11834650a35baf05c5bb94863a6f0d8375f621b544b1479b30e5ab5e80be8b9f3cffa464e8594921da5f15c8ff59d9cc010a7e65f75bab95b60f7b5025acfc2ad4c08e4af95edf0ebf5ee2e4608f262046403b8ca951ab4a0b1305f2eb2771f3edbb133b18fd22525243a8d254a9f474ca08a9610b394bbe708e286eed61015e5f1fde776901641c57bf52fef37c2ad08f554c02e84240efcf215e97a18a483764422741a12ce23a924f0e16301e7b87f17e6d95a11ba8d41c441c3ad88f45f92446ea1e20c76a3902f506e8d3cd0cb96d9076b149382ca8b16a8a08041e01a65993a4e9619c215148ba584fe5fb8f16da941558cfcbd7b1e48b924bf3828e072801ef5fab182134a3974862295705314cd43409f3a6c9aa11388bd44e21fe7982952a5d55c52801aae173b498a91765643e7cd0c2ba1ac600310b8ba849323f3a73414d6938604943f2eb8a540dc703cc5395ca591b7be399bfd6b1f718ed4f81ad7762178d5015033efe18f051f9211163942998c1b6127f4f26d826cb1781556fb6df71587da3da3377db15f38f24dc87461659a13ae1026694e47b3a74bac3d41dbb1b653a23845c85d5c093d2474db1c8a4b060f74bed83f8e48985b8eb7aea827356c44fe21b5dde8f876f5d2a70fc93db386524c02040ec8864b74920c5757466deabfbb0dd593f623deb1c1112d9d7b05d0eeee87603fe2892de2883c299dbb0268ef6e87b81ff18cede3889d48776c85d1f6b47df58943db3378620f8e9c4c775692b441a97ba022404ed89f815280efe9997e472d1aea787ba08e5e79b7066d8f02283ad68eb88f7aeba0c3df632a6abca6eaeb4d4c1843dddd3679e3f603c74b4ac4e20f0854434c7f1528f0dc9329d6ff2a4b3d3c0500cb7c0fc420930bfad017c673be4e74ca1cff04400acb139071907eb0de4922adf1329e301c0f0eca05fa2246e3330550e6050d9510c86789fd400e80ed62d2b516890db78c60cef1867c96ec1bd040b2fa6d34f893ebdc833d2715003ff3bb06274948e948e7d5ea040ec28f93171980700045e56a8a0e2e1bba57a00aee9cf8e2e9678b299afd74e3a53e921904562fb6006cd5379b9cda48890f69ef0729a9797225a1eb15f10b89634547df34d8f487a361b09cfeeca0e7e123398c5a4cc2060ff6fc54c320d57bb79c7f43b68dcd3d915973fdf508c70c57925ff0db7ed1e4d509025d13797bd762963954306d1979fbf0851c014ebdb96e0e5526eaec9dc6cd60c3b4484c25958326cf402a7c6cd5887afeab3109ff6c18719d2d88b7831bfada407f2136efc6ebc4f458518fc11dcc6681f70a16206ccba75a01ec3cc808dc9aa6afca59a1ae13844104351ccfb8228adcb4613e4e9a2bd511bc87efed26f5de8a1d3c38633f71e3b58b85b48a6c6c9e2dfb41fca8e731e2db1bdcb85871093914f84a6634256f2f1c20e8fc6739b79d1e57b51f9900c888e1e2e665db2d8c021007ed8a2e77b4b1d56ffd10d835e65bddba89819b1330df306b97ef60cde9d563e2b7b3ec5b88e25815f562a0b12782ed184ec9f2a472fed5bfce18721513b01c0480984686befe92720f79581340bebe0faedf9693a698892291fa9086e95235262c6438362b68c5b72a8065979fc168476c6d7b28ff442ef27d7d2d43c6ee92aee96a646dd588fa435530b5ba2726d17f17ad482192ee6c3e1ab30a677f05d2279e0e20bfea8552b1fe7fd123cfd55d46cf9c519cdad70311beaaeac27c8354a5cf3a6d93270e551582accbdde982ca8c1ebd656476eed9ec21c68f53bec389c4516597144154303d32d0420cc958509490d7b2dbc8713a28f1d268e48df5a17b9ad10c59f957027843b33e9c9b2ec162207235abd4ab87a38d3f51c83d55274d9db08bae98a9769ad76abb330bcdd4f8f14e74c867725f632e3c4b378049ccf308c585394a36f086b5d3012dffd7d38be6aa3c460866a88f26a235497906787b0ef9e4e7f942f6356c59441d659227f9b5e1eed9905d17e1b403f8abfdadb0a7c7295bae93b4189cdeef215e19debf83d514771e65f1464e4b4d7e83ff4f41c00c809cb77113e6d0615a2d5a97c2efbd498dd4269228b56f05562667712490f812d10d0efd576643eea0d765e4b52f53d5e8d2dc6006d351ca5b0bd4dfb523e3143c8702f4f1181a2c13b1a60ce1b085215857354610796b87fd2aa06bcaad3135c9103
284fb74af88fb6ce9e67cc45d484f19a1f7c0d037d852f18f594fd196d9226ccd1004db41bd0778e1282ccb9e7f10ae2553daeb022127e674c52a13bc239c45c9a066f5354c245e516f23d1677aabf271bf51bfd9c91aaeac21f3cb259a9e958b519590659368a4a00d40c1f34aae25b33c61a0fdeed612c27f65af381e0401954fa6b6a550058a75c3474c57755cc349339e0ace549b8f4ed6c335bf3c3ff4ac58eef8833bffbc6e42c64789a93e43853215221cebe2caf50c0dcf064edc0641a7fec2cc9ed083eb882489174bed3ce573e63acf2fa5d934f8751174d56eed3ec480590c42ad8ff40924eb42045aa3eaab43712a8dd0459f4ce6a9ba907eef9f75203e58927bc1c01291fc40dab24c5cf8d885df47c1d0a031a0815258a80e8aa6cc2dc0b0f28798e8426bd978512fb6872d10ad94460f7adf029dccceb621d0e6b72401c5c62ec42e331b407f6ba9e2134825cac5bb5ba0db1ce2ace1a7be9c8824a4652e580284536e021ebcc4fb90489ec2106a3ea936f4b93340a1a756c6050bb03e305489f71a8b1a117e7324f40a0e8d7f1936df1abb8f38ce093878dbf52549d1f01c0ebcf98f2827b1ac7124a6df6e8597ccbba85bebdddfd590103a1e3d9b0ee1a8b04e13b84b87593854f03d67ef84aaa5609eafba4726ed0fcec2542edb3dd921c56150d1c8ca035696e34972bf5ffb936ad8316fb8f18bc5c492b619bf2355bbfec5beefcac51759fc7d5b2d607a276982ac7511af773e8e48198feb3fde4dade24a7c52fa5fda46e5991d7056388de8a4c0fdb9b1916ef9898faa7162465739d5695de3904f6eef062cf0dfe3fae0456fe6644b66366dfb969832e009e4bfa7fa1716106b26814496f12270a8d26475d7e722ab9621436a53fab4cf6f1a9ff74ac4c60e46a21e2490cd0d69480d796df372d044c48b76186d69845913184a6dc84fd640ff009211b16d70971199ee07b0b879c11e5590cb5a009139e2d99c0bb2a857e4304f66ab9d3fe7285ec604981e60f07bace26b10cb8cd7a7d186b7ef3e163143b038683755cdfa2fdb76ca7c7617f2154d5896e8ba17b71b97bac56d835928f4b31bc7a4191a31e5193460b8707f72f0a45a5557160a6f9a79c87b24fbe7b32e75e813cf3ce45e878e8a6fead835bfefae3a5ccb2070ee95242bbd810991087eb6c0dadf360142035414c0f0c22bc1ba02c623fdd24c033958ab12629d5653f2c24216188a6147f6efb71ef592f6aa5239539907555429b4e5c66c38ca42c6ebbce830a0df39523c3ea57d2fe3b30686845d0c054295b19158dd42c15b4901c7f8ea1314530fa9100c9207f0bbe9734fe82c8745e0203e2af42efd03022dcc7f408b267cec5e6893697022af864ed7b9e9cbc3d0a983d41e9c0685c2fd15b3fd636f8c97692951309dc08f11d204215db1d53dbe289acc7c4eb8f121ec4e0c915c1a87c0f8337bd5793d165e0551cf156a6915b64492db6cbbf0817222232e016f99e470b305fbe533edb1b43021e72b761913ddb0518bb862d8175f170c21b10eb30280d6505b7fa5f2de3809d3d276a03f3c7eebcd26b3460721cd2e3ed832101458b70cf985406f60b27aa55bc255e61fffc2e3738000a5064c8b12bc1f08142a7081fbc10a591c2734933b1bc7646b507a9450423e44a5ee89e5301b5fb1972f4172cf8b9ea006167f5a225aa34689b8fffbef3e3cc469105608e929e1e84a0e591275a14c09713b1d2735a8bc6ab52c9b08e9e1c87e1a58a0499131e9a33ec70836234cc288ef4bbb091ca2fbc27572fd1a833d856b2f10c4e9286125f5689dc790b010a2156f51b5e713d93acf7010e533a508b5ce0ac645902b2aa0e0611ec838ee2ed5a038f68dfcf8b5d126106cf6f0de5db7f32f72e9031475a4e149118e733e1204154eab21bb2ea9d459fcd1c3f3eeee7fa812c7e98f52cdf4ab202f173e545b0063c5bbcf9e9ccadb51d51f7934ffb8b6ba3c20f8437f79cc3e68c759e394e7a4fdce997e1db5c3eea7f561a895ec3abb222d2cd7f7313b1f624d3b087ddeabdb7d2bc26e577c6eb7137f75dbd3b64a38575a8e6e85437ac8b3c0141409deac355e01d14f1cc326a71349b5e6d3f67cbfde30eaa159d2cb5151a6221954b52cc5517133b5c132a69dcbec894e9d7b4ff671378c9b9ed71911243938f04415eb4e92844a0d750ace421dffe601b710b4bd6d57b83bbf88d3b2adf71560f2d23bf3ee6e8329e3df161b7e4c3aa98aee49de74dfa6bf0b525d2a6720dbcb17c95da0981a35deb2718faed8f08588b7764d893eca37bd706cd159c9e96761582a9ab5723db40197d634062b56e5df0f28c89640a772c37841d3b26293e6019d7eee79185b9e009292a8f97325e37e25c010fd722b89b8141955e935ef17f901d32ccdb9b912a2e5e6b337d37df99c9be7b686fc7bf7db376698ce92cafcba805a539265056385d41782bcd45082920a8d98e10d08891748d55b6491f57d64a6612c52
f4cbaf8fd04a534610fbb28563de98a9c047d0de1737dfbf376905b644740c8d81009293f0d8d0f1800300fadddea0c168e67fd950c9e01441a990bf036569b97739217aed6cafdc2dfe1dcf16a6fc0d5285d9b8452ea35db75e802cd4c3cba8d61c7bd38a9521b1cbfcc5bce0216dffe06168656d656dde60604d42c2bcfbfc18ba8d9b92550bce8971c29d785f2031454581b82b35f5d01e45d911c1cf0c060142229b1da8f30adb658969491c48b6a85af64711b0344b35faee3f53674bde01bd634f103899af1ec665628e5cf411dea0a3d9dc489c46c310c2175c747f3b0e88674b4b8809f6dbef46a27121e0b075a7312cc078cc0c914403c2f91b08e42c9b3ccf32512103669b1a8bf35fd5d290c0f045bae7e9bca5d1997f26bc835c9cc1e68af71f2d8caecf50ab142ec1319e3110bf739d27f05adab26e3aa77d99665ae749c764cedf128b81cda22d73c19e72740e839318ee723b448b408d5cb41970629d7fa9abe9570f221783882ec0926bcb4ea360d5d7a7733c792203890c541edc000ec1086b3f0e6e867d102c8505903c710e1b4250eb59d033648a5f8cd1ecbd2ec0f1eb4ef04a1ca28efe2bc7ed87d112728c60fae0a69bb85f89a64c65b3dca37abfebaec755e6441cc3b8ad7efa38697f588ac06df4ba94d39c62f9c1f3a5b88b2ca5448ad2eab3d5cf68703906fa2c248d3c3e34f849bd52a6285ef1360f039daeeed4c9d53a6540feb6a46684e81d12c0f030fb7844edf543a71abe8ccc844fd97ce06b5aebc1def40b388c21b359357e82aac10f54473f4538966ee3f5811c5c0e6d47eb0849ad0e5c39cb422dfd81627fabb1f4d3f1fc7a6ee14c505e85a5c8302392bae7ce82c07046ea97ab18fc18e62ee0f499c87fb5b2e77f55b0595072ff7e00fb861e817f15d2301e8113b4da8f00b3c78872bb215f2709c48cb016deabab2ecda0e15dad715a4760c0b0f0238d47d1bde3a226ad0bdd9a0615042c728bcd57ae176fdaa9f95e9e9da903b9a4fd446a1d6d98d9ff58e961bc00bcc5438ea7971b61d51644b7e937b2b5f6245c665ad5d41900ab264512ef78b8b2d398a1953b8b25e6a4d7891c2137fe3d162776fb65be69f25a3c16c5cb0d8b58722397b7e6d3b8f2803a2ff866ebeb89ad4450f87903624c4c2ca244c3ba9ecf3fa1c422fcfdfda7e37ab8848e454c276af091e2ff8eb3fbbfbf89a1f7f13958c3834034cd49dc4b84ef2b438b6140113fcdf8c664a3ac4a8acf2eaccd330dc9c431eaea082aa3f2a73ac970e20670e0b422983316b9467774b8fe10c85214858ccb19db96344102482ae400a05aba0d55c06986cf397581b2676a3f372a7408d13dca8d4faa5d4887c1c62d3dfd7dba91b334792ae4fd02e65584e693572f8d83f38b4a527bf34abbf530dc449bfffc0283176ffff16e6eaae41fc840dfa21dd87fb3b8ff9e7f5d5d81e62cdedee33d1ebccdb2fd9ff8113f85c07edd949900f064753e3395f26e0e22759288a67affbe29bf9ba6ba1d84ab5d3b8bfbbf1b2d5b3b845a041d727d0dfa727ff55829fc337cac67e4c4a485db216192b50af99181f065f8ec79269d6745c07bc31f16d6d4b438916d3f2bde8d3414bc6f66dc4897d3779068437484daf15d258a5da03d2542d94d432fc45879ada633ab6f35a2c944a2c0261765e01d9fee5c58f33140c544e3127da53480a69e9924cf6098226404fbbf1103ba51cd138fe54a6459ac1d325860b8be367e378a655bbd3c028c491e399bde810274fa065adc234ec9d9e1df1eae259304287f86df73c1e37e5df383082d24804d34769f7cde7fb01dabef3e560b0832e1809293c6957f3b28e8026db06624a2fefdf5a551d964da53aab517d8598422a8b7723ebe94334de2fa9911a2349a51f44b4612fa5a1b208098761243f49792598394171a0e5109a01098de928ce843b0f2bda20688154b0456cafb6b776c4c5c6e01a06d259133f1daed637faa1603e78423b44612341a2f294d76fe088c121769e0b084c30c41a4bd52cad1379e7eca33432bd7fdfa0a71d2c2b3461fabc2a40efd5d771ba61a81818703c05a3995d091843d856057ee006eb9be79afd14feee1cec53de02da863b5799a10d6c03d993f0e53f366c8e5a6c0da58e82a1834ff4a6b8c2e8392a315876751d1f185befea3cfa0e8f7a963fb0e14d9aa8beff07902c7779397c719fe1df39dc0c22cdcb93ac10a223a262eb63349e516f108145a2960bfeca560768e5923a45c4f9e1949af2c7b4c2991688d7fab2ca48aab61f8f3db08183145af9d5d383b8f8e13130c162c4131ff5c1d5e8cdf52f962d7898fef9a8fc5295819a423d265e51dc15f7951ad04e1dd7c22146991e07750195315ac8e0f4d84ff6141cc055034a1ea57da24626592bca418440c3b082e74c629b98a5a3273630bd3fcb60c1956b26556125b4610ce4990c4870e0b5660dc3259f3d1b10089fd2e2fe74de70b46eab7cf849fdc64ab9a096
d25898116ec9516af73c961041584a2860687b02a8b51a9b3be392b5d030388d6afd580bc8249d14990767769a598fe76471bd3099ff679762e04d001e77c741aee67326da5901c006d17e30648843bd814569ab9cf5ea9860a0988b20b1429d4cecbf264c9a49bf376d05815edd3e9ac6e2606cc325e92f36082619aaa4b0a9af6a3ea5d07cbf7142e46d3dd9a0a11d209684f8be8c01528f3483a9a66bd00c33278816b348c93ca2072d17040d969f7236093c236a8e59b95ab82598844d8eca145e08fbea14a39b11e875baf0620bf1625bddeb16cf8d210b817b9a84692edb4b3c4a852f771e28818071a06c3b83867aaab735754ebcd716d04885443263aa7d42a73bf92e3e848162b39ee9d3ff69ec9246efea9ac5c8b89f2572397a49366af5d14b404c31ba9e0d80af264ff33984fd118b4ce57ce894d2630f6caf5ff42a547db9629f0158462082ebfb9bce35cd31b1039c639ff3d426eff892edb21be87148d902fb92981caa0d45a9e3e692bda055654e48c3f7c539bcd13a8de00d11033b2c75641c326d94888737afcb820b0a97958c323dea8efdc408dc8ef68c110bb693c814330a0e7ab5eb5d2c2a9fa007afea4e9722ab42e8e7eca345320ecd777bd413f134593eafa013e2086daea732275eec4be066f1d1180dd44674902f0b98490ff696ce979adb388c397db3ad3a0ed2e9c3b6d036d5470217bfa06f1256469beb1b0ea7b3269085c2dfdaebea445c98b13aa975dc4cae8b9901346b7f62e8629bfdfb8e064cc9bbf981227e4c9dea39ed0a67a0427e9f81753fb06402e30419e35d69f258e503e2b56d7dec358a3a4a14bfef27ae61a68eb633b6196befd87dfd56a17ac2cb6b2673c172c1a804391b6f590e2169aa609e3c4b526c54cde3bb9ae4d5c922f7a340df2bbef2d27c5b8766684961654fe02af716df9e69ad8312104a728fa09f3b6eff3c53d1b56e2863464621346c56de7875801866dad8939fc35e27f5f20c466ec1e6a5e9997c5ed400184eba4593fd86a6d34eb1a5f241e313ff9b0136312e25468af1a6223de610e8704ea09da39d0bb2a5592c2a935a05383e15475b0ecac5e3a6a954ce4311b0adb4324941944312196f3d0bf510309de0cbfae99622702ae921ba0d2c970a3e8b40bad30a3544a0a847dbc6baf412c95c1354181b424ad3598b5afa2e6444621e43436c54b4231fd19af433a64ca6ce38c05c7808b28e65cdf97d72e7e008ca7c7b5ea0455abcb57a51c0adf42fb1dab9af5da10caeeb4eee1bee7e4358729baeee518c96bf4099a6b8acaf50d8495564e8b373770c10851826df45dfff4da64c5f8ad6226d9dc61f59bdc42150b15cb71723a748a1c1a0f4208437e2e86cd58aa7ebc6489c6732a9852fd61c6d785c4210df8c0de039cae3370d59b4c29ed47740a8019ecc1b0e6743e2b103c8a592d5f9eb74e67b82348728081445300ed4ce90eab407fdacb799df1b8482b3994ede907612cd727501bb5d1013f859069a3d215d5f10517f619cdc59a1cb8d951688cf4db52cbcfad9a44554a5491b1f636f0209bfffc57fd1c64f9e71c40186c1f8a5807c501fb317f1668d168749b3c614257628e605cfc69f027ed39d3cdd6756fe98546918c853e36108f9c1fecf8375d1cabebab6268ff8797abc6aae1b5e4d3fb6d260442145781806eb1428cfd643dc0ef1ecedb72ef5969396b6bd5fb8a7530bf48c3b371fc2c5ea9df46e0b5a8d0525766de75daa9a55c62dcb3fe84cddfd18d074ffd50199ee6aea315edf6ef987749380ba5e4b4da422cd9f37db100501bd31a15ad1bc104dc268662b7d8b82bb785f4d8b6208f6bb1da477b7f0f2cb666e65a9e0154405d8d39bf2f09b8cb5517b2c0c9fefa82d2143b4545115497ad7fab6bec9cbd2d4d7fbb200ef40355eeb60d4f1beb77d22df0812ae2767805d034c50310a63dbb0bc350c2adeceb36196cd062e3407c1fc01acbc68353c4913291e9574d2593d07e095571463c225f94d464c5abb3ffa5c2013bc83900f889a27999e52d327656c9356bc5a2f78e8bf86a1e463f7cf6e23a61a4db540b12f2d09531d8bab50f243ae5aadc4dd467c754134d387949641cb63ee24bac2ea25ebae26401a7366b18f49ae0823c106c3a0beab3a4ec42bb0899ac482e139328c7a93759e7db61de422d7adff78158cba284ce53767224d336c96169a44eecbb67d9509c5385fb8f821062aca86b56ca1873c26a9c5bf2efc55456b1a501a04a5cd1518c944ed1eb9ed0b1a98c65d27f2b6156535979df5758224ecf2624e5305ccb11a4cf509591844b386cf5034a047b965b405c514d07c95ac0718805d3303aa29d1b03af1128881a29027362b5cd4b46861dd708746b0f550ec010ff676845f385b9b05d02fe0e18f74539c38f93a940c0024934aff25464e562d8aff7a39b08917bef2df796724b29939401560939090b09bafe9d892ad5b79cc9b1019e8f3b1a1d47075e521efee88a42
3ccb71593d3fbe2d032310a31bac0658f6c7b722d9bf208d5e3744fb7dc005876feac082b034f65eb4182ce96751ee311271eb065b5321a706a43e787018145c31f132692b9d2319454867734c3f55131ef322512b24f7261d622dd849c065552fa9b0bd3c7558d347b74f9912122dd517f87dcb83e63acf2735a17ee45786273b5f2e57ba9b503bea3ae928c26394fde6f78ed9b27732ade761d52fb4d24a198c0a55eb175f1555a881f1cfff4787f3d2c159023905c83d03a7264bce81bbef605ffb57a6b1e467d8fd618d2466e4bec073dfe8a326cc9126ce9547625eed51fd6e5c863bfe2262ef0680102143835f7590a7bea2455fdac3ecdc2dd094adaff58a9503b31313683315c1b527c8354fd06821dedf5c8fb915c1a54f39358a48eef4706b9103e002932b20b25786059765d202049d021a468a725d4096914d3a94bba618fa011fd6feb86dcb4fb75b70bab41ab258e427d3c7180356a4c3fe7aacf61bf339a877c64cf844029bd171055be8fb67fd2c27256a9ca46a41b736178427ad4b3dc886dd98b5b407d99046089146f6267b6fb9777f07cb06cf06239195ad729665284f7c6124cad90fadca5e311015d3d376e1417f0022070a15cb34bc45ac81e1d51eb6b44cd935560d8c94524a298d77843ef6ecc4b2875f760764cf84f86616e3cbd93dcddd6957bbcfd14857032ac811ea7eac62d72366ad23045e7210f10d86dcbd6fa18fdc8ff36b95bd69488c6fc1223efe8ba74f7c2537df987b0b1cdebc9d050ebf0f87af91379008993d900425c44b9ef761c48c34aadc4f1cd3f1f552678b119bc03025c7b06378dcf92f3c2226471c0a91928be0f0871b6f94b01a24a657f5f77e2d6b7c9b9854d8478ae4c9f5b248bbf406a4a4319d18d1208a422d5de4e249f089cc172b78d2aa5aa5ce4ec453823c226d958fc843a75d7622484d37e9878fd1644a193259305eaf8b0854618031aee25fdceb02240137799ba35cbe387cc9f7218e7c1f5b76b454ae3bdaaa8bdb56196ddbcc33f99181e111f97e7297e43b41a5dc46a60f555447274495efbdf7def89923184e205e4cef487cec71a287a4553102ad8affda65cad8a959c490a8c04729a594526a844a6b7dadb5d64a2965a9b5d65a69adb59ed65a6b7dadb5d65a8ff002a2300ae51e5e2094e97d681cca7caf133b3954e51e2760999eaa688b8cd194258b1859ca4b29e38a5e7bc974769bcc471f734f7a559021711a010683c16030180c0683c16030180c0683c16030180c0683c16030180ca6e9e499727b5bc163fcffa9d44f4e5e527214ea2e5c9c84e42323ffbe77dd39ee9af67b4fe92b0a874a89379a60be6474591cb9d21647ba6c2c8623b7538bb89237c992d6e8825c1a638c54d85863b97f8162d2754fb145544918f4d34661c6aa0b42812cb99e172c91db25f6b8c028cfdf206bd42eb1e7053779de1e4d9bc4493c33ede811100d60805d40ca0f0a48004600e8c3010cf002a0f4408001a4042083870000e06487194360742811122308901c503f4c7cf4e0e162c70b1c6ed0419261e4b0a186111a58e070b5be195abcb05cba1b31c8d00283c79d58565e5069c1051b9ae9ae543558a0b1a98068000316f083021280001f0e608002f44080010880870000608721303a08090224871f3e7af0d881c30d3a720e1b6aa00187ab35c30bcbe5468b0c31c0c0b2f2828a0d175a58a96ab04023c55a6badb56d5de86052aca5f194d34801adfd49c132db1fa02a8c888472408386cf0522b2d6da163a180bde1eb439393014f414d961c2da2540dc3965658f6f6564662a8063f34202e39822383838455444444541984b494743f88764e0ce28358a1489c94008c8078a548a87051b9b9b0fecc09600c1c9494d0cdc79c6502c161ba29194a4e484d74c0c144a4a908ae0e0e0144901020ab2a28fbaa88774d0d9040506d349814f8ca709172e72c099199a2496b8a9b940671843b1586c081715195d11a38454849111a222383838451e8bf5442187827ea4e8dc62c6cd0d4e0878747244d075363294949662146a6864a2143a9f524141423b403222e281a6019d7430f7b2642c2d2a3f3d3856ee056de0ce7489d800062ca0f19b871ffaf486c644560f0a78486712f090be10d05d840fdd9f439165c4011ece22033c9c4705e8d45bd6f4d03278224bda10e0e18c0de0e1f41140b7dce1a1fbb326b2244f001ece1b003c9c4becd0256f1934a463204596148279d84a3a3c9c31421ece239914a47f224b2a0179d841393ceca11ffdf6e91a1f0f1b073db1f52227b2daa6c7c3d6e1f1b0797634c9bb777078d8276899c86a9e1b1e368d8e875d937be4dd41391eca2b2cc5160ba2c86a211b1e4aa31a1e4a241afa72a893707491c86a25d743d9d37a287f66e84b9e592357e64b7b91356d580f258ecb439973a3b9b7f4dc2922cec3f0302ec520034bff01d135834eefbe10b1655a79188d5e781889543e24b2a6d05
c4d1b0f630e5c3349aeccaf1ec69e161ee270c146644da519c4476c6d573dac916b7cb9ad8d5c99efa7c456c6b24673752357661fd5ede80bd4009001e00278902e5bf443d05580500284100016215d96c80730ec6cc464d90318a00076a8876971086033001b01d8ec48972dc2031876b63101e00100cf0ef66648509e19118c900e4242c07999245dd9521030ec99c5e66a1e08988354fa21956aa42b1bf251633359194e0f1b1e363b6c76a42b2b82c30ecf64653137f0e8e0c93c41d295dde4089aac4a34573608d5204483509eef24e9aa1547b7d264d559235db5ce1de9aaae566ab5d20cadf452c3025dc01b60cb4e9e35a6dec815a1c9a2443349bae8925c99978127069087055c0143d4f702187e2a423640a5c9a2311be9a24372c568b228ce8d74d1227265de85a9d402a8b402955460d89f91510a78818aa46b2a0101014da5228c31c6183736e960408c530e3e05c4f818632010cb8c8182a692154745433a985448490125d1bd9708e5ffff638c8ff1637430f847393e0afeff1f8682653e4c27053e319e2630065170673066876cccc66ccc0ec948a552a9542af5ff7faaa483792a85ffe383a1cc2930954ac9c032a730a5a1492c416f680dbdc00fca17c1c1c12932e3e4e4e4e4e4e424954a3d953a417530a993933ff5a74e4e7e7272523403cb7c526474058d51422a422a05769611636f6c8c8db131f62646494949494949c9c9c9c94f4e4a607430272525a99f3c755252f2929292580c2c7349ac278aa1a02c034b647bef2f119ab50f71cada76bf7b5fb66dfbddeebdf7de3973fb8adb57dcbee2f615f735b2fa8acbfddeed5c07e36ddbe9de4f1eb86ddced22154db6f7e194b1b68fb2fdb673dca7716007691d90ad83d14cefa156d9eeeeeeeeeeeeb6dd8ebee966dce7dd6ea6d24c2af285d94cf6e240b6cf683ab62210d32ff58940dbe9c46226b1b869159df3de3593a9df7db6e96d7a9bfadafccd7ebbdfee777bd3ee1b77ed8998dc428e3d3b41b9fba542cd060347b117b394b7eb26c5d1aa46eab628abc556ccb4ba62de26ee601ac43167905669276d7a5aa7bd41191d90790f2422f3090c4fef9eea60e6b5edc68deec72ad3249a2fec1c972971ff3450abac7c374f371155ec1b770ae28a7dc5ede1ee70b3a057d0304a6774402428674c2693cd6b2606692299b0c9649a4848136922996474403290de56cdec1eae37619a6ce685a5e6a294e033ddf48e2342f3fc10a7fcee6f441742017d3e31967c49969677300d76ddb9ee2dcb40b4f7f100900884249134cab22ccbb22ccbb22ccbb22164ee9eea766452668272460343d3254d6cb23a47f5a350df275ff2255ff2b5c957679b34e980f433303c81dc37cb79b87ebe49dee226ac612237d37798c897e9b71eea6b0f3b0839c8a48331bd3ba85db48ead08c4f47e18c4c88b9099c5e7b399c9ea9cfb7da7ee24784221aad8b7c0d30971c56249d3777e6078eff7cdae6f9232303a207d6d22754ee74495a489349572e6ca5e87e32189592016149c495ba6936899d832b54ccbb48cc9b4dd7bd2c17420d7fdd86e0ae20383e04049d3a09c91325286084a7f92b2bdf589f7435d65c36793819333a95596d664fb7e8c2095819bd8e60c4fa4e984f984993495e8ab5535adb269958fc99a4b3476bb73ce73734e22fb9739b32be696d9a28399776ebfdfe6ccbaf07e7631bbc1d95ce4a340f1ee204f70c467b6cb8dad08a403e9bdadda48f04da4a8629fa3b33d4df66a1716d792b520db6c86468e31c657fbf82c03913591a28b136f0275824f0461e60119a79c188a48b480c892f187d8ba361235119afb439c72dff41855ecbd0e667b83df449a2cca91d80dccb217cd4c4dafec775e60d93e6451dd647b4a29a538424a25a5f5975294127cf743af3935e715d6acd815a85d7e503dd9d62275279ba941d98235e72b42d5a36ce38fad4bdd96e974df402232736088c299705624e3c97670a6d32afb0c67afa862afe18c0351c5bec35907b29acc0613b97982b15ed9eca657f63e17e81322ca8cb225b2e5d17906b165b33d55a23fb135513367337365351fb962df6f301422259b10a1b9fb10a7dcfd7ef370e068e1308e0a7c5c8fd82a8e22654b8d80503e2cf64d6329a854eec7484bd0b404fd1a59b40471654def56a6c170deb57b316b4164e3b025db1e5411fb18eda9cf7d38edd4b8733a20dc359088ccde8c8fe95fcd419cc0d6e1527c08f1da11924b383d6d44a31334953a5fdc3930dd2f4844e6ed59c7633b87b3bce11dfd8bb3998ce6d52bfb1ff57df533d3c9f69dd3aa0a364cb64e4613552c145ff6df56cd1e416ce7ae838b90b9031b76718e2907466fb23d132e6edf3a5dc38bdb7bda0b50d531b19917100b1c8d8e47834174600f6d600b8128f6b7833890083a9384dac5e44e24ee927c27a85b96d8e783027ab4df6e70268c9aac6ca657f6f77e1fca8657e51095ed8c299bc9f61a8e16
9c2011fb7b9aefbccd9072442122f6020bac6005322bf03ebdcfa9444d5fca18d0e4f93efd26e3aaafc4b739a0579493f7d52a1a35177cf322b58b89de7647157d43332626460821a87c873326e6135b75d6ae22d3779d9079afd6f78f79afce4a6f64752caee8db27d33fc71c9b9125d8355bace184cd1f2d892f9c4941f48b98455329a4b15c5b69be8cb4a276e18e3c27c4d6e9fbe9740e879fa983d1b44ba11cdd4d3487668c4caf846290c889678d9c7e8b78e64c58af3ad65d8f2934d42e1aa811b54bcf194795c819adb577965d8fd9456dd42e75048c754c93f9822842e6a9348b5096f8c299146be47c7eb8d33e2afaee66b99eeec72a134125ac552972feb48ace9ca8428f812f88f922646e4a3f6562da654603b1c5ddbb0772e04c9235a95554ce4fa556d14f702eb5e00b5b2929d3d3cfa47631093ba6526a1713a99a4bf36702b54b9dac16415cd19fe03691e99dc8f40d45a6efe9a34cc38ea1863aa87fb8a66997d7a9db21339d2da3769ab579fa566a97902e51a9445c7d46096ae49333b48d70c75a45dfe1f6a1b3d2a3157c0d6b97507bce4c416c71a753270f71daa49452bbbfa0bce4a132d23a1355e88fdac504da68556cb2664eafe8e9ccf92a187ea899f38e65da9fa76f4b698caa3f41f920413a08931ff3f5f3f2392a0e82c309be30c67a5429bb3e0ab6a6d102d1c801d98568972f3b49f6cf0a77f6d712aa3933596d93337332238b5a258ddabe264bca000cff7aa9f183ef20d72aec6b8a5791a852bfd5571ba128727d8dd1bc65504b5fad455708c27b80b2e2b341ae0f6f0525cf56838ffef614064be4941c7b601093efed133a3966eedb0837d2fd58e5d38b5a55895a557764af56d5d84777284f6c69afa732882d29224bda40115db6d77b2357eaab8e0f5a07b9de504a43c923643bdde69c9f89fdbcfdc526f6164f221d4a1ecea37026c108a90c0b92872d7ebfdd1624adaa17fc4c328b4db2b3b8cdaecaf72db26b4b50305eb0b09f9724ebf9614f4f85d8d330080f882a9f7eba8bdf56dd5ed9d7df1be99236363837b9723765d7344dfb96791c3e65c9618fc31d87390e6f1c367158e3f0e5b0e570c6e1ca61ca7da0101c6e0ecb0fb4e1c556b9c0d788a097c946c5174f50cf516badd63e54c9d53ebc91ab45e5aa3357b1bedaeca68fd3ee26b3cd5e7fea7a689207e7d6c8b7c69f3a1e33df869616650a8629598894fce50c9fb2bc892aa69f72863d252f67b80bea7286391d2e67789b345bceb0c9c89433acf56839c3b7716eceb05dca84ea4ecd19a639c3330b11554ccfd13e664a5431dde2ce2e388a433a8b9a05805719529a1d1e5299210fe7128c0ef5d96bb20000529db9aa0777003f0444c18071ae5abc7400e35c9164af5c4f04f7212290bd872eae7dd330d5a13bdc4ddd0ead32a1c32936d17e3ad5aecadf5d7c21d52182529dc9d201943cbdaabff76301529d11303c780243553615e1a3371d06778ba0998bb9dedfdfdb7b6b391322e884119179e69cc326b1a3d7f0a5460c55d4c927845c55bdea77ad3666962bdb9e669b5d9c1f2d3bf003269a38d1eeae9d5d7630b27b562c8fba937aa9e9af15caafe8921a882bb425371055e86f9ea0a439c117b664d94614756f5a66da340deb6b615fd842a506a48efc40a67204994a1464fa0c474a29e513afde40a612943424ad9a2a59c1988da05a158156c9f74bcbc1b68a614ac1178f22c5f73a71c80db180d471fa69c80722f13deb78ccff6437f1eeddf351651fa23eec4e64662624f01d89091c71051ddf93a0808e0f4c82c54f1d3844fdfcb4229bb4fb4fccc4c859e023908857d03153a2630454e27b12472441011da7effbbeeffb6e81171ddf8ff8c06faefa743a9d4ea7d32df0a2e304a2e6aafbb2bbbbb64988228a2966e771dbc9e3bab99d4ca77bde4f9d89a34814a855f632df93ac5170ded69d4edde66ddee6715e149ec7759db7799ee76ddec9f3b49fae79a0e76d9af7ede1e95bd77dd338afe3e47dbfde93f4aa576b355593173b4c1bd32a622b1e8514a87af75ebb3f518e3bdd7bbd37f5de71d39be76e764fdee69db85fefded377badae974af7d60a8dd9f32dacd294f1b5769f5a8c9c4d2b174a67b266fc3f4945d90bb5d967d72276ff3b613e76d1cf78df3e69c73ce39e79c73ce397fe266d6791e2737cb75f51ba6777192ed743a9d58b418f93417242d589c463eed74dabc89145bdc74c24c9a4ab486da504a038ff33c0f0c62e3b0cddb6976de65e95ab16583a5f338aef3388ee3382fb6365089d8f232980ea534f03ca4adab516c1de775dee675733b715174278fe34eddb66d5bb7755db76d276debb4eddd49d3368ef3bc6f1cceb2d7c9bac94a03ef9bb7712f60dcfed4c178c17927afc3f41cae19c8737d114bb8f7fbe4fc36659892e5fc94e20bef90bca7ef61b22e2865801e469f6fdfee357d9fd005658472f62a6341997bbf2f4e54e78d46d95e575
0873665c2dc1ee77ccd39e79c73ce39e79c73ce39e79c3cf4bc3880e83eb23fb9498c97a00ee3c5e553dac5c5492e6fa35d5a9cc5c8e55fdae5fbe95e77791f4186f0d043b3c0cbffd02e9c9c1d8c9c3e643c7519200ead92731291fa9439393339333996fbe43e69adef2a2f9d710e6ba75846f60e9beee1ee27ecfdc3a71118f8c55118c64b302a85873822660879857b3fe993738267dc049f3c063679098ef10dcb0c629915c8b805a4f8a451b3a44fc77c4a0f5be70fdb05d9fef4ca7efb9df8e6ef43a1fa670b557903c396d91ef60f0643554ee19839f05b0996f9f37372e752a9cfe7e819b883848664e0b9444eafec251071155bd4b68d4bb94f84cc5b0a6b5ceae613dc413806ee21dc44adb247e12e6aa33e6a95bd0bdc48b8935ad54aadeaa5f9e12983e7cca469d5ab66dadcb40aa755f6209e4bf4ca9ec313d6a37f4c2f92b1f95016654b1ff10fadea77bf2ba574935dffb40a28f685b2e828944bc8f55d248de45105fb4e6b4d9a84082fcd42cd95bdb52f295f6e5524b1578b2dcdd5d168f2be6df73413e763b26850afec878ab0cc9be77573b019ccd72c3263ed62ad9d41d9ce9f6c1fc3d993ede76bdaab7e14ead53e1e48391034819b890bea806c3781443a0e4848cea476e92f352ba3a9324db38161b644b6609872936b53f44414aa511f37c18b93fb9bdc94f874a24aa53c9127aed4a77c27d15e96efc5beaea238766523abe6e4fa054497a542daff8080e8b237b0f721b666dcbe00d165aba80208d9be87d832dd5e00d1657f7e7c90ed79882dedf63b44970502e241b61f125bd9ed8544970d0ad241b60f125bdced7f449715b2f7b123e220bbb87db6cf711a6cc8f638628bc5ed677065fb97d86a71fb1bac6cdf125b24b78741866ccf125b309256b2bd4a74d9242517b2fd2abaac923d8de8b24bf638ba6e8c8dd842dd5e155b25b74f892d939fc8ccc8f6a9e8ba32f625d17567e24abd8be8ba34f623d1755f27b195ba3d2ab662dc9ee48bad91db2eba6e8dbd165dd7c6be46d7bd91d1256de24abd3d175bdfed6f6cc5968ce3770763a9a59ff4deef53b968cf77dd63648185c758a3c6636ca185c7e8723d461c381e230d343cc61a6a788c36d8f01873e4788c393f461d3a1ee30d373c461c70d8f11877f0788c3c7a3cc61e8fd1878f1f390009a27ad859c863fcf11873788c401e6390ab2ee42bbc99f08c6bda399cb9c0a77bd8c559600f062631c1253fc1264fa5309671213894363736352f9a19991bb3a494847464544434241404f453c50da84051d1f5a6a9f8244f56e59076254fb56995097722e80d084495b97360edb9da3d7d9c96d59b65afd3c76959f6aad9adb566d5c7569f6a33b52ac5ad367155ab08222b8ba96a50a14039f135916327c8f52cb6dad49b7631c9b2ec99f6aa36f7fb502895e4a9f1b065ba879d44ae9742cdb212a9764c46b7592fa188ad7a2983e8b2347225b656afb79fe56cb5b12ffbd22c0b575df50c69b25a68d24857ad912b130944ca3596268b4d56ecec6563b12c76eff76d9468724270100c04e780c3fbcaf53fb00fdc03875726d7f3c03b300ef806ac03679c03db806bc034e0d00ee57a1cd8855bc035300b38b455d81b644b45ae6f152642ba8043fa44aeb781432a835caf82431a45ae7f0187748a5cbf82436a835ccf82435a45ae870187d48a5c1f030ee90e72bd0c38a43dc8f52d38a43fc8f53770488390eb5d7048afc8f52c1c5223e4fa171c5224e4fa1970489790eb5b38a44ec8f5347048a310d229e47a141c522b84b502b9fe38ac49542572d540aeed8514f869be8c872d3431beb9d64857f6aac9f55503d1657f7e3640135b9b12d16581803230135b3392882e1b1474815c5feb6b0562452257cd0ad1658786aeb0145bd914a2cb1211554129b6b82844972d2a9242526c9d9c105dd6c8080a48b1e56209d1658f8e9a70145b1e12a2cb22212921d7532323d423e47a5a145b2d5e4faf882eab2457aa5211628b2408d165977e105d37a607d1756576105d774628b68262abe4d5e44b43c8314090f1c10c0f723db522ba2e4d15d1755f72c5065344d73d794dbd9ef6c4568cd7d3586cbd388d0e5e39a8c141b6a941aea7f5d489286291d81a71491bb952799e882efb922b7527b6aeabdac8957a2a45a650e4fa8a41aedf62e016fe02c7f8087ef10f8ffce2ef32f0fd86579f8137175cb0614345e585175656585860802186186490a1a5e5c60d171716ebe56586195a3452504030855bf8094e956093a370c96160d449300c0fb3b80becfd845d9cc3a767983361ed3230fd0c6c3a10f616c80517b2d78f0d1b3f402a2a40412fbc1024b4b22234c4c2324404030c444531c4506424830c46472d2d4748376e2025b9b82429b1584b2f7f59caf5376686cf20d39aa14193f24201df92a131934293515ee02ffeeba5cd0c19afaf36d8e69f711b19af6a4367e0e3b0cbf61883387491ed51700a0e
53d99e060e71b66fe19046b69f01bfe05025dbb3700843b677c1e18d6c7f03873364fb161cd290ed65c0619c22dbc780c368836c0f030e6315d99e0587d18a6cbf62ff020e630fb2bd0a0ee30fb2bd0d1cba80c37845b6b751c5fec337aad88fbcc05f54b18f8149a28a7d0b181555ec53f824aad89fe047157b139c1255ec4bb02aaad8a3b08da8620f03b3b4bc44157b16184754b1f7708ea862ef02e3e0234854b1cff0101ea28abd09f7e04354b15fe11fa28a8d949aba1f33c336aad3aa9daaa33a0baf97b1d8aaf17a39456c75afa720882c23a807e84bbae6d0ab05b9ce30112f09e6d0cb7eb3a7d45a1536a1379dbe0666019bd05b130e9b70a051aeb388cacca5ec354f288d82f4dab50f11014deb84d073b7fb41cfc91deef55427b6bed75317c4d6e995f270d73e2c73467ffa668ec9c526144449c14784e6212452d67474a46bc7c512995e3b182ab3464472a627903c2eaa50251aca6727afe4d8f3442c6f1f67a995925e4125d4422336b2baa6552b1da88e512533673611590d84b8aa4741313169d1a2b7f8f2c5503ed9cb649e08aaca8aa0b8e0b3af1c5b5cbb483ad42ea7d7d322cd3262aeeabd91a382be9f5cc1f0569bef43a1fe500583a3accab107063d798b1da3a45e48019bc9ca5e73655f73551fe3338e1f270a8d170f6ba0587c2dc5479732b542aea7409335530e569d199bd526fc6880e14d01c38b02861f08861f0643d4c119d5e63327bcb73601983da834ebd92993245468460480001004005316000030100a050342c178168589b4770014000d78a04a56541a0c255a0e63196390418601000001000006040060804a046bf5fedc06be292428f72ccb61d6ce7c033b807ddca82ca8ae0fa80e1d97a2a0ad5fb73c5d57dbfa8feb54ae3be01c6e6d3d5b5b49b3a6b7460516d548361fd223091fd93f86b86a89eab7e64204dd73e6302fd9249e5e70ee2ecad5c622ba890719a3998f81bb9a708f87103fc3c67685141f47feb62fda6f9077f8a164a6b27ac942aa9ace0969e06c138bd8f08369b3792ebdc2b9a52308a8779d8a4f82980c36bd8cea1bf0988914fb4107d93a31f6bec0fd25668fc09ea090101ed20162400f81b72c4cce012bad41da87e920572e625c83c2752cf8bfeb16895a406628468e01a7220c54028e3a0c2224dc48349cc5ca79183f33daec1f034034443a54c44ed5f97f15cfd92be1967f11dd15d547e2c8284615195a37583f7c00a3c48dd6572dd7315ab69fea63d41dd0dc95adbabafdaec2ab2d596721ef228cacc2e98c79aebc933a0ae121c0b8652a2fbfa3d8e50edc55f6d2f9e4518aa2268f405348911df311640a29a2e33ff24dc735751424fe22a8303107b370159d82dfc9926a3a45bc893f3f49785be903301b9fac3bf85e38ed4af19fe5a8a6c554cfb65bc9b19e38dd6dd5493ffb45fe3e78bb2f11b37c89a936777ae2886dd125c7d6876411e1e884e131020fa162df4128e7c5072ad9e42297e6d78156c5a07cce3831d107da62ed2e1c9b703e274f26b30979bb48cd8f07d37487f08a2e3f5476c9f7e2f53438d55d0a953316005e0c281da0a2653cdf4c36e5aca355a1a2810f481a6b1fe7fd017f16ce24b96c3007070bb972ba1cd3915d7c920b8851dd78571f3fd15d9754f05d6a8600bbb3058377431f81ec3a228aefd20502d89dd213bc3b731016a88d176a784a636673313a9df8f283de62f5b95604cb5050f54a5c0245f811ae96b56549ed58e44306c443b48349b1b1086c832eab16a73086b6cdeecd592967b7d355ce40533c1fe5314e59c47e9630ca94a48f40b4b72f5852f8068216b1b60760f8979f3ad15234238cdd7ef7b7a098962a47195e4725491e8d7d4e09d33a0da56a9c55dad54e317408fcf117132dbb0117de032f4487f19fd6205ded4d6dd19fce82b0549595018febf9f4ebab17827deb196065ca1c64748af9f0866ad68e85708252386c36c480a6d434128751101272fe37e1ddb10325857892bb06d7a11c41fd92dbeda1bcf62b17bd1fd237228e4e520aadd4930645c9203f1c8a9ddc4eab9a404a4ef66c196f9c12129f60ccfcb8a8a3b8e2809378284da0d4083f35dbeb8b28d9474a1f32bebd2fad0c284e9c3d34d85a8d95923cc4b045b9e797143f69057a9b60c8a67092c64efb2399e9764265040903b5cefa4f5ac22314590486205982d0f2050ce28f7eb99a991aebf547a41af5d0896ed718086d7c5176c5e2b9042839a8ed4cdca38853359e3b133b3c21772ae95ed9c2fc93ab8bc3461bedc9ef4c6ef3dc6ad03b47714510513e9541960eed59e73ad47587d3c825e382f3f5197cd66dc61134b2af65c2b47e71eeba4ec0f87066edfc11b804c4db9e5cb0e80ebaeff8ac73cf07389097d608ed7d6e0cc34620d0986260a55e65dc62bc7fce210bd5aac1c9830d87770f51c3fbaac9b8358999274f746582032f2330df9b19469097a3a6549de00939aa73a0d8e58e681fd
1224cd5501dce91e766da4b9164d5c7e73f8b7291986adb7e6102eb19d31f88379364c2e882eae5ac6aa6f3d0242f6a4920256d68a0c44d5724a613849074ec974834555be287b898309c32ed2b49349c19d5f1dc8623f74681d58afa5650f2f2cbfa9fddb8ac16ddf839b6e2dc66d921dc2e855c49e6fd34a5e3c1f33001249e9166108c31d7ebcdb1a0f62679a24d4ad0bda21d9c400788c85a4e508696b31694a1ec506f72bf6838da43c3059f021cacffaac3bc41b3a3ba18a695fd2e82ce0b3df1bc56a5c34a9b2dd2b723dfee32d7c216bfd85b8e5f32d07a15b01af0d167fdb448c7bea700da933a88e0603e46783aef5f48ce51b0f30fb59020dbe145d72f9c22569f24cff4300641a8c009cc21ae7bc56c94c2b16a3797f944eb4ec88df61aab98c83c99fd1aa1f27450890283db054c9f2e917fc1874d7cbfd363c6c1147be31a21e63e3ac2dcb0ad661d7d8832c2340af0a48c1949cec5ee536a6c988da8a499b526700ec9263342279db96b04437cb3d31b66c957336e49652a41493972052716c296f67c45532a6a73206d2c765cdecdf4926cd955be8369d2323a176330e0050a82c8cd6e45e9f745ee6a667abba35afaf592bb329b9ab63a710faf979a5147318a9173f5ac7c0fe7587a09fbcbd18f2de217be2d737f58401d6ea38042fbcff5c0e76f85e8ba358cd62cd24b57ff7320c705887334b9181704a3b67f1f62934bffef60582323da7e3de0c35f3266d22158d6a2ee9e586485c31a8d91fa2b831b76c960ce7346983854044bfc446233e08bb3dfa31e1c730e27f1851dc4a40c487e0928753ab39730941f443582175a4bac25061c26fb7257999d0bfbea50884dd763df2770e0cabf597df7b9315d6e44a917d6cfd7857ce397e4330e9d55e8b772b35fca4dfc1de9272eff86fd4f1f88dff7820181496c034bb79f5a6c5aa6de7d188abebdb5c91c76a19ff21bd3eefb4c8ba5bebea5341fa0d366f2252817f417e5a7fd2ab0e4815274bd389602dd6b756e4804470ce596ae15be00f1a2dc90d345ed3be1c58029be136e02ccf456dd149a9f926d430d53c05c279c1de189c8de668ffde600eeaf5bdaede0f408eff07b457045ec342b3ab0678baf9fbc0c6ac80a61af35d85c5554ed5cb5c03ab8aab91d1f77abf14b1d6aa2591a07cefd1cfac66f7445a9abd92ed8a14ba23e60a9de82f96bda5f4af874cbf5aa3bdbf626af737bcae3b89e3a431c166abaf2dac4afae907a9cd4474ba7f0f4d9d40f4a8e19824457e381526195e83365b2e5a9e99d4d9bddd986a99cb073efb0ab2aebeceb724b1a320e9d485c0d8003d0694b15d35038742ee151c0fa75b321b47c1fc09cd322520ded0f1e3c2738a73fee035ac139fd60a26b97052c96d9fa41405b6d614b6f9169cddbc5a55c84aff8265ed88dcea1839a9a75bf29c31f3a4f47c645fd6764123e70a41d72ba870460d736f469f9393bcffa2f4c7c6acdda9527fd2e18e7d8cd6384b570ab585c2b7ea66321ac41e8c49d71aa9b66efe9520c210f763316e8fd92508058ca9fb4fd4c218da27f0f86b2fb96a4dabe9a3816dc45bd9cd6fce19db98386439c0aa7c2f438fd31d69db552f681f56f1f06a2932220f9653c461a63a1e478eb09ae97c4023b8350f4253f047a8855914ab8610fe0966dba0346e41968e0cd593a8474873271b98a845405d49573b84f93e0b32ed336db6b1b1fb189cc4b3e9df3b0e5543840d3024ab298e768df8227bcb7003eb709318e4c0af35db5090652cfc8ac95b6ecbc834f6d465d7055ad236d61bf78658a71d787fd4a6dce60a7ece25ef5c8f2ad31e5efbb5efd0c0192c6d497cad7668f4edd0f883e92abd5aedfa512623c1946752b37a1ee64d8cc198e5759a2a248c0d295a6157813ea6a2876db1d100c6d97e9231dac04c3ca4a767a2ed91056dd9c9cc89c1dc5242753bec96bc564aa02400c777d74405fbba0e3bbcc3bba4ef3b377a19153df952584e5eb51151b3f02c6956eda0c31590a8577a2897452b33363a309535d0f8c82c681334e08b8efeeb106113698156c3714b6817688c8ad199a317dba88a95a306e614c722504e9cb009f825c2bc8003bab06e645714b28232eff48bb1b50d142ddb488488ac49bfd5b326c73fc943aa1623673eb413eebda7865b42363000d94b6fe221b9b0ae5bc6c8c922331a62e95eb9e4a094f4c3ed3be01d67dfbab975d005ba6df8bad332462df249a4d875a3fc522ab6236e557c15025dd7957ca588fc279f3e21d94b1ea9856257172804a94151b6a158bf155772928c34f135e52f45ecd76a03120b700182e6ea4916f099d28d80e4c48ba3f4c39202ff3b6a5e8c220c43fece65b52de4349521628560a9d1027ac6fa6ad7e8b829cbb96696d9c71872b51367c7e43a58cdf89964a5dbcb937b17ad93ec65f1b31f370b848f8b1f3a7877b71b93dc3cd949d4d54b322724ecb18a9bfc20ee8dc8ba18d0e2ee1e4147817ed4bd6
6f07a2bd24f771ea8c31156453167703f83056ead89079d5399f24d90cbd5921d932df223e91b44584d7150bc28a3993c0d0c04bb4633bf7f0b8058a186fae7324f05870d42292f61c250fe25d4256fd9e90cf22e9bf1744548dfceaaa89c2a10d883d5148dc2203fd834c18d4219d39394a72d30580da72529271c1f4070ad91c4d0db37597865f7e994462cb61e855cc85bcee810735cb5fa871c087d2752d137e688c7b0b2b2ee4a9750122060f3946a927e9ff1e2f880b21a28ace848474f7d0f82f04b8bb5258c285c8ac32c33deeeefbf967a6151cad0ba5f75113d013ef879eb4d4ac0006d12096ece114c37376b62c2d5024aab0e052efef64d0daffacc51fdacf319775283f4c5ad10bdb9c722677baa41c9a725a150d001805e91ebb8deaebec5ce69b5ede0aff2b267d95fc83d516744a3b2a2651cf182f9b9ecac5a60ba693acbce7623204b0a0181ad4e40b37647544a2c5863102b2522a321b0a2222218a182229aa10723e4a05315b4a0129ad94fb2167d4f2313154e92141d4e001649d6a32ed00dc452d5cc981f5bd83700cd20b2e2a0470eb9e66db2ca360b4c194ce9100f051b06704d4b73555f50e5180fa809f0e6ed5b35d2c86de052f56aa64be4b40ea5c369cc5221a39fd6fae989268e18066f51efc36301464d7b787d78b337ece5fb0c5bc515c580a9065bb785bcf833690a6d63c7de4daa07d2c7d2425188048233c90133b21f6613aa584b8937897d540da82e108ae844110557fa1238b2c8e9b114f425ee26686e2d0b2fd56222f85d150a399a5a2ae1be1ecc71fc3f43a93181e9d662869f268df026de4667bdca053f12592a9955b44b39e58d7598c53665eae760ea4a7c0702947bf21c04d2e8eb0cedf6c05876b6bfc1a9b37742401ef75943a6147f2f2f2a63beec2d975d7d69fb4371f6275b385a887137ab6f89f01fa45cec843d7aa8ff01e49f07a05111bb0ff21ec6c3bfa8ee5f07a11a0447cb6e2f62733e708dc53a958afa8da404e73e159bfed0e02e43b3681b899265f8c63d76a9f6b374f0e87c315dec96bb817bd8a855eb29b273d50d0eeaf57c05bda9df2042010ab7ea698ce9f4e5a8a20ae5f60055e9280023eee97f00cd42c2e08fa9bc0242cba4f4c277d780306881ba5b6e5423eacb9af7bc099f6ef0b5409e480afea531539a6eaa6abaabe9a7913ab687e704dd47ab7c4420cd631426d43c7bc2199d8348799d001ba7bc293775fdc6fc992dcdee6299f3e70552c70e9b1299d68476e0e28bbcc128acc64a6242f31afdb78489980de901090df0c763738cf8eb9dd507107e4e2d66c275ca1406b87adda0a7e4bc4dfaf8c4c3c20714fdb54ee61d415c48c9503d0bf499785ab3a2e687bd2616dcd9987bffafee918f2fcc1828ce1a6b526241f67da8c4ab385f1cfeb53dba29321dec0d9545a903f96cec657b27798ab17792c57bd4d07d115394186d211c463592f177b37d00153c9009f41c8260549dfd7af8413d62c389b30493d6f0435ed17d5ec42db97dd926b696f96a6b19ebe3f56a20d8dd0cde6f19071dab1bf60471fafcb4320f9bccb4bac088c16700952d180dbd871ba2b3531c7c4a9f41b2873e7854d2f46a7c167d0a420561324318bc09098f26369c951b3257e31f26ce3ecfa2c6b2a872e35719704329d154b397be8c5e8e337877941aa4e686520f1be03421c540bdf847e96189efbb03261b93ee0a7f8cd615c1b7991f6bbaaeb7bf353a03360698db1e9b0ac81d6f4ce318680b3147dc91862926822f70a2fd66c4b6f01c22b24837690a85017862cf3d82ec4758d8cc5b1cf174118c547d1924386916425a978c810d15eaefc5b32c40e41795722d472e3d98b09718c42c5a5683a653651a0a36963b0883952457d98be869071099682b13d99b1376f9dada44d5f5d3969b32dc9e500200a7ac15cb82c7fba6c4b5f4edbba1456ed90f66d2c0b260d6964af6c39382f6b69b0b78f2ab01bbadef1ffd024d2a995ed6677452526ba9dadec4ea0ee45002ea2a03537376771082480d1f8d8cff1aa1d5498c74b869cd9da60fa965b166b9a66c54c5513db5fe3a6c89069e15e0a195b139fba4566af353f00b556ff0419e5697e34cedcba2412917ddd48e1ab412c50f32934600a69a39335ed75b912c4a4921c2fc5396ba8df39b59142de8ffe3ae130f92468e95b59216169e1c2e2e6388bdfbf51925d0b1659a16244a234f45fe04e4c9a2d37742f9ac11302b78619726e28a86bdb6eb20b3f96e2d4b53161c75792585ff37b8b147a57c57cc30fb703759300ab0086d423b6e8bc78e58c2f7b9b718dd1dfa71a428c048b805c49b055f095935e0df41227bea061e6a374a8c072254e268c8edc2208efabc484c9fefbc264bb7b151c602da47c36cec3a5ec4a982e7fc365739c62910e7090e4c86e806aa62fd09648eae238df8d26a89ed87030339d434afb9c12e2a8129262f4d2081fac23c76cc04a6ba56b457b1a6b6
e669fbd3f1c9bcd4156c1adbcb048a1f7786554cc63311ea4236ac999b3b0fe4c0c697dd3aa2363c2c05c35c95431c8022d9bc3c66fc70a37615581ef9e9e313ec0a1823aa53c29fd1b7d120a9d15a31e2dbd90117c2879a07e4dbf73fe7fa3be959110971df372e4423054f088a4256c1c1c33695e211bfef8854c3cb1cc4e0dd8bf41406c4f93655f5e88871331dbe14b72f60d0e5d448ab2dde0e85ca0b4e60fcd16a6a359bb715cf612d0b095830aa3c7d9f565f669c68f4d73802cebfea025d40a2b5e01f6aabde8987ff111b98a10fe18e24f029304afdbb7c185ce13fce104f18ee8c05e09b0f384cefb02917db24262f404468bdf53672693d5dab72aa10e026acc3fb5865ffd1d2062ec2178f546edaa81e6848c5943113e0e053d198035541d24de9e5a2aabfd0ab1214d80a9613123e3745f89e66e52935f99cc0513131d2d4af112d0775c6902f22510b4698f8aca3476dff69a207f3ffa1effa606db61ac0959e609334500da79266ab03f51e236157f689ca4771e8b674c1a367a9ffb22b92b6ae489529bf7440d2460b5514b00cb051024704b934d942babfe4688f4bfc94ff3587985a4ada0a1c03af07a6a106f05074281b419b3a0ad25f48dcc8ca528ac4a43878171e1f2e10f3316cbe0beea3331f9e56d38197dcb40352c970bf6cda2812819edc3392c514b3f910f6f48af63a42ad9734b834d3d2b45d754312c2f42df367c597970be4c9128e05be588dd748622ce19aace0d2ead061851d02213d2af164d018a90de9c035a8fefb9e73b7e884e0c4d17f82422e6d2b590288cae99f90c0cc0bbfd168cf5e8ee571ad2bf3520be9e1b3d0f8edff4ea4655d9399ffe790fe1ff5998c8c889610467976d13c231e661ca6518044d9f9cef3c16a367fbe4fbf9be489a7c41c3770ae7a7aa0fb5f50cc399f2ddb915e288b6ea1a4c2e5b0092f8fe30fe95f9ee766504c10c3167be232af96cead9a8bf1faf9b44a63998f38dc1d4faf488d6c71138f5c5850affccf3252fcd10a181e45255f23aa80d6d37dc19d90d5e75c50c3165b7c8338c72cc28b49f15f3d8c8de3beac9fe8b4875a1c0668acc0e4e364225fdd12f9b34203c4a27afe0a9a4a8091d68c65aaa141c4e8d295ca90d47711d952de225513d109de9a0b6831747ea981da48e5c531946083abf1fe52e52e4ef53b5170a150ae653334b07ef0e6e9469db200929732ab4570e6b8ab58a2eaa6f415006803a29400e08da8dbfd519d67d5b5463f0176c51d6b9c2ef2609b54d952148caf4c69628a6a443ec2b74eee03094bb5f395eab6465092bad6687ba406e22e69dadac8460433dcd899619770077972e0bad9006a8fdeb5730be2a18cbcdc73fd00251683a1cf2d009adbb49bbbe392c467d7488487086cabc673e6d5da2f390e69ad38a585305f2d8615e7ecb01354f1f44f23a73bec05d66fd1b00d689c2a777a33a4b1d17c0e2cc287c5dffc626d97218e8653f13480724b8e4cb433f184f0af5c61a5d493ecf5e87539de78c19de42103cdeff1205143c3c3b8b67d0b10509e4936c5dfc083343405cf1239b62b0d14b668eddce605a26bb07a25ecd51582c961febb7839dc1fb10b9f4401cefb59568ababa686bc8757ab024ab1ad8ec1623f5fb89a03c345d561ce08dea89bdc88bb46171d5fb74f6a1c0bd57f7c43984a5fa0f9b3fbe896b83590e5dc4fc8e6e166b64339161fb44b805cd1ece7d50a06e6fd1dd871427945f7767ff51242312dce4afbb0b907bd23fb3569b542e32fe43ea6ddedcef7030f1519b94315d28336966e7934885d292fd1141803e7cafbfa23fdab47a75a962f357a568a69990822917a455f5ac320743c740bc18d75bcc30b18ac99d8e6011ad9c83f2b54cd035abd21dc50a47034e48688c7478e63544b07f5716aeecfb9d089a5af014735ec1aa49d51e6118c5a1599d466d4bccc0ead1fbbdde0dae75facfc20fbfc4140826e4faa0aaf3138424d3d5f5f63e6b23463302b7084d88da674924ab2a2c4d03a248bc9ae1f7d372f2c736561556f9be11cae448c6de9d1c1b92436ae36d358a7a1c7d7838c9116c1e2e9499a05367e530eb2f84c07e7b779a0c8cc4e167db3719fedb92b88f6f2a2cca7c27679190e6dd65036ec156bf616fcbf89d055eee0b7887069c3ca193e5262ab85d3189e19d59bc0c06640b0952da69fae0dc01430d2cd688eec34d7124565cb1d8acde710b94cffa9d241f77c71862620563ee33c0193e7aa8d44ca3d3f88789a883809837358945b6a122e9d852344c9b61cd1a25fd160ebe4b2943dbe6a4e8cffb41f3dd7a73ad70654a302bbadcf9a3f0b10c4045a2a10c59604a127564430af0b36b5b87827c6d38f11be6211d7123b1ad85e73e1500e375e0024dd7f07966baa4f9ea6db39ed9743f2d3116872c9fb693710b783770a70dfd8aaf590ac5a0176f55dbf7d108e5ef9d1adfaed14717d8ee775db90583e4c4550d96f37a253cb3074a9a4
eb426277bb53d63c12e764ee8184ee15a5143765f50718e30eb233958137edd57919686c7bf282dd3158a40840be32b755ad2a28d0dd2e110c02e4df9145c401328ae002e2af5a8f13adf68649a0ece3ef0a28b5cf7696b5641586141cf65c4dde37a0ca2d7cb8d5c38499508aea8d169c89a81d459aae638470fe3cd8bb8a7a6fc2c479a430d76684580e033472e0270c34bf2f2264528800760fd4063f1563395683a88da392b5b176ddc7e0e65ecc0cf67b5f581eabc03f6485d04b394ab4645ccd83236a8e90d52b842f32f291c9be299e81fc8f5e1d4f1d72b44bbfe101402aea298570bb650b6415772a86aea579e24e29d1b5500c883b8500f33a70056e3066c5f761ddeaf8a0d676604137fb6a05d52da72639f6c17ba045baeb127543ffb00212bbe446c0913912c496e4a507dfe7bea9fecf15e2775258f27579a27a19d2532f08995e395fbc4bdc4acde109f43aad019fef4c496315c25bdf52a910b3ae2c8d19a557bc5417bb068c14a5fcaba31b185aae32cef17c0919696d7b09bba4c0b2b62885b465f13fc1684ee4a73abf82ad0ce9523a9ea3e1356f420d583a553c01cd0a18d693f292bc944fb61f92ce150d681e31d059fc99192278194c882326ae5b7fa98060ea22f336b7543a2732b545296c1e4abf9216abec19f79848d99a98a0bc54dc73dd7b7993a09b3d2e98ad2904f13879f181c7fd5b3c397bcd26e09c2023c8b09c9902f51b942de84f5f43ebcd3c042c8f103e34a94df687c30fb8038d423bac48a01f4c34e761a2f8208fdfd6124f3823141c24f0d74b10fe82fad5c4af11932bb0a04b97c8b398dc30636e5724dbc26257c503a54c2bdf59f3c3ac3a50cb3a8b16f98c49fa0bf44411a4d9b02ae01320f0da1954b1095494ff449a20c210f03cab389f8c8b575fbdaa61d319f38ffb30c7d32a434c0efdcba1f202f04055c9d504d6a28e77bdb2b59463acbf7d7b0edc2156511d00ff06b294f13dabe82a5366db453dd2759ed080d0c1345dd484149126ae252a7413d435a268cac2248b645112300e78566921ed625aec13be38b4dc86f79f0a26d43520b21775fcdd1d1f4bbe894f8a27fdcde3fe693630a48c3d9597e5d01f25ecbc8e7f7ff1b1572f38c053e48da9aca9e5af6554f2edc1668739a15558b8d9ca491ccbe2b044546fe8a4d448dcb7075cce6f28b77e03277d3ae65047848f28e10416b4422fae6b0a36e7f7b1710ef0ffe4d94f73c77641b8535f66980efe7a7a1767e35d28be923bd8457d405cf8b0dfb09987391c03f2171731b07f924213b974d1d19afb64a2af0aefc08483639e36aea67480a3a84fe48983d92a320e55d7296155ac813a1f0fc36329e9a4f13ce9790ea6d45dc8867d0b615a06ebd159062a62b1301a9153b088d2e173d5a5509ffc1959975d8f584718828ba5eb0fcb45b6059139fb2ed03b73a388590d1e35ab6feb9d3d7d8952e0f7337daa4765a11753b6ca511f8e9c69278a0be94a6fffac51ad740783fbed7eee179890bc0d42637b108fc48fe43474698d3e089d054c80742c7b1889313a31b1d1f898f426c03c2747c2402bb2e4b16e52861c12425f5def23c007785763728b293bef9e32731a4531847affd80766b2169ecb292117f2ae48efd16d7d3371bb317364ba8f109a3a95337691affc8811e003efa821d3f0c908b79fdcfa762f51102628bfd976dc4545b4bb61829dd94de21a88a87cacbaa9cb84229a160c4211c9c415538d5956b5a21f2f3b09ac37f227650f4e3154a68290aa6b0291d8dfd99005f5b1fc20be9dff77d3f9cd0a82eab10a11e148fa38a27ecf047f39ba62e460db9b0a9fdcfa922e434be2851d16b862b46cb0376b300876700eb1934c390e80bc43280dda6d50c496e0f1214b0c08f583c81560add0d9b1dd7a6505cc7b16b11a3a937c5507aa088506f130fd31faaf72b071c2409e7828a40c049acea849dec02a848e1790ce75d414284d19e5a12508a06a0ace2a5a2f108d66591e96f0feb463b0e16c6eeec36b9d836881ae019b2b7a933f8124747bd4c14eb450d8456d6b9e0ef8b2b595ddfb81aca5567ee1f4a381493e0f17c0077f4a963a508f6604e6626d769ced763a7cace67d94899176c10d4c08a496321fde17e19d82a9902e9ce54fc3d291dc9d6245fecb9777c9613d15835fca1d2d3f1eadcdcf8059268a19a2fc347862e2d1790e3b3c56a0dd0649c6615972766e6dfa17dc85242dcc2b519fe6a03c91266b3340f6276f6895a4d51d567b4c6123825ac1bbacbb2ef00ac0fc0487f8b611a6592b68ed3084d1ba9833f05859acb0348367abfc86b1c021ae299842c648fcaf3720ef500a2887849026ee643b9013a36dfa4f7c173b2960448ed14c1eaf5d7bac8a6beb1aecad4ad908732a61079f576a31448df5a3707bf59e6d94b830baf211ccb4b1f12a63398c5aac2b7978c5a875c47c8bb4f8a7be587a0bd00e777230517a21648486bda9d8734cf541d113b
9c24c41e75fe646689b0df085e124c1f3455b9b28dc521603490fa46912f3f846e08373231122e0122d5a9f058d2bfd1fd1883fe26ece32db1259fc291ccc58e393db02db57b07bb1b12d106f47fbae053231f451390633819822ba0c45c53548c6c03875e4d8b47181a923b9e3208bc7bd8747600889929ecb4ca44b38b899dd524fa931c2e35325256c338ea1a4bc2c8e30fa516244daa5235c1509727bfaafa7b30b42b7e80007df1cf8293cb4728224e1f8363b7b1823c1d4b73be31b0e2a2eb5af9ad17bc33033229bb42f8407a145374d6164a56a4672e7e39ab29389774cf4662c524f2108cff493068d4be29168e78607a72424d99ab29c2af05a0dd04c79955aa014e0a1841ac666e1c47080fcb46e737fe18caf92d1c73209e957550c88c8e87d1fb65759700b6d65f48435b70b090f5ac93c5513cbe34c2fb4ad954186016afc18409b9d064add2698adc7cf5c4025cd85f5bde25a78050f4b0aa22d0df36ab88873e770e8ba41c73d704e49a2f0892a60cec406198377c9ea8f95dd3818c9a8ce2aefdf2fbf0587361e96f1c37021bdafa14750b5a5542164b79667b01c96d8173a423c6966225471885981c38ff89b6f61f656e30cc3e4c4507ce23d94a85c42b1617b2549b9ba9cadcfe87bce14569d16ef26e44dd9f86138441aa5a4a01b11b1bcd3817cefc42fac7824918bea82293d1722b9988fd9458aebcd56b2fe85dc579ca72fb51395824e7d5a185809ae88086baca0638f332677fca7620134e684f6148b303f9d27ca50c972abcd54bc49be214a7743297edd6842164a91b2fe6e9c8d14f2f5ed0e196ad5d618f71d96eca0314a2038a8ca1815636ee8c470ccf3de02198524e93066943540f49393f36129d7fe8db99c9210372ceffcf8925f06c80d3de221267242e48aafadec4d7db57b731699bb8d2f9afaf7770b542efb075f3dc399ce3aae2840a449baa101a48c6a5c8f072ba6797dad70794d8b817b25e3edc19fb772509afd1ae1ede5d556d6df05954f3226a43126fd3bea0023f0a75a8b0dfa4e1aeb2bf6fef2ea11cd174ed6c63a7a78a17c08ce3488bbd16a5357088707b51058ec6049b8cd0e67106555e663892fe2dfccdf58460ed7043f2eda11539e4f7249896f9919902cf4684850fe63884358b7d0448260afdec28c241ea9f4145a8a610c1a220b494e1222c87375f60c1864eb35addeb78b6bcbb6290ccf24086179a24e339a7e33301a9b64d83172b5baec21970da1e2c229427d39b4f77cf83420fc333d841c176546d70e1bdbed2e73d1ad52e581055b1a12e2ff366f0084362133b4d1db7f99ac2a9546923199c5814f321485cc12945c8ef607dea78192ee6552936c9e7705d947257be0b393b260ae6e873a69a68902429678f8a89aeb799a4159c73b881247b728fbfe8b18e701b30a5fa09869a20df9893db36e41ca44e8e9d50b48fa2f37f2e911855e4c530161787335e4e433140c9271346be01d58d19163f50d7ac290a520e836d1104214d108848846e27ba1e431fd31cef9f377264442c835c6a682069f510db531c175a309e4bb04cca0c6c6626274448d1b090995672fc9de9a72e547bffe4feccf5fe9aa4166d70314f830a67d80188ec734e07221d64fd569415659af4500a261b5247bccbd70269846d988561467f433439b758c68888347d72ff05b727c95558ca0c7021c6cf8152d7e944c06134ec28bc7b8116bee0c71d5c0dc3c8bcf4f8652fdb1828193f3f70cadc7a3a28a277d9b4229a2004b682ac2adfa01a7a1703e8dcfef7104ce84f36a2e7e400de40d1f21444338ffb941701c279cafda3e102b3b42c11dc043e333a08c1c7c883281344cb381f2f4f49105c11b18867bfc4fd2b94f784c268a843f17d0e3c2245cc951b2676988df89df944f66a2a0b0715d71ef47a61404b0ebcc106c00fdd8facca6f051c1629f9c7899598eb18a5870d93454a6253f62621431a2d93bee75305023cd8bd2eb4ea5088678b0bbcd4029d72f5fd6c1ba7e7c8cdc1ac80ed5e03c5137eb63521bbbbea756cb31d8783b15408e68e819053017f7bb11d0594b0aa9c7faef8368b8b55b18481caa7cfba453ee983951a943baf86dbdaa82f106ca6736a3ab2d15d6e13cc734d982fe76cc7406a0d54a1c5d6cf0ae0e91a4fe5fc6a243ceb3250fd68b6ad033183fb85b6e68f0c4c2a4488c2ee3fa67d42eeb4a4712fb1712528b495c20287471bc2934445a627061a7c36298c5ad738dacd3524e96128a66051b8dd8005fc85a166905d53fffbcd350148cd0225d8a13cc40e8a36360e77c519157d990e6452b720689fdf3ec3286a9615b93fcbd03f22639c957788b7d0ba6078b87a34b2b97166b009c7cbe6e8f1e0b323dca26e79f8bff1f7a4a866161ed6d77931f3f2f40f4294e12769a911708f0ebe531dd4025ae6b32c680507aad76f2fc28791dc8c7179fc49f15dfe52a1402dc7c7cd8e4c18663624ffe2b51a7fb05993efb9bee
9717bd4682c664edb9fa78b22da1f1c53e3a97b31900e0f388c7ea32f119438eb8674d829419ebbd525bf8d3b539365abe55bc8d452376d841399ab75130d8c19fbb155edf17dd8c940bb818ea1bbc33e32ef18b9ad09c9321daa9eac5e555f7f241461e0ee13afb971b16c8f357749e36387bd82ab5525aee8e227c5f59f6562017bd60f4ac3fc09b1aa48d1677da8e82139ec5a7d23fcd2a153153896cd214021f3b6af57f005d3b45a1bdbb8daa25c485c3e91a11e64b8c297ed192b0520fd7194792c6e50bf8201beb34dc1939cd1dbefa0f406f088d14332e1547dfdd42b87ac199e91a7ab9e00bca95605af4283e189cbbac642aa4fb8098a3e4b3a00d8fab88d0dd3602841f255d906c3bd59805014fe77fdee99f40e26d71740f9a3badeedff6eb7d048ca114bec5f654a5443a3ffc3aa453470b1ca1eb354a7f151bd6904b9bb31b7aa18d363fce30fb7b208b47d26ee8ee29b0969f039e98fb6d6569eb7561fcddf953c31fcb97d307531caf5937de168b26112c690582884be61625886d720556847d7f81f4056cb8194ef16b85472d1e08fc622859650e1c2c9cb0c6937eba28dbaadebb8ad357f95a332b422113e6bc2036d539f8b039f963deade9de4d5d1a9c8945036c778a5e7a381edced855b90cf2d13fc7a2009285a4971f076bf21b474ffd39f85be963e81bb0164199060a027d1eb787665b917b5ee2d949c97a1509a515267b9daeb2573391643f45ebb8a349c2336eb8c87c93c10722f58a06be2e0688b0a5923537b0c7ff2e6fffd069c4870ae2e49142ea51601498a05b6ae27d61fb270d75bddf4e320ec2a0d52e316d0d9c36d96d82eea6042be79ef804eda05356ee106797f421dee8e625630ae60c36dea8942a1a3947435a899e3bff4d1d59b242fe90a8398fdb01af9589e8967c9d29e15111996c5a85f62f2f1df7a7d25c6d477a551c958624b560c681e7b0b520cd771649abc15d917e5f0ceb0c75c916cfb1167d9929bdf117860948e2e1a924cdc7b346a4324f587470b9fc5836a310dd16bc4baf8b12688531a4a6e4d6adeea3a659f5404eb8dd0889efd8b6c83082ee301db9d869f72ac3fcc7ad9d61f17b67338c9fafec9f12ba6bbe870906ac923314eef88594ad05756e4553ea772b09d191dd3c364451a3de068ef6f51342ca8bc47933e145f7cc74319e145c166f720157ce5fc183befb28f196d094c2953da94f7edcdffe1e372ac3f0bb59c1cab6f9e5fec29f62db87d261002b30cc271c1a0273163399634e9c02962e16b88a650146326ecc84f429bfc4fb1253f35281d232d9acb27e8bf86763b3afa533053fe24d225a75fd2f4059128d6b143050ca39f49d2584203eed013187005e0b7b68a04ed2df52978f35661827bd26a32ae575642d2672019c5623367fc4b398cb97412226f6812969d8f8e788a5a9b9e156c843b9d49291e31556dc22e5efc148f68877c77256ef4687f9aa3982b3fa45b57280a050feb44f38a7a60c967ccb330f7c2a743623b291eed143d8553dd33a754c498a85425d9483a497dd689ec5c625d790771804c5af5b24df148fd694895717df854a37bafa45073430e1ad14ae7e6d10f215860bcd5f4dbefa481ab57b1932eeea51555182ab4cfd149b6c7b6f5c89ff02586f8bb2185a7c001ae614bacfc6534ce4aed17b99922205830dcc898b75df3403cd58337851bb54c1047b8d1c03b167c1977a78d290580a894ccb8742142b39d8acd43df53cde20e76a4eb6a76a6f5dec6b4b1bf768a0b4963ed5862b6afb09d0d5b2528f43ab1f643bdfbda34f52d35b48489f1d56647605cab3c828a7611761922ef556cdc1130a5cd8e7a659a8ceb0493cdb29b33a279d79e1737df53d64d2ffecab7ed89ed721794f789ba63acb1e8aedd7313376416a6c7c51e8325bef6b39b568ee21e0e3241f0ed1ab716d11aec119869411abe8eca0b585706f58115052036516a7f9bd02df8098980db647a4a6d4dd7da64cd52e9f47b341ca8a8a02b3d10239278bb732727274f37f8f4e4a76a4f56d30d0b5620248e9cd8518100e011dd48b00af17f9e33b201f2ababee980264fa1c21a222eb8814e893306f9704d87f41c767d8cce6974db075c0a6b28c3d8ea178adab48e157428aa1afd0c0d7c44a5e733dc4358bd0f4dc8aa412e6a84171bc3370cf39ec3bca7015c936d0fa98af53207ac6442128ba493f1e1ded7bff1154c26f94473c621736cb6c0ffafd702916a6452d24be2b22fa267a513e7d51eaf50df92b5b0df41e850c2ffa5f855074f2b4862db18b7a9488bde8ad0a91e4b2c39c0fe46046177bb2751208f5b6621854c1fe0e413b11661af0d4ff1c3a4f1c5d5b6ad406dbc762ed8e2631f60298e1c2a4bbaa071d88c5f6c9960b8c6ef31f0b658f2cfcd6e107dda75dae449ab0255bef86cefa5459efb2ae4a26e7f63c5ddd65fdfea374d910d0efc6b305d52f5646cbbfcebde79362c62a21c8d8bfa06075d4b1c
1efa71b6d78c8519d92ed59d18d1973c4703dfd2177cd5b812bb0bb2889d67346bfaadd0fff746aabc5a76b2cf98ce7c415e5d95469713a726559ce545c36922af47a4ca018e3aa7b1bdc9ca0e7033d27defc60cf097a3ebc75829e0ff49e70f3c33d27eafdf0e6843d3fd4ab03badbe0ad137b3ed473c29b1ff69ea8e783b727f6f850ef096f3fec7da2de0fde9cd8eb433d8ffc4d33be9554f258a8c1cea5a9064d9dd06bd77a32f1ef4d6742ad48d18c9cdb9fb485593699cf363961b0b68cf5e94b6b6a8d67d815780a4cccac039f6a927fa9e43828a0256d3e571b7b700b0672678e3ece79faba9350a1eb0cc2e0510b770ef448c62c43544417038302b34c092b5899f384e636664a61c349a82a90f74d4891222ff5cf7dd3d24c159f167104e957a18a7c0a4fb756d4bf56c8c5c337a773018c9998e525e5460d2b55e79348a3e3439561ad2cda8f82ff3a5335d329e73283fb6e8c0d7fe8634c54ae792b3221f618d25f990fbb7fc969c6c0dedff858383d70c23e007c3ca9790384aa84f56b6e7582aa52bdef91119bf89ba73a133f50cab2a8fdee86d2ff60839b315f670f9cdb667bad1f1975a2688ec6c800e68554b2daa69a069a3e690e9e988cc81baf9ccbcbb7cdad5a189479b693d3d94978590e05877385850626bded352cb5b8c9eddb3dec6f59cf023109b47acd3f9474301f1d7edfc3de2df34c2086406bcfbca3128eb383c3b73ddadd729e0562146cf5cd732ce9783e3825e7cbc26239e32d0e7f8f883bb6e5ffd22aa931c72edc30c8649c9b41504b701eb8e186fbe00bd7b87dbfef247ea013089de8819c2054621e708250257ec0898492f8414e2c74220f7282501279c009844af4014eb0991279acb44fe5be4a9a19891ea9d457a9fe4a6ca6441e2beb5fb94f658151288e0a94c200886510ddc60b82315478a5e9f15edb1e49aacce619cb18a5d073e6f3fee3e43b95c12af390fb7f899cac2b7219261566ebcc4ef5b52f9c976af7ed475c2367895d192a69871bb8c3d639fb870937d87e33e23aae2ab1047e30d71e92f46865b013ac3494793d56132b7a64fe2cadb6908b6f699dbacef9a52e1ce6bdad928915eef874dccc6e7cfd69ae1d2eab0b606213b1eff808b9916df0fe6b5e3b242b37e9caa098c13420635dac935768caacb595b68e8bba695d5ba9152301345019fc058b46655c87e5c4cafb071661672eb3e109fa4dc849c4237b408eaf9e2d6e22f6255f1d37b38c6f7fcc3586246514e232587c52acd7676611876515054224e41ee391727824bf2e8358556b804c75594e5cd990f9591aac3a6e70ade1b650a58880a08032e8172c1a90713d96c92b5d1f98051c5ccdf921f584dc2f1e29c102a4e9ec92492bdffc755c817df8fe716e1b3e5b590897c1c42762eb915948c982ee0529e977c3cb878f7a2fe412e291fc7956a867173711fb92af8e93acc7371fe61aa0d3b2106b2bf3ccce693b870d21a73360673e161b04ee47192cd10242f680e463062da006c3145a8e106f1b4e643adf6089c271813cd62162060e307f7cd8598580115f5c6ca818f22c0fc9620f4c8f7801ba89b402ce17381cd17d8fd753d9c3a0cb8cf93c84398c91a2e47ffc216361ee5c19e8d148371a65068572f55863d7517934e8de6339ee619a46c5a7798ff1acc9465cc1418f343670f950cf561b744d727f0d339e7c564c5e147ee3bf811bd1ee0acc08eff178b55a6f0ebaf686f6b703418b53fde8cdbabfc63c92f276e0ab221d39dc8a95743c18804c50f5fbf6e9817e1dd9427b90481fa444adb10d3932ff9aef09fd3e40f305cb2319511d49e36de7d8aea16d17089d90e23d3f99821e35a2f326ace249dc2908218201cd09a1c8b3dd7621fc74faacb70b411418110651610e3db0a31e0a8854e294f9d47dc64e8d80bba2c2a248e905a8d641e9acb0546acf7d237e4e473647db33254a81d5991c3f89485f25066979479daf99f3664634850babd0a2b3fb9a31bc55d3dcf1d7ccdd8891daca60308b3e68857f8afa7da7690354778a11deebb73249ec3909bd150059f46dde7d9ca94f3c91a212fe89f65154e2003323a15601eed0a3043654698834caa96c40f41bb9c91d1e765fa554396c01d6f9db750eb25f76c004bd73f91e3f3e2751bb53e2d963463a6f9b1e5e608266292669a29f69f6414ad0f3a41d281b5ea6d9f387bbd4c2bf7a6fd220599fef9e495ad286467d309f6eb683c4e63ca843096159e84b41827c83e4c481f51186de0b1d8094108ca4dfc9cb338e8cec12bec8fb8ce31ab0fe68dc8a9623f05e627aedd12bef2f233d2e7b0074f05992dbe45fb188428ec1da05bdbeda0ae54bd55b35c5945f1680eb4ec56d60c21e5540bc04b9ae45ee6e55b09cd79cc94f794d703f9211a63ff84fccbbf1017a1fa7c86567ae4335e76e084d27e9e370fd7a568da6982e88005b750376181da2fc8396679efd730943ec5760517070e0c3bb4fb48172
018239e3f7ebc31e485987015c553fbcbcabbd901491301de4d24ccdf6ba955c7fd783d7ab9ce7cb582ad5a4a378f89a9f53814daaebf59a734267ab826bc7625e2896f796a0463bf43cffc9d5aad688e711e442fb3ac5b7474bb844ae8db8e1619ad5882906070b4169bff248449a0a7629033779242b79895dc8c0a317e0d38f47eb332c25dc3f24e8ba11192cd1c4e4e79fcc179303944000eab540b3a1926044027bb0c693accd648edbd1b0e246f8f1f3434450f805f7db9b68a7474e60fe9e4869a090961231e043818508209ba6a4239c5b452dccc2a359c693a1c92835052f37d787505c5c7a734bdf450102e5861fa6e9349200b66ef990100d08d3c84924ef842846f884b5598f47bef75de45ebc8db0ceaf9b264438e77b3ea5a15d028309079e633f97b1103b472332a8cfdac87828a0100148d3146984e3aaa809597868ca78622819492556436509a5302e3afd531ff638f3ebf911ba1bfefc396dd29a04c4fad302b51090da66333bb62b4730c846b151c7dfa8e4382525c79c61d902fc23033a2efa6c5b30161aa703f51ed0dd263b6ca40c9040cc2e373384dba4be43ba7965e9b49a1e7446ae7143ce58234479739c4eb9a2d875beadf9711c2540b6b6fff72880fcb5860d43ba640482f1bfec130a6240d1ff2da4eddd6dcbbda54c29a5ab06b306b606395606071234e4ea338661cf3cfc05685a35546065e4700bd863af636bb2c7bec52dcfc15618f60b45c54e547e9c738996289bd9cca4943e320c1d8cc0e5cac1e5f3338da872ff3494067988a6f3aaafe0fe25aff35042d0f9d987d1155d8e5a1f3d6af4c52fbab4f9b594afa334e82f7841d4aa17bca0d2a0fb662ec5c1581fd24840b5ffc142224df7fccea459d991cafc718890668d9e3f128945b66606bf793694eaaecc8863cfecd17c7870979256f5f8d3743ee4525ae58ab1677e3ecdd2b6667b7e8f62be001eb5c2ba8f16a378f7314ab3e650f7f138d2e00602eabf95e34fa6cd25cdd298cce728a5411f91a50cf1d0ca308fcca5a4593cfc5dbea4594cfc08b7a1f4d30bb12fe4f843f2d03ffe34c84722121cd090eb9f9fca1f4621953ffe84510978e45d11898311799a95394d5fb12a7e25cc2de817958772e08f4a9ab597125d2828f4c5519a35d475359c471ff2ab72d83f8e04889934b8a441fe26e2d7a689997d1a0a734777ff6ee0d48daa7d3cfcb3cfbe39d4347be3659eb62da9bcc953dd3ae79cf377c6e747ffe93a26157a658b8e10e7f4d1631cc35c268433d5bba8fc944750f943940656c59f7d9ccb662949a3975d8d1e0d720df2c8f38fd5b78f7d56865b2a6b318c1a72f59906dd5d9b6f33dd7dbad7d0ac140cb3ca90ebe7d0b1a3593b002694c3a8cd2b4d300d72fcdb4757b352a3f759217cc4f6dcf66157ec875be0c79ef999c9d68c7e4e66bee2fceb935763f6e1faa7f24361d2e354ed237941766ebaf8a31cba0fb273d37df4c29d1a9f7d580610aaf61c84aafdf6855d479ef6853baedafd56c7e9beee354feb004ff5ecb793c0de74d9119557c68d414fdd6f2f4783fb91c877b82680104e9c3859c28993259c2c111404c43cf48e06a54043eed9c84d8a2e7c6b256e9376ac8c7c8f5f0d2bd36aa02021ba20d05bb19d7e776b4c540773ef22a3f4cb26af5d2ba324ca505637e72677f2dac5153b01b596ac5c60a9543299be64fa9229fbd2e9e3e9e389077a3a6ded7299de9bcae9c3ae52e5f794266ad689eb64387da9894a5b77e3743a7da6e9459407d5d3d1a7b98e729c5783fb92e7e3c79ca8203010b60528427ca541d39b7e8bc1c56699d8c44556fc4a7777130de087496990c9c5f3baadb0c2b6030d39888356c685a3dd45ac348b05fc47a182808280dc7bdcc787b870d012050108db024769957c2b3c458ac7412be3a294d24ab9096faea88b7851b4359433b9705dc454642a3215998aa8e9a9899a4ca6df2f9e7e3668fa138f19b77bc1f49d4e5ffa4e279f14f5208b3c7d202884f3b61514e4402e0f92a13fa940a62f016dcdde3095fe06aea66fba9c66bbbbf4a52f5c2a314eab2622225aa0adc94e2fdf5f134a87dea10339500bc779354ceff252e9b713c779dc3b68217a89fb9679f782d71d2b63c2bc49a107aae9f348132386c7ab2a71b4f4e10aa50f4d35e4a0d2875ae5d29bbc5d9952e96d28d5d2b73fea64c8bef4dde00a12aa3e3dc44246313a18684f69292a7025ef86594d4f9dc685b4ca954893fb380f3d541ab4d23a584657a1c313fa8ab5a24a97af895646fef6a2db0eb435985f297275bb1c0825651505845265f49703fda0b9e8f4dbacd3c7452d5ebb942ce1208fa160b5d4c4936d41be8dfc66d2d4f4ed6a170f0f6a50bebeea40cd4a959a05b69452b3dab5a355f299879d1234055577ca150e12e272b936171954d599501692210ba172329c3e06319466096d4d77e3f4f15d5cbe9087cbffe07a45e9e577152ba3f4b28585a65dae22da1
ad34bea6a178aa882314f5e8cc1b978b25297976fe334ed426d52b68b6c17d92e2eeff221584f9e0335280740032a3f1e271f3f28e7b1e6979acfbd4ccdcfe5f3116be8afcf815a40961893b76365560955dae50ee4342ca455f2798a5c44aafca148504a1ea8cadf54f0e6554192481f2ba9fb6a5819f95cf9b535a3cf05edfbc1f2a7c7754a9c91860fb68571d44712bcda6b0771b1dba7bbbbbbbbbbbbbbbbbbbbbbbbbf00b4ec22557a9602c00a281562b89c5a584cb444eab86da461dee4ba5f9e8517f77cf8bf7cf7384d4f7a24005db798b9bb63ee58e6a8889a93859f01183de76ddf795cb4c1033c35fb97e9f40897f258883a2409c08b20098aa469e68b325182824295af56c99f94a250fdfa2ff4a017707abfa13cae0278f9ae9301f5f1b9ee06ea23ea230ac5420a002b389122ad921fb7263b82f2a273bb93fc5fbc59630735140078dd517ac86b290dca8fe135514f39794dc54a5f296a309cc775f01e779f06ddf39c88fff44b09eaff72af21077124fb20aafcf674384d0d2d5b7ea67510e85783417ea45f2e9965d9b71fd91b59c63db21bc3be10c3b0974631f16a1206cbccd2043228210a6c85fb372f20b054d028b8e05816949381392666c64830a83f7f18b3c2f8933ec4c470516403c0eeeef2dc84adba6fb32b9fcfdfd360f73b9d8e11511d7472737274d21ed22c225387d34ca289f5c0c52aff25558f85d852d1d62c606be29326d589a8fe5996750f426064eda4ca1e2b69d097b0ab41c7a4a04d73e8a8a1591867ef5d0d1fa9516af42df105111a5e818b8787e7032fa16671f57d01315012356426d59368a2861c1485874226aa9e84cb471a3333f3beaa675e0e1debecd1275019b9a04c85ea0e60677256655b18d3477340f75feaefc745ca2f846c7511b632f98d7c66155eb07b44f2c3eb9d96b1bc8915e35d7e9d06f554852fe49e93b70db67ca60fe5adabe8b77157fae8f68528aa7dd95764c3d9154b40aa7c04c9c73fef8f4941c3597f6a4f4a51a84775f7f4f473ff641e5a3121586d1c57f553147d14aa86a0ac36e41368fc362e16a6b18e4ab09ad67d45d14336020cc3dc7d9ea07a6bb14989a9c1ffc718631c2a5aadbcb812b32ccbb29759966559d69ad1ca3289c9968d9c59a2b0b563a8069d56abf5928204ad56bd128780a26bb850ccc032a693999923b018638c31c618638c1851ef90acbcacc85c2557f715c5162d7944085cad5aab566b35b35acdb456ad181822288721e4459899313531337304c3c4c4ccc4ccec4ccc2a266635e31d330336f80df6c7e4801243b3f863626248ae981aec9ae2f3d34c1e1e1e1ea09bfde8adffa26ab0ffff8feabea208e3b7ba3f08638b1a3e900e8db93bf3b71ab469d0f99b27f6dc9ae1dc58f74c831e33a3593634e27b8cdfcbe28f11f377f7e8af287650632b09a624985ca0c5aae5deb2f196dbb466b4fc5b336c30d9b2f16f8521d098dadf1a9a41c3b7d56abd869050824705356cc915bc868c7095e02afe2abecb109f87b7fc0bb16fad1a5c39af5a1cbf18b9c210284ec70fe9aa86a4ca8f2d0db6aa9511e3aafab75f5130b1d2c263686666c57ee6b10f67ea8c66615f63f39b31d35a1d9531dfea3333c2affcad5618029d199a11d6fe9955f5cfccbca238429da98dd55582678b1a8fe007f929ca05fca00fdb06b0eecf4764808c9561efe78e1f03b6c697d856d99137c095533054decaef34cb050a82b0c4063c0c2144921b7ed069568a1e4c71851292980209266ef8293f8a3176225b73a5993de68f696fd32d5b661fd72af9fc7faab235994960f4f3bdab817d4ba37ddce67f8a9c0ae6e38e6cc70173ed65990bb39d3f4e23dacb2fb62aa222ca8536b96dcd3cf9ed8d3ee3fa33ed9469cdbde0b2656fdf7ca1d96f1ef7528e9e9e34f9a30de33e6e4cbed0c9b58a1fe3d94a6094b8a03cb35d77c74c49e46e8ca13c9d5a4e6e42529a42a34a53a0fceeee930a8f524a2c2bcab22ccbb2ac2b9291a86434fbb26cba5399ee1e84b63b23f6d16893524e8e9e744edbc66958740d088e93df4929e37c9e5fc4322a5dd7d9d570f722a4f8b109b747191d0a0d1a5e6c9bf4b42aa587d52d72d29375935b16bd99d5cfa6c5c99136f9b507a2c4341aaf168d2c5e98a61dc9b87d6e6adc08e634397ad239c520699d8f1afed9488baefdd05c6fb37e1b65bf7120f73aea625f84e1f3ce3f1a59bcbc9df7b71f8e23418c18892b95ba1af24bb4ba4d97ba7039aaf1b38142b33696d1c6db67d3aafe5985328b5651aeb201ed668d668d5bc362eaea763a7530a7c9d1182ddd0d1ea7138bc98573e97cd4883f514eb3fdeecfd36664baca089d23cf460d6f991d47da6270313a1f35469f75e08f307fec4bdd0d0a845f9fab7bd3a361a3c1b6a9425992e4b72b13923a15628dcd8a1d8adb6675dca67517044f252812445798b8848a28a2c47c217165a758c20f440461450aa83ce186a540a58a124248
cb111c404d1da58f1c16c67de8637ef83e9c58ae76880fbe58ae74e85fedc82fbe09964c2aa0a47b25e05373854b1d3fde873ac2d24735bdffde877b1775740fda48a1b9a7bb4eb1a9941bd2ae44ce735aa10de43ca71db243dc5b27dc5799f350294f7e368731c6bafcd11fbe0db742ce438bbc08b1b9d6e6be9ab2f0b34ff8320f8aa44cbfd6df9536a8fb6a763bc0920ad926c1eefb322513fc61f8b9b4b410aa99bf9ab9726f876c6c553fb4443606becc87a0f5cb941e60b2237c2df31ed00f824fcd4a52e0025c9d715f83b8af32ee2b10f721a9c92a5699bfb8efbdd357998e9f53b9aaa6b0721f7ead4416c63de863822019924243ab0a7ee51e2c2b908571ef830babccc2b85b6aaa735f86eadcffca4ccbce823ed5c11dc01148c8bbbe7f7d1b3a6cccfb579994680f8d8dbfcc2b7460b9b2e5cadaf7f1bd2d77803e44b030fffa2bfaa6157dd3fb983e30057fc7f7e0d3075f16f695187c0bc341340eba4750519dfa3df8960b4b5a7ee46a4799bf925a180d4cdfc2e867b22b772c8cfec91d2701a90b0c2df5f7ea7f5fcd8ff4c82a662a34dd2b1d3ffef56576ddde7f6f6fe8f8f1ca157e1f67dee50ecf86857536fc23b10f6a615f03bef71ee3525b58f795d48235fcb285753f13446baa83df61f86dd27d7ffaee6f4b1ff67df8fb0cd56cd8a766fd1cf7f187e0fed8f17bc0ff962bfa542331a26d71450c4a70c4ec08d347468309b8e4e0a4872949a6cf094360f0a4ca9091103fccb8c1861ba81081030b82ccfa9674b2ba37e15caed0c77ce91104ff29cfe9b3f10df06e27bf07bf2693c99298dd3fe5f9de7bcff36e78aedd7d00962beff3d3fd81ddc7bd47eac816c66131ba87bfb8e7c02ab4ff499336f7d4c55901b9275720d0efdeb775211ec6efed6ccc9115a85e203ffe127c581b033f67ad7776a812fd6e04b05bd2e2ab3fd4c2287e9d55fa553b18632b3a809a3b16861fac46ca5a6467e35a64e3a72ecc39c601638ede5b1f735e8ed674459fe1b6648faf66cb3c3acfb82fbd34768faf66cbb741b8ba0d8f559a85598f51fbe72b664bf04ab3b1bac561f6f3773696cb1d0eb3df9536926805e8b2add9a80e1913f4496fdb5ec0c64687d9fcd6ddfff44a19dbbfd25857dab030fbee24cbdda6b14dec419fb4768e544a10186cfb393c76eda336951244cba626ae432e636ba0b6cffb3dbe9a12a0373bf7c25c1ab374068dd952c7197b0aabffda5566d32a26402fa173ae550cb637588cbff3382f09241d8ec0b67fc993d676acd39dadc50fac2d331295ca350811a0c12a21e9a03195d1b843633b218d690bf3121ce6244ca1e950a96c96cd90730f30b45f3fff59ab65802d593ceae7d1091df61837d6dba9edaaba7cfd15e6e7914bea75dcce96a9419f77741eff543967fa793276e5aeb5df63dc5846ce2173f9c092162d8ec06064081d02e9cb8f911f252c3210c9c18e816d5ad4a505db44822368d00869119d8b4d8bbce8e0440a4850da5d14110d1a444e905d6355855d90084756cea6455d9c246dc18271445d6babb54e29a5d75a3abbf7de7bbdaee3911a3f9b3b30ccb9562948c24a2340eedb99b0c553705c8a47ca765dad3924e1381c816dc98d6f0e20a6638fd12d6b7b8aaa680ce3d2658f5cd9be041e6f90c3264a75ea5f262a8d2762f6b9c1ae5fad68af8b156d372d4a52daa7a62535b1ed17ed9b16252db14feda9a8b671801c09084770beb6ef245d2140120bb44aab362d4a72b2436d8b22b4a94d471152b2509a32a5043ab0fc556d463814514e65a7822f8c42f52a1bded5b0429398977a8522e00a24d8bea3b84e0cc11539a1a5284968af02a36ba8e46cf655490b99221a110000002315002028140a064582b160308b543db20f14000d7a944e7258990a844992c320c818650c3200004200004344668868a300f42ecb87a1b9e974d49cb14ef8cb25854c2b1e57508e47dd4c88e0230ebfcc54b0452e73033b8bbd2f3763a75abc7fd260dc8948d17051c9d37dc0fc4bdba5ef28289662fbd4387ca714ac8397582c3c20dc8a0fb0d1ecc5c78045018f6bee9d4ee3a41e172faa0bf2b1500a2fb93a0e2fc9b4f75b6117d3f0ac954c01248613d7ae28094a51e66e7b5d621e0bdd986bf26a730d2856a6ee1f15ae6b67c002452a94ce7e0e6d349c34f391229fab2ffb22f97e7e726bb52055286c091b93759e6df74a3412bc0c0338bc42177be672763711ba5d08fae5a2323769dd03e9cf42edc4adc84ef22f897bf50a5891510bbf51542660ff7cfddd99b5b259dd6e5aad10e0ed4319dc6c93014e34df39c69c1fe806ae284d9903c358c974d72829ff7a56d4c35a9364da08d02e310482db60812c9edb0eec2133320156535c4c5ff57360cf66aa1362bb26185d4f3275b22747209c872f66b0adeeac9af2988e3864706bcc87bb32598e2199895c6268a5035123af15e0f37dbf4ed7d1c6bda1d1e744d9e569933745ab2d449ed86bf
05f20e2166061a81c2dbe0359a4bd5e5db577ff83af49a9ffa7a7ffe6e972f21d40016dc3c5319106c0066a4939057e7060fa75f7d2fc0967cb54b84c36415b4b1fe2f02cf02781c68e146775d73b0ff6c9582ee420d56c6d8922d255b24a7ce52ec5794db65c81534b404d100ab9975848390379b912f05a0433e97f3affb83a8d5bfa18d7bdb40473d11ccde3ec7e269144674c88bacf6b023f757e148adc9a8fbb0e874f6b42cede454568556f2f332d573f0a549fb0a1264acb84cca6aa57cb982ca2696551cf8e17691aae79330cacc9448446c1c8528dcd59df0866d59d9303291c6fa6fe68ef012ac9593ed3c806bb44ad1fc76b5b2065bdfbd38cd7f95c889a0b5209a1d360506c53babf9212cd402d5e7c6ab987429b302db5e87e96b311eba3faad50b9ce12ff77e7bcbe95c05108c99019e0889c28bda09821dc681e1ba01d0a38f609a6710c61be0eaef5ea508887153fb799364032c23b12541665dc355a3c48b73d78023aba8a24ec357977a1977e85be1670ccacceea101e4db316423487bd075e7670fd63a126e56fb16be99109ede5021d0659525970cead93646615ae6f825c0b643a76b055e8745007805224a3d9854349e3a8fb1f51cf4d097a95a85754d665f8752f6ada92d21ff936f56f2a1fe7e09e47994bd2ec81ea31e6aa427aab648b91a0f79cd5438fe7c5b89a35449069b444d3c690c15c5333cb613e57ab77a91c9c553555d3b59f3cc25f8baf643031ead7be9f411b4adf0837efbaceecc00816497f77059338aa192974bcdc4122e0bdd9bbadbd5e649ff2721fa1d743e6de4fc8d440686663cd5916ae6ea27177bdf6545a949fa152f683294c6a8c10ebcd68629139c0ebd785fb0cf619aa5eefd27d6678f1e76fe677949bea46e54c4c8ad9183277cd646c0c51cbe328744a64c045735129211185530f90c87a1091f39efe26ba94549853409a0021918dfb9fb7542b6314bedcf8dd2a461d1d3dad62f5e660758c6db3e94d72a634936de20bda03ebac5d8dc91fd3ec3693339004270e8de11e4ac53356296421c8a7d6b94c7fbcb51138cb35e17630e5a5a503c0133dcad628312644582f240ffb20c07bdb4e9acb23698bc32c803b0dea2c8e102db48d894ce7f9afd28be02bc75b8095dd841d2e2de1aa7f04bc305e55339bfa5da1eb05b3d3d643235487a02992cc08b033464a0a1cf8d1dff9953dd506a0041714f463406564bb74413b4c974ff7ed7cc929e2293c743a5d018cd4604c87ba38e9887e5b01c06cf2a93e3ad038a209572f7779ad4f5935ca7ea0d8f109d1f58fff70ab36dfb6cb5bd247e3212a09581f7b1cd6ae680812e7a69b64f04959668660a11bca4141bb8ea03b445ae63128e72d5c539c82fd87558a00af117b90938ff6244b70d6f45e3892796ea2552f3ddf425ede4a3272a1157389808f31420901e5d569eb4cf7478880dfcffc05d1a032a948a6e8f4df92fe52e89bafb7188ac4678fc748251f70631b521dca5e15fbb77ec9a49f6a45e1f4869cdd7a13e7bbd8faf895a9359a7db259efa415ffc11a0027cbc61167569a7bebfb7e56aa619fecc53679c080e613f0b89f31d9eb4b8960f4f83f81d081d4ec66654f6ac1ee37804cd7e8fa63686d1d099abc22c1d55cabae0263070cd05255467885af2b87117387459986270c1b5900972b938df39fdfd0b0f91a0b934a1e9f2d230947cdb4117b2e933a1b7c30b99ab0231128ca09edc985d1f9cb34eb371adc67cd5022eb862ca0e70038d07fc409aad87376c7312eeb62f4521995cbf3700ed9d2b16bc3d92b412e450ed9eab28759de2cce6b579eece2de6ba5e45d69d32f8c5050644162129f08e7128bc746cc32d52d2e191756891b24c59e16f9ae99bfd8727cb1d30338b88bc336944739e9491d9c0c9476f2b78bd2e136ca5998448973d554e0e7c884210e8862ec1c40da8a288571bff95d8da44fd6bca7d015f277ce8383ebab225b25a785ff417cc6fe2dccd34a938b66b76a663105697184b460ae7bb1b1fb91c3b62c0e8526444563e62ce0826520819cf8b38927151ce6a06727352405ec896947ce23c05f17f07c94e3966f2bcf2e28349f3257766035ef9da53592ec5b4798f9de634961a24eb2ef388d1a6e2e37a9fea2c2538ff1fa3161ca8e92f59d66a067fa75714dc9139dfe97bc1b21ba723c6e76f8070668eed2107aa0c3315537854c84dd6939d8831ada60b9f0e2631c7d7771acb3a92106af0bf8d32bd1c12036dd0df719d5c0407b2dbcdadf5923048bddaaa256d788866fb1360fe43a2ad9fda2d266a24747db3d44a58d15f9bf0857000678e5ef9a83bc96e7c7b3f98c1630c17ddeefed50f72f1cd82f762e797d9d04dfffe73594df84695c3fbfbe4fdd8c6d4534b57e60f31f6e6b80e87cccad3b0037b51daf8740caacaeafbfeb8188d737f97760689c66f1bfdb8ed5d300a946be3203a3971383e8383ff43f70f35d8c08e9f9bb496bf09de3
bbc7118bc7c5cbe241f3dc1b8cc0e8919cccd3b8f4c1b209b0e2362bba509839ea76c181d87a6c3c4c8c11fcc1538985b02414499f9215cf8a1ed095b8b41fdafbbffb5c2baadd8bedfef9dfef0959c3497d31c619338d8255b2491e2ec2c953873dcb9a2f786a88a98ca0c30cf43c77e73afec09e0d64b7f28133509b9c134453afcb4fb7b470c3879c98eeca15f8f61811fe3f3b8f23c39fe506b5228f1dbfa8a43dd92b100d833e8b06f7178e14eea936e2435535cc5df35986ae002c2e2a711b6a879eaa77fad177c79233008925a0bde42489d13584bcb5d5c86adf5c28581dcb360fa9298317bc2d9cb38a509a34f5c703175171ec84828ae37a5a81214b10b9cf61581faea70547f2cf2e02d66a1fdcbfeaf84ad7f7c07de76cbfe65bd7d772e26970a52d30e0d17260b14020d35874d5f0973d80da42b1f7c6671dcceb6e83067c35b5b14d99cc3381474f6d3bc14fd8a2a6c54317dacd1c07ec11e38936d9eb34b7a60dad74680bcc27d65a91157b497852380e999990b43c2031cf472ef6dbdb511e1b36bb4f5e8e346723d65f28131b96933e89c5140e2285cad26c66688b4f7ff3c2da71e2824db2f5cc8b97f6697876d4e2ffef7862bc2d7ece05678b97fd61ee178e59bb45e13c49c2fc09dc256f76c4de3dbee4e517c30e2b26ad56b3a687e34ab48cf57a6a2d202448ea117f24e4cb6fdfab0164d9acb3cf407086bbec7d204d68a90a264baee949e22d56f3eec1eeabba389572dd0a36512ef45b95cacfc09d0db1f9f1216667114bfe96fb5722570d0872c977dc4ef75c6b56d991cfcc3e81ca02400ffa475b152f77f73702288457be24fcd2bf65b6d992c5e73c306f6dac8bd651fb79807c796b506726fb751dc4007d817fb9cedb21bec8f8f7f65450f0ffcab366b7e0fe1587697390b1326a1215a9663089b0067f19103e3f0ba40aeec821df9684c5fe8e0acdb0c2baf3cbb0c9a465e3d055a7b404aaae4e4522838e5ad4fea71836ed36c2d52810056b490a45931d3347ae4f639e143a028fd94dca4399a1eda0640227d5b19e05a0d19cbb0df1f909a0212227c12831441ba48a57cce1a8c4c671eff4f68a77221db39f4c6f103ca8c0598829f5ebb47d6a81ed4f37e8b34b1192b6cc137b49e8494e416e1a1695f3845e63a4df96f2b75fe2ea87115f97863a2ff56e7d92b82ec6f3eb9ed08045bc86e240710a99a10981aa509c41d0657dc8bccd5259d04c6bc26f8a4cef2c1dc5b85d9c4a8e341a233519481af168f1a4e01201d317c01543587268ab2be00ce864b3c92c670bd3dc7f8ca87672fb3f6aa5df17404b8f643aa37cd5d3890642dae0d78728bf17a515d94d834bab2f96ea26e8b79121c2c78d4782f573828fdddff4ba255eac28dd49bd84fad0ea891b10aeb47492f7642bbb5648475ad29420e0422447afc1224878ac9af4cb75c93354e827ad9fb3e1c8b9ac3cfe90795a4497c566e5f0d34ebac129c2b8ae2dadc5ae9f866ef9eb1565f63a867e4cccb24ab41da16eea41bc45cf0d9c19da2281f178844782111216dcaf6b7d1c5fe28223ee8152689da00ee1bd0aa3bbf28317dcb72aa1df8f663eafca47ed4e0488b31562d371f367cbaa9f6f4b9e5a6146edd79c1ea38398aeadc3fa66110e63669bd9b334d5ef35acdcd7e5165ca1db226af0e3654ec1d81428540505ab4c65fb18e9678beb427086c33ef1b2aa32df3984942f47383c56e012a710a736f5607d95a8198d89b97d919d478863d3c6126c02ac4d8e864a1169189162801d006634686d459f8d4d6b3b618fac6653a9696d571fc4e75b4422ba5855e46b80d7434ff6cebc6bd11d160c746b8e18791323a7c856880308e0992213e38102a0952203153824d2043613cdf4015c11687613bde5d954f4a826e103590e872880ae9911e1a4c6ca74597d4ccafefd3478d2f6cc5961ed65c27de51a4da47174e9a2fc71e1dbc7a9ab8f60f2364500cfff739e8f4c047ba13513d868f2626ebcad658321b3776d907e99d62349d4c4b117729a6cb9002c30a5024a786c79b6c88b502bb5a259e120c807c3d2154a32e53514282834dac3c6deb8e69dc0768b180ec82a9b5c96064073d0ecb9a3052addf7844de64fefab55be6b122d594eb0cbfd39a6c5a09f78648e5f292de8fc92085f932495ce80249566f0211c0823ecef0d9dcc96582e951d492e7b1f98d789da57fed891707ffe36c61334aa34c6e256cc427e779c5898868ff002d5402e98833e63346c4d332130c9e23f4e4a49daec93ff618bf6f69a544040e94f38c01401312271cc67e7b4e54ad12775ea976266dda58ba286f5b2c45d8ee521ef9f25bf6dda022b29f1383dc5ae4cd41e78b97b4e9becd02b21af02821e11ed75353f2c01d6f24b928aa5081ea80c01bc8d81c4f779150e3a10fdbeb8154ea506e232f9b618b02729de273bb959a40829dbeae9f9cc1fd047854df1864e314c6fcc17309c728afe482fa72b6c8faf98e
3bb5f48235413c1a6f10ac0aa0bf1738e4384e27b64790eba21aca6b9b63cf01faea1a6b49492ebb89bce373f08281bdefc6aa05d6ba68ec141e2e54bb149e64c72b1c2da9e0d76f7b965396da4e0702522cb7cc81777fef8aa37b7c5c122936db7a175921b01f103f038c5e87204d2e90d85cc95db21fb17a6a2b49241cf4a4af843f92c579979e2d08fefcd5b09312aaba3a43b162c5b115347dc900a9784506e796b82ff6f78bd122c31a4e24af44c075388274cbe65596bc7d7691d506acdcc14fbc94d7ca045958b031a856054a69a92860db889c6bc0ddbcb143dc977aeb323aca19a389078ae1e4a8b86c8b12f968d3f4ca2b7fd345c47734a106aa8726930511cf78ff1d370be28e24cb6a216230863edcce8f06d2e223642eb11fc97c53a11aedb555c449928adf04d3fa260f0ec72682a2f76aece386d7f912e4d8ed528e1c4e3e35cc9d10178a0bc709c6bdc18bce204cb74e9f921954524f79fb1428393241a92b0373e3284c76d206fa082d9f827d91ca01b78d435298e3f6c49d1115899c897c6b0ecd43c460594519856ca31426814a0cab8f8a119665bf77eb7fabd24e23de8f4c8a85db9483674466fb79eec222a8bea79fff0887434701f514e2efc3e55e4894cd6229d1c89bde7bf04b323486910e59564133911726a1cbce0406f18a3ced5f271c3c60c4261cb03999393b43cea10aebff7788fa8c36bfc76ea5117ec331b0a950d52b0f1eea65cd2a6dbf96881aaf17475a78b25947d7b06435f869935fbbe8475eb1de5f17b59d3d32a08c47628f643a7de3ef6956a35dc72b423ff117458a87fbd9a3d6fdd2b5160d34e2d8f09b418db1a945b198e14f6815a3cb5c92614885bfdbf42c318a040cfec809c5e415885f4fb90a454f7d17606b4500450594f9c0102140154f6ac4bd4b1362d04036cb83e5430d9f550cd73b24154ef37268b66bfaa1e5e3ecd596fd41ddc10c37fa0da587ac2e6323f374203f99ea67336182bb013401e2496edb8432d4023073ca2d6589669585ba3d534cff2e3bd3f3d230c18aed6d47ce688f858984169d28500eb982d2921709743ab2dfc6c8a1f57c4e62c03ca016fae126a4a249425dbc98554615e5b69a0e2d9421f4776ba8f89a248fd0a3145bbef236ca01b414babc83c5b15ff4a8b222d23157e1cc1944b1991d2959db456604909131b32807e5d0fc430d47c817c8edf632ff09870527b34ed0b15c1ce86943945f395453e6d80e4d74afa90b8c47a43e4c6ffe1fe890489f5d15cb712a6ea44b9878d2fa59a029def9441e20ec825ef423e70b6f238fb2cc54300b5a96a0b433cd54ba58bbceb4a1772023b670a9c68782786b9fc23e449f70031aaa5420eefb22c4c1475f1c8863ed8311e0bccb02699f1b1bf9c99766a30602ea012df2815e2826f259c991ec275138821721a197b02b9fe1c350de437b0110ae07afcf5e60859af19ab343236491e2745f6ebe70316a49e40c363a8ba601704d278fb728068a9e104a686e5f084be246b4f1d18108dd3d0b8194f387a5899047f8b12752edaa171d40ad025a0bd2d877589b649c2d0cf3ed7ae2bb0b2f76c01b1367649631b27ac3e24d2f8e0350ac9be85aa43afd1b295492c6b0465cb1a272ce4004b84f71401a09496bc0f4c298339052943eac4cf0a2d2c0e445017f440401d3435d6d2dcf055831b75c8210f3df8e15b610349cb9f70b50dc93bd4c7ead9a9e45169c7fb32080aa71b0124a95a508f53341a078caa6e72f45b0111830508264b26a7438a8a8008a6430394ecea7065f02135d72dccb20a3685fdba9ffa0941cf3a4dd9602ae81b0913de56916b0701f15f6e8a596cbb297a69af46c6a7ede9622b6bde8bea41e16b1ff26b49bf323c93d0833a79b1f80aa6174aca4d062214dabe79b66450ba7501bfd93f419d429743d64b289129511adb26b441368e1d5af71bd9e6e1bc4c36f7ee55ca8b720c1140b5927582d01f885edf2d56ace1b16292dcaee23895074a4e0ca604b85804030547d3d2206bb98c1636d15c539178f444b6ec72a9472a15dbd5d2bd9240103674a9bca03f15fb7d8b91f8971635c325508b7a0d5643228d9384b6eb218f7015a5011d7421d19207ed6b6939f1c2d56140a9ff3e630534c46fae3d5e86905bb1f708d0c3b77df2cb8a241a0ec4897dffec9e985e061a024b74a1c992ec92562ce41d88a1fcc47fd77a03f6957b43e4e3ed271f312c17b332fdea9d9035e33f94373a8957e86121f642e6f2ece31ca5e9bcefb4fda74bc9f155be5be897d0fc8764ae14a25b8e0a85b07e34325dcad4621a9d82102b2f1ca7d9a862954784fc369b01507686953b8e3666fea801efe0f45163852be9839978b7a163e75a1d8262af09aa90b6bee318e1ae7d421bd4762a0a53e3a509dd4b65bb1f8e6fc90c3b2205f03dae16b7596075166de6ea510ab0d81c784556323f95952d3d041e90dab91430504d82bbac5fa96cb933545a9c313e70c346
b3c181b49746826bbb11805eecc94c25e957d4ee4e3e55052469e5487638d1ec5ac71de4fb622ad0a6dd8a8e09f1984d21233cd4eac791d68951576e079f6c67f223fd0cafb0eefd96d9409bd925d7901a6a781e6e87ec8332cefd3c68c2dd8d36a4176089fc41d7be836abf3a47c30000fe8088c987f095b0b9e707a51860e2c1d94148357414f4b5463515e28d3b648e58835a0f15a94e027154902b8e022f47cd09a2361106459a6b95a59f77dfb0796139b31c850343ba184f5409ca97a68ec69cb28ed6322251888b1ac6844e443415c9e1cf91ef404d8fab14138a6fe1437f459b5d26be5231b4c1aa41e063a3dad6cf45587707a5d882b579286f0e1d73c984b39248f181160033be3b4099c91e8e449ea926035473d0c4b93728b4b560e86a331d4dad3b7f455708ef5734e9a02ea140dd0bda3351f34bb192c624c7e4a707cb03ac1148a47e6cf15248170514583065ca4d9a40093165dbcc5895ec959e4dfc87861f41acad772ee393a3e61d48e2bf77ddc04ab7cf401241d883fbe7bc8f8d5228e8ed77375ed2984d2ff3f4b3d83df031d045a6c41d36470eeb0e363e2012480d1b18d0e2868d684fde2d1c131e32b86e4812d46fd98637ce40628d3a86024b35304a13e3901dde6943b83850438c3f1a36caa8fffe368c1fd5d3c6d8a3f9626a1fe0858cee37936117c83d7243af4c783988768d770eddee368c7b34cbe07e9cfc9e6aa8b75d87f77884392ef42b140fda01117a0ff2040797561f57e855dc725e47095b5f142bb4f97f56df9f3d0234e667d6e5503f1b8ec7a9fe52ed54c1c1f3f33b65ca2a1d649e9b1fe8b8b3a8403a5293d452c0cb1f9ed7a68ab5a543ff3fc1178f9d6947477f4b14edc994b35a5de02fa516a7ae33deaab8153d1f833d6d4f9f06d0b423386e86e13cbf96f2ed993fea6842e9fb09b546e0b75c79a11af937d49d45cc52fe94ad0b219348329a8bd826f0c1263ff79ac3ddf257d520e311d0a1785c59ffc37001ebdf11e45c2364866919b0efab0d121171b539ceef7f180c59168e216438da94063ea7e8bfe4aba85e9956811509ae4b0094f2fc5da5f865a11c6c7696ac849abe06bd85e77dd80dfe1f3060ea3d3344888ca6b93d67a1b3498ac58392d8ec133459dbcac5535fd7bda1ed3acd133c74f0ca834ba575264760c9d31b2d92439da1cbf9dee68b4f3ca0fcf9c86d0e8025346060bd1e9aa78c575afd1a921eea6a1ea447ffc399e0d1ab34e7def624a8fff28f85497d5c8027bb50e2b2f9a2438a16e2da23bf5278e4416c63f9e45d520d927fb32252bcbe712a1958f6f5e80c08bda11623364b9106e3df607a168f81cd247bc523da940b533403a6223d45ebcb9e0a0324bb43085ff64cba7865161b91c27308855e791352cc6dde4dedb44f915a4ddd996bc079c17f845975333d4386d816f1c8a9738de7df9dafe36d439043b9f19823968f7c76253586222e84fae6f2c94e9599f7f205e48c35b8f425248428721f65693a614e8698c652e4e38e747b5a49d26c8149b6ea4824c01d2b558d67a496bf38d5263cbe6a00c752f08e116276fc8ca555cdf3668a39a2eb488b080c4e4cdc38c9d5d73226a2b87074a20dd29f03423927f5ddba997c335db0525950e26ecca795d35f08543149bedfa8b36dba65adbd31a62f3f7c0c7cee999fc19116433aa4f6648d1c7714bed68bc9c55cc30a800b2e6079f0141eee5ef147612f6d8aef4dccdeb59205cd822446c2de3fc4b039cc0f20be88d566af6ffef9f2431cfc2249a1574006bffb4ae44c6c031f811d016c944e63290d148b05acdd9138de0d0236a49a3674ba29091bff30b49756ce021e2384385e6cb4ac510a17b0ed6bee858da8d22ae31a4a738884ec806522ee03b8015c24940bdddea4cfc1f65f2704f80b33eb5657f2a794d52ecd8385c0af8a0bebd758c7d6a7a65fea3bd799c7954c240616e01763f5eb36ec929a0f2def8aecdbf00a778a5b95997f94a67b63dc8611661ab13e8f8ec0bec3411e7cda53e9852f51c4b5d55169342ab6d2bf441c53510c892b9727622f42115f7f35d491c8bd58b34a4bd5f5b7ad175f3e2e98266f5f2fb60b2bb03c3fe55b47ffdc2174e6309935167f1ec482676ec8ca82309ef9522ce1355f0c890110885b5361b50f535a21d04410118461007bab64aec3871aacc940deb07840ac84943563d5c79b194bf2014fb1397122d7c35c15f2bb95a32bf37b30f90954b22a6ed9aefc2e0ccf08871514cc125931693df6fe94e3258ecf67b650d60bfb205b9b81de9e57e4d357a49709e3cea6647e3612fa627379605f2f171dfafa265a914aa956e9eb1fa33635a3f2cf9d5ab303e955b65dc5921f41d9fa5310f1f4ac1065e8ccf36cfe132220127070754fc983dc0ec9bf043b559d9f838234dfed3e50b122a181182211219460641256ced1a2d50f364efd2515bdf9f3b828ae1fa3b6eabe9ab5fcf2aa56507ff65aa00dd53a17fe03b7e
386f6e77ffa567af0c06c83cae2d2edcf68ba74d43fa08fb62d70b711495305f16d6019ce15d39c256fd923bb79b8683e6a1cf02714e90ec535b9dea8221ee6427641c8557b72d137b632c7e85ed8627f121392ecfc9261cb4b5c7f4f95694bcb4cb6d2bc2f2aa01113f15ebbe2aba2e747ca348bac4593be949b75087a90885959d5a4f6e74ebbbf070e5de81e9b23beef76a95555b5f0538be21b4e81e73f8ccb46e2cb619506980a7dbd13cd7d0754d75d6828221785df1cca59fd50f57e9fd4207254d2ea32c92f3adc5d5ec98f4158bef63109b16b215d757efbd07aee6c79c319806d1d4d74a59e6bd0f4c37ad8efa6d37d281033cd783e44a549c4e05c77c9176b04cd051e3c1d69732f7b7850efbe6bbbca44adeeb576710aada370ac4268ac9f12365ac54817b0029e7e13d3f79b963939ef54c98219a0bb2c19ac3596e9c37b08ef6d05795ea51dd49510545dae19747f97d5bfbc04e518a48f5599772162403b7787ab4e4444787c322e411d96da24ebe9fe891b837a9a345233c5bd2b86d8d481fe6ca4d89cf08e87011b2c12eecef7ba703e85f05998b8f5200c95889bc17a1d01de0c1b204de1a3b44f11da17c5e3faf132cfea8070daaa7e28d5d955daa194cc013f3f82fcac53bdc338dc6f950119ff37c90dfa322d7aa4741d8f12f35cf4fa17892a2529c79eacab0cdc282113498f28861713d7f9d03555b59f3aa34d782886476f1310f1695498e8c78c30f0422c6398e06f38502798a9d33c348a5db79628a13331ab41b272cbe030569d38c53b62b32167cd11076bf4ac2324edfd32a27b13e59dc8005926676400dde5f0079a386df8942437cb805e4b6e2e499f07d7b02d0e2d653fbb5008213afb17b0855a31ca036e108f358168149b383dc91b6659cc906d2becec89272ee64f98e2829aaa3b3037f5924ed2c0d14baf4370470c9bfc0f0b77dc2ceeaf3271a2cc8b5a5c66f78f9a34f6d036e5a726a8b57316a3876682e3fb3304f683aca1d57caae6c4a2f5ba5a104681cf1840207ec8f99bfdb8968243bf45f22cc647cea248a642e3c601d6d5167795380421b6094d299e73aa6d794fe3abb93c9ee2b3966874037ccea3bb871f0c205f63381441cfc6fe67af4114c2700833d873b27106f3f8229b953d55a4596062aaadbd3884ebbff2b0b045c6d4d989ffe06f9b5ef6fc932580ed52beb2f6b432cecfbee693dcf777e5ff725b264ccc0dc1b3c67fd4a080e60a5c81bbfff7f32f7ebf31eec90281a855e68e64deef8116bfeb294a13b86a8207fe2ea85985e12fa41c52e5561e65b0996c058fe47bbe94169c445a4f35f71d92021421aebc5d92ea7890e8b9307c7eb86e3ab5929f65c6b675bddde97fc5b15138a69a3d064ca0166970e6dbbcb5733e0b334feadc662bde3ebfaa2b6861735616f9b4185ddacc7907646e883a055732440c6ca21c1473f04b90961c0714f6623084ed74f0885b1cf532a1a35d9d8b9f0220477627ee33bc3ab9af869db2cad4c31cfe0c058cc2753720f0f6f631e3d436e7c4b21deab3a403bc6ed8b1e77f23aae6336df711c076a1f2810b91056223af0d2e5e4edd5e4999fa4f0c1e16a6dcc8f89e7ba1610ca8c21553522aae51802e20a6f52d891d7ab7aa98ec822b5040ce32131f10c9ec540c9c8c42204e5ee0e11b8c00af82de166228c24fffd37506d2d1ed8a35441cb8173a396a8f91abc14d170e1845cffd80ef8ce60f8a1fa4558ab5254086b268e7c3be822fde35f7548f9e7220d5318e70460ee5aef03512626925a8e2427c4cc102e4e53da08082e48d05d2429e0f3c30a3ea971652d473189f3acec89cc554aecc6808f2ddefe88a478f005b9f480899404fc5b937c608c4bd718ea434d3d960423ec1b8b8afbb8f0bf3e1ba18045de1a66a28b78621ea6eaba848c1396d385345653dd2eac0a4152f3a915e2366f4a1bb3680ea823c4d60d38327d421a58b4ed39e5724939faff87176004688012038074c774110e927cf000a6d81ec73afe5a3a23261754a93b3801577e6b658856ca5c2a81d4a35093af7231c4656af68d2928d931fe5017ca541f0c40441c1cc6f70434490b40bf973d9bb9a08f836c5a74784ef162b0c7e8cfd3a07b8fb550a3d1907c64e181b481b4efa007435c17b94aa01a0c4919d259f1873df03988c4fe8b2d1bfeae78efcbe1d4fa215e654b69204cdda5ec651fbb296e752ccdb44428d3898a781baa5a415a371192ea4909969afb0fa45239c37f44e3227f125a5b8303792e8be85e0630e71fa212976dc55efb76d9a59bd64bb3fa5951d6831c9a3f74f754d4765acde01d92d4396514b48ea8a85fb76a49df720351200735002f312c0002234cd505621fa5bc31bad7201ec6135b4ac8e0512420df93173efeeba503f618c8e7113f66a5af5242994b97717e823bf1347716593d5230a71d47b77eb189c76dc7b187087e59e9966d951a71016daa90d2d9ff9d6ccc51aca49b63f0fd06c
1e94dd0098b85094791090a26667efa16b1b1a3721676102c8c5aaa756df44c4a87e0626a6a891b68b372c4520337ab254e8974463d9a0554f6883db8614c0847c0baea0efa30622f619eae59381153f2e499f1f4f5a214f57354b969b9c05b3d49c6e3330aea6cc72f20e41c61fa864754459f7480e10901202c935ff37f0dfdf0bbeb71df88190308083aea100ec3b60bb99d0dc3828c8391d77a65db87ad75c4757617ab5cccb28a5dd3aa80f4bfafae5805a0dc63990ca94961c347ae032a81a154204de0c83cc6fbac04217084f507d4cbbf14a6cbac620ce10ffbe456b1cfb5e0bcf67b147a7ccae4ffe3ede4a32545b27db68c97586b6f2b1d11792be9bf0d0e8a65c571d460dd549de588abc25e71bf2f6141efe1fd50236d56d42a41c9fd229291144f74a9fbf24c16cf23c2311d3cfd410a53fd57b742df24e65c01bc72957ab149981e3e0bfacba7bb875cfc3379e63c57d411f2c6e2d7ae61c97a2ce1ebd1d9d041a1621c0b1152def364baae72d2923c6f26ac4c22fd5f7b206b1f58dc195d59183c3c843e537766dd9a62c13b2b44be0fa0053cc95f9759ae797394148d6d87172b4f76556fe6be66ac64069a1d499dc92356a51f4b650726206a57dd00d7e1184064c73214fbe5b4509916259601fad91ea959c167ed20374c5731854ce8cb293618a51f39dab70e8293b4828689b13251b2913a618918956f49c92429df20cd45a59dad62b2c5f1423adf5335b90df970e69c29ad1ffe4276ad69697e242426c2956bb22dee7eae8c5aa2a7fe852429721ded58b6d9f2ddec2346c8f04788e12e4c89e5558de99b9f4d2b5b31bc9e45a3d0f7b0e044a1128bfe5a150e8a49c1f79a5010a29abf48abe13d1bfd8f4629a1db8501f2bf8d351f96fe67146e901fd786529634ee5f4cc712edd20e971111129d7fa8b066a14f813f621abb90d01eb007a1e3de6d6b2f097bc2ec554849009080d0a11711e9a712b5fd361185d8d0ffca08d704207fa1fea7df232300813144edee7e87fa77dfd33f76daf1380ec4dbca043826bb8ef0fcbc80758697179299fb811c77fe0db709617e08e83686ed3db0d1d2ff2d88bc9ed05f55b94db27222cf3987f3ff39d55417a9d397fe75f4e4df4876307d3e47eba009a017923c55fe44665904f194cdafc29f537933cca00f6413b4b8474af78dcc30086a92bbaae17a1105842b10b9535b9e1f170305462999b4d8dd7da9a4d4df4d4ef18ddef8d46a36f10eaddaab53b3be2b1825c05cf05522e6e841684a557b17c89e4466d46a279d2abf6fe7a1df5f3e34e83fdde25382329ded976685d99a974561401a528f9c2df0b66988e73ed1fd8d5ce56122d528105a15078c342ce0d42dc499d7df4978d28210ee86c0ff28288611598bb3aa4989a618201347969f46900190ea853fdd6a42cd49ddc8fa507ccd3c0475f6451a7f2fa639289b84f4b5886e07c3551c9d583003f2a81fdfbdb674132fa5c05145823280f7c2d2122a115f9956ac2d8097870460a333f94936a76edefae89a2889728a55c6f07c62ef20076db231a0fcabf8c46116b0ed5f7a440e353bc3e23f5f6f1c6efdfafd22da2e72a32cfdb1245cb49820e5b79be69c786a0d0605fe9ca082268a6c7a6cbcb55ee1910105be4cebbd23b577fbe031a87ac02d9c5ca2ab046a5a772c275021324bde0bddec254f58644bd06c059f99f65baa93825fc8b26373fcd459c162f2888ebcea1718b1ab3e8788a1b53d4388b8c59e418c58d57dcb8458d59743cc5f598ac08cc27d8553be60ebb3a32be62e315754c91e317335691711539a688638b195fb1f18a3aa6c8f18bb13169913b51067d2eac71fde14abd98829297f11d826a6a7ca37637ba86cbc6604c518f38f35124e203ef0fc95635b0092ef767fb1367efd4680630d9346c57504241a6ca70cc55beae8ab81ffc96bf5eb9a56262931399826650f106dcb968ddd07dc558684bd01fa4360145a25061b3c364a40e30a4e5e93a0f8f407f8f3ab99be1c2e5a3a18704c01a6d2a11b2fab962677e9395db2274e61e79ab4c8624d48e5552dea524846a3c7208a2ec297db21dd1e7b7131c9841d59c556e5bd7a092fc3701165a9b3ec7c9ac89a8d23a019d8a9a4a6d82dbed05d04be3516ac0ed9819bfa7c022899615955fa0edbacdddd7f8000c7617c19254d7d0c1c304b42a262798b4e4d80ffca960796003811be556beda4050ea16952f62297aac401aa446650ed04a54cd790a46105b742d9034df09d175e3be2ecb7ada87e9232a813b55594f3df46fc770d758d2378e47eb279dd8895ab07068dc832dc9ccb78a52534a57118f3fabdd9c62702d153807e84d8fad3969a5bfe1069a2237c2c34129e3e4a455b73c39e6ebc10022b3263aef34d3bf2efe9096620ab7323cdc11875e4458becb72b0f8d2ba019b12a174bfa86eadeb6e47bd2edbe1ed0b31e796a355e93db20db984a78abcc7c2880b6209848ea2e0fae726870
d96285cdb6ef06dc7246f50273804f6ba55b525f8014a3ad88fc35b1d088aacb7da4bb658af2882786be26c1ac2d62d659a634c3b5c665125bbf578568d2fa0136810ef572c46ca75430aec9ba7d781e0831690cd344a704d74466cd4120a526497cf20a33bf72162eb7091cce717662606449b119253ac5456433017dd9ef0339af4a38a5f3dd2fbb5bf4567002c5931952246ea3c7180a9e4eabc82906161a5b358e95c04e2a25fefbf95021faeb4501e68aa07e2b467f30c75556a2b3b7b6d63c716ba6cfa4a920464e844cfd8e48b89846816df35119ba4cd51a8335fca0094146423b8f17d423dcd036126ce920ce78e5617a952008dbe05a6bb2ec1626d43025eddf9475d533814f7a7929232f901e58b7db0ba496884676d815ad3c63038f2fd21453837fe106dbec07a25e6622110a76edca8961bcce147d2d6997a34435ef86835514120619a4169762a0a6bac002d65c34d18257933e6d52897fb5688a21fc1b648b4e11d18230829cd16bfa69ea89e52685daa04c813d6e264a2ded3119d2432ab65e3c3b6b34384017e5d6f860cd93cfcdd954bc33b5685d269c0fde8e1382a07976c7be54fe62250a306badde7ce230b5be9fce9ae70df472fdf5539a93c7337793f3f4e365234687a3bfe36f88abb14a7fc5b6144b6b6f11b675a2ceba4c1a63ecdfa65abf36083123858a63c9f98113f8108eee47000203d8d1ef19f3f61acbcdb0347fdba9fb362291ebf0cadba9a13e5893720a018b432472e572a5052a3f606e4db1ee72b05b2c2c5216161f4d87f4ce73826f207a65267a2541275e126d3bb259fff86c64bb3740cdfdeffd2f729a4b91b9b70e082595721f8ffe25a690f52ba395f75d6aae67ce6c2a1632125634ee4fd524a08a7401817fad648692af9c50d963d82268c88683898023131efb34c8c17854c6bdc350d6eb98f538804075bd0ee98b953e65a618052acb813e654594c974e80cf55d23e95428e4a9243b9669a4f798ff13f9562e175a3c84af90b213ecff8160eba1679473d536738763cdcfe3cf53ce8f3df6b7514a6924566a0d2876309bb081e0ae26d077fdeaeb4af428e45995115eeaf274a9d123134a570c503d447cd65588f8aa87d525a063310d7023adc0a48404ac90725799034f1bb9cae6ba562b9597455f85f8ead060ef9636522b959afec2e5bb49468a63abd5fe7a786b61d9294edcfad97b4472d0c91e81086920af6f850e7aad959783cfca847d6234a04c9fbc50f3a038e64f759ed08294a7766cf11a7292310a567a995fea7641299892c464c2118ebf85c8cef3ad76845ac3cc048f06d3b4dca633212969732ab39a9062842184390bb51dc42717ac2e125eadbd0694407dd2cf01c37c10014c8fb78d15224994fc7170c49bee461c5db39cf051594297e7712f41b3febce7bef60c461b004e1e177278b10537471ddd6244b1477a1aa7d6d98fa822508da576f7b29465220a5b8d90f4a9e59aef28ee1ea505d2adc806e62c9384de4002c8992f024727347e48570e96599c4401525c02f8c55da93dbd2fee6f7f54e4feef5f709f600cce393449a98f7da866e3b23bb51f0f0b0625141713c45f13c003545ac7a9cadc6e85ac3e3d18700eafcf97c76851030661f47a8618a74a877ebc854b128f5b6de6b5ebb359f1aac18706701acb61d67f74b25d73fae0ff8f0cdd69c8cc0038b25f4154881a91bc644581321fa8adb1fb6b1e0aa85fd95a0c4a320763798ae9e62e0523c5fb8f506ef4389f7ad79f2ff15ca8d999b40d572fa1464a2127852bf7c1c3d11acf0d9002a8140f3434a5fba963772f22da8f992059b37a8ab8f0ab76bd88fcfcaa892954d23e74274f5c4c3135f3c2b8bb5e30f43f34439b8096af1d1920e75bc031f0de651a779d134eb2d1ace50a24685cfa7be9a3ae914530d361972b4739c67c91a818d4fa6dd05eccf5407a1ce98929fd7356c5266d2d799d9b76589c04ca6657558fd17d4e67f09121a86005a795ddd54afb0ac4790d1e4c7e2d78d80327e7f75ec347a2774beab834093584d703c395ec109fe5c89213a7a98abd679a4284bd2e357dc94c71105290a1cbd9703da114853dd5f9a06ce53e264bddf437895b12c72bcb92c58289b3818a634462d0b60a71fa34e755607c36cdf095791c76ef828913d02bd90a856e88e3de694e2530c28dd049d34908ac371245a0472eec29087edc958b2b62bc6642021a893dbb936ead8ebad202915167877c42939fbfeeb506c10494544d47150ed8cc8d6a2f98a5946d7995446182e45e5ba6cfee642bc1b353fa591d4c9fb52d337473d55bec487854c6db1a0b2a15c607f585ae480cee1e227b18a607cc89814a73ae09bfbd5111ebd026fc82e20810aea60c8c49bc44ff33b3fa374b6d180075219a4981a8cd50aa7159dd4cf1cb86cd3dd4596e22605bf03806e199800a9d9a218d40b376951c7660d5be062fb49420e9f4d8407e2fd02b92507dfafa92b372c8db3
5f846f70089eb9b84d9305bf67dd7ddf729dc2c373b1c8fc1c9f12ee2e4f1cdc3463c12c9c91951fb104e3b302c2efe3a3915b19bf7e9d5cc1f96cc20102e1e433643889f85725ee8578c261b28eeb00a5408a49692993aad20e557a98129e6822943862bc8ff4c207b9803e8be2aad8a3db4c9b5651cb63e0ed0d70cb5ba9076f290c3c4b5b6834cec5f35804532bc0fcbdea1b049fc369df50c8382d62c729aa65081d26aa861d576a122301af97bc02c852b18708e84f8b22fbf00203804e4e765849e366bf649b0305da80277fa0a36457d8215eb53210e23e6454c2e73085edc433df041460b106cc645a03fe97686d87f37761e784f71b3e0e812525e6580823e7487e0375886f368ae8b62b2085ab353fedb0a99e6d971449fb9a94007d58a794482a8c9469db9e50158383d37bfc1314d68a2d293436f58adefd8e98f43c317105d594928197f31a8186d98e77c4c80aa261a5b814c7f88822a4fcef4a10d20fcaa54779de2ff9d601111bb671b057c422fcec089b2537c5ab5555dd6dc7d02cd3b4a26135449519c0d73e80c4e96fd922073824046027e2446ecb06d582cf4127c178434d498ee08d87e8315257f68f9953d3b6b1755cd011daeda32fa09f5f3f2f3f08ab7a2c4ff023ccfbad54598ba015cd2283822f2da66fdfee73808c3923a42172da90750603033924d1fbc9e0711a59e5f4e9a3cb192dbb390fa28989b6b6fd7e7cfb168860a462d742cd5ed2f24124a7b98ec350f1f7fec708d12f09bf12083f4295834713a34b1c893c7df1601222f9c2ffd96bacf1ea1a2022513e7f5418d96b85ec8a3081921f9b2080070f89f9c2a92651f4f095ea2ae1c5111dbbf46dad61b6ca89277b14e4ac0eac068168e8e0691f4fc702fd8ffe0e3fa2b86bd876adc4ae15182fa069bbc73dc763df8e4027aae8462c1f560a8d6a0e5910224c555e57373fa36956c7a82f09aa777bbf60d35107bcccabc6446460a2908762c2487de332191958621cb2226689c5f41078aa2a603cf189f392786b873022677084c40ca48da4cd3250436fa7e5d24aae2ea8278e201d12b379ba4ac6abb1594b3d039fbaadf4f1b10c08d7cbde89e5aaacd3601eb711961944459482835c90415263b4750c86afe7ac1bed3004bf082cb99478ba0914f2808ea42fa46e27f7b4b9d08f9b930b4801fd57aa2c251902bb273be286536ad028a270e384e36bfe4c8f806c019212caae14f0c195f0a7267cb6b76cb420d775bd21beef6c873eb830ab342c4bcfb4abac108ac4c65e7c2f86fc388cd0f7203bbce450daae73b8354450725d76e74840cf9c61b24799ca8493c969e95fce987c5165bf036f4b9748973a86a948fa8833af699f999eec15d58b6a552828fac0449c8020cf0e521174e423936d1843fe2123f65b999b16601557314ff805e0c8885dbee7a4b57f807f16867a3d3e379a7c6e3422e1adae831868c155c17a5e54bd2ef9fcfd5129a4dc00281e477db15361f28bbb5965fb7163d124b093053d597eb2802669353d03b4dee4001d0f7ece20d22d3820020cfc6c45f2d9add8702e4cbba0681d6ae56d51d70c007207bf490d81182b347e3c6a773ce0b3bd00565762ff5212b0c10ba85297da7b2edfc656baaafbe642bc401079ebf0668c93d7bc99f75ffac638250dfbd84a34e0cbb8f6b7e33627d1ca97a88e3285f36370f73078a669a17e5ea59099e730863db927e643ee9d50da191b37e50adcb89e98d565f8a1bc2ea765df33ab8b688ea8e2670e81b39e334a7c192e8afb13bf1f7abcf34127717a49aaae783c1559aabbbc0eeaf789fa6248a449f97d19477cf15713dbd2002b17122996c7f256c8e5b10e8c390eab5e6317f385df9f8d769862b57fbe86881a04f262991e55dea74f816c4389f8ef24dddee919590e75e44e149d72639270ff1a276150b132c64ed1caad01e01234a3a7b9c58255ff9eab3861576f3ae74717c0260046a64ad9ef4e7494b7422198ff717111633f17ff092c31cf334e0af277d652127de3ad79c0a1b09788479e8f67242cb668c5d307a4a575b4f5ea8e86cbab4aa6fd792e12c8ddb8867e99437c5adb412af042db92a25a5db1fb5582f09d09e9d7adbe9d32549508c7ea34187cced877d09ed633a1395e349d7d8d999c5c36ede274786e0e84bbeddb29517e0c967551b84bcfc124ee345c4864599453f8681559157965b416cb4520c2b7e61cf93d2095386d2ad4356c41ee6348d7e23510c7d1dde8368d934dd2e8bb888b88cc43477399b089a5bd9ee6d6a6ac449d925cd8cdbd3fbbb8b30a70d764d6517b336b6c7ddea2d643c46e996f9cfd0e433b9f9a34fa41c20f30b284b6369abf7ba4722c7b908f62618c98f2052707540c4356181f88c2a2ad02f4c66b163fa42c97e5a1e983561c3985a3828c4dea85428184714c4a8f5aaa1a91f44a2ce314e9553e52075c0a5dc5348a19a1485a46b0af5cfba655a0e7ea27d0cf7d07a66a2db2a8967
9ff482bbc94592536200002a4d18d31ac667df28e13dd3a9680cd493ea30d77192121029bce89da990d004b8ef81bc61b283dd6371cd38fd3140696ec8256802d0ab284a319cad6967090a960ff2428be912d4c23bf7479c48960f86f8654681034f6ba2694388144ddcdde00ea704f6f3deac6a4362fa00a95a050d0f28348879913a0d48d885651c5c09692a9a43f73c0dca4c6c76b341ffd047007c73348c289cf393701e6526658c2642e0c4b6014ee66262973001eeef831d1edcc1f5c9d1b953cfd9f3195a45b907c1211574b33764eb80396ee212f953b5c90b47bb34d06b4d7ce65950870afdd362d9cc09de8cbedbabd76e370aa23169b2455a692c21bb298c1f79194a62bf514cb841495c7e439a06e18046c621ade218c1d31210c5a8f3af59cc2ed54aeb5b4b563ba66b6e7f6f1d4129b77664d22b1947229d14747ed9d336ba29abea335449259f569a75a554e0fe28532450f913fe68b706372e51a32339d36f72c8629de009ae80dfa9f921c3c8430ab2a774e2fa15ccae230ec0327bc0002bb2f270b0f5ecf9808958b00edd98c4a8bd88216f1514a60a8818d3810dc3db5f0044cacaa9d32c9b08e397fc65f562ce61c466c072aed5d1e5446afaf50284b2f899f8fa035cc8cbf4771d2ab91ef5a84f24986b6630e6e2248eb1fe20cb5e2acda7398b145e8a9e5e15db587fbbdf2c024389d8dfbcc72cd35be6aba2e4780e63c8e9439a0d1b4a499008fb849f336fce82a0488212e7b6b67c3af892354e7a46b3b937aa57990c1864f457136e4c3739b36823f18e1c014bcfb567c175e0f7a26d4940eed313ba741fd6f80233bee5b96f413834638ef4f09e79b8a15e3066793029707a6337ab69506dcb0fe7cdc241841aae8a398bd6f43a4e3eb39c61da1b3be63c2d77d008d2516eebfcd252eae3a3ba2c29ff1728b3dc54a9310c90856cb73bf9a4c92dd0fc2e7d0f9b5ef7acc718692c8ba5ff0cc6fc9660d33ba944bd05a57bb511b5e140302442f002e561ea16b2a169189d6aa188d54c7f1301c37c0731f6074a433025ba67793fd93ba82b82b95fb377d7e30681275045f8201ee343f048808248fbb5bde8c1d7afef0cbc5704d2d43a9e8be008f07b1139537dd0aba3e5da2aed81a9819194a8bc0875504c7d650d54e8b3ebc7acb8ac314aa2cfbb3624a71b651a3d6e9c67957dbc45fd691d35f81d96d5b2b89705fc6b325f12eee25e089e24cf246a8e6225618e05b5d1580f04b01f2e8358ae13d3317da2f5e9b8e8c01d26cadf7aad2503258d55e3b4dd312ae880b0418e425357d3217c88dfd35892e468fe37be1099b8fd7e98ef33726fe2a1bbbf259a0a703a2ec52b88f01391f3eb0da00c97ecc5b83b32a2b8211b41a113057a668a8d828a925502f5bf265ef06704ffaeb28043933e9454dd91e36a93226d997c823bec574211312d2cfac366aa0b1557f8a09d6e1d0ed894d5fda054b6e02b8581da38f9da967554bb1da7ca455bcb109768bf17c38cd4d104dbabd22498ff3e28e9a63e11e9cf165e56b4728a5bed01db25f1e6d467372e3881ba30567553d3d94dafa9271bc087d37441855f816a9679cab94375f703a773786f2344131d725162c84737123568957bf494911c77dcce2c024e550cde2b0cc9e64dc6d452f069cb03912f3ee62855e01a0bcf41bcbdae382a19d4f45a5ed2e32beda719fab0389d0ed7d0399f881933a903944aec24a387891f47038ca7f6d2d9ffcfbc68fa49042034424f64a81dd1b6b6d11468573e669312055938bb259dbe9516bb4391f4ec5842218aaba7af02159dab125849d6081b1664635a26cb858847002a7513e1ae9e111673092834bcbfd1d0fc025a58886e46dd7ef6816994f02de4cba6d8965a0516516f1b7fc84aac7af8f38a6ef5ada27b8c92181ac5014c71c75bbbead6056c5a5b550b70a6fcc87c7732f4126e35c619ab1925165506b3dd677ba7ba34126876b19e8816a79d5c1dada7dc8ecff50d9fc8f1e01710ca81b8f5764532221f9549326a9aec9d865839643cac108338efa853f4f36beb6e5c776ce640b7710e9e862123d023eba5f18bc2b4c7a93731b2df3a5e94a818846cd1f8110be924288bbfc885d0a9d759fb71bed4c1b22b6cddacb23c5417eb55a9017c36c7c5bb40b535ae5a77e12e8e0509bfe7d5f4c1cad6ab3ccdb61f70584a61e3499f2343df66e34dcb747446136a0db262ef39d2ccebccc09520bfebda35beeb0bb0f6b1e48f37ba59053e310a277912587e50b89f7aab7ea780ae97d65cd69f884e49daa9a631c85f82ea2cb6d7c42f01dd2cc713e42f45ed2e5b63c21f96eade4d87884e8fd64c971e50b19bfdf56857210ad8e5f6b03f61f301c9822a2797bcf1196e54aa8d7aa09f4b326af121fab097ca7f5af4670d747b5d3c4bd8a54046abf9536dee8410cb193ad0e03b7d79fee8a48ed2e18649b03bab6ce2dbd6195db119ecf8aed3decfa746c6e1287bbea0bc4ab85
37a838af4bf3f9bc6450487578bd8801fbb7192083f28b767b09ab53812f03cb19c9f7b77a128d03abad126b30d0024324ed58f698b8d2be2c052d98e1c75d9088f662b5787773155df7892b923a13a2db6ab821567371fa176f6d0df790bc240cbf5d36d9e1aa49d101d24f5e550527bb929898a807ca29ec16dd87f47bfa9a0d4df48485c1fb95f24540331ab20169349506151e0ec92f25cb92211ab3b6e2b5a7580711d442b2ec9eee4bf6ca3dc6a92616c01ef65d24f1e4b81da207e6f4a4e98f21d7755378d0b2e261f6f502501fecb4c866418a0f6a93404a16453ba537bfe6830576a6898d55d4c0868f5a0c7874086977613e72007a15bdf50770ae1301a27a65bda999f5ed16a8d484903f7677dbf458347e61b672a808c6a319c7dd415e979e6e7c3351de50cffc4aeb9aaad5d0a455214d221f297b8a077138a13b41472b5ad7d9c1b4d2adb76a7de626e5d3c1b712fb62ba29e12ef39ec7786dae7607c7b82201233c85ce50bf722bc81f998a7edff69c4a6cbfd62ca9d03e3f53258a39a3f9900e1abcdc0d2392b4e4142703e265ef72d34ddb16f0dceae0bbc9380e0bcc505efa8cd5f5701e02da7b81454fcd7f2bd6b4b5839a460e66c49db5c73c89df802485a1c6f6008855e216c4b4a00107fcc81631642bd1eca268a011b691bc93779fa9f43f55b03c8952cb0a5bd3e8e8e32dec86570810eb31489355c977e4bc9e53e5f3f3408c0b8b0fe2dd5034aed83eaa51dc665c7a4fc2180b0e01972aa928e0e9927cb1a109cbba9a0211cd32e4390e2dc6f2e10034a951c3e0bb33ba6ba57b9ddf61326bbf4d24eccc6a571052900dbb3422972bee15dc2a44b1b3c45097dc770226661f467190dc9a71c378d82f57c80c35a528b3979bc65cd64ddeefcff48774e2d5651c0badac0524ec1b84ed8eac4aeb9a604516e448baf05745420bccccb273383722993e7b42c00bc017da0550346cdbe0432a827030b5335a1cb442bdc5fc3b5b01fed597e648e3e988b771dee1c774d24f7168b4ff4872bf2764171b16a27237aa7f2bb870d91b7639d2e61af5557775f1065b4700e7a5c79d8ed312bd88b2a23d148b9eb0dd707b8cff67cc9656a635145ef8ef7acc4aa93c5a9ba9ebee48ab1330f310ca637e135f28f2abbebb6800b87694e03f9f63e800dddc78996d06747a931cc4cdce706ec046b54a64009318ee425a0e6b8b71cfb046477e0553ceb52e72f4f936a3ea436b4db0a220f040dee61b7d1d9a33ce2a9654c2f9d1390fda6d8c02f6948bdaeba60e7270bde1a0033920c93d50bbf6c1846651076256dd796950a9b00443c84feaa3acb03a22953134f59f9d552dab1cff2760430371f461ed801553768ed4d843cefd31e8d134d264846673211958af649e6b2b4ce90e28bffc9e9e638430cc302fdc0d75b28773c724a69a0b2da3c1033bfb5b1a05ae58483b198e250ce5b38376cbeba4c5e2babfae18c2fe7ee658429b4363a3c423bc287eb269830294c45c948100e8037e2ba0541f56ffd6eb9281ae71e4ff2533efb4518987556904bc51659c8ae838a4c35b6aa3a929793e5955b68880f510ab2815f44b5856c3a4765e131de5f28091afcf7e2f5b530df58691f58b64e95bf1f126bfc23116758ea6ac3cff8c47ea383a5221c8444a341cc961649f79581e1325cf5621a56376584115a2529caa0b6ce1d8065b47105185ce7a0865f9a592bf2e19d8fe68ee7a237698522690fe60e22a052e6d8d3d8a7e6d197c3ffd345befd237f80bd283d8ec1c0e4eff25b5df7a42833827c397d547958b4631b6dae45751f5e0bdf61cae4e837d5e4285c279fe5cd7a90e67831cd1b370cd1ef2aa9e5630bab20a14ccfd09478f31ffa0a7d9639467f20b3eb43dde55d06666001b9b0dc965a9a5181e43e47f8fd70a9dfa0925a588970ab40098e0e7bb0780a131152298236eef86c1e0bc75c4287d820d10041bb48b4b6ab81d83a184816fccd73241e41dd795e9855d85fc364e73aa624c33d74bfa23c00b8e12bd53eaae38fe73c2f3445780a5e4c4c49e04abf68b728e00e7445ebbb39f9b4b5760f30580adbc571b59ffd2b3e8261563c2d7c361a33842ee631108bcf51b2a0eeb13bab23a47589238e1ca50602442e5a6adfa86b0f9406932ba481743f0144769fa23d3e7b6a2f87e3c53ed89227957e700ab3bcd2f9ebb4768198ac86d5a2bb9e4bfdc51527e0fca9c7617d6899394b9f523b706ed783470b46d78403c2a2a2301615f62ee8127fa5e8612e5b5e8f20456346fd397aec0a2d0698b37a0f5ed66458375cad7051ff286c8bca0e76949af061298c5567421be9f72ec6343027e5fb5d9487304feea495247d6a765614f927decf396ceef6fa65ae14f387ac35b3b76202aeed64d740cbad1432a7c8e09af3642ee74a3f3db715aee3eaeed5a45d32faa0e5ba0190da1a6d010da7582e6bf348ad292bc91446b4d35d4b5a6be5b7961655250ec20c819c04189aa6
f6a1484e2772a1544b379ba70447425fcc4f3aed8c9ccad82d2c8c9e5f0e64310cb27641af09948db08d6b896d3afa3f4d696ef240e11d6b5307bc939ee6def516b2039709d34e6a233df09517da7e90f6ce26a7dea7a965aa822a3ae3f617a0cb10e85401604a95c94fb3dcc939ad32b9b337a1e4807aa95cd88da76e4a2a978bac2b8eee4e8149c18438b0c9786af1bb7b845d09678781391b70aa220507e904871ff809c1a1597a05f3d9c461c815409cc914618e4bfa18a1c378f6491848f8b79d1415294e5d6a7b851b1da32db08ca543c6069112b79c3e7397c6a9a2ec06ff42c4c169502aa3d2b20c490cad59609001f737a34519e878160577e37da0090d9285ced4d1931d3bb7d8af3ef2bfc40fe8cacd523b5f5849022975f07ec60cbdf42e7de21d3663af3f804e6be7ef83ae0ca052554f74a0fc2665b4f22c894b56b1387e6a2ef8ebc6efa1dad1aeb603a29a5098983dbd78b7b2124c79121ed47348b485bb3d0a64bfbbf3399aa4705709e65bd6d9fe0385b02c33c7aaeae8b7c2d109ba137271a02b1502d214d7d4f86144c22b128d62558243e50bec24995ed937e61c696a82d85c7e2759a0c596a5c9adfa4aad00014c97ca6583de2739aa273ec2399608bd94c4fdf2f0cebc88d709ca3541284548cd69f8e384f6d550121653757e1029f5f41175f538716504c1a75ce45b842c2b15e87ad098e43a7f8c142c8533824eabd89d3ef4abd8f227336b06313316eeacc7453cae3a8165ddf1e3a7439ed84ea068a87355b379362f254c28d927d8ec39b49ba4315707cd9ef27f75c8dc18c3ac04244f3d9d90544516f46b3b3553e10dbfeaa0b075d88e741da98903719820cc43966b332bb314ab5714f52ca241d1f47aaf1e22a5b3a334f1725366a304ae0ef178052dfadbcc162e0be21c055bc3ae7cee0faca39fa4e89feb8db9ac9eba7ef6d4c04160aaa596c36b3d88bac02cff3012deb659eecba17501deaa9fc85c5db87d797c718f7413b5cc6f8dadc76bc84a96998d955bb2128e8e921ccb6a550fe3cbdbe6cdf7ab292f887bf4a40f34e9851d4a3cce806ea3db1c4af783858b58cc41263573c43979f9e61714091c3d0ed8de090d57ec50ed5674461ff60de5f129ffab062e91ad518e614e0c8b2a9edb3169e26d1afff878ec8d62b6ec8405d844b80dd8703b88b83ec03ef488abe2540d3c0b0631a2fce4c24695a1ce489be00e5f9f4251380565f0fb5a7c0e8ad7c40802c8e072352ee4e905ad3dbce2e0fc66217c82ba1fd4ce016ed43a1788bf9a233f250e3dd66781ec77e0363fd918f163e239b9cb1db449a74aea5757185a74819045b94a8166cf74eb1d9d48c2560a56e3a09a917840e403385d432b22c8f03c9f8f8e771932ae27d44737427f2d81a63676e9170f2f3c1741bc79a71b7fac41015a90c08f138d5d9e7d0ad04195ffa86f7e58f64fe8c42c19ac2efdce29a186d4b541043175c9a8085a7cb8da530bbfddb4c6a1d8cb59012930531ebf5b4e9171740d9c368d8f4d3f8659c93969e9669a5b2076fe4a7652ae5e74dc2995359ecacee554aec7fa5a5df3f79af93e1b89a64a67dbdd48d7c48bcd8416acec0cd1da1c237a1382527c4fec5bce82da5680628f6f91dc22bc7d1d196655dd53e2640c0f6e027430a5ce33ffcee969dfd2ab30d1780cefa2708c7515225e2204c11078e7da71d39b1cd030e1a93b4e80eb4ed2e0c1b95f4f41fa23adfd50b9f802c037b9f228399d787ab78c24366d9cea9e6a20a79a64f7da8c41038beb081b67b1f80c30dc8b34a6d9f52618922a5d1da9208f63307c293f58ac061da59256ee7766b33a992d8b3bbd693740598a34ec8f524ce0af271ecd6607797365c4a2bd8dca7634ebb95bea6b2c7f8398f65fb92dcbf9548c64fd541953a07ccd21429aef7536a5afadb16f54b3834e13cf6bdfda91a7e903d26aec0ffd5b7b18ef483676117a16e7916fc26f41ac26e1eda30d304790897a8bd90d7727e3644d2687ff63b1d38897c20d3558242e2bc5c0d9422631b22e708bfa06e55ffc77031f1fd23ab05e310e8498f4ba4d65aff349f281f4f77b291cb02ab2d46373f5e55772ca83acf0e93efc2d24a77885bb3b76c55a19775762a20cf5cc7147ea0356542fe3b712acc1562bb39fdb7d8aa0bcc711d596efcffe6cab5878cd3ce5e6eab7bfb6600b50e1299d2816df0d34e83e5fa385695a0bca79a319f269c3d29ac9015f7235b4d1095869b4c56af948176ac6d92a84a95d79058c426e60a22dbd48bdf66081f62818b9b476a18428bf4298780f8ba3b51ba37d6d3cdb0329930d1fdd42e131b03139bd4b3720024017034bbf0668265dfa99218893c166d5f923449cac7cc6e10d70c74ad5e94c098ff334d86d9c983fe7b36b1231e6137291a98ebb863bf0e4e5c5617b5804cb4bec92f8d95a01f3a5f5d670189487e330d89a421b77e58d5dbc7706431cb4f57e4c6575807b1911f269086b5af633c3f
39999f2ca239bcade012adb935ac1ef66d38c8bed95b230a1893f1b078972dda0a0775e31c15618f7665cd3c82ae892019080779773aa0668d99953c1c1dc065ca35a3fe1d7a9077b37b0dcb4a33867434ebacf0c5486f3a4e36ec1482d0f90e42aa10d15d3f14c9831c62a9a7f52036bac1de0b0cc8b409762c09e2e9b48a83c5ad08c05c9b136f328b995e7155b34275c07636dc3f0031586b9bc433393b2181f2e0a52bccf92f2957dd6328dddc3f843a71936f2c8e4a930fe04128055a49f21e1187ca34ae786e022c712001d2c23f35926361968a9e45986cbf5a1d7255407bafa5484170cb1138096e822287b74b21e4dd900dd2cb1abbb698e7e8521e43e50c9d9c9a60c05eef18133d05c6845a97895d7d69a3c30b1f11429d97734f16762d702c239ac29c71816724435314d1047602912d0c6f04425c64d8558e188e3b8ee0f02e7d7a524586a2f151ca67b28f59970935ac0996b0d79ddf1183991c9ed1a0d52b07aac76c938bb6a9599f9a14a4b34f60be9aa83b956f4d285eafaa3eb4a760ed89b4f591d7e0a1020ee591fca0b73782106255a1bb0a523d130d6ba94ec0c95935c39aca06da71c2f6c28b87294130c4ac49aa7a52caa3ba1ffd93a4ba0f39c55916a61af42ce1100711a987e138b07b52f46c42082046ce64b84ff06ba74270935afe1ad88cedb17e26c536dcdb61a2f7762c0ec14914a2931cfd7753eb5ac3907c12df1b1e166829f587fc4637509cbc2d5f52cdc1476a2c7f6ee632e1d8cd91f1493f45b63636921bf09ec8ba8b852b7a87c5c2d97197345dc01506f883cf1fe2ae3e1b9b6f6736b035d51a1eb6d58e10293597cc04d097d4a9433c4902032a240234174d761a48f15ee1cc628d718245d9e76a14e285f584adb5f8bb0fc932eb19cc2b22f0450387810e5281359717966d91669ee3e0faa3a26640d4d5156e9af507e17dd9b36585550e0462ffcbb06aa3161808f120a8aaadd5ee99c708074bbd1ab04a7ef7d03e2d76a382987692473087d0b3d8f6f169db8fdc41f8379a1a5c8b5bd65d3d71b189c4e29c85d7d16407305bd5e790c74afc9d22c886d328bfde3d4b8e60c8dc5b6b2c1e2cd8c3c252f3d6301a07686dc619a008f7c5b2f98f9ac8919553d0adf8e5997462d5729c2b7d5112530691d162b93b797ac202097875ea6406a8f6351bb56a9993d9ea2a255ec06e0272332f26839342402acda3c34a1319bc2188910b93f0eaa83ceb2b602e57899b1b8cb818ebc1cd132c2a536112520ff8dbd73d3b928e803baab6d889ff15ed752160550f1d197cb4c1bbe0379a7b73864c90fef9698179d7d73be9a5a025124d80d05023d104e1b9b9484e861aec7671ce8a18de2b9c8718f2e1d11e93e5c967c0ce1ea95c894eff555483b84a0fe2592ed705165b478516eeafbac0f3facb69c4001914fc81695681a0639f3ac7a182ad8c1359c2884c60bce890a559f095d7ff82baf3c3f00f49aa92fe0d23d337d4d781bbb379a1263f7cd466e0ff3c9211e655659a004047cbaa800c8b4369bd7635c155ce985e55cf853b9feaf803ce3b0340f6346f3bbae21795348e726dab6a460756bfc0b78417f31c0a75d8bd292727dfd7eee756889fd3f28c8d8f291c8b8063a774ee8f2f585a7e0dea14d5dc2566e473a4552fb60db0446076f404524479e5c07c1dbd2c2476f01ff8de97afa36a1c489e5c61854018a84f6aedbdca50fbabf4d7e95d317111ede5a641834b778d001e7de763841b2cb7b80fd836e1399f84f178b2c125bce3bb200c385c383b8ce0b0649ec8798b2a80e353326c9075b33b34909a3130500d9acaab893225339a03d4c20b3c716c16a975ccff3a64643e755d3784ddbeb79a55579815aae78c393d53f8d2467717f4b932a5bf3082e831dd252cb3bd8fb80c1fe65811ff2a4be333b2597a1b1b8479e6383a66a130e80f5c116c2e9500cac58ed4907f029eba92b86e01ef63b218f19ba86f078d4bf71bfe728dbc60972b65b75e2312a14c84d36f51239c17b331ef8d766f9ef2c872a8d891c16eaf355d8453a9dc602d56deafa4925d323417a020b24c236c82d0634e0e977b684ff340944578d9f2fd906b99e2720100a4f14e42cab53121a66cdda5f4487cdeba7cc92be732357628f1dcde8969d35469cf7a628c136b956bf6a5b1783581a2b82d606a0860101a3c31522a3a6feb41eec7b29305dd284eac612388d6c242e1f4303f735959e8aa9529aaa2617a295225cdbde8b6d763753074cd18e70f39b678d06a52db2f1d43312f7c53b672e4c79361210b5d69f3b4526ba236132978735a2d75f7c834d4cee250a7a6809bde6ae85f89645f3a05d7a5c7a6fb86429fbe2ef44c2164179befd65e9719080d3e0ce523c7f9580ea878f1659e4ccde8196e1b47e3d5f4899d453a4bb99a0f1a47f182441ba66820c740bab769ea8484ff48b83d7612b5f46a8299ae49eec691f206fc0f93d0d391f29f55fa5fd979d2c106e
8b7f38c777ef8fccc765be0f0dc2f256e7c01fc886a3d1c3136efa67312b6b49158c65b15f2977d93b4ec1b7f3275280b8ede106961c2b015f058217a256d069fc34737fb45e4ac4cbb05392c9d86411d9679cced593b703d99d21ce627488788d0b3bae3fed60f5d5db825dc58d02228e9fab5c2f7be43d12fe873a138a7c1d13c7150c7bbee86efdc27ea4a98ccbad7ef4119eb6a81906422445213b4d772f55c5e38e2f7104e30a260a4219dc360b535ce644ca976a88a4c8133159b02fe505ae20e9a98293aeea2f740b307c2c55110a2501dffc3a0a5ca3a6061b1fc24be41bac9508f7ad117dd95f3e617b29f0a345800f2a3bc0c9090062922e2817cc83036727fa9ada224da4f8857a3a13932031506d0317ad7ccca22fd5123d934b8f1916dc94e2517d1d3ea65b2c4ac86066e921d78af7365a9270149cfd332188d789bdb91405831e282839b65cebe1d6a8b880993cffb052c58024aa719595b200e4a9336f5165e760f739aabbae6cf6caa6adf80cbfb5b4661c6963655b6853c55dbbe865c049bd0c33b76e7c615803ce10b00040385cacd73ed9cc498a50900c40866c0f9e8e06111a1f7d7037c1488642392b29bd0caeeee1d5f05680559059b2b637b481f3e7a9f19d4a851a3468d0c5e08d9a855de9037e40d7943de90366410d00fffb13be80088240248908db104e986dca93a7bdab183070f1e3c78eca03db6101fbd6f0f6a00e04c00d81b743fe4063f7068e093833d800e80ccf824000e0d33005688156285582156880d801d9a71650cb0e7bd17494a69bf4ab9ebdfc718e37eaba5ba00db00dbc6c058e8017602ba1f485b226057a2ce7e193e6f57eba5fdd67a2b916dc1b1c8e6402eb41dd1a22adae74057d50ad8d9c57d11ee3368cb35ee657448564989a8439a9c6500031ad00007dc273edf6a07743fa4d1280b32e5e37e7b950ed081858e524ac9fba49b8a45bc2f62973fadad798bcfa7573e9db5661264359a692ea933f933e44b199448ca1a63c2b84378c63d5922e62763adb5d6d74b8e8c0d423856824629a594fb077f3c4a68bf8fe262af17a4ddd5c5c652517edef2e9598d0b9c9b16319c9e988c858d149e6cbf6f474a163c3a321639b218be221f670b6f71a7709ee0b4d838516e7c5cd8bc0f7fcb6121b86dadb55e262fb4b62ebfe244f9d95270de409b23c3671f6badb5064929e47a314a29a5ac36514629a594a3cd1bb453126118f53d21e40f753fe44b7bbf69fc0b637364a494d65a6b63ecac86cfcf1b82cf2a8388ce9051b481abfb6446118e2aae97b5502c14577ee939cd701be3d2957f779ce5ac55e63acf5a270fa6040d94862989e6d472ab18033125d1a573ced9e586ef0c5cae39a803751797add4242228294939ab7984aa736ed741b66ba19d6bd72f7606656d268a5e0db96abf763ff0959640b55ecc6597f8f6abf2bdf4ee3c18b7b9c189e5e8ec7c38760d0f0b594f0b173e0f863f4041422fc4dbf56c08460c9148f42ca746dced84540e907070ed5f5bc4d92695f15f9d455fa51515b9a26825b26478fd294452f51ac6788dd1a5bd3aaafea868a857f9559a8da2a24c8d66991acdb0d8082df24a5bcaae9da991f8a2eb2ad4b50deafa6e0b02f517c83b03c85aa6827fbae6c2ae33d8b5ebbb7ef974ad5d74bd5b74ddf574edc9ba86b1e89af3746db3d3358e4e4ecf601211c22b679d0572ba20b973af73625debe074bd73d3f567d3350fef9a856cbb7a5aecae5de8ae7d7ebbc0edd221d7f50f5050ed5ae8850567d7b317954e298a9e0dad24ba9f4159e340af3fc57b6dc554ae5ca9d5de2a1fe7e2bf093f546da9aaba16735b3ecec57f2c3e50793d602ebb5e78cc9c2e7ab573d66a45d76935341369bdb1f0d9c424ed75d5238ab7290b912213232054a4afab4a1b7275d587dc2b7cfc5fc10aaeaeae121021d2e8478af4bf8f6739a47e1fe7753626a0d79f33570de2eaf5b39cd9ffe9f7a97d295e6a7188f554e5e7e9e949aafe93dbadcaadcaedf69a14bb5e99bbaf27d7537ed2a2ba56556e4daaadb6caadc66abdd5504aa216e86baf05e2c53a06632e736368075c741c43975b97dbd3d3d3a793b303a7a2f6e21a7eba4f379a42d4d3d31309367cce695d39e7cb44c52eb7a81fae44dde69c1453e1c732aaa2f0f4f434e6c998dbed76bbddecad72bb4ff68952dac47217d32bdc1817982eb72426c5778cb5b5cb6dc3b8d7e9d76b8c76b97e723fb3ebe5cafc09f6e47541eca71b822eb72eb72e376f074f871818637c2fc642669fe19444b7a86bbcbd2c9e09bc1cbc290f07ef060f8b77c5b3e255f1a878529e0d5e0dde94274f8a17c5a3c183e2457950de13efc9bb7933784e5e09bca69b278347022f06cf890783f782d7c463a2713929cc8153a2d49454fd38b753734e5be974c1d04cb4f65a5b9d7aa872420a951f27a72d4e5b9c9caacd2fedba5843bda05c50d3a575a9ae15c6778b13978bb1fdac53d49c13
1fc355188c4fe0002fbf8730e95520ae595d76605bd7663390305c5178c115da88e5de2ccfb9656633d030f89da74761c82a4d1ddeee120b5dbd3f01546ddc4e345705e29ad5fd3eb77c54dd95a6ca21ff1a0e82b50e1475ce950fbad38d6b5659318096ab86cf76ce930569fc68779e28c8dca89ac6985ecf8dd598adda9d2715b46ea8c66cd5ce3c51d073633566eb76ebc982ce8dab395bb7334f14646eac461ff8affa9939a2e2b545b4546712fd80f3de9c29faad39a262b555b454d6f655576a8b68a9ce10fdfcdcabf19dbd3a53f45b7345c569ebede64d6715ab2da2a534b3d736bed71c51315eb920d31f56b7415302811135046116ec94e1917358c06fa9ef19e5b92feee60b51e635007aae1436c42550815395b3ac1481a494cb5211bba907a71d36ca88ac70a6e814c2a42524628aa4cba483aaf6aa8517351dfb3638e639f5ad6e9a8538f154e98a3f8bdf11bdfb6afa6f96aec9af6bd57aeff50b226a3b924d01539868c2ac0acb2819a32d36419b4c9a086b3b6493da78a4825f2f0b5dc1f2bc3630f5c38d62e331076cd4c8c24c74c0d29f0594173b783b0f3a479a513aa8d4d640761503aa8e1a48f20c07accb12937462347aaaa231b9e8d07ced6a4a50ff8a3b544aa97bf472dcbd2ca6ddc8b45affb5c0aa426a807747dd8b7c01e15570dc5aa7984546bd3aee5ecbd19a62b41ab35e7cedc2b2d3ce101a2038d39a0387e3f5985b658cb58b69b539686a07af8afb045888a0a8ffacef05abef361dff710874160f6aa308d4c013cbd01c34157e481c0dddbd11572392e9b48f1cd83c08df213f35f94ea323b95f2bf4632dff1956a7d8fa29a020cac0444e6b245de718f2f6f4d723d29b3c4bf555200ced4d6e54b66d3dea4c5545268068223381124e4843eb066e7cc838d1b246db36953a7365567808e676ccc4022c10b1a2bc7d7053082a4bc95f1d8948253f48b2936a3a8675c3fa21a4147e15dcab7882bf4f4879b06c25e37826fd93a9f7a6c6c4fd6733d7c0db77fe5bb56e8609c4e0c2ac2467e87f7f6e710016da16fcab52501c0e090340570e405bd97bb5ced56ed5c84ee15d87b13432228d0e3230e786e13dad2564c7eb86b49f5ee7f061a5557997565f80faab593b2164d0e2e7f535fd2da6836daeb3f4f43c72080ccf1ee3ddb51fdaadd279e0dff72f17cf1bffae5fe9b5fd5693c0fa42671bcf15ac44823d348aa46182d457edb8a5b11e584807a4dae33e84fd7728f894db8f024d6f526ed6a7998c677bf137495b3dac50fd7d5a7d79b190ecc9e34bc977959f43e6084e433d6c7462aa50f50e7d7d05921c5b455fb596d057f3bc79b9ed63d224563b0bb84daebbac4d613f5c97fca2d9f4cba69fdebc7982220ffcbbb2fb81d1e6b5c2bcd63c703b7ee4ab988fbce3ec8bcfd59cc6012b34625d214be34e9fd4ba628dcf3205984c30379862bc63027a37c76a453af57b747a40ab2412a69a236d8f8b3c2b1ef60724cdbbb8cac1d553b1770d6d08360110ba36a067ed6c6160354fc7be3891292400719c20d2263f625f840c497532e3a0ccc605cc380e500dfe831d9774fff9e51618f14fee6acd743701ec94de76184915da7b8465273cbf830315e961c5df22f3d60e7b50a1ca5dd126ce5f4045633ef2309a1388846fbc9a829ec71c789c8e5dbdcb76181820d9acadcd02015bb359929773f58aaf7137da9a2e21a14b4df20e1a540db79230806b7c805455042ef1bde1f3bfe709fc1eddc10cd01aa5245b04a9af3beade512e392c20271e037c06ef2b010c503b29aac5a6bc5d5c57d54eb99b792b0fcfcefd07f47dfb61796a2870d1a37230c9d662ab4e885c3f83b8390d0bf59e6687d1217c27feab29020c17681d35f1e019149881365343f33ac9093331fc698bbd318ee0c4327a2b954b2644b038f08f97aa08381c9445213c4a4166ae5ea15ecd283ed349f99c80b02f32fdf55e7e85a7aef354364ccf30a19ea0e9465061bcf8fa426e435e06e8f9c40da649e0a2c3e4dd24028dd8936c2b3d37eb780079c9c992b20a3caa88a34019f8ac5eae14dc38343715d86ec67e8242edf8ca9b6cf22bddb9b5eb916a521a77b6508cf124cc3b5ccc3b0275e7b63f533b7bb0eb9fa3743500fec72be34be0b7f722e9d3ac3ecc66e163e95191df262a303a8eb0ca31f0bd84919f00cf04fd3ad2e7b83f99c943a0678404dc4e77b06c7b709b8b6982b5925d60328c8924cf9124ca02d30adc587be68a2241ee092c636086eb47a12d85622c3070d663e0e3a8023dde39a140ee4b8da0cc6d0af39dc3fbae2d84d2fb51c698562ae81405d2602bdf1caa3d6adc745388dc498b6cb28e9a3fdd9501c70021f4ff5129c8342cd74d50f37e54d7669e9fb9a01ae97c816264615a90af47f6078a43b0ac7e6d5a8bda912a6335f33dc56493fa3e9f9e5a96f15e2be480bc41a74c9cc26615124cf3efff6a79bed4ab9a9f228ae40814111a27f02c995687fac
c55ca554b5f08bcb198592005006154387bf847f9edf2bd9ad22780f7ec26d018123f07011ef0ebc4bd80f7f20ba33c78c2c9be21370b8510605b51b6a675c92fc049e8b4783103ab4ceddf9c848a1917fd772b55cdeb5efa6806cc24bf6d10bd623c171f287f0b817b3430d0c648f4d2787261cf77e63389507fc827367644204ba348c742195ab73c64482173460ab3ca3d6cb83b37f4196b65470195d9ab11001e4a5ce24effb34d9c95b8545a4ac833d97dc2743acc9be3e09292aab2a29ebe6cb8c7fb461214f4a2b2307c8a91487ec7fdda5b9f2c17477a8f27edc5c0fd783c832e09363d3d1f6e4c961014191f8669b0882df3920669e244babcd972a0f576d296a7a39eea14126839a7c6cd79ea81ba745e18f3eb3a9a5682549463b91cde8aa1c79513248d36291eadd0c59d898b536d31d1381f2ad282660e1ca90ed1afec83aea8ba8eaa276621dd1664c35afaae224f4ec5d1769bac28b0592ce9c9d429e2b105af10af752503c774e11492c12649a54d75662115630cb0b97219ce5594c05f10a2c712eb0e122b7ce9f0eaa01c201654020a7d33b5de71cbdc072a6d70facee469f5cb9f8434ed7cd57c2a34513ae9b8fa8c98e17408657c5523eab38b16f76a6b7dc0eb319687adc00049cf6d2f69456976bbba4452aca24f8c6a05fc69ca6843920fb7bc98b6895053ce568a155b3d21cf3ed69ba69854e83927025185ddf6e6c6ff886aaf8f9c8594b4de77b7b648de3cf5a3753a3b7cbcdc3e44e8fe998a83829e1bb02515f50462472b103b94fe267491a7fd3c4afb89772871f288f693f342f6d781ef5ad79e8193784e5e62dac4edd3512b5fe638d71385eab5e7e80d4739d682e0020059f0ec8614d2a4c8a6ae7c37667b244dbf41954ae21fa0df94013fc33a7eb13bb2e01065673be0be6318a59ae7424feccca3c499903fca2f9fd1aa98c99e3e27021ca6c07699fa5a2bacc6a5c51733a14daa621d3907dfcbd76287613e7d1b3e8bc7f4a6c82e60ca10f8263161eacec3e6312f0d7eba36a3c032e5167a2f82f2ceb942d4436664b5d89c8a2d6373e8df085a75ca8820a31d7093d6a6ebfc283016987af6b01cc2fa4de109c21b767036d0eb4283d90ebb4bffd0d314a1b2531e5456ab8924e5c1f05cba11a43d18bab9d11c131164da6a222ac3d595509680bd41f0f886634831b8db96bea5a3b13c87c0a56f563ed57f4704b0425bda2d887436cd810f745e3a1b8f28f5b422d3e8f0f9e5c96415d4e2805aa0231335470470f73a4b940f810d170445001b1d1e1c17045a19aba15a2016565107f16b4f6acbba3ccb776318f08f55fbdd42d8e815a61f63b985fc97e57e8bfde810a21d6ecef7606f78ba690178387cf2f3196b4eac3f50c5ba291f6e8fee5bbf1590d88be9bc4ccff8486964ef1222f796522699023e0ad7091a0a634bfda6f6ab55d1e07d905933c32a62a02d77591f53375c9f7ecef1a646be7ef96e3670b347c1fed2c79a29d37df7a6b6f1d1a663ba9fa1addb5fe96b6828310374d58de1bd20c69f4a65fabfb9bba30e74d57d8e8754ace67bca8be98da10ef00c6b68d7a5352c385cd618c1fef3555d20f6fc6977a17f86425e08ce747c39c1517014540e95b3ef8da50dcadedfc472a77b6300dec84010044fd754b2a79f62f82fc06c4cc0a58d75667d4fdf490567887a8b24445b78b4b6d166ab4970698b8a68576444168627142a8786a178fa0e51a3102f9f4ea33d62654e1a1167b633f0e9db9895396b5a209bc459e16865a0688f3849679fd8d68893f44ba245e224fd2b5a202769eafb31b5332ba32b1aeb84c09dcb5be6d0d21ad9c1e6422ff9c089140d52d489efa75bddea666a6af5ee307dfbb75bfede882bae801d89d410b8fb93a9046de556dbd1ce9ba97172be93d184d32740cf8b2c869002053a2042063c7095e48c0b9ec8feaf811a60d18413283182043e200b78730f95a104195e2e55aa77e1c01480642b10030c307aca90e20a256c40c405d010194b786400511af41174d024054db828c2832a1600860cc9109cc8e2095790518493710475a20c23bc2e2690e4d6c51760ec80093d180292132e5e393801111922b0539021848906e5ba28dc2998e0848acb20227b76a7917b484a13a424a1f4fafc3f540d9f80ebd3bfdab5d65a7dde13fd20ed33763f958d6eff6eb6dae5650e1aad7695a71b3a0229a38a263bd31148193c6899ce35a5d44caf421d2c03ec4266186496321d92edacdd3fbb334211209091e7d766d19f370701b30bb8cb33cf707641a6bdb99633958acf2444575be0d29aeec5f7de6b5dfd9f8709b0ae7b844b9390c9643299501508169619344c6314e1eb9eb3e7243267063e540ef77f28a794d64a2badb3ce09258b1cd09cee1eeae0ff28d539fd572b90b55dd7853a97a59a7e7673ce5209fccfa40a75be0f7fdd17ead0ff40100c75bcff4210ffe0d202e5f9e1e9f4a1421d8c51f814e2540a638cb1
9df9a3e2ccdf1567feea094485423e296e53f290fe994fe10caadcb8c6d11a54465b37be9fce68cbc64ba149efa74534cb939c741f1a7328dea22fba729b1f392b3c7dbfe37a94d8fa3b320dfcad02c1efce7cbf2fec3a9d1934c4feeca9aa54b9ada0d940a356d06c6905c45e0ef10b6a9cc22035a340958fc6b8f0c95ea933dd2347420ac7d134fea0bd3a66499e4036090c6490e77b230e17ac12e2bca898ca3c6c34b655110fdbaa28cfb856fc71b9e1c8e86a46578ef3c281217dc1e428e370116ba0d4e09c0c029737b99b5c949b246775a1f763e888dee4dafdc6f783f642f3c415ad63fce8081c4b47427228503c4a141d41b67ba165b7b2201aa14ef726b13b6317601754be21b27074e537202759495841ac252ca1fb05938fc6429a1bcd8dae7034399a2834526eb428b182584b686e34b791e6661a4b9c17edc1799534b75cbb312bc56ecc54a10ef8aaff74bebfb95be373bc0be2e276fe6ac292f8830347f982ec0558ac0657934b522575ccd08fa21f4e7ed4a440c1284af69b1cbdc9d19b9c0f59f69b241f2f1f301f3e3e8cdcf107cdbbb608b7cf049a44968d3563216101b192b0b630a2b9d1dc6870345068a27838af1b229057b7e057fcf2d10d2986035423263b82e48eb9f662e0b29bd05e4d682f2fa18ee9a9d87df4a1c4995139ec36df1073d28bae724e88096da8c84995087b170d9ac7706284715e383d383e34379a9bb33c2467e19c5573bddae6be7f36f2a323a4d34873c379d191e686f362057d300cab815283b3d71371e9835dbb42a8636f7254acb924feb4143931b23979f431249c170e2ce6a41b39f253a37b7daf28deaae1acae06156949fc69799588f3a22b7f1a3e46b2d7c01107b60497f656e3465b1688aee67b2e9760402a7ad762672ace7ea134f30c4f33a3c469c59f962f893f351eb7a83eabe88a8a4d83e42ba7b911c106e5d27776ac968a2df99301a978235bf147f525d54843c43356fca9f175bc2d7cff3a74e808756890beee57b8c09da5c1d1dc429d9b1c0e97382f9c98b3ca9b5c7656f7a5ee26474596fcb9005271255bf107c797c41f97bf21ba622e195d398ea7e1a202c805cbb56c946df9c813f3cd7315c9482f4515650a28168788f3c291e11cc141a223acc15d351ec73f2739aba32d97f7cf52bc558353e19a8a1e5c53e1a332f2471c87e4051915496e72248e8cb66c28a22b960d3627bd593674d1a4e3883462340d127ae56f4317cda23142936e7316cdcd4931da66c6fbd32c8146096dc3f2fe345068c2689bd4fbd32481e649dbe0f7a7c1d120a188b66cb8d180417384b609df9f0689c6083445a0f9a26d4cef4f736b9b5b340700fb283e60332ed597344ec8fe3439ffa17a7b5d287b6889ec6fc38db68682aa0ab23f4622fba392c8fea923d95f4589eccfb282ecbfc282ec3f63969d9e00080a80a4a063fcad38242b0129420462049023442046ea0cb13be60736e3baf12be20facc675e3555237be8a1bb8d93e1637206e4095edf87363eca6297bc805d93f881957770b650ffd647f204bb40bee79d945042213bd1148ac06475b2c26b8a1ec8f73f23282b148d60b329a5b37b282b0c02599cb1ea2b9d916d1060dc5a878ca56fc71f9ea32da70a3abf26b92c4175e3f72e28f24baf21adc0fdc8fa01f4b7e08fd6022851f50b2ff8f274dfaff18fa81f4472aa3ecffc309d04dee06146f929cf41b293dd99f4608832a7be38f528d7f2e686e261c1f1c203a8b1ee41ffac686a22983f3b201c95b35dedf865b0df1268aebdcf1261704bec9657f1c1c7f1cd88dd00dee8689b7b211ceab0687f3aac1e1bc6a7038af1a1cceab06971de7858361460e67c86ec854a10ca2c98a253c4306d431f33d1189e948910a92983123d4b93fe3bb6d5c82ccc8df986b62e012e3bc66c4584219dc2bd9bf11a3b9594fb4e1366f8ac8f3fd88dc59dca8587a818a9e053f165e5418455b6bd0f0417da24e2e271a62bb3a8b1c8bebb0c58d79496802038156435b252bb3b3b14912d395dde1e4bd3f2917e09224ebdb8b13b254d4275d1150e496476b9b895b724b62ab1dc9f669d448ae660c2aa6e88aa0853127ede392d88a68b95c399369333d68b4151a098f1485486a6112db921b131c2d17c6ca994c9be9419dd29a9c023080023dd4b53ec235145adddd674065fb3bee1466c0c2047bb3f57fa7fed6be35a24261bf2892edfb91b7bc18ac6da6b33a4f88f60449018a523a1499f5685318916d584418eb18fb9d28240548e6e389d631f6bd237839b217a32d10c957f6a1c8a2cc4029403d41d9863e42d9961e784382ed97a658ca5ec0431ddaf45412293180734b1d61f05c732e640fa06c59b2f56aaf84a05ceec82b3c8032944b1fd94e77778f79475032b19d73e68e8e3d7cf4b0511cb1a3ec9e7e4d1d69f715a00fc449a71d437f7bdddd63e9c5d0620e10fa40ba75bae6c4d08d3e9cbaebb0038de6dc80dc62284053ea4
edd9d7a0d9ba0565a9d804b646571f23e6de1dc720ae0ac99eb33c0594dd4331b4114720912057144fdc6e53aa9cb138de096ab4bae9452aaa273543959ebcfd04e7aefd947ec64fd8aebab62804b235877e45aab0df241f6c8f101c4c707ce5a42df446b1b5376232d7ca7d63b59748b87d645a65f67bc3216b8849961992c646a9f890239ff25cda59187293652a646b7362abdfb778fabb95294a4fe624e7e3e9f91d993c3f7729206f172152dac637ca79644fbc0c88a64f7bf2629f02713c5473207982c6906471e2616cbcf47264a89db082713c559e1fb2de7acee598cc8d44e33b12c5393919283744f9e8165faf9ccf87c3027658c6c47324832381928cef2f1eb7192fea4f28bb65223cfd093ef05001800609992374630ba1eacf2a91acbcab37cbb56522a63cb0d2a3eb8524fc43553aff21ddeb0327e3596f1a3a5522a371ced79bd60379c0a9813ea007192ce9933c6c05a21f77f2b608eb6547efe97445ba99feeb13c7df2acd5c7c96964a7465b397445695f8eaee6af8c1f2d29cf5f1989b87c2cbfa23c5f45e429a281874657f43febd6d6528ec2cc3e5b67ce2a656a3048682dbbe6ac159065069ea757853aa5b76e23544f6f6d8432fdb03bf1149d441e9a93a91b607ff719e0ad918202fbca8a78315dd550a584b0fb58ce7caf3c9b8c01257793316434b99be490b20d2bdcdddd9d873667ce7726ed3c79869c2cfb492dd327464f6c41bccc439bb9ec9a411b60a8c333e4248d4ce6fe1147eb2737fe46a853bf45c7be93d644cb377f2e2ea18ecc921f9c23d3fd09d1ad9551c6f699e04b41a6ffe9d011eac82c7192be4cedc8088699e918d191790be3dc202c94bd69456a2b9f7aa76599da6d8c2263cd750c10302dc6e8c85552a971c7c8966917b86cd60c4dcbde62794a83018299d1154dbdca370c121206c8490ac457f43b31255a1166e624ad63ce0a94522628d3f7fa4e66fa56dcd9a9396984532e13445b55c6c5856563b60d510c754ae1e7f80c42e694bc019cf1a384d02953a629c8fdfa3df8f64fb8c62c775065ff6ec7b66354d3c10445ec724b6228bed072904030499ce5956896a78e1745b1bd68b029a535da5a197754ee7516177d4591e32cf0f7a22bfacdc209a3c9299a858353b965fa0497e97f386134eb7b3d0ecec7d2d62b8a32b56369816cccc1236da30258fac23ec9f3ed18796e9167cf070000421def03a045b7564615ec452321d312660633a32dd57d3c13caf43f9224572b18a04c7f35eb96b941b756c64f054b91e9f7e241a62fbc050e2ed3ef95bfa6e1e49c0593e9e344f1d64e0d474aa6e50ccb919a8f413abb4389e2ac13692465e63406e95c83e01384581d658eb20c929346646e3236999a2863e4249de9c153e3b1f1d0788ac6307009338399e5196d05d6f6d108cfe82c531ba0d6ca778a67c85b2be38e0d6ca546231620b00d4ce5cb6f6603f3d68e914d0ca42c36b0dea98d4f70e75f82cb1eb64c55446b8361810590466e12db700db2532361b0e34fea776a752cdbc89629dee12c2f08d60297fd4401489cd552e363a7d636331430c3e1d536aadc341a189013430c1883ca5deb510a30cf100fadde3a66be055c14b3580497dd0394110418f203e4da267cfa04a00d1965ea23cfee012d7f3146a65dd403db8c93e1e8157f3c64ae0cd8b4d5b97a06ec16c019ea6021cc04c1a1d631f4bb63260eaf165a0875bc1b99c67f040323e08b1db1a2d61a8655a656b651adb653bb39cbfe69e7e6240dca3b46b62367d9c7ed4071961d776c4ed29da393d83b46a1b883e424ce499a52d9c1a9883bb71ddb4eadd4341e9a006c13f53e283ea1951facdcc106cdf858ce8430e4c8f5563d3ca1304da954bbc23283468dce53d9b8d1e28223878e176f6954e105002b980240ae603e1676c4f09099e9996fa7e6ad1bda8e0d6927477f474ad2135a107a326da3a34c5fb96ca328499906e195691060997e672b0f8d67c8597449a662bc4e3e8174f0f54bdfc856e007dd4e930f30fdf2a59d351f74d69ce324f378044ca7531f323328b3633b19cbf38f643ae5eb172f5f67b9b815ea5e5d4fe9c652ba20c9255856d66c17af935f5d4932d14eb9b6a9a822136d25a963e85b5451a6365323ceb2638bfd462ae5da4695e99358c552a3632c531df94bd034a32d6f3c39f1157d7a7136cd66645a3694dca526997e3923d39f49f21c4f4e32fd2a968a3a86be15c524ec2ef9139f80cb1a493217a0b6994fdf895cd648ca540cea182a04f62f559232fd12c72bd35f71a292b4f25a29aa0184e38523864386e3850396238fb129be7ef9267069632c47a6ca65a4ff52adf7353e82749e45993ac914ec1109f822e51999beaa6d2c98a92ad3197d0b44dfc68e98b434ad083a496fe75dd308d6ead55aa994aeb396890ec784b575093308c290115094d2a66d03da34262a766bab918f76b70f70ad48ba7b0948a6f8ead7f4206b7a7488f1bbf4
e851e3ad1d4e82bd838f525a9fdaa794524a29a5b4b6fda29bb681707f695fdec5d7dddddddddd3d9b76dbaff5d7a0ee861dc5906440d97bda0a2b0575d79b5ead45af73dc75154b9028a56d7fbeed1cd8b601929eb91f095ffd7e29bf9bb608b7b4b44d77ee44ec0453fb134a77b78539d9efeeeeee5ab853f0e7dbd79c73be179ba3d84d8220088e3021fa03acbe2724c50b8ae20141f166d73d99bbbbfb5bff2a8242528064a7ae52777777772153ccf39b2956635e16292532b87d53791852f3cfabe586e9b23f5d7684812efb2df4676e910195e9a3f2cf07ec4829a5145756a54e3acb67d0b309eba0f5938f3e917084716ea2170fbac02ab9895e352882c3dc442f209d97f7c537b2cd3d34252997aa25be931778aa545d92d4dfdbddb3bf671f4b0554a1af5fca4acbdd7f0e346d5c4ca0db948941dbcc9116a1c662150aa01a53b9378531ead673e81d8802d11785c1836f4a21755df73e6b1bef3bd477ef4b34ab1669b2fb0e35d2db97aad49778269dc672c78e6ebca9119bc60e77a5f1bb0aeb98ee43295a6e403d7e3cfa801ff55d64bbe1721f4aec21f53e8800f5a9b1e506fca94f8d3eb87a407d0a352a69e95c5f80bbaeebbaaeebba6ec66a826a7262a0810110c8f184ee2f567df7e4ccbc61699b26e2ea1e2512715558dbdc68aea34995567290a8e64b35974aa552a954fabeef134275c4fb886cf0e453a2c364192aff19ffebff96052716d42a64440067aeff53d96909ffbeef9bf9a8285eecd95fe53fe32cff9f74cee4ef9b7eaa6f879a6c1ac1b1228167ee3b962aafb3631dcba76349665701579ef0a5bcc0a5ef69ad6feb7c5b85607ae4abffd1a01bc5d11c4daaaf0aa3af6a59748b26ebd79106d155fd5a7794f475eff530ee542a6b8302bbdbe68d8f34c8b9a041ea0b4cdf3f29c95f0ef398cb7c565454b3dd70b9299b404142b4c6e990810ed05675d1603356539313030dae1c4fc00e1484555f9f9c09f29a0351176a5aaa806acc63e29b7d23019acb09449e37739c3d2de3df4d84e02a7bc8478f1df9c525370b7847b21bcb193be680631d4b159ef59ebec03387f66da410a0add9d32b7f1fdc914e82006db50b3781e8189f49c689640281c4737be4fbf0d72fe9e0bde96fb85f1a7d28fd7dd3e883929dd25fd3fb70df0b1d848554b42f226bbe6a9d29d87c55a34bfca56348d9bf82dd775762825be877ded142dfdab0bfcadd67b4203241945cd62632011dc2cd9573d69722c0104904f081f24dd06bb5dc9900458c9cd54d5020943b7f0ec5592d04f081d22fce84af8fba1bba2dd5cee55fbcf55deae2248bca495a47f0085ff03ffb9e4d9fe3f707fc01ffbb607f9d8976d4c59c3422f6cc9792d2689d9868d67ead8f5a01ade27d2beed0057d4a06b8b4b159699364da7da65ecc88b3ac15eddca103472bf315fd16cf4b65814b3b8b390b89b7beef704e9bbfb07b3a11b022734ea7b4a9171fad2dfbf4288c2ec8279e7345e97f993efd81bdcbbeffb04f5fe536f408acb57608b2dbd910a2218a72131539ca2ce4262a12945dfee5ad9d61021e081db9f7959d4f606f546127edbb7d9620309e8f67e608039189d4732ee0fbcaafc7fa3e89f884e1fb807d816ddadd83585225f0fc1d683b0e66b122af5cee762382d55ec04fb4cb104394605694fb8994b0e51e8738cb264a91832838714bb774812e5ea204babb974cd7f499be12c87007847ac41785225dd77560981482f7de1b9e92a020c6099c61115018854b0003a50841c14141083885532aa00b74c11ea454522a380a951595952f6678856585c5e3820442c80e0514b0cc609951a300a966d0984183ca48e0a49280c94d7a028d1a346a10910008a94659595959a9a12222814c6564e3868d1b24a0e246cb8d162212f4b4b8b4b810f92411f9e0ae8fcda7969dc505870b0e1a914f149912f920f131e203cb3e338e1c38722439a18b1ae559a29043470e1d44207d36590784aa84d7f1a6224c8450908aa8c88b8808a07bc38103078e27f440870e1d3aaa288ae2a8021111b271c6064443e07e8ca45c93245cee2649b6dc5fcadd24c949ee26494c72493649aa410e73374902caf39330fde99c73f6b7cb9ffa9c737a4bbb48ebd803edae5d314845f73001aea2ab04b42b88e9524283373d220a2cc2235c1105114a532881e4b5123a5be9d4919b480827b22a3791102a20c267da074756a0e055e4871d24c1401551bc888a500223a4d0388802099bc48f0d86d00225984802a904020a46788083a2245cb05c264c237030bc8b9ab831598218b1237a14e044104a68d1449129a240a1a04245104932e4822362d0c4163d5fec80c204155c318327385041dda1c1c91f5caa6e671f81fb12442c045c9a6416d6312859db14dd2a648db04f32bd94d6264cb2272659c7d017a3804bd2d4fea8199336d18ea6b55c8cddf2bafb353e4376814b30e399db59957e60642a6a913f9fb4bd9e521de35
f73159b075b5891a958ca33d4c14d70d71f4cb55c579cceb7827af73ea596e6647d930f7228a6e2a4745479898e2f6405eb83a28aae6ab56ffaf9f63bf7f84de0fa2a20814b9c2b55b98bb33adab5545abd5aa2b5d2da42c73a2fe8659c2b78a463e6d3563f61a67a99f1018411480211a5baca2ecaf5bd8eda226795e0914cb343cea237c8f5adcd59f4e98fa5d92097d628d71ae4d21e5953ad74c5f7e127707f8d9467bed93ee7382706be7eb1623ae0eb97b29b657b747f850bec3f6349276d0bc5aa22db2f411c64fb19a0add3d8538b1c8e57646b3bc67e4f19fbd231f6a7904fdbbdfd39f309e42cefedcf24d3fe5c5293cb29fba68c9c9f7fb49dd5b1763869bfc4b47a2747e58f5aeaf4e47597651ef195fd3965375c7d05b36fed9333d63496aa1dbeb25f126b7c88394e5a6fece1a4edac7b67ad9db26e7681be5a69155173d2be7e2955af4c3fc7f5ee58995426995658ccdaec919016dd585626b3c62cc4a9c8421cad31eb184affe26eae24546212ca54722b30959c4a92aaa6e0eb9792855aee9e767fadb5d65a9b524a29a50bc8b7bf651aad8caebaef1cc447d876ef9d9c9c6adec2168876f56bad6dee57d34baedfdfd7faffd5d2342b4b4d72676530e8f822b9eb64b4a5f2e9543eb734f25073a9f25971f1242661cfc5930d00b8b6f1ef56108572f70d93eb98cefbd24528775fae40cbdde3f0e95c84da6646eede85d63636778f03d6fde3ba15682b74bf42f72b74bf020bb5ea17e9aba106a763a575d65a6bbb9c8c60a8f3bdabdd87aaa1a6a652cb926bca5bc3209f7ff9a2609834f48c600462c7cc173be69682c0007e370c4296a9dc414a22d90b83605c43e94325c0544bf53fd0411004c1d9f774c320b7637a9a4c27fbddd8a3903925b7c8609a99889ddd103eff0fe5419c3a0d83d03a9b511aa4da5a6b10db596b6dd7755d17c4c35accfe7bcb93c6b2d39c96cbcdd99c612df034809785cc293d9d56db79f75412c9f4c7b2cc35a81b81182f8fdaee186bab779d7b5ef77545b0e9aded6a2a08879ac5e1459fcad44e0ae8399d14f04ae58e5aa98f92e4ac19b093f5f27b0129c0cbaa00814b9819ad62e794f8896538ee608332f8a009144b3fb8a69ac0fe2abfd29253f7d251957178754c8eff576ba8f37df7f1d8786a3c457445c1f7b7624edbcd99437968ceaab59bb3c2aff10c15396b3a7196118f8de7c85975e4297292c7899375f218f1d87868277fb0bee94ba75aadbdb57eb704661cdc7dfa385070643ac769d5f927823e7f2bacfdece9f4d6765d673bdb95c49d232769096907b7530b02835f36cd3fb1dcc10681a38c908f98e5892ed48199750c7d394959e8c9f8890e7b814b98590f2d37b0bccaab8c3eb4f8b3fc8c94caa73e35f6d07243ea577e65f4a1c5c71f1a9ffa954f7dbb5456766c3b3577a182938726b6c8305f2d33cc579ed9f38c29b87e25898055be4b799386f8c1dc05fa33c4a6abd71170d94638dc704072966de1e0562a14a44c656a2a9f0a6f483d8b4c2d9771b8e180e424b63db61aac2be5045c431d5411b894a9d95adb386df118f98ab67868bea2ef0394849e9914599418141852a63249f9494da6e64548991ec960d1ac1d2c648c9ab563d413093629974bd5fcbc5b327d60f5bcc0e527f4097dafeff5f57caf0e66860350db4c5b2b0a0f019732b5594d32b52953938952b780c1b4325546464a77f2199f58ed66d866d84e28afeb6e879291a9c918f92803c3e19f70c2b03838cba202330baf77efb416270c0c33c360e94399ec298419b2483033981bcc0cc609cc91edfad5dec065fad7eb6ee0463070a703ee2910a380accb41f97bd3101cb9a78df5aaedac57b6489335c864c7d10a3c6186f24e0d6646c3203a721d77b0b06cc71d5bcd0725cc10522cd337e19e649ac4c7209d97c008c13081a139ab659ebec8895186b1659823985bf68cd44f9c640a8317af7b635ebc320c108c500a84997daf53d360ce3abd6704aeef9e3b7187eff5f5cc6f02dbd086a0e993eccb64ad1d6192c02c99c120014724607f958b0ab4a38ea453a9d2742465fa4118873a286bbd2f0b8cff33c24938385fd1541059dfd2da66d208325e404aaa250c47183c87197096673a2b40aba7346bc20d327d085080be0ebf83b39a0a27d832fd0e78cb5df4c39a24e4acb2972025531926ce9a55841748ba80945a588249c204230377384967bcf2cd1f0b2a9732359b8b3943e05368a32d95a75934491fecbe9bd56515b1c5a6e34bce11c6a1327d1c1c6dc904b94c100e6ea50638c8f39ebe4c502a485490a8205141a282248c5bbe2894d8aeef4b3981713359080c395235c0253f994cffcd39bf1cf74be9fd3ec72350e42cca42cd71ab9fe3e62cfaf7ab98c314b1115fad73cef9b180a14e57ef9d37b504e31c6ab465ff7e0e36daeafe7e0e37dc1315f2fdaf73b15d28b15d2c56e4d0a46d4a0f00a7012881a927707d162b5062cb7c54
cb1c7fece3f739bfd436a59f7f2a6a1bd3f308a2abfb9dc84388ae7ebc5ef28f178f2077e1fe144b1e6376a17cbfe341cbf779dcecc80388aeae114a1e40f9dab0a9dc7099e396e3465bd3950397514fc31bece331071a5ddd47893914d1d5b5610393c0fe2f349d97d312300f584c128f57db847f9fc70f6d53fa98241e321eb3db369eefe730e4acf9f77328a2ad896ff90807c600b358819a65033400d73662be9fa201ae5ff60f66f94e32974d1f3346b37800397979c820264acc12f97e4c04683c64d05f0cf100725604a66854910a45e4ab42ec01b46f8400aa28e504beff99c0b0bdd0f235893972911c68f3c67fb810486a9b1c5ee432c7ed06051d37c8f7bfca839a28fdf285e685a9202c644ec9dfdb578e2d9af542f31cf7bb592f553479bfcc71947314c9f773c0dac6f4396ea28d5ac7dc7fa9c28651be7fff85e62c1b583479ffbe8d1fbe2f73dc6e63e4fb396ef9be9b586a356aa6d1c6eb630961d0711fbc5f6221dfb742f95e1d21734a0e6d7c2b7c514b05e56272e49859c7c4ccdae6f4f763ee1547b45cbea71102620c5731a475cc3de53ae6aef2ad633993ef7d2a5e2759acc01580b5cde77b5494efe740a32d3781230f64c7dc0fb2237fe30fea55aa7a731872f27e8edb1ccea1c8c9fba631079a93f74b6349e6fb5f138e68398c9be73680be7e297304e5fa9f150e7698838a350d1573d69c41df118bc3d636ab5c8b720e6de15bbbfaa5ed95a37c727d1f67b574f5461f733e3168ebdbfe450297a8af2cde42c56e46c5322a868a39cb8dc88ecc9ce52824282054125490b35a667dd41267752fc48496eb07e9a18c2a4239c915e525ca28d7ca4337823982bca1995aa9374ec24dc44451266242289757e5f22f2d34dcefbce32f57e93d881ad77dd875954adf5fd1641233d092807695be87a6b93c70df03a5a7c9f5f5da992d3dd2501a3b5728ff37ee73b2280f9e8778feb934dcf197eb86dd584ed875ddefb6f9b95fd2f73a8796c4a6ab0ee4eebdaeebba0a0cf1fb2bb9c39dfa7acd1cbf63d35545e2eb66d54afdc6bfeceee661e69e7588d7a7fd7538a910336068c1efebc02af2f0de7fa0d8bde2406904dbe581a0288663f70a032919704b0da70f9f01a710bc5fd395c9f57dd46b17480382a2123aa582a292bbe3eed8f142ae3af7dcfd861f088a4a664cee2df13a5b29c5e2eb1725f63f10fc28088aedb2220f32d4262d086ca6cdc3dbff4a538652a6a0d8aed9abee2d3876a9dc6120d34f652ab6d4103ef80c0847f019d0af021116d249a56f5c26e222e2f2ff84cc8b7bc0a7a20a38c0423a290fa1b95bf0de2486209c81265fef6e1a9e4ae3376fd77594deaefb4425d33aa94d7ca212cf7b196a36fde7b39ac60ad8a75fbfb17b350307b22de59b2be0752e7da52fa7e93fb1e7f3bebb05d3cfd000f44d6339bf0fa462bb4ab7338506a053a6d0aae42b95dcdd644f77bcedf3da2e645fd396925a95745f451e3ab18e32d8ec3bd39f013025426a720586d07c9fcc445c25fa25b1a7f4412811529387d016baa6210c446ae75219825c675899ddedf674c70d78f6bc5a3d255e3756161deb1134a028fa551f4b2219df50418afafef62b100a9953daa66bc7f413a1d353f0f58bed7ace80daee3d6bbbb7e0e781b67af3bdd000e094f1af61aa05d8730982f3c67a68eded9e42019494edd3d14304f4d056867abb65e60f51dde9f990eb5fdb0408e7611022f93bd037e0de5504d8a7a101ec785d045f1d67935686fa7af120637f882adb1eaa103a458a0cfeca73e4a1eb189f21dd800ba176fa0d3eaf2db55215dcdd958092dd3d880525f04b63010c7c0552dc9dce1767ad204af6ff32720966f7ce51289410ca3d5486931572132911cbe1f6982507a46aebd3a6c1491b141d9c9cf3c61458e5083ce7e8137622039713e63f6bb98a3ccc7c94694304652a36cd0736f3e2abaa999a29b8549592e072a655df5f0958fd3953ce5cec82597070c428fbbbbccc4c4c6b6906b804b3101fe9a4d764eff198c54505e2213dbc7089af114e02a90403a715e5ccd43ca9c2630b1d69b0df53e454ce711522f08ccd22268cae56b71bbe2125c172ebd5e176e76bc8ede6efdf81ae4eb3e2ebb12faaba9c36daf21903ed81886b5e717e8976dffeceb5f64b640f77bc988e5ee44dbcd1692ee45229022ea78dd6ef99346df30809282a3fc0540529707b917be461f23073b7ca0f70fbb4a940814b1712722660ad6b810b4e8f1a17a1ecff644f0d5ced5242c95a69384fa101a1018957f6df39f2d76c9ae3e8ca63a0ad966eccbd0a16d68793ae9a216b766a7c34cda2c69aa83123b528fbd77a444989f38d817e0b59ac49a9a8ceec549e22579106fb56cacb494fea981a664b748c7fbbda5563a92531d09bcbb2ffc4d3565eb006c5e0ebb12e5b02975765081cbbb1582eab0964ce2a1de63e3115c48c1cc9feb346574ce09a05d953b2ff05c95bf6dfd1a369b36
7d5e1075cde4bef9867cefb7ecc429be2fbe40a707981d40b84fa17c0fee7be814e0142c620db91dd250620fda272e92415ceaba2647f9717db3d549f846b669c54c2b43f2c2ebe9aa36aa69ca13d7da607663182105220747d478f977eaaeaa7f39df49a8ef1aea8ab95814b0c6a81cb5abb31daaa51da6a5a6b352f52bbb3ec37287b91b3ca5a2bbba2ec8428053dd91d3f716d5ea45b1e4457eeb5255a055e44769cd792c64e46575d0e77ebc612cfb1d6b27b106dd1f6a08c9fc065ad2d11b2419d2c3b023ecfb10e955d51edc6c6c075a808ac9706eac067f5224141115010f5a089e496a491b8b8ec10c17f21677ad4dc76c15fe5075c561528b8f4d994cd19904b18b8646151054501cef4408191ec9f53643b0878ce39a70f1188144cdfc793ef237f0e90189a8e3de88cca125cceccbcca12b854b9dc54d0514e70a4a4becfe9799ee779de9c151cd283bbe0150c022eeb2bf6f578ef307721e62ef8a7a6804b0c66814b87d15aedb19cd861b507b338cc482efbbb8f1b59824bdc19e818ef6ed47fda4a3c6fb75ce2b19cb83c963337730eeb465a1b0397b5a7f6541858634254234c827897fd0aba0bee827f7d02f6dcebf52ac123a152b9d4e604e775012e411b3eb89c2f95ca250a3cbfc9222fa22364ee83e76bc2dc05ff962cb00d1f5caa5c7080558e80cbf9a22f57f24d5707babb7b963bb04cd18e438674a0878e32917a6021326249f6efc09c5dd7755dd7511db0e72b8b1de8c1491f1242e1fe13e9fd2f38732ff8a60bc402836e02efc930674d98cf34022601fb2b9760165be6f72f718b28cafe200bfd20de557f6687cf0f880bfe9d17b8bc59670a40be0aa426a824ad5477452c15a201000000086314000020140e8884a2f1702c4cf4409a3b14800b82a44e7a569a09a42487610a19630821c41002000004006646061b00caa1588870fb323ef000dfb810b78af011a481acc80dcfd1b5d476b6e0b8303f80330e15dcda84759e6db017cb34ce20a4067c2593e0a718033e037986d5c22c3d2450e4e4c5526866fd70407a4e7ee500b02a59cf6fac12d082d1c87389d5ed5931eb3d358d0e21448c0e2c600a543ad4017d9cc09d6cc7341962b2152122c7e9252a62ed6cd1a007d3a78c7d488c52f945f6a87a3b8bdfe19233693fb3067a09e0d871d1c13e3bee62c33e6c376cc83f3d5560a9c75e957be6224356a7179703d3ae6f4a8b4ddaa876bd7cdafecdaf475286ef28ce195a7ecd50ad6db21085102f979622c227c9b39a51e7802119ac260661a89371bc9e4a6ee0a0d65432bade8da0258d522269600007960077260b19cdf388833c02d38d9431b76126139dd8b7562bfea9fe77cdc219de17307ba87748cecbaf7e6e7588781ce825e6c4b112a24a4853316c5737e3cdfe279e75992dbcc4cef03aca3a208e3c7bc6b1952bf15923005e846e9e22452a57bc3b572d65fe30603fde73ea64f5cedd73fdb001caead3dacc0e4316657e24e18d24bcddc6ebb0489bf9f26c539627c4e64382467d301711e1e91861fdf01ce526cf4eeb07105e53eca3a5ea936841d25aa3a0b9c21bf7a057a36988f37285fd8e749a1bb7d6a7178e15f1bbaa9e874e74dd9da24bade750ca98675cd89b35be167bb4b186d47a6f68612fc932dba940c502f25b2a0edce54b0692b1f58f1cee8b75c592333fb6841a3616d3e17a7aa2bac4ef7ce497c69837d6a8f031f6f8425c086ed747488640a0a16d88871d4f36b19a58d917a2fde4b079ce6a895286dd881cdbc2015f6550e4e124b14e84cb6818c00dc9f6c0bf9afd9f3875e97eec836574b799d72e84842c0908a56ce1a13e7b27cada58f5033ca5585e11b6c9e9fdddddef3c3af466c609250a7eaab424bed0ab2246ee411a82645e9a13816230e76d7a48b8d1f0780ef36ed9a5cd66243da2ea785ebaf0b11221985989c7641f14875f627f383adc9073eff373f87100434c33ce0160520f236ba3f446c220055a882c4fc6b8f3ab0393d206ac810191d48d29b69efa73b03791495dc17dfe3f255a913a25f82f91c1fe7192289ae264447835d3bb469abf95631d1898bbbf5e3e8af12f86d248842093f45040481a097b9c30b0dbf416bcd2be32fd0082b9dcf16668ef0b4e5deb0fed2b407ca28b569b7003566a02244bf72e42945348b25f5248877b3f6096bc3ff736069f9ca455ef49de5186162f481be4c87fe8a605ea4449fad77501006bc9ebac1031bfd133f1d7c8beef72fba38a3cee8b8bdfc6da2b7376b21ef01b3f7e9a0f086eef84fb167dabc50eafe31f95c2210d55639daf7cce570f921023070d82106e62ec6314d5aa1bf85a570e5ecd4a65c3c3abd1fcd4cd6a24e8506f6e628de3c11c930b5e13d978ff04eaa2a2e8b8015ff311199462dc8cbfef23b040d988a9a70ba53f8d0826bb97a921dd8111cef8bfb2304a653d3e21c70216743645112ca93494a2521c0f175aa260abae063a58f843bd6f36299a
01ef9cc3355ad74323011c22d5e465f002208975cdca85a5e822b211f4640a0bf743731aeb7961834c25aabfcaa6c00a9d1848e2a02bd6c4a893dd894a5c92cf53b356fd4c6e64ce3512244e97c3244da92fddb8490a34201556f6f7a5ac56a7b3e033affa23084fa434823b97a4a6a4d11e143724ccf0346b831fbe79dd48a65ef0837292631be77667eb2b4a5f9f9d4818dee0176a48f8ef6b507ab188f7d9aaa1a41bb433e11fa5360b6dd171f3e1c8bcfb61648c4bacb46efa8bd616e6c77e48a5a31b1a9c2b2cb6ea2cc5b963adb65e5af25863d0e650fbbfe40ba030a28844fb420b07db48230a6b97b6615c8626c2ef8f554da83dec2ea0ce03a6bbbf74b7899c2aa89c42350a6b941d0907c48fb79c3b6de79460062a484c50b94d0f00ca1350a14747991544f9aab5085f9287fa7e652f0a8a6a919d2004c23e746f4a00384ebbef2b0a61309a91287c990c2caaae95a133775e544225e7dc42301a96339f27c1029b6c73f9441749ea39eb892a0ad01886162959baf5d2c9909bf42fa2ef1b36d0424c3e7693e8717a2ade1e7f050610ee206560d008d010329769c457d1874e77d1d2e2be3b35d39d25c0f7b8a19dab51f80e459fec9acd1f539b9c180f8c1c12f565e2def42431a12562fd13283784cd41a43a2bed0fd5ce979e5ed2f43b82e6d603c32ef91385da61b4441a267e1647d81fc8853d1b04525a202ef37d22dc2c51467b330459abaefbfb7267c7461f189c68e98c5594994aba88941d62e21919a871e3e4fa9cd51aad484a261f6bc045d850b73620949f1ca67f4079948ada722fbd82f1aa1c687ee10e9811cb64bab7e4c2ee59d2c3aab7bc16cd0226d9e43a7727db356bb879a57c36155f000a04b689aaa0c13f39e39dc9ecaa34224f4d7db0be6f3bfdae32ffac7bb1061292c3b5a5bcf92833fc9500f1e8adcbb2d68eef909e22856b9830767b845e0f8b733b2b44a3a02fb1b58c3cd1d14fdca5c98b59a2e39e471f549223da8c379557412167a8f4ad0b06a16e54aa80f4199b01cba1cc53f80898f13b4ae896659ecf79eb7d6a36beb7e213112035306b4686509436682afe15f49f4f2300ba3ee18084696c089b7a2a19b2fdbfbca95dbfd35fc4457311d82ad1b21728bd051203533ece9c21e01b8e0c2676520b354c46796467db91a6c53c06dac05add4f652262930137d032b885bd545803365285327fa33946c0d990120a0a761d81e652e0332e8b5df407b6a03827893ae631b75fb1fb8982b4d8fcbfe35d28b903d33024fd4c14b22779007f34424b95b35793013aa4d456a82d15068c51e1a0084c15417d220a8a494bb051d5c88ae3a435ab2ae90ee21872d9295a5ca09274c854cf70843f543b0d2b3bfde70857299b3c5dfd6adf0807d3c153cb6f32db40904527b4eef81dfa80892fc520183e03be7ea3e110fe86feef9eddeaebf8724073686f2e1ac135fd16ccd67a1ec1fc04754dabd2a021972223d31c00f0816564f1b587b153bf0275c303b2e09d824840c6ca842850481e531bca4e827fdd1d81d49546649b7a9c5370843f3049de5fecff43d7d2b4a9f1be9bcb8c50814ad3cbf51b1fbf28a105bf47fe581308fbfc4664cee36b46b611e0ba57602683b190d082fb27a64dfbaab1f78b64ec29d7fec76e51fbb4bd9f3d811846ed59861d69c15b9ea44b5f07a48f5048b22394ee19820b883200e6082248a9ef7a132ba344f8080275a2b223e09fcd03de161a625db68b66233645e61cd42421ff6a3502d5a4cc77fbb9c99944fa6446b4fa09f92791e4d2348749abe145917036ed0512298c69d3692c2ea76188110932858c885d5d6b60f7a38ed1c1043b22e5ed79283ac122b20e972839739b498462a292c882e468234a3f3541118ba358fbe1e70c274a6edc3af0e3474db170d50230d5a9c580d59fafdc3dab006d41de324251c0986a4e66cc2f00aa05d016d50b5d10cc02939f95d7773d79b089bef28a99faec561e1a87139d236255a3140a7b261983f9ed60c0d5a9897a7e9053dbed5f6fad1115758c2f59354b4a799f66e851914fc641f9f3558f2cf093d271149d2b0c5f0f5ab86f292629123998df8c6faf78d2b01fe70f37d3522f0b59bb4326e0c8f87da67e2161369b28836b22ea8c302c9d333792523141ce4497f66bc79229827ceadad66ebabd7fb5d7fca8c9142c7b23dc05fc3cac43b874a048c2e4bd921aa44b89a43719b25f34ce068c5439e2bd96e2b66eddb8bd3dbcde32841c7eab5f66ffc4e445d5a2508d92a6c12284b5a60a0c4bf783cc20bc100e5e3d7a457a0e4bc49db0d23c4fffa10727c4161f6f1c2811382ccb85815ac828a066ea266c9509b7e9fbb41f60101377f73efd98e59456d18a011c18cd9cc00b52d1d7582dcbd1c59ac875276993a51d93c66baecb6b26650e8fd7b654050b22a73cd81ef2ee897d04b1f19da80836ec03d55ca50c2859ff568f101fcfef5ed94d6b0ec0532ce7e937a4378ef
221623c5690249a8c4371775b7fe0a6412d9404eed0a617cddada1a3e940662001d3aa8cfdc9dff2c65efa905f33d29a6bbf5ca4f1001834f3e6e94ed58789756781ece95bd2e4035e6828999317bcb9501df64bf9dc0b29f19b637942cea6f5ab80f86bd5ea1ade68cf540f828afe13e44a598827df1d7631e6e561bbee7cbb5a8f3e571bed8ccc6e68f0a4bdb19c8060691147a548cad9d6a29b58899ad3b617c49c28b4aa5046858a3cc137587ddc4bfb7449e85d841affa4f29e0082324323f04e3f33d179a638af8482dd82a92766ede08d2b86e3039d6b9ef7187b9297fac39badde83befbbc82b61f4f80224eda74dfdca6874128d40f0e5692d88f0f93201c29422eee67e48c67eb81c7c8d7b3530e1c66da4e8b7b34b5095c8ea3aa3a08f6a144171ad8a6922ab470b7305b3c1e387b954f77b3641102b039fc84d3292696359477a649e2d2517b0c45ae720139484090244db750ce31dd5b2c77042bf423d3d02db0142ee1cd0c6ba10b6e25c9026e484bc4b39d6b7afcf1aa8002d9fdbcc6802fccbd696ed2635be6c8667c2b2e5b0e30982c3f08042cca47ed9847720c13cbde8016dd0bc1938bdd5391621bc0bdc0dfda2015a8eb112c45651f078ad294bdb3e99d04fff95a09114f6cbf8f1ecdfe4d78079ad9108da4c714af8222da78f32fd93c7f0f265cb09161a15ce96045631c23d1cfd18a511b21a23be996322b8bd014fbadd21355d4b3b00b7b747d6c391b52d49379ccd7174ffb5fcc8f0d2e3c413e8e3f41b71c0056ad397fd004492ce98ef442ad2910933cfac428487fc706f6e06907a66527fff1cd7928e09f4a016047eec0d18725fb64a2d2ba2c851967ff8ce909656412c7ff77e414fa0cd7f5ee68badc741f3b039b40c97a0841a1f15c3b7ae689dfbe51ffead89d4602bb999038fa7f08b827c639d780122786ac1b87e3a3e807e3118a7fdbeb3bed443f0d4e3fd89afd395621de5ba786ebb64d7240255d126a43212cb905920909743ad905178a46987a6ae74556bfcb27a4131dcf5e7711b1fe2596e8974f79ce557237cc358e8ec24a81a1cd8a2ea9585a17af76576dcb34613beabc32e7a030238762c2887c71c78d07694f0788f9d731792b8b5c90a8317e24095f922dd1754e3e4898695c42ea92fa1a82d65b4b5c44119198cb7ec3b87f00580c493c01a11ff8595df7671fd7d3ad64684c92821ebdb22dfc8f0c586710bb5ca00f9e958802dd4a26ce15924b337d6e1bad814e1db0e9ab76eb7859565d5e1600051825ef0cf9957ef36510aaab6927a9d6746b164fc36382d41ff85a3929f449be00b326a8047fa9e630295871f42f6bdbdb888859820a9093fd6f276ff0a207045e9caf930dce1f2f02109d70ffee5718aded0d66718234b83ed8ed21f52008ce478c7cfa36dcfa454a588d8b490ff5539494acac9cb154e74a66f9fc29242db36e11a2833afa991b1a4b7dba1d5c613ab18b83db503f6c855ebe959c3cb35045dcd7fb07b54bb9d19ee95a6b1e633288925b1922148bdb6fc45385608d5539924baa89b88bdf4bd1a76f6b4bb983909fe332878767209115996ac9b41821853c004b6d566ac2b029a82be434bdc357e953bd83ed47b00a0d28005fd042eeb1b4a570eb2133ca8e04e397565c9c05c13a35dc51ba4f01357338e6e73555e13cf4505916adf3297342255c3d6c64259a051725cada1de24eca9f4953ebd0b73c273b60e1e28274347e4659bb7383b32ff1ff55c572c93360fb58c496865c7bd791ee5807e261f6b0c1d3f538ba0734ad031aae52b8acdca2f8bf8ce5f9d75b02b869e51d579635487cf44676e2fd2b05ad6cb831660f4d6421fcb884e1689ecd4fd6120819c2701967d8ec66bbd7f7f910e001cc297379af214d8b4eade3d91c6ace31b53cba5e12f2cf526fdfd98b3c576501ba44cdc48eb89c2493856ee46fa6ad57cb101b7ac1e1b24bc391720418040e3b632d62dbae62f082b2cf81c1c8c5eda4d9715e1af3d1962b366fd77a2abd2d282fedc20ce8c894c4bf3b447c21be65cf9a5deb137c52b6de19cf1c6db96b95152aa52cf85d813c9dbf57a937097503a6882dfd26c6dfeae9cceaf73495dd63995dfa29d559f8fe31afb549e53c8c36d8aa6af215be69ad2a52a55d7df4685d174e29c800aa16b5e494e9526ea543c83f757e455a9f1108c62a2842fdcf8b9f029c28fdf692d5b24fd0a8ae5e369824da8b5be39517d215321a674e42131bb154069006bf7f03f319057464d3a897727d2bf823bf1a85eb1983ff5a97ce0dc976738f6f4cd88f30ea0808d1d50db054cdc70b6526d41cbaf71360628ddaf8d8957ba2013fca28b18cf380ffaa0dce17348df5cb815b8acb8bcebfe045f31919fec8b3b4d181b6fb610a87b1841c448e40035faf5eb4041bfa0d480e96634200afd485becfb74e80408887a4cbfc6f920958b36fa6d1d6cce8b30043fe790eb2abfefc1c101bf4d1d1052104209b757b661
55f1e92af1a62d580a15260d2c848e0d7315dbd7f28223b34809f8bb0063077e9bd6f2e9d9fca13d0dd02c64c307fb8abd3d8d9dae6ed7df7968d82c62cd76f10d0e0e16390661c416f435ea6d3fad31c0e5d21fe9b2316d5100558b2cb3965d708d80e519afbe4bfc4a658660e9a78c32339531b0290f5f305c243896062749799f6417a4207f3c761efacadd1e63bdfc7dc2aefd784ec1a2ec19b581acb9211f7453eea6c6e7ab37894081e488f1d6f52119999adf1ba79251653c7197830281ee90b03ee23eb0d60df3416ba5399c4b3eac79d21effbf3bb2468ab35eb704af0b509afa96c3787037d4f52a1d9045b7b53101642b217296e2aba57f54f6ea011e3882ed56283188e421a9bce39ef0875f53a3973b949baa4af7f9d6446a62cd23179d0a74288e0fb94223ba7b0c31556d93c9c3a08c21e01ae3508651b772d5a7bb3a557f03778ef23c1a68736f4a0e795a32192106f06a1692e3725172ec692ec184440b8aea87416d99d93c67d0d19e2be9421ed7cc8c1af902d1bc0483419061f67e54fa06750d983ab3e66d7e47a87a28c7cca141820a9b49cb2e7ac1559e2623dcbeb8e0a208964b563a7b9e3058a1ea01e685f7bf05b4e4215e6ee6021c3fe8e78336a6871b04b9b3b32b0d0bde7f38abb07ea82dccf8ec931378515a57af523c6723b48d1afbc1c6a3834530bea3c7cea9276bbecb262fa3a4eb35069f2fe4931b5e1432434172a0eec4d31df0152ce58964ba1d06a1a496b40b12ae6bba8158ea7f544b5db3225852647c62e4a80cfc597f0dc07c08a80f8b0f33b9bb31060b9ea484c25d513e9974408ea8235e2e952495b28348c9ef5075e8a0ea5d46cb3ab9a536deb0e055bc08839edbbb928d95155972c8a609295fecb97b884b70c4f710f49ac9025a31ec13364a5bdf5b16a3ac687cdf5e6dee361a20d0754d6c13e2c48eac9002817e811e273e36ff48e4e79205396d1eebea8feb51b4fdfbf159226dfb694b70bbbba12df1a7df00bc96ffdbb491cc8178628689536ab1cfbaeba19fe9853fc8c09d67654a92e195c69e67a45a2602340d1a493af414e9a2476cf08c349821c8acf96dc2ac8991283b04caf258706991625b53569800a0f161d403edafa09fe0345d58b9a68cd780552fbad691884c1018c3b63590e36162f2d0340b79f1a3a142084f001632f41965713eec36c45eb314bb031eb9f46af2d1b131293ac7a977aad589e420af844d3f4edf1c2786d2af74cf89281cacf4d74c110c8c80f7306625b296c8aa63a4fa2a35ca748ad18a10e80e1f814878dc754d4c534217e4aa8474402223c7a2304b3441ecac4bc2c809413966eb4b27221598b0f03fbd13534df43c8ad13d1f0be389bafd680312a7b84c412b05de4114ecf37cf1730dd3dd27cc23792302e8902095e455080862a9d67627e66a3470c757fce93bba28d089e64c3faa4a1e9304820f0fd07c3ec4f5b7d25f5326327a86a6c8ddb3d6be2373aef4021275bf11440ded66f04837068304f3364badf7185c8def93c09f0f835cf86dc8b7e873440fac732adfa405bb7faf09c02bcbabab92a26b2a55d1ee418a49b45d4adc69055f6a7918bb4797ea3a519b3b37bd49862073eff268de6d3dab5dd5879e51a9823b12cb6efb6a20963e611e61983213f5f9709cf310939526ce4d0d41191343c913221b92d9d5b4f6b634cf6dd1301a86143eeeaca8470c5d908e55166c99cc0b1093ec892699974d98ac119cf019df53dfafb9e9157f5427ef9e2b600bf19cddc50bf9e3fa13c30aa864485de7cf9c609d34acb67c611c0bc1d66d7bfe83eab091e252b6e8711abdf13cdbf7e810e04da350420d32a410310d891d6a61fe053970a39dd8ac0e575a1e2fd6588f8fc20fc802b867eac5168809e6f4acbcb84f52c541a9c65b0ce3aa781ea916b3bb7f60616d1eda0c6c6ff330b0c88ebba1ba97eeaaf8d31574dc1162bcea3e5eb13c8099810baf54731660ab15cc63e63a537deccd4dbca242561804c80386c7141f737da06de2ecbb6be821f3b06b4702ddf84125ca2120c6ad3b964f01f56107e05bc16a3368ac8f6860511263ca3c48f9ddd53c01ece16f91ca8d0da4e1f900f2df0909e4772547325219ad7c9d6ba7576a0f9c2396d8fb6d6d24b982db1ab8bb3e2b34ecc2367c81888839466c4a68155eca218c2769ca880314730db20308b30a32d4efd33ca539e88a429b47069774a502112115299d025e47fb61f9309e312d4923d22260c5c2be6237812d52535c8dad94887153c7e33458f7de0f0d2cbcea14f286ab12128ec879237e8643ae8bad5cf2187fc79fe33dc45c6c32e8441e7d0f08daa2a05b5f7ffea1f0c78ed714cfb9c4b4fbf0fdddbf05a206a1ebdf8814c0330a4cbac821166882a1244850081acae5ef8ef3b37dc0d43ef2a50f4dcaf562facefe4c2cd2381b6073a75eb4a7acde2132ae518c3cd581d8795f0a8a283ae03e6c50cb3bb2d9694b4150148205cd7dd
c574759ae57c8611d04db5200ebc46257214208bc1847cc52256aff1d833a3370592843966eb9c1cccbebad9169054994b849e45577676ddefd2f26426a7c11c42b9db528adcf7de78a2e2116f81a231d224a18061758d7783d5519876649b2eeb1fda4543b89433eb34e3a1258475a7e6783ab99186469ab1d3e32f4caeeb0230204755a766f4ead2d66ad74ae68964abe570cab1371e14ecf35bcaf6fd6b440f349980ea6a8a61a3f00e51c83874bd2b502a9c4a62987b4d2cb5f0508b14ec46a5330cef38976422a33bd2dd4fcd45121fabff4669d3c9f2a1313d33c9afdd8b35455121ee83c042c403d532a1b6abd35a9f41b9c0c841625622636786f3d709a4cb835ce9c65eb503d332286dcb221e13153c25531e9a98c325e94dc60264f10e02dad02213d764a716e9afb8ad5c28a8257cab5e5bf51e81dda98ca09e479cdfbab984edf01b7b3568421a2469f0614b0e6ff8ff6d7453142d4e86fe009cd7e4e80c3ee8eb82bd0987999ed0b4f2fc4e146d7008d0b6a6d9579ba71f0375bcdcc47313262aaebd9ece4ff841a84603646b9d81a010ed98da5069407e690d57fe14b6223c48becb5c71737004bd5ecc2033e0b8ce43cdb00f3d3d8996c8cf9ccf6b7733f64fcaa2a30628f82e11b617c34e07e3b2941ae3677012b4a628c71bd582c073e8fbbe2fdd2bccc6d0cf8c956f4f993d6f58dfbc333b91511618a7040f2767b0fb75834229f6c9c3622fad42fcab0ad29beb9891910a4b1799bf7969bf5a31b105562a5d864a33c288072af54a3c0c1ec6cf8b391f51c47a5e4e3a02e19464a41063627a96ef51f0b118d9fdc46982bdb2ce1f8a05a7c55e7bfe9bbfde31feb8df919751ab4ce121a0edf355225d90412ffce09d33fae23d04b05f8b713eeb81074feeb75efafc96a4db2cae506549e4c364e9cc8c606b0f6deef1f0949f0595914c74a344e9e6cb495609ed6d5998c188a06783df3194963f02a1b2ff3fb380a5e740c14520098565e0a147fd5160f7a252d96d5531f896984b1fc3cfeb1ff7966ddaa1fe934ca583e045002e124b0e2f9165cab4e1423a2e4564a5b6e33eeaeff0acbf8cdb0614ee4a21ff3f64202e8a35e397a75aac32ef440beb302f8559d3ba5cbede4ec13432c9da1f79d07ecff9bdff1dfdf95c83609c2cfa5c1cb11549f387bdedc0c686f2e8d7e6a694857ceb8f5b8b5c4736914c0197c25d1dce374d99d43911649a222ed07b12ba6a640b39258b3f72547f013f3e78323e26ba1ad044a30aa82574cd87ba71f00b332ac2d84c926b7e1c4357d2d2b744df8584bdb571c7c8744a0f241b1a15c4b325729a7eb6d9658bfcf640e2395380906819bd45ee1f82c7dff37b9c7fa50d4ae3d1ffa6bea7add00b3bcdc7d39d336fb59c06c98359dfea566e5c21a94211444f399eedfd81013ef9c662909611962f924adf0b00dcc2f3d7691cd9be3c4ce0298ceb41ee440b82ea1a5c64fc0bea05adb32d170d37087ad5b45b8a19cc4103b064d80099ba6c1e726ac9c9e622ba344112a7dd56737508b69971e4b14162fa6021f5c20d98fa001682eda72bd69a87a10c1c46f9072fbc5c0cb55c5ca857c74829e5c6a4503a58f5a48d668f8b2e20dfe80223461592c8410c696db9f72fbf4245e1c19354c4569395e8660346bf98123ea9582e3a17bf8254a3cea1bc9ffd1ed6eafb68ac7b587d3158df4d30ebdc665f98d0c6b475ec28905088eabd865cbaa60bb8e8e016428e7f235067f87e1cda54f1f3b5e464fe313411f9b7bea82ed39cecb55e0c0bd8fba286eca360931577074226b02ee207627813c7b4358470407b8b8e46ca566777c0a58e4ddedc9224063b1f26b84c00a4a8d673ad402151a215af30ec6055fd9149dc78110946d6c109758a7e68650fcb87a1e123416ed1fc9492a0d7e7a294a170293b2a05590eeaa096c6c2b54bb8d7c428b11747ac5df281c0cbc96d152640cf1043384fcacf53f7c9a5652eba4748118e0bd60a06725738b451df27a00c0dc6662ada018d22895df1f657bbb3e346434e38ab4a158974d6f26ae0f25eda1eeccbd7b3791e6806258e2c2e692007606b4375dd77a026acb2de3ed62d149adf627b55c85ae586a40e4fe502e4ccae878aaff2a81b9ec0b6ea6f03885e373198c29b3902bef8df5eee36454692190152d020065354c17978b8d36f679ad279f4bcaa464f8452ae8b7d52d767d0f5adde11cbc06a683fbe06a66a186d700477670bbc105dfa0e622aaa962e548eb348abc72b110b62c190b96d5db6ac2b3ef2b0381a20b581615b1cb62b03f6fc85155254297b15f3ad0790d8716bd88ec9c16458aa340e80759f265f65c1e87df7f5dd4541281e95b5a323a4cbd60d8d2e2bd5b77ae165100096561247e578f5032b5b458b0ed7818e90520783889ca0cf70c72e4c7fb6dcef987e561b12e5ef981cb76fab25d4c44737bc4cc91dc9f6cf525b40025e04e29fb646f7582db1b2983f7f3757ef2a66f11
5cf47331b8432e8e4faf0c06f4d640841aad95e2c4a140ceaeaefec3e56018ae84354d8ad512ae4b8f48c57976e277acbee7cda35961034bd817829f29e800e11d531fa2c11ac93bca29c834b6d3d6a5a1c31c627241927b6d4904cb1cb1cb00a5b77e04d9c65a3c0d05e73f043bc6b061f95995b589d137444b774ca74304aeef9ee0366c270a6e3926b2c50d40c6636344f42792d4b6e1e0275f1841cdb7dabb1afe496a6bf49a32cc0525637b4af302e55c1a995cb864f41d7083f063750208a5d6cbcb2d189a41069e3d6a2f0be48ba941268cace90bf99404854a2f90af5d39b603ba9b1951c3e2a0265889ba1048ac084afa2b277ed0b8fd0ddb63cd99add15c5681dfaf97609a5b3399605c2339d5936218b81d8f4e94252e59b7ddb09f5c0cf7c0a4bfc19a1ded5e481984daf8d5b62a51a089b6cf4473b4378c77fac149e0f5f8c79bdf621a606b71f5fbdc0c20e773c02378c758f4efb838ace1834ea2599e701af44a633eecab01f2654e904edcd6d4b4f5008977daaaa502692b79c702d17db2f299310c5ab7c468229b54c3935d7f622fe56082ea36e01be9bba40032b2adf5c6ccc732e37e346e5edd35903f4d12d7069065b5edec8c919dbfeda36396561dbf5b880daeccfda1a87c51dd7be4cdd40a20e9a695ec0faa2ba3233fe9484d611355ae3b3a7153fbfc671a9638523d047fbaeec3fddd06315146502d5ccdcbc6ad9576d80247b18ef2977ec9e853d18317ccd71a7ff20f67901a5f175a26e602982908d7570d01711139c569e38097e25feaac66883f1f3ca98b1896ffa8ae2ad6d848205a75314cd45365842d6c6c32606362a095915aaff1661a1f2b1c5dd49adf331a7ff10170e60971b45b84ab9eeb5d9f3ef814874bbd28441ed3042d58de44495fa01437c8dc97d398b1590e86c8d1523a00699e1b9bb1e2f3065d7fe17bc7ec1abe1617a72ba05b5f0e0defec93aff3268ee5f4c6f7f167f0b262c9ac24b21731aa33bab271fe64e22a4be47646b42e70e41b875850de1bf85f6c828ca13ce00131460e2e5282675a23251105d037f72095e9d585be4d3c102087f56ce7fcbd75412ec5f0a5d8e06c1db6f1beec494d244464adc7b6e22009da224a1c9395b99f1724d97a8484308f1c2e17acef671d8443d5431cea26fad701e35fac50cc712e294c78eb5b7c202f5b489f43fdf5f5e263694f5c0d2c1db6dd9d88d907805f514f8ac2ca7060e46fdb27a473f87301c6d0b558eee8761c366f3aaa9a19b7ab7114e90a1314ed928b2e8d051e5547e1b9ad384cb8d455b6c767eebaad1dfbb155f4da8d2bb8cfa8de05340b16c2a9b4c3534b9d2f9744ea9e0066dbcb5f629ac72566b48943df22dbc88523501d2096b89bc55a8625f24683b8652d81f6831258c162ec8518295121dbbc222b75baa0d3c6ac8cc7b52788f916b5c4f5d12b2f9cc8f7051db8867813db0ecb7a8db2e686c56051c61d3230c77c8b86a7bc62539726eaf879e17b840c9fc406d221625bf06f4b6f4ff233a957bf6895d5482a0e050f30531ac912ed148cc1d92a4a744f668a24caf94fc48439312a210df0386c94c190d48c55463854c5857a7814cc71547a812fc450b70a8b7a895a3b9d950a050156aa93a94d51374783140c49cbbfedb9413e495580b8948b04c60158227eaac3bb22990c58351a40000a13112f80e4c2acd6f4046d719b0b5f1f1607ae2350e283a3b7b8b65bb07df73b7a73e6388c8c581120b006d8793ba34bc981373fbcdbf7faa872176b21e10cf35c06d871578b96023f3a5b47abc16f5ef92d993700f2aa447819c72b2a9800dbd90ee76bcc2cd5281546a0c95a39e0c3d7f7531ecfe139282a3bba3dc199504f84d14bfa94a1221c968590c2fa8908b3c9f93ef8cd97f786bfa048dea4d096cfbf249d8a9801cf95aea4ac5498645b34622fabbcf78e1731687f91f1e748757185c4b5935ac63107fc51eb7089b53aea5a017cb98e903eee0eb636c97fe56c4e760057d2bc0157c98b66d2a876dcf1230d7f3b9ef2383cd1a0d8b60d235dd778b4182326bf6a27bd0131b737faf7d92b50a9fa30ef770f147f590e8167c70ca825379b9ca76502ccc4056ee99046de4f3a6cb4d7a96756efc1161d2684fc69ad94394fd34ff612df4dc96b4b99f8950c91c619baa2893e36e91c13f69a95410ca6d028fe12fcbbe416900039a2534c28cdecdf4c2ae10cfc556442cf062389f8470aca409916fc394dacb99a2826833f7362a204149ec0b02eb0c3983f04167aeeae546ab05f7b05a85d5267b4bd715791f484c9fa07c7bb69cf8a4331a6461b06d42d740cb0906254aab662ff35cceb50ec9cfb69c56270d10f44d468139a0a427e10550c219c0a6bf616a8e8295bc7a5f5478c7c481322a08d1625f005adaca1f1e2002ce5b83b73684457d743520ee4ec95bb151983db00f7b4976b3596dd9fc4d3d13f69b0e24c87ce0def8936969f617d36300124fbf139c9cef
37052ff3847c07d88802f7b5cc36182b1cbb932a4066ed379d3959eede0e48cedc7e8b1bf71635f41cc2c7f09847ae9fa3f3e788b9aaf523aec9fcd989b1b3e7ec97ffdd7fb1c76087c822e72c6c818c6ba895367e612d51c093294951abc7c5b48001b83bb4e534ad8cfc5d6ecbe7cf07ef2418cbc70f6d569b5f348fa48a63889e5ee72107d6b21e172b50f67e2b8181186a0de6b4ffba2075f1a426d17f7675d1cbd18fb6021f4bbcc4328d3df43aace037624a5c0e42c2a453ac1194ca90ff5448cc8ba9696ad0f3fb88db6989af359e6ee560fbb5c3e0e1515ab985db769d60dd6f53540bd0398bfa2d205cd1e9491df33b45912e7bf7c5b6c46555e267a8bdc4300b1850f0ea0a73615c41702bd36764c2b3771614e243b347b11fc4e65b76d0e32a2c526e54b5e4b71d8893e6e5b6a0c57d17f7fe4dc0b4ce4c82c1d7b33f523f1bd2bf40a22443fe26186f9a68cbdad22c8f7a1bfadee8c400782b1fea8b138c1780bfb48486ac5b2cba4908a660919dca5987871267cb000829b858456ecaf9793f7605067ed58a83eca66912546db041656ffbf45a7da4eb861c31adddd08ec8ccfc1abcf0ef487156fb784efcbe6575a5decbf7c4ec2d1011f97dc171b2d975cdf30ad0376e74a6576c90c12fee4b612bf748e5c51e7eb3ef566e19d2f4929ff51d2a93ea7a42c21fcc101c96e82a41ee29c7f8c7dc74d9d4d1e624dcc83decfd5103f4dcfdaf1c9520e7d4bf16a80e0959218a1ce43e66a71b9faa453310e5dba750bacceeead8622a6d23c4c1b7ba21a77da1a9a146e4f9fd329a1219102c0b226bcc43513e01032ce7f934ddfb7b8f9fbae354b55702891cff0397dfb65e2968205daeb2f1c6be380f83cddfc3a04b611d101ecbe028a66de4a18ed1bc0a69ef8b1fbb9b023591f06149eeab73da90ff154a612b65d8633968f25d0688d1dcaa8c460d23c472c1966552232fa1f85ef8a66e2e4028a14d048532ee4c6dbe1c2ceebbd72bef8665c8139d6b19cb0e6b95202144463dc0aa39d59000f044a1c0c54d5885e591efe3fda0e35b29fadb84f5eba516e58b17f7c6ca7ae26242af7e30725e98382afcad6f0af5b85ab5ff01eb22980a248192938e9f1de22dc87f182efd308383827cfd3cccf450d3d78bd69986397004675f2ea4d38e206cfdb4a1bbfa94baff9c836583df7c50ce878205d8a0e86e85a81509d4d544365af7fde1579459e48132e548c2c8c33cd0b26efc66b98ddc5a433b397c618a5f3314ceba6dc863b1e99c2238822d1263d90b60a948fb5d2ad0bcf554f11185e08fa21c9c0d5928d34f86401b3e468fe3594a21f7635f836a0f426cf1aa856f63504aad503d0051821fd805dd4753786effefb0fd5af80c5a8fd8fe3b9bad1792b1bd92722af4ed18828827ae45194f045f385c5ff6367f619dd691617136cc04735f6c57f545713e49915c37c5489c09ea0186c585a2ee756f7338f91d98c5064630dad46cf495184a04e032987b8d62383d126ffff6f7ee8e8f6083e9c686ee0e68b2bf32335cd1254b50d75385d5246494fd64ee267a7789b00605125a974239ec723a00d172218b777f4bd39edaa1d6665dd5852d218eb44c82e2ecce545a8748fdd39aa7d4f64e3c7a8799e72905ddcaa9d596ca13bcf738abdfd57740cbb2ce092ee83e6af7e8559aba513532f3cf86f6ed5339c8754468e1607b17ff2ee058ee640d499aecac865b77337eae40780ea2e8d5596c8fbd9d0613b883298b5b8ea187c7d01d33196fc100f7f461d9c4ac75a618c15de0be5fde9ab0a335db7ff68e00cd49aa6be5f9926f5358c8eb24ca38682ba2d7de59430798575151b90ab291f36d8d7e12cafca2cd7cd02e44b1a9987e3da6f6ac3af2c9321e4b44eabfe06484faca52e0c287a487abb48b9d164f45ccc07a56413fd0deba69382a631623f2ff6cf463edabf424b7b8cfecd0d7b170c69cd3da036bb449b9a6c94253dd89e7635afca8e89eaab4d0642f640ca330766481bc58daf251761d4da80d741949e2b227b6a622aacc0f5db3e28e33a8dcf3542811a4e611b03a976ed631a76c20e74ffa8b1ae03c2987c818d8930158288ce85128dd5a9b2d2ef806a72280d2edbf070cf09540bebc95bb719dceb11bc4db31c8fb6c316003bf2428db14d0ef01615e34edbc4661d609447aba2dccb0128e287460f626428ec320851e1900ccfb28a73117fd7d47810358a939ce932e454fff38d2996117c195877cb089cd25fb179763910b13f21c03fec73c981b07b43b69ab46bbca60858cb5c71266ac890b26892a7dbe6794ebeb22ab67c0d897fe36062e15ae566df2237692f6a7bf5d249bb7e7a112a43d594e1c8a4fe834a3489644379d1642fadff89f4c575a6c88a7b39d5c70ebdeb289b909b9c49fc0dee3f8da9882680f259a50c3fac6d60ad58af51a18495c995fc50f9dade0af0c3ed65d958ef4ab07b54714e3a903d30d97027ea17693653bdd5f0db85d576d7
e97452d0c3d6904f34aea11b10f0b0fb69f72a875fba32c96c51142d273f143ada87d0c8a723036b8f9d52baf50345e4f60010c1ea235c969ac56c44065cd5d857b45e83b9f522a00d562a684af84814568d1f29fc8f40d5aa929fb0013e38dccf114e4ca35dd645bc97bba343c741457e5e23aead2acf2b3f8d627c6d903c14df20aca377395622ff2ca04c99bf9419d07f1c497ccdfc654d12c4a618ff986e6ebddfacb6c28ff49d4b7af269204058ccbf2df5e8a6b89220fb387915bb12ca4dd1788689a210051a370f9218acbcd81b388a5259bce8e20ec5882e3c440a04dafaf84c42c94c746c2005293259a4d12e04f9f17a2225194058070344451bc1f3310d09cbb25da02f5734e281994909c311e90a6786230bf2e55c697828756877124dbaa86fd8ef307f34e853634834fb6ddef27adcfd414112051074fc8908cc17237073ab387c1cd93f5a76c24ace5f4916666d93912491c5586bf697801101e96d81e1429229298e485cd59fd1a5d26c2fbed2e1bb6cc8848ddeb48b573a7f000d8be62fd90962672cd48fa4e4e7fce1990754a84ab47f3bf8e55fb0540fac7b209e542fe030c4826b0f8ca5c2bf154b3401958fbd7a0ad495aed6daa727dd22644d3184d2a01328d7248cd4beee937dc5a94c317108c5f8999fa520534d2fc2cf231472fa1b875c64dc3f7107f4e1a8772fde72f4337f503a7b3675c0841f711e2d8997de9c6cf58b4ae7cae8153162c8d3d4b828218c8d4917589ae0f2c9dde7766d24c0f566b499068dc820317df7a418f687881eca3360fcaa500c7fa8985c9f0eb286049888ff01a735b9c027ef7d8dd629d207268434c68ab7f71798591b48fb2833122d214886fa461c09179b4220ef5944561b600bac88e46d9e56e69bd20bce42504b9d5a1d26edc98817dfe4115cdd88e84a0a6d582ff16178a28e709ee791fd84a1d6ff3e83f768ed705501fa8141ca3e8022e4b75f83a0a84be683366f371169171661d0e3257a1adbc85b410c60ebfa0523a189d581c00e6c8813844c0eee977b6b06b71924029c8f8f8bb3e113d5263490472b49baa96bf6cdb4f406ffd08c5287fb2f80a9c9b67427d7180a5d91825b4f7c00cc4780a54547358f815a61b854702a227b49490f479250f4649b62649b656dbe17bc78cb5b59a54d1a14c053e508ad1a8001fa6051cb995aa69a62fef270ae7d72bfaa613aadb740e53ac91755ec5312bb9fff10aceb738318bd6b5ef39e669dee9bc7b98c9a5ace6d6f994688021b4e26e181ea1f7feafc36e050cf30ac139e6e7e164b053a7c3604863b6444859fd1a0329243b63b7819b6095de5f792973c5dd141ff31529d50eddecb5f9203733bc542e82c74c5c35fa9172b3687e9605c3d91a24a4038981d61a96c324068e41ae68eb8659787e1e4cbfcea8ea18d2e54bd647a4f8f43650f873f0244e4775d7914bd4f62aede54273231758cf5e2c46fb2dc7a77c962cbd523fcf5c4fb06a9fb46e7ab5130496ccf6ea2fac7ef99590c1e00d8dca80c3414bb075d9b26e22c07aabf0c2f027503e81fb328aeafb6f66a1d6d38d12e02bcaac7fd648f28a2bbe6e418b2f824d7b0e37566c501eecddf337b974ad9a6e8a330b00a0908d22cc2ce4a77f69a0f4d3fa9d02d4b9e76b919a7eb927af8ce858dab0d7882ce83d9dcc672776bccf44826e3b0cd4696899d4c0642339f1bc2d174512b99561dde96ca6d8850b9ca252fa580cd2b70251629751f3b38821b87106e204401e12b1a702039286ca243ae0e1aa445ac31feca4495307c2181034deac90dbb5641f1c513fc2f5a4b9a7aa33f62ae156cd9171ee54ed813e3ca6921d9e221a2fe9547cd1b0716f02141e6504c8d76becff18ca1dd5a16b96384c42597bf2ef406e138de7e7500f18bf96657c68e17036110f11ebd4ac450a6f2f326bc2343981198acf1eea0193116676ea6d4eb05455eadeee6800c8191acf43d23ab08e6340d7d8340fefe3747655557a1850dd911d5c5151f526c4407de2a26c9675ec34af1c210fb5afe095d4b0dafab8e42e955577a6b021e712a3cbbb8c601890951b98461f09b9bbcee23491aa4c3bc3212d20a45392b7061f4bbe7aa441a01fd39d845e93c83e2a0b7f4a2942863460993684bd4bbb69e0eb3f58d39e6b8c2216e016a0451205ed728b4ee887b4c98acb976579cc035ef624a87bf5dcfb020748442887e7287c6034b4726c4802aa8978c72910ac13f6b0b94c36520bebb2d01f1309abf5a197a81596ccda4ba70305c45ad610f1a2a26da880f4c442c83fb80056f0cc022832b3ed860be026c57bd17be5505aa5170061eeb6f20a205d8bc268493e31658fed9b94700bdcdc5575335ee3187bc2a4f0cc64f2b34aa84a96733ce0a2c97b00c0e6e7ba9208350116ce894b1372c90eb22fa85534742e9dcba79be12444e7ee5019270164b3c6c7aa980e210ab192ea0bf839247526750523d5d2eb5d65a1f45ed6a00fcb79174ca9
99b482b39cfc26d1bb6b6a2f975649ac09d9ac6ca3c70f3c9f031ce5d5275916bb06dd5a4b3a47eb17720050d3b425fede18d1227669e164e5fe244c5e91f3ed590d259fa8f92ab239f6be6c79c6a4d12c3ad8cc869daca64d602c1c8a84fbbba0eeb64c5149dc2bef21530bf871902f74217ed3cd018ea22eee2df188e3747f13210d42d42890650e06487d167af3036b4d0a40be55c6f54cb8d8a0d967c5bdf0eb60b3e7a68fd15c7ff2b733c331e87a17cfaf83f1447e99511fc19ec320e0fb86d50a452bd762537b904bd6e4138df76c1845905bd2027a8b015ae632032d7b1108503617c640bf0fcf06badc7c2e307c65e3133b6072244a36fcd660fc59c1dbf29354de486c8a7c75b632200aab00683528a698e770229a4f2a2c483cdfd9f41d1aabd02246c6b730b5faae820fd3a0853feca9ffa182531e8681c0bf6f9733e15d29a23825d28d61f51ca6b62b8f1db640d6760085a41c60d6ac3353c19376e8724b5483e791325f7612f8d8db5afc2e2732a6d16d72ee645e37ae62f9df6d1f60630d8d2bb42c7b779cf8f45c813f007fd2cc30c9c6b7adf4eccfcd17a5bfdea5dce16388be0b791f61b8eb3e35dd389f4ce7ede23ff3a88bbe2042258791ec7cf632e192c722d2950856e68f4e4a9fc40b2d450b85a98e264119161840c2bd6000cdefb36ba9cae387cf0f1f9176202a0bf833a5e4cd642042e934323e5643cbf1cd8071842dfa012b14bd7e39eb3f12a088ce42637a94bf8d3c425d51ca21959073ad93e8075fcfe604f682e626e7f6dcc60ec0543d2fb4953754c9a78da5dbac19cd877b7890d270078bc9efde4ee8c090a1b25abd579dc4690c24a8868c1e769524b0229d75fbb10779325685ae13d99ab930cc2df3dd97296885d21e2b7e8d4b916c0a40a263345140bf19d7ce7b92a41bb716276add5c6d4b89634da3c9c15646e095b613e024cdd9b83d9173b419504c9e59b606b5caeb76631587197eeaa258f41fc4039ad0022e4623477b11daea70f13bcc1fe69a3e1f259a6c5cf63dd300a331972400bc29fff7e4765e0cead72c934db4dfc51329b8c3b83fbfd62f88807ae340ed41e5173f40a3abfdf359f570fff4a412154904a1f66857916b06f4c2a6714f5db3ec33e2f7d382c4a8751b54bdf67c952bac57c1228bf2de551d0f8fd4978de6019856fde6b30e9f2acd6d9f5bbf5b12161e1f3c6335f0ada710c850ef553a8aa78377e8f55b92a222ec2aebf59aa8fdc1376318070ae208d15eb01ddd5e84d98847cf82ede2f906f5e076b71d08471c8330e6fa85d6b68f1c347d5c01578b0294d6f9bea83904f00f2902ba708634e6db1c7016c40453cc357bc08ceed1fc6491db80fcf3030c273fb305430910b35f3dfb0000a6bf03f526d7fed690b7774b1a21093e83b7e5ed120d5dc2072ccfa57109482c16acbe8e8c2fa3e93adb649c55d14030f9a0c88ebe899c23ef0bc3c5193b800daa91107656b6cb3d399c4b26aff317e2be07ab5b8ca138cd47d2b1f128693384e46b1b7eeeabda81c51e5f6c90440d24702ae381f73d7d4ae187543898153577aa442dda5b703144aa77e720ccf157ef6ca62f14b48cf66adf91cf674cc8e52db3da479451b5746bfbc27e94e86ec58555cfcf44e121de3ad2a9899151afa20c21a147e6fce6764668ca379d614a0cf4ed823b6bd51fce23125e8b7ddc2090799df69fbca0b32e486d466270e7c27d1a5c14731eada44058f02e2af97066ec63c6a42454a091b083624f763e0110ac444b35f810351e3c959b904f42def014728c7902a9e93fa1360a1cf513c1deccbe5913811cf9104d0a97487fb2aa4a60b9d9978ab1b73e6f7754383fdc1b3632cdab385ed8f10f55ea0fa0384f5e4e4491799ebd399d3b1691b879fc115d134ddc4b6641d2fdb0df278513686391f18e368be875d3bfe0b85b312426127dcda4c330022eb7f3bae9030ec98cfc350442a080b5adef18fc61874bc4210b32ca4669bf5a14a64bb7361bb0981cc93b05a3e935db679de2324936484b832986ac014fca2d73d7ab0b5bcd299f9ae617dda7cd3217b7dc16d12e9984f9dab736d9efaf14316ff8b029b3a5537fffc9327dc30a5a09e18d28faef80df67d56b86e4a818c5d54b2afa322d399a6c44ddf8df327b19ed8a6088fd840aadeb75fa9d2354a10a8e6497e8500dbac1120f7ecda994af63da3040eca226f4efba531c674b8a4b96bfb486dcd442b686a2eae629bc424705e2e24b8490975865426a6e0a819abb8108eca2cad46650d6a330f74229aaf1695cdff375a723c86c6fb9b3fb10f81698f0371ff3e62ab8a32b0e36e056897435fc3e5cf7d313a66f71791458484b1db5ee0b48af2e87e5781572b8791a8e2541d4e0b4258291f09b3099587d59ab7af920d4d3652c971548ca25ca491ef073f764019382d026b1ec2d01ec0e06f0ec072188ac19928105f4a72e457c424a82397769ee3b2e9eeffd4d63150575e01c710ef
c970a5701d8e86388fe5b397b7a0639c1c4a30ec3a204578e5e995120ba467755ff0ca3d03e8dd3de649225ec8ce2a2dba9ca08fda8d876a50ae5ddf79bdd83a44e0e49041645dd13be93bdbbc590a5a9f022d47da530215f3ec1927b1251fee0ca1b00d57e39097746a8d1691f8d7d197949a6fd6be9107c0a7f5978f6df545043c850546d3e5444dac1b96bd5019245c23236c0640b5390fc7a5e3a9ce1b37b964dbce24b412097426d0c376063255597d9b23af36e4907c9e65356e73d12da137d8de31f726f2c5cb116db2dd67a8f11e2da327258ed1a1d8a1a804e20901caf87cfa9afa955efbe3e3cbf74e4320dd03ec6451929cc2610f77527d43dc4b6de0005083c3280ffdc5f6a3d8046ff712cd53ec583428706943241ed4ab337277458cc105efcac390128e7b6ca697cbf4a8df9d2f2247af9245beedc7d8e254e34b14d63e9d45ed34ff4117a082101ce92ff20ab731fa78019b9b14362c9c50ac69148c7d91f559121338fd47d869f7fcca76c39895cc1273517f55b06aede142bbf39628fbb82ca72cb660e335a756c352bb63938efca412488134796cfaae6488bc0484f56977d130907c5172f7a390519cd7084ca26d56232ce050622e400956e8e6c72df5041fe05224223994abb4baa590f0f93440a859bb0d0cac2caf7493810a0bd896bf604699aabc61816b56540d85fd53c7535ac1782a774c62c0b04c332a201e8058971db4630579846f8f1c1076d0314190e20820de76211c807e53bc0e4c53d14a617c6b41f673efd6ea0d20b37e1298c274c5acff1b8315ffdc063cbe6369a92955fd346d8031816d7d010b4797f0b3e2ef93cecf8f947c982c6be13364a86000d4a9a6b037d4f737b39b2148c6a0ff6cbbb10d941664fd2c4c595aedd9450d2b42a4b72e97bd176905b84ddc96aff99a95066e0cdaacf208c1f858011a0354e95f400a3d953306ea2d3c37b123653f35e6963132caa185ae3abce076f6680abced140fceeb5b01ff7ed39b18abf3af86e9ef28ff8d21147ac4616935ce36256f633587d462bc4c3068ac668611917d9b83e7cf728ff74525ad559ae819f73ab2a48fbb3401bed1d0ff51b9aa068f37e028a6a666bddfc60eed03e8a0f2ddc20ac80525333f08ec32472da0fd387ff12814ef22806ddc50217c1153b3b74da0f96300cf8007e5cdb4c2ccb46aeb1b118f99ebd9a36424f1c99fb71dad2a7c065343eedd8ea57a10842c97ab58af2d9647da1060758f9a3c8c9a35b2659d6e41d6c87e796f0f26cc32701c90890f60fc85d8eb734e60b552be4e85931c9fc2eafd162ead9a92a4fd50267f0eb6850e4617126beb9eb91bf01822b7687c896ad9c57e3044dcacf6bc27da3ec694c01296b8265a05addda121d36773fd3c26ef8cb5d392e1c0dacbad99f67c355efdc65a377a8c90c37dc3813886530d6ad3208d2a7a753ba45e3cfa50e7922232c131c755ae7590541382f1a8a2bf0f5a55da93fdf78b80e8b997f1db6648fe1e06104ee3bab3535a672011771df1dcf4f0dbccac97011c48ec192780bb06681ee7ceb805c46b336e6e63cb55331dd443863a6d833d4107e109d68a774df0543f1985973c3b449f2d4f52a7dedc3079d9f33751a99a939a8290f9b89c6c59b8bf4f321a9ed6890a15511635183907012f329c7b8b20f7348b2827774564c000fb05cca4c73a9a06177a6ab2d7ede3fc806b5e8b73a86cb3046b30fc6996061126a4fa044b7522da67c04e039ea25dfb13fc856811b3d6dcade088a3a3b8016818b7430ab25ad0bf478746a3448e2223595c51a5e90b2dbd6b2c547bc51d8b47cf57a82e6e7a3b4d9fe3fd16ae881699a8bcaba4b21e5bbc0fc267f9bada53a5c29d9531f8c8e38c757f11038c3630775d17ed6ca5adf9e8f3f6fdf32bb53484e8ad8e16fbed816a15fc688b5ce4b825568154f806349b0f9ee79691a7e339c41959b468411e4e8ecc6577092133815b3be633a48dbf358bb44b1707edb3609e3ed5597e8c0102bb1d0386e7990590181edda4be1a061c013dcddef367635cfa7cd3b5694dc81a77aa25de23c01132555e0ee02e607b3c129a27a6ba737879a171c2d80e5dfc1123d06acc091245bfc637e6160a625e4e3cab6c1e6a9d36df7281c90f3f69882a76ba8b9acb1c62ad7087871d2d40b9998527265c40912a9e4642bdb32509c3418752152acace78fc26c7186a1aeaa381eee5e791397d021496ff4d1a9819e0cd40c5817735ca402fb4a7e009617b141754a4129c7812d364dec8e493ffa25a6031aeced8494b2b6d478a26301403a77aa7f090d6285a12a7eec4590907808a96017405e37cf8db6bd2361163707c9131ba0e6193607864307f21f96d93d50ef460488bf81c60f0b43f4bb8782d9e6d2b6050d6aa782c8212bc00f20ca8226b814d07bd1a1fc5ae9c29a478f9beaa6a529ee4c749f1a82e94496de9e7c4d1688232ca9e43af132d377a429ead90165a0fb7a57d4014905d9
741ba826d53542aea96e3957e202a2ba81fcf11babcc6b1b255c7fb5e98bb205c2d9c2942ca64a11da30d03615cb3c954a49b82cb1c93299c4c8d1174bc34462120fd4af318ac13126efa5abfa2ef7f08a9137a8732ba434882a6f36a8ba8d20083b5e8991dfbf72b27cfc84ae421ac3c57b8dc9ae58d0aed0fa17ae47c816dd098cd6224482a0495fe49dc026e13168f7ce6a0c35973361b66267dbd6ec892a45dff7974d58d261b6134ea15b09f4e85b059379365a56b47043d46b33f148359f4c8ddf7f65c2ef6b016472d3cd71831bc50d385d3a4ef6fc9f7e45fb68a1a598fe71232402ecdec673e8e53836ce4bf15ad0eb4de1e9285b9ac11b3871ae381be668e20cc96e125f6ec34694e62f249a911f4a821851e2d76f7ac90a58c1d9abe8c32162be12a5fe53994aa8f71882117001fd4000d18247c405c9efe5583f46ecf9d2628b32ba15b6cc6dcfaaec4fee68f8b9a338660e9c21ef0040f3597621984a8a8b326691008a597c56e05b88d6b71fe3c34a86adef2e903d875616f6e09f074898bf525d52a1816e52ea0d083b7915fc987023bf38099e62803a7ecfa0a5d7acbb2f9ae9b533cd50b1b521d4586ae9d05513374233cfae836ac0362d6064e60e5790181171e91ba8b440803bc006a0d58b0041895352d70888958c3f19c5c7ddef353be260158e869183851bee63e3d97044dd3034573c7dc199db73ba13c3a1c452eb8cc6c62d30d064abdafee0dc78934d184557d8c35590db5fe50b0c68f331abf42fbfffe2caf6117459b72a601882e611ae0f486f467758cc2680469dcadaeeec89ceda14a2dc6d8371b905a81974df1f998c02a4eb35e99234f5d489dc93ec5025fc90306feb8bf23cdc9c47cbbc7e6076923b41e6c184e252a007e2cdceab91fcb5048f03ed5af5de7abb6732110eb1559057dc159ccba614cbd81976bddac88f0473c402a8937f8b89b3b99b8e7abae9917d0c0211c34ab815b2d94e2903638515e4850294ecf895745d0fb5ef762ae14705815bb40f063d9816b34ed3ae48262e16a60d024343525443e580e072ff2b51c0235da226872944b5b41396fe01886c05cb45be8957042fdfab12e76196410d80d7bdab600ed247c4150d4b329ec22c50a0027e87b90973517d0d17c457ed687abc423b9cb0d00d65fec7ae039422b3fe16ef3e2382d2ffe811c5aa7dccca38c0ac94f8ad5efbc59bba94bccfc5e8d811f6f36177e8a1a7d200437bf8d1062342ab4e0b99b8ee98ced37c43e5ed57f10adabe0d11d6999b8fc694f5fde6ebaf295aed82eee618ca63c8d08104ca60937e52c316d3d4d64edb3f24b18edd464600182c8b4e3a69e95ed417318f2bdeeea185320ed8b94fb0b367bc1f73e5a6b4d45b5270701ae1f796b4b013d0437d826a8271f622978f7e3f27c8c5a70dbf3cdf86bc150e82afcd833e3d5d09714797e4949f6d1116295af7c28b43231fe87baa46ab506054f054604ebcd001dc04bcfac52c2e63426597fc6905c8a5a5c0fcad708436be7bd282268025e7629d9a4c01aec8eff1baea183a73f89c5befc39424903c5cac0e3fc1cc5d472d35e0c35fc28ecf90c1eedf552a6cbbad82250688bcbe2fafec354da55ca01072021b20a4f86755262a4ffbe17721abb55018242d3d2d73184d10ebb8b2618e6428a65a6629102933017c496615b77ada36bacdd6202cb175875d2c771304acca65cee694235954240a823b69a164bf1f1a05b57f2758b612669edf530a1520fe11022690a3c31f4984abffdc8a4adf320a57611fbe7314bc747f9a69fb637f1a36dc92d64eef83899cbe370060611471adda80124c40ceb35387939642dc9d8cd50b3e79e39b8544abd3f7b858cd4294228eedd2b84c42cc5d62109de47639aa31eeeb940306158aa6c2508fe48644cd33684482dc8d671b0a4a620e3a35dc536f2b7e7feab9bbcbf0c69b72477e73626bb831ede568363d602ed999a77bd9ff3c4a77c6da1cc3c05eec32b376ce16187ca13f5e21af7b1c2cb5a4b2296c06520e11ba1bd56e480b10d3cf65d19175631a52eca7c49b01cf4f0b63af003a9f7f534b38589832612015feb94ba101d85e244a13129d492bea2680252587da254114ed3adc26ca3d56180bfe47abccdcd85ca8c6d09a302951a1b477d7d97c0f35b22a248626af64730c4c1c233751ca37971dde55b7740b487a1f2299a6f086e4cb75cc0f50ed7a37fbbfe73cae9dda3ef1447d16ea041700a59dffd10180d8e65505f02e4224d4f36af3bb68c3e07fa03ddfb49d9e47116bb12661c790af4c75628388e3a70745f2c0f471eb56e813680c365dd3567cab05f6934915096ffe2814efc926e30848697d4e8dcc4256902d415b33506290d9c8022f3996509b2b32ac5461e910144d3b3ab29b39a5e2d63395ef00e87bd8f429abc57fe9d55e20ec00b689829eec36c000787f60faa4cc76644a587e4da14094d05c3d571e15b7914df111b65f5290c76041a84d1
86de0756c622b34348f6d49f953bfa49978af4803e0acaeae997157f4897ab4f4f367751fa6dbc84e3981bb64d797a16774e4e35a3a2b2643e447f8e575a652c905cf8e2c4101910d07a5d45f65431cacd9d479e3cd8e57b8c61cc550087bd945f16e087460d16dbef3d445cc5d16bd7932c3639f5fbf4d362735d3ccb7d3503b7aa4a6f843b320014130c05c8c898c912ff097b178400ecb53c562eb675bc440784670030723e8ae7143f99ca8c701df436aafa15aa6023241ee24e6e7e4a09f3a4506326c605a78900f03980500db2a80ccd0d1d98111d98d2d181d5211d58f7d28175990e6c3c1d187b1342d23729a9f535903f5e907de506c16d3423c7c8186d0ed2e6bd1e502879a287d6914ed6dd011646ed9635612df6c083ee74b18fea51111cad4ba68621bd4479f87e3d67dc91269daab599314c0f03d999c43cfb8d3404c20ab7dd1fa06d49cafd0b08969a0e03f55073d924e6894d9dde17bbb2181028cf889fed901d8da7e4c53c2e40aca64087ca9aaa35e25317e232eab465f0f31821b6b4f6534d80407e179f247a4c261f0aeed066957395686b155664d1decaad566e7f2a13b4e935c93fffe245ed99aaeee693b22387f221023bd3ce333be713e7a74fdb327a371e5c1a91a49786061c8a1ba9ad00e2d339bac0b52d5332e146fb538b381309c04c5841d818a59e3c60c53f05aed2174840a549583c55979348064e421b7d129e42f5c51e1981787c5b0134b1faaa2af53dcac94f1e0084e7ea428be456327a5fdadeb7553feff89fcaded8c17acf398816f7f8c8b9b9c259463f8937d6fa2903edcf5c77db91d20f4298c2fe1b344bee6615a843c2a29e852615490e2679aaed336041a4ac481e09536087b0ed2fac283ff17672b250e3f4b786527f1b90e234350b93f9be01f8c60a37656e39cbc568f6eb53d7f1ae37163060d49e597aa2e5fe0454dbe946fb56d1d25ba878fd9db0e2045d08e3c753204671e2fa974a992fd07e2a813cdfdd03d124a588acbfcbf28c7f6ffdc3b8f73d19b33a42985a48fbc7812074aba2c8bd7635119d5b5bf42fe7395cebd6e71dc2898dd432dc172df94476351b7d7d388e8319184937154ea4ca9446ba4bf09f11c31784739b32701413d1ca967d921e8a10c4bcbbd2d162e2fdc0ec34a70ba166b9ea673d0d93a0682ae674b79cbd31622b9981fc573153398b1d7e5dc7903e4ac8c1e686486e54f0cd80c409a9eec5546f5a4e7946f0d00f21b50a82f8607c2d3011e8952e8921c292404caf2a9ea8f7256742d1fc3ed70a9e9fc71cc4655af18ba4ce416a673e316bb495a559199d20171ef5e6b26c48ec49949822f9c2759a3900f4eeaac0c697cae0255f2b437328af74926f8e5a3d769fd1f080681d2233353ade976c74d14724a175eed9b1afeb27fbf622c918c77d7a34c33204408997175c45ee490e476b67ed89ea4965b951cc62f6f92bc4c6c7b9ca40c6a39f8154ee2faee8d20c5238ee821dbc5d11547fa6831c1e22b0294cd94b7b1d47fde60229cce4b39147ac0b04364205bfb87a75b33d7edeaf578db825470c6a838cb8c5cb909bc2c3b60ea6daa23c0d55cd9da4001ebada81bd16eda78859bded971d0a3958c17287a8ba9c59010baff815228ab9e369ede27b3a198648dd5f4a91df4d792bd94a12854c1a0e3bfced242dfbc86449bbf05192e4ae9ddd65e465cddc148fd2023945da87aab0f415be336c7578cf1acfdfb48cb7f7f13b98f45050d9391fceec299acd417f4bcca242406c160d2a042abf127239617e9595319c07b207be5cfbb02ab39574f74a71192a112ae696defebfdcad5970c2801b2668e1e6031208481560f4292ce2ec28375df457e828f5da93c773ad06ff5fcd08ede2b319502164fd3bcfb0852b118ed0cf22f7a5fc4373e23048206b4ae84b1fa6ae6e1256fca587c7521bbbb3d42d4189df6e1b885333fc3d95bf7a1aa77283329191e89504aad678811547f456e82fa13b23f271a2d71f1f65094b98b6dc39cc5efda71018b2ed888732d213eff8ac7b958196fdd4efc5c197328c2c86f31490a6bd0b47b79842f6bab580415267004108a929c0f52e2b15923b48f7406a61ab4508a15f98c19d5d8355e9073e257d9e76b4dc6741e1f8b8b7af99ab88321e26db544ffcd26d7a90d0a4062018aeb0646b53967ba2c5e6d09c0230b625abd55ae045f0e12b9d15401d93e1c5ff64b563f8396e93d3f2593ac1d763cded13d74ca29bb131013fc4b511d610ea6eed9812870c3c8526bbd61de45527c195117b8ecf659745cff19b1cd1792deaec04979815cc29c226aa3f0bcc62716d050a254bf1035fc95b92c5c3089b65fe6cc7b0655608f5c392c9fc789f9772d2bf44ed4046bc6b61a85940327e05ea5f314db59f4ecdb077165849d355fa325d4c8ea6d7025957a25dbbf2baa171dda648e23343863f5596d2a0a0a161454d5b9cf41617dd0b56422d1858cdb3529e7b1bdb5aac0057d
21e81e1749009dc9cbdc13969a109b8c9f624adf194efb0cf3b613492190ec98890ea0acf49dfb279ff06f4594988fd2d381086bfcbffd777cc2d0a3389ad64024a00f1e1549f4d8e477bfde2e64a689ba2365863b780becf004868022b7800d00b868b9abd599ae00b07dc44bc902054125b40b504bba011cd11998915e1b78cd9f54d3b66cb1b608b58797eb7bd8ae97cc76f564ac7dba75c66fd14b35db1f2200a523c6d6e27f2bc884e1d23751af9dc0226924a08dfb0b1a62bbef78b8ec337fccc0a97dd3f5a4217b326720a468485e00603c81717eea82fdc6dc2c3fbdd7ddd403a8cef85d0c652ff13ec07b8fc5c6775bd62be0c23d970fb20855847534a8aede825e2c875fc91e7cf9e1739a96316529fbaa9490fb5c79847c8a56ee70a849ea22e2e58d50d8fc48ca4f878ee834b5bbb1416328ba439dc36bd0a9e402da22abda2cace46332b735088c58bcb084adc4b8ffc71f5ee942ac0ebb9537a6636be2abf9b2fc5cda45d7773bdf91b85fccaf1e48abd9b6d16f6e903718b5a77e02af0b9ef72afc124c34776678b55dc8ee2980d9be70194acad72083544930d16b4a3de39b35b61c8d98490f9320d2e0f395051b814278c275cf0cc9537db83cc9de36c70e52569c45a79f18ad635d36615fd59f20c638e2baa9f57b7bbbd3bf99cbe37bdc9933f3ab3f2cd1c5912c96511aca1025ca54f698bfb5b454b3bfada11db2cc4df6e4edcc86d92e1e35cb29934fc157f6dc809a987ca85c7aeb3335e7cd5ff7fb8d5b44d7c4d8cd3b557e85c791d9ac5576ccbc34ccb12c5996cb35e93fa65e3999b280cde07a3ff2a4b698263e04bb203c2bfbd658fe2f006638ff44dad92fc7465646ca69471d8737105561574b30441b132d0c301a842390da0defe35ca7fcbac9da68ab71ccdeb7b34bc978a5dd39be808a1f68645c8edfd95d0ac463bd42107032bf2042c8f10a007d200486ee1350b31a181a498c1126f91bcfd1cb5d2c646d43552760f00d797f7dd799fdfafdcbfb4f3bc32eaa1d346abf49f9ad70bb92065fa815d5922778932df0b7bd4a82fd0ec7fcf316fe6be4ba790cd57f825e7f4da7b766843396f0909f45d18a37ff7e5ce0ba6f1e980484b04ccf7896d9f0414ce8b6e0ec2cdf9d7a39b5f2522e148c2bd6be5646adb951369aa4cc5334483fa1137bf0669a497001891e4e7398da26214669c97e29dd9f370e7187510f662ec9c44ad4293a143ec959574156c2a4dad8a3494da50b9ee89e4f6467c4eecd6afc23ac53f1ef50701d2c804b98450cca4038d292473e91c24f0462ba1dfa29aba107124c96ee3b42cdba698a2c1211deb36adfd28539541d225d0a98cde0124edd9a18810d15e04ba18b030c85f61c63d5dd5b33c79048d0ec1fc73061e13a9c34709f38c525aca8292a97491f389d5a439f2f60e40985d0be492fbe17dab405a8499382302fcdd1a53c610701184f68380d0d516d0e6da279893aa43d5cfbfe408ca3abee26d9fb5f61d14d2dbfe5a52c9f5b1873613e5237283783333cb2f7228de4f4d8ff1989747b141a2240c07117f59050bfc2f30f6f1d288070748bc7e42fa09b71baf85440647466580e9f8386984f860919efc2458c2bc6a7ffcefc2c1d55484cc7d5be14c1d3f31a9fa7ff9bc66b2951089501c8e495726e22fc6c38661ff05b4e911e755d69049b3668927be651254dd9e763192a41ba1304ccc7a89143b1a1219a33b1496aac43666c5afce6379166e2bf7c3e105dc320a6a55689a8fb4a1f812c171e98996e44317ff9230420d4f15d72ef8e4366fd695ed45c885eba3b2dcf9953d163cfac27e4bef13155b15de310a77c05527f1bca244fcbc67e6bca0a764ddf41c79a75a55b143ef0766956a7449a6bd3fcfcebc075a7e51c0f5d76e35c8e71f8f90aef4c6e55b61331be6ff748700dcc2ffc380635456c557d354bc5aac5e0fc2634b1e7472062bbb2664b465660f9362cb7108e21aa727a4d5d9ccadc6a71aed759a8ac53ae34051727b40a169b110161ff228eb2884e8da5b6fc2015c21acbf8f810cde8c06fdf71037c33a92865b1bd8a77e9885b08540422290ede67e6fc62805e8c3ff1b2a41f8abdffedd431d5eaf0265622612ca60de91198ebe739cbc4ea5de15247525976b3259c7cfeba1ddde7a7335d48179526c25ba41d86949cd70365329955124bae96ad5a8fe198deefb83b9fad533f52f620a6301ae78946d2cc9780cabb9fd25a32813edb1c406dfa2dd811af970f6a43413c1b8ca85152c436c8b663df97b041fc000e164183276ccc9b0f59670885276cfe7f6d3b1270fdde13cbbe2a0f5b28a3ef9fe81afea499ea730436e7a2e3082eedf596460c96adf27a822b9203b03fbaa8c6b5e2829e576c5152a479272e64205413dbd2025faeda7d09daa282f4902ad86c6f5efc1a5053e142e1872d198fb0a9a342262cb024ddb3047027ae4abddf351bfbcec12357ab74a1fd7044b4b2ee645627c5f3b6b
35888a9d3abac126ccb8967cc8c1c1db510d7333ef6bcf40b86646294b8bb1450f14712c06bfc46d486b9fea627696c6de7983ea9282b68f83d0d9fcf82f8cd7324d4eb86560f3aed862f72e0905253bea451896f31d58c80ec188ac9d2568c76faf8aa3cf3785bb7113141a69d6f936890169464b8ea42cff007f61286a56dd734009e7d26f9fb46319a189304c9962225c8f68cb228b1e0b27648930370d5cf819aacbae24c7d61b3372598a9c953ac2f9174acfddf39dda9ce6e72ad082c6eaf9133953b681efa33956c92fdcf18874cd5cbb8b8052053b79ef48bba17d0727f7256cd2ce078ae0766d4ff105326fd3a489be4144d4f5ef74452cceac361c86b2a019f0af884dc6fa1705b56b58b0307b871bab8c7c628f1fe94e5e1168ca5d3a7392b30a0b163256c9f3a22eef0f1646c0e6820c2f1e3b811c3143cb2630693afe96bdd813217860d4c964d91d98b125f03d884651962f538faf9e568ffd8a4dd007dbb9ab929b06db964acf623a92dc22d1ec482b3d1100eaf124bf43c410be0ae78793097c50deb2b14f8b7e91a4230c149f184ea9e8396aa67cb3acac0617dcd23f68c2246270514997e8b98ffe89748732e4cd01f12345fff018fc97dbca47897229d95fff5230df3ad9f46317a936b496563dd12899bc13b972b2944eeb2802de8944645869fa95432b45dc89ac4e8eea985b9ecb62cfb9028e72bcae459a825d5e77b70215bbaa77b00baeb83126edb01d38563b1bec7078dce28a02a6b2019d26146401757e60772e44154a65eec9f2a70660e95d828b6062793f19398d22d67efb27437b66e76c21b7d1766c32525ffee4974085a52bea6194cd7527bf307a609980dd5489bb7de804af346caa717d4b1b2f732ccd27b5bce401f238484d88e059196b6da610f0ecec6fccbf4c5ea5a456043ab4afc2541058a98a3af02779dcb0cf3c67e45740a76bf892b428b2277cd0e981514f9cd210f05e748895b1dff6e22bea531efa33e8276efde127ef2355bf3f9e519c772a62d45b919dc7ed3a1f51554bf6de7b6f29659232e90b470c1e0cde75fc6a96cf2f50b326e91339cf513426281b0a4aedbd175a94f750b5f790187d59f3a8db7ba8a2f7ae4f89c2bde71f7595df7bb576ddbc97264fde2be3cb9a4b851903ecf321008c05040c301e1fd14b1cc47b6eaa3261b879fe8b50b7bc17da0bad592f42a623d450bb4ad3114ae82ae918c4198ad6aea3d933a71806add84c484c9749660a6ac27464c589a909d44c866a9934662fb4175a95c151d40cbca6193545613a7aa1bdcca68de7a62894bce7de0b14143543cda6cdd17b61afd8a68d370bd231bb43d42bbf54d1b4f17cb5b244eedbeb7befad96ba8fbe174ab3fa1635ebf604dcf9ed06e516854761954605cd1e017c7beede28f7766f97da7a9bd8e8ad3f1a60546e88386bca99d7b7d7a64daf7c575badb7629d34047d817e7bbb756bc2d16c324289815d5c809afcbc7d359b3d2ebc2e936eb98cda45a5c98b047555433728df235786c3269c94b6d63f6c3d26c8e7dff803f5f1ac67c29972957c59f28425d688c2640917f8b49b8e2a29c409346e67dc88744185277cda4db8769351fb2c59ef4d52acc96834e18070894b13ee7be6d5810184d8832e4e9729b8f33cf7d1dd9e0cf47b566ae9470f3fec4872b147a887d6332476ab6c18765dd83df678d2c38febd65d642e489a54cd3c18f42018b405fa1861147a9c01c4833dd278d0bb1eb6520f70c9fa5eac243b05db15972938c63afd1819bab7630fdbb39c9a803e5f5cc24fa227983cfdf2167d9476512a102539f234ac5293eb6a72272c70e8e2a08b5de944c3d46492cd9e1c0f3a6bf69c1c741e96ece3661f2ed9070c4df688d2ad49f670325ba08330e8e8b4d0424d8d8a75ea51c402decb82f3836564350b74d683ac1670c2dc5f67b11efaa9f31dc25ae9078bb593257f3f4bdd18ba4cc12f3fbdb35dd74c62f4893774baee6103adb5b6874da692333e239367dce62432dec9d4bad2394a0c5c86b9f08d76b1b88c8333be72d0c55825dd417f199a3dab1daba10757f966a8b64067c9373734bfa9c9e49b596d817e33eb675ca660db8d76c44160199f71199ff1f24656c91e5ab74007bf9525e066cde41eb7da025d660cd2c3565ba07742e0764d2937d7dedb8139169207c32a0f8a46444a6fe72a6f642d8037b276953db4ae87d623347b7a86a6e8650f0f1ef49e2ae60baafc03d42dd0c3fc036b56d79576209936a08bdeef7062dedc58e896547a20f3b1c7a04828efbbbb9de3efc2aeab81e3c19a1a274cf20756d3437b528509fa0ee6c8772eb1ef3cacd1173a8aefdce5357b4edfb90cf9ce05f61d9d64989b2dd0bbce9d30c91bd9149ec50269a90b6bb367c575f2326c5286b60fc35b8f0e2f4daf0fbde8c3281f866118b27818fa0e1426203e74bd82f0e187e18974aac99d4efecd1e173f39387b72f849c64f337e52f9e95403c79f7c2
6cb64553eed40b143a6a305c583ae63f6205873346d82d4e46a0bf41c85eb650fad4609307463da80ee3286b969f32d590f4d0caa890157635b3526db83336e887185d7700ffa8f19f3851a25e64d94fe295a028d076b9800a388d9736ba8fc0c315d356c4c165873232bc9e03d8bf7b04d9b7c8463c0f5c8256068f6883db40906ccc680bb5eeee8a17d0f27d306480ffd3b76ecd8f146bbc231a6f329e543079d478ce8333e7bd8664fd7fdac648c38c62c40e52b7f808ccf8c2c9871195f8d2cd0810d332ee32c5895f8cb3057c99971c80b7af8a81c467d6456238cfaa87ad87a683d6c5e625c93abf1524c1095cfb7a17d643ef42eb3fc4d827d85156a6a48a9efbcbc45325925aa482e53b0f7d35b4a28540b0a060646bcb71343510c493a50c4d08aa168ad15dd47f7b0852717d19ebac0a19fbac03b662619e090c524831db32eef905d2f77c8aeb3b88f0e6f64aaf9e0587aa0cfd265c9837e12ffa6894986b97a1acb4f64b91d8c7719bbcd281773c94db9c5597212953fcbf756cca14387c87222b9b89c5872acac8830a41c9352a55a4aa898f15744d19444e53e9a8505d79a057a13d79066813b90340b743276c876cc7e80fa27d6ae9f98b5568af0adf50f2c77f8116dfe8959203f40e34f4c8aefa173ad690376236bda80a0b5654e101ebc7103bc71c283374876943ce84bd81317d8fa890bfc13131d0c6ba618b65ef6b0896e72530cffc44cf9076604ee175d0cdd70a1cd9e70f650d7cbf5fa016ad78a83fe03fa0fac5da61d33981e4078d0e9247bd8660b1c59dcbac89277cc40f75c8059c0ce59dc66ec6edb81a45ddd9844e5208b83be43d62eeca08b94b4f23d7e3049af750b74b007190fba297fbf43d6e4e703fa8e194b4682cb0906ec4111157aed617b56018e660f8c832f9eaa30e158da1eda83ee52f4654dee41cf71729d311ed41963de80de7d01ba8e1d2876a0982cd0ef4b4ee5d22ed191cb1b281ef42e97376e3ce839727973f4a0bba05a7c967eb48c2e58b34077bd9a057acb112e5daf079de5946330aa5938ccd51ef430671dfc709ecdf25c956db73c37ad648cb2a28ad465ef676affbd2d7533b5d274fbeba5098a0cca708bbfa5bf7e87eafd78709de5fdd7c181bbb1ec9103590f9a64d306bc913de8e0cd1ba0dfc8668fcb837e339b3df741bfc9cd1ed2839e739b3da907c517e8220cf43007769e81d0450f734dd22a6eafd70b081fd0c3a32162ac5df7c149e484add4a13224427c495ae2c89722ecc127605fd67c19e674be14831e2c45d9839f4f29ce1e749218f6b0b58b3ae83d9e80de03f41ea0f7c0b5cb3ae83d8c7ae4dad539e83d40f741ea1105ca973da490f165cd185ff67823085fea7cd9c3f6e0edcb591bf2e0ac01471d2610622dcd8034e61427aba95f20a6eb6b163d8d39deda50d6ac3ed170f97d6d4b55ec6ed7dd1bf35b0212d26a4906064c1fcfbb129024a027f15cbca4af54adb5d6340482144848bb40c0319c8d3136549fcfe927b92c67e059cc9a68424bbebdd3f8f6f284fba6cd58863033853671d694acdc7bee3205c7d0b87c8e176368a8ff79cdc07fa5fc1f3e022e6fcc412f698c4e3ec09d9721ed3d37c9a6873956502843d22e927b1eceda25ba177bb15592c6a8b684a66ba5c664bd2c99ae15ae5f3c7f19aae48a7b34b9f7bc7cb9bdf7527befab2deaa229afd4a864ca57b8f66872a6ee1bcb1d2e4df452ee3ecc3b5cda8b4ecaa9a2d9f3b967c67bb8192dfde86104348e4fb2e23e7c9fc4e42e533002fac5d1479b32ecf3595982db5bac6eaf3827088220d875d6650a0efbba4cc1610974d123ade495946e792b5cb356b866ada484b97c84e6a85d6598a37963c5412a1dc38a118de7616e82b665dd246bb9a19640c950485a826092dd215fd218d91b9b3da17b0e7a1e16315d6150b36273bac22426cbf3f20e09654b6eec3d17c1cc0a936051f9a076b1d8982ccf3d90c688c668da789eb70a73ef799857b869e3d19126c7b0e7600680d1b4f12838962c24de337acf5941d3e6c4c488955b9122ce9a5226f7d55950626099a0d993f375aedc3015d55b8e6472b40a53e779dee73edaf4ac8e0818a792d53be799a15d956ceffc267ae7a6a0d983f35d6675eb7385def90d146812a350a56aac09339849cdea7c0587cb790bd22ecf3bb7c1f3af92edd53d4f55f2367b562dbcf7b9f75ee73efabb610cd2acce947382c630f5fa36882b540aa59219cd1e1d3b83185512077fb97b64e90d740c428fbcf523416e6816fd7bc328b6817166cbfaf7618c636df7bdf79a66cdb226da5b1392661d814b93cc84647a13416acbe6257012cf7125bd314890fcc2a549669275a34906e4f473da94a6d95bd47da19af4daf296232cfe4064ddbeca506bad5ef5eb3eda24fb96dbb4a99ea7e79964a626ec6ac8d4c48ac9ea0a19363c2f5735168e5e86bcac1ab2a38cd1b4a9ee8de537f4d581b06cd3c666551b73d3a666
2ae01245eb920c339fc24d9b2a63d9584ea60b3573a1a298acea25ebc95747d1be3a0b47b3c7f35a33a14c47be7a75d4ac5d2624e68a93558d8528d068562cc4b0cda5a06953bd71c556c3203a7be5ded1d23c6956a5b9d1d89a55bdba486363dda2209ac4693a2dc174ce5a6bb5aa66d5344bd5ace935cd9a735a21288f1238a542fdacd652162570ad2c4ae0927427ae0c128b12b5d68aed9cee91d13206ee7afc4052ad354aadd5ebed3acff3beef0341d614c33014c5129016a747b23e58c7a8eaa456fae453fd63da4c0f9522e1b6ded53a96a4301449a58a82541cb6a6f0e779246ca75f4babcd94de9629e092f4b43fd27fb6f6a5b3ab433e02ed303dd03afbbe18c5a12fb4917671259bfa13698c44636d1ba22f74137ad39f1bf97a2509f6597a00f5c1e30f2df0318d36563ba892434efbd4ecb19595df0bd92a0dcc55499d35a12f5428a268f2ed2b46987a5967d456aae1fa4a6e9f39e3f87beb65c2d648fd251167bb28b53ecc40ff82dd8334a873210f660a7a7b3e4b54c8d3ecc37d6fb447d8915475405fd6d633bee401ab195fcea2ef2efa0e18b1ede026441150a0e123e4082586901d4142c4c9129f00102de186125c8c6114039f72de6469fce0073058430a6a64e1f379e14416506880c512317cca99cb020863b4fb10025116468821c3890f9ad0420b1c18f974bf48281211ccfa499445922c685013f12467cd0c3680a6908610675001012344f941195d1821074d4c9f3e443c491d881023092714a14a142d7ca6a726498136a6ccd67042891a6af84c9f73fa37a7cf185884587859d9154fc0d7154c10628199d286114a08e2c84c0b1f3a64088c22d8122868644105922e98c8d14fec8d0b26acd0ba50e34b9d2ea2bcd7b69628a0eedb310db91bbd5545aa598b88b3c60876e66ae9471d7928291428af6e16ad42aae0ed3c1bc59b4694a807503ee627510fdcf8d24fa21e48a9a9119b4f6aa02fb4536755b286f9e409ab8e4fcaf9e457f486fa7c524393f666030e3ba0a0074a9e16cd274cb8f1e594927ba30a1df2d46ded95e7e0c3a75eb0563604a9a1005b2899f28427ae80a30c1f5a842f9ec04185386880238a69d1759db5d6763d905e5f69fd6aed7058f5d47598bcd1c2531c9ed96d6b57d94de850d7babb2b799dbaf5aff4808b6bab879e523c6de6d7b72ec459d34d7afd7431865291c7005f9d024c84beaa70be5653add4b66a085f92ac2d1d60364bec7ce988eb18438315f238d567a88ec3d324ebf57a01917bbd7cbaf67abd45439c35ac6963c179639d25049fd4c0d3c656a75388fda3ec8a269e4ab52b66beadb9385ccfe17a901bc83c9f871f90fcf579ef8cf2f7bebaa657087f5df44ade90c29e7afd7512ebefbd38d779aefb75d625798169e8a07bce82d041167c1e7eb5e5f9e79e7f191cb337529947a58fc8826a6d1a05d1a076557f4972c60b7210060dd098f9b4d1a40924aca802e4041af8b4d75ab5cd399ed4c0d37d740e957af961d697d388bc396f2c5be81a15b66389face91c65ccd973a6d755c64b3d6712bdd304b0f98b5f52d60fa884e4b23b801f7ed4737dc2e1c6faeb64a0f009d961e10249cb5555b9f831f8924c4e7faf4b293dd4ed6644bedf57a0121d4aee983238b2b3b25df606e288d2fbba12125ed6567eb6e6507e5d259e79c0ec4fefcc61f3e2781e30fa097ddebdbfacd0d11479c31555b3437746ba5cd313680b837b2b0da6a1a24de5c256bed2885bde635778f5eb02ea8d6ca4ef6ddd1baa14ed6cd68d03769566f912b17f4a66ba72394b596eb9a850c8781be409fb6c3d0303489923d25e24115da25694291bdf1e5c4b9723c95a76f3c755b5dd4512a3a0494f24164467d52ffa8134d22a15400f84934d4032bd898370ff04c97eb9b3e3b40e19854e6174fec608d2a96e0c4491b4d04fa8226b40e6818af386c0086115c14604da031a12cc1f527911549d06182c80a29a9a9a24c7cc953b2842cf98114bc58e30a268ca6e053e2502bd420b282085fb26e6e88623cf549fdbbf7899a154356d4645240e0181afe7ae94fbf36ab3a4e759eea5e9d559d9485e7ea4df610391d1f35546fe12b4f13b876adf097de59bb4a15cea82730cbb111de1d2156addfce9b93e60c054fa7638ae6f03e1d4512cd9844a2f97bb1043e1d33119854b637d8d9e2dabaf7de96159a49e28b988229fa2cfb6225d1f2a3639fc09f230b8966ff39d2d17fb69068ce65e0e9e244a12caa0554886d9210a62670cd51eb75a928db72693267e94c61d09f3c3ce79cd3e4c4e75d597861d3da927895346bba6cadddf5ee77ef68ad6cd6dda0d99a5ec519ae80a6cf8bc5fec3dff44e5dad29252dd605a7cd376dad2b6160eaa0d756ad75ce39bdd761c2beaed53e096b21ad59e16cf674ea58d1f1a7af639981e93ccea8edd4cbf6ea35372bccb70649d596adb576fd4c049e9eb3b659f7e79889c0dff7709f3aa5a39d3e62da5fdad33
18606eaed2575212fe485bcd8dd5dd28fa1614e3a524a9f3443faad1001d35a6badf6fbaa57abc909dcd50a02816b77dd8eb456ef332581a9dbfa35689bc0d431e933dd0053ff9cc0d46729e4c524acb5d65a6bedbdf75e6bedbdf75e0f64119fce9e31cce10293637489a1b1374ae7799d779db79468b0cfb26aa161a1f95600c0acb576ddbdf6be05000b96762c90ba0464f5dd785fb75ae90f6ba7add5566ba79d96659f603569a969d51300585a2716eebdf7eeb0979a4a3fee181e8185b4acb57687a9ae947e60310ee0a4240660ca14dd5badb5d6ea916aad62db1228965a605230ab994c0380dada21e616429f59a9647ed4156c8c6eb72036f0b89428b8e4f13330f5d259f60798171d299b8f478f7753fa516f6cbb7ab26ef7d71b715ca8f55ada7ddd6e3aee8bd65a6bad55001674e2cb51c244f847e591535d50256c636939957e5417acb5b6764ef129094c1d97b00d5fbf269d5aeb4ef58254fa714b256cc3a51f7547f47b7b81415b4f146bdbce24964c2e312f31335ec302e9c2fe6b41a4196b6ec21a7269069f9159a958241c128642dad0377c230f5e4e2390871534ba6c001f53c014e6be94f017389f0b021040f730f07c6f60ea393aa47adfeec4940eb082320cc2548764ed9c765a6bed8efd92a44af8898ed20feb82aba2b08e1c2897161d9258eb2c9124dbae277d3fea182281a9b3947edc938924599f93270c7cbdf4e39a4af80b6c9d8a3c2471a594b2c19c95527bfb26ce9af2bebc4cbb6b65f0ecd0eca19ead5e6e9ffbba821ee152514d10f077eb25e0d44fa22a862812708e9f445530d170544104115ef94954c56b095394c02f638081513f8984b2788150926a0676f949246444154242d43870e94809ac238d9601c641a03dc02c5ab4116ef949b403254fd8c111ea06fe7e12edc0c80c7600832150f10422a870a3b681c19f44541851811bea9ef0618c63a6c0222885841acc1451c84047128c5ea688010e722ce19672820d5c9200d311a50628358e720c418896345e2e52fc00884509462829b620725282ace5c391428a19e00f7f243cc588450a26804c4527298288ac50c961295c7044c9286772a308521bb315358810d5b8953e1c1d14851ffe48180931240491b01844401f4c90895b7c3ea6c442a118783ed698814f10d1f960e35b4288eb438accfb7074304490fdf047c269d83a1c9c0f0e23aa8f23bc6e1bb0f64125c84a2942d50052c308566246cc875150b7e6971f7efc830ada8753f727111530f858c0a2fa60091c6a601db82cf0cc16750aec3f89962c3104af7e122d098209f8450949f0e92711cd0c27b0f793883646d700ebf84944eb0136fd24a229a14cb0fd49449bfda0114d2104a21b183dcb4fa22588bcfd4934c5185fa62e14b2efeb62d75aab4729a59f0cc98f0e431ad6cf3a2b352facd2407dc6d4cd87b2f01eb94780a868490161506dddd97728c35f9261e643d947279d36b71d245d2288f8aed51474dde40612df4ea2d2e44956834846edb2279b911ad0506908adcaf769467ae3740b6515886963a33234bb96b6ea28b313ed3474aa7d5739c9e0f276ec1b92da79d4fb48a725f8b34d9b5bc3d9f405f0d25a3da73df8f07d4f9bcec18e056f9d7a9d676d900cfee0d0b4b9feb581a997f7e5c6df7bba01f1d724abadebf766b29964a62113edef35cdfede11bcc28dff86b87db0ecd9de189a5970c8b32659253d29332ab437aa7cfb109b5704eb69732b0b6e473b5a6dad2819cad6a2b20ee7ec79c1f5244b6212932c8d2f4d44f92ad8d15957bcd9338dc0a3ef8bab2fb869d38ed3c065abd5823559f2f656ac65a415d43ab2f2add7b4e9b2c59ab518d4bdb0ac9222908867df3831480412db48bb8098ae4b64b25e3706df5e8a43be8b58f2ed6127e29ab5ca629466b587592cc255521cea568b414636f1f65dcaa2ac5bed601695883451a8592dce44b135a4596d6d0bd6ac6ebd9ad5ad57ebd5de0bae495a85d88bd1cb1b30af76d96f870152c2e45b8431f2fd82fb7e3952f2e44b9821a52f6160303198a06ff73a1bd4aef2e25aafd6ebe2c4232dc497a4b66f1d887d0b845bc4f762c39a8b9e112f6b9c68781225993d5d7de796ac0d314a29d5d1ce98d30b7146a16d6fe7913127088662adb3ce394925d3caca490853371d4d5c4f2db5bba551281717ead28232b9e470c9912307aa8525478e13ce91238729c74a29478e1c2424a018de7befbdf78634eca31fa51fd75da6e06a3a6a96e7379baa342bd53ff73ae3b5562033ff1253721917e2a323c7543f2d49b9ccf8c3ec428827643e29afd9061dae1a7f986c28e1a3a3e4b32443e521e7dc728a7270394639b99c239d970e4c27a613a423d399e9d07486746a3a369d9b4e910e4ec74827a773b4f3da81edc4768276643bb31ddaced04e6dc7b673db29dac1ed18ede4768e5c2f17cc157305b964ae998be61a72d55c36d7cd55e4c2b98c5c39d711f9226164
8c0c2265e48ca49143648db49137b288c49146648e3ce281f1c4788278643c331e1acf104f8dc7c653827d3eaa91e7555bbd7ab1493c64189a0832cc5008324c0d0419c6b64386b9e990618aca0c83fb408631ca21c3e43c90618e3a90635e1cc831b00de4989806724c5006728c0c03396686438ea15d20c70c5920c7d42a90636c14c831b709e4982209e4185c04728cd10d392607811c73f480ac7a3920ab60366455ac015915c480ac922d20ab6641b28aa680ac1a4a4056d5109055b61ab2ea7680ac2afac92a1c0d5965344356e58064d5910c59e6f523cbc00c90656205c832413e5946f65966e623cbd07a6499a118b24c8d0059c6064396b9bd90658a7ab20c6e00598627cb9059c695573b79a5935739792580bc7221af70f28a7693573cf2aa565bed3679d5425edd6aab3d0079b523af8c5a79955b1dd5563b0b796606565bb1a031cfacf2109507d18297cfcab348f2551a73a757b89c9beb365d58b96e3cafd9b3fa763fc2656bd6c26cdab48d4c8689c1cc6418586d35ab5bb36f5fad51ae90f0faf632e7c653e5dbbd5c0e00f6cd05127f23fbf697990d70c98376c483d6a5bc53b92a95636c48b9ca5529952a95552a9f25204c3071ef1296287274044e7969ca9972a1114f516d8546332b9954680b6d32a12db43181270f5a2553a92f274abbacbff0a0e5446956cb70fd3c7d7458da2eb066cdac64bca666563229fa2e438eaa4820c6550e962400e33ac6181b74cc32d8f0d1e1427c5439c60695a73c35fe408f5062c4cb47e5427c448c0486a1ad705e055cbfe4a1f114358b4707cfeb22814b548ecacb2fe5475ea6be441dbdcc9873eb56ce2d9573d331ce6e95a9cfa1827180e7973937daaa98fc0a7d768c5ee14eee3be7f6b85b5dc71c79fa5c2060ff32e796836bcf316acfb9e5147191cb8110543b20867126aefe4db2c326afcbf34edbbdc619633031fe24bae2c9ecaded493406914fc1f07d9b6148818322b828a38821a0e14345d557027c59a363a2f972d62fa714177e56a143be52a0217c498dd023bff22545f203233f50c257c7a91f3ce1abfb13275f9d15c60bbe3a4e18667c751e1f507cf519c418faea416c5ffd061cc498f2d573787d751e9a7c751400e109af1b18c1801085d82d8c20244441a8f295de2684585c604338921285299a204114d282c8122e8c28a2895a841bd14aadb5f6fc6230860b66a86e4393d667f6509247044f72fae021889f0dabe3923ba97e955669b50e85be328635cf505bddadeca1ffbca4b8ff3286d55b6d7d5588cfac2dea3144416aebf3eaa41aba8f1e7fc0a9adaf86e30f5e5b9f8b3835f7e9d34e93854f8f31dd8843b33ecfa1b63ebf53f0ad16d5cf6bad7e5e6df5f39a51505b9f87f9a3b0ff9cc28a6a2bc686d0ab8717f7b5d4f9cc9a6fd4e1e6ba1988b386e2306de8f5ee0bf2e5982ec7b42da63f6fffaa7f234fed52e369dad46ffa8894f47d194f1f6aab2d7a044c27aebb5621debed7926e3f18726d7777d357750d0e0c3380f184746351dbb5d65a5b146909578303c30c601c21dd543a7445bb6b3f4a0492b5d6da31c9f49ea3774b8562034f2f554fdd4b82f883ce19c6a8cda8a69cb6a6d4ad0f76f6f3d0368fa0059e4de1590282e9db799a9d37bb6e86395ad43ed43b6fd6d9790cf8390f9d589b817a101b9a0c1d07ea1b68d7ca89cc8726ab0f75f0eb543a383cb4ebbaee53523a408d630a3fbdcbdea4efa93ccc37973e4c3067bf98d558da1745b001f1456d5527c9e9d39a39492ec0d34bbc2283eb7d3eba670002906fac5bf70235ebc69a75bb8e8c0bd4ac5ba4650d5cdea0bfd76f918be473cf63f882644566cffcdbeaf1ba29097cfd7a3ddc7187986e75694988b3c6e79c93d24973fddcf378f00105242805a44cb09d52a811c52101532a540a1698fa135afdda6c7b9c73ce3967177149dd0aaef5489da95757baba2152ff71a2cd39eb68c7ae20ed3e69bf455d6f5e30d1dfcd0b2dda0b58c3745f29df6eebecdbecc9f1ddc5b7cf92f404bd4e5c30a308219ea8011b30274960220d2162800428e4200adf95083d79966802892bc8e00111c680814f896b942e66f0012450a8523baa72831dacf02b3f89b8c0d9202c4d1bea24d2b4f9caee4b1fea57fff011fcde7b31ed8b1305dc9180807ead67ddb35dee407aeb979422cdb2e6e91dbf1afbd7d6d1fb66f17d294acbafa134167836ab44593c6d0eae0b155a4b5b6530bdbdb44443f72cd16913e5f53e34ed5e297b88ddfe3e53500ddcba78ad25d9ef5e7aef0e24da251a5a06ef4740e3a84fa70d654d9b3aa4569f2d807d77fb5a3bead0a826f40ef4a8566c3f123dfa762bec17056b3bfaf756ef6e4d15c3ded136cb5eba124635c9706949f7de0b56eb799e1d9d0a181ccb8f54ce39e9ed27b56b8ed7926a1cb8b46f72029796564bb23ee3442ea925b50b046f57aa41c8db18faf5ef1843c3bd657f8ce7423211b873cfe1ce455a6
7af9419f86fbcd78594df6722b0f51cb6b58647636e94cd1e1d7f6488ee0a3b84fb721615fdb468e0f202e1ab4f4a299dde4ab982b7b47de8a11bedd0b4a9630fd493c99a7c79cd846a4fb8b8c5a0c8869ad86e45b1d23312f3cc001a628bafb5c71e681af3e39836b569539dd6abe42a69b22576856eed3eb9454d8eee918bbb4a6e935aaf94afd455e28be4caee919b0382386b4a9ad75717a9c95ad3ac4952ce44333131d94c506a505247a628f75583c334ab6da1c497a65a4dca97a6275f4d3713eeab5bdaa9a37695a6541506c8e472f52894d50f6547b84db3db5e4e0bd8090edc5ee2b134d1445a62401aa6d9981b653253e4a8717144660a9929a669565bd559d0c0d5656e954c1dd556f5540acbdc646e5f612db8afd5c725be7ad992c657a7347534caa64deaa89232b4d9aa3ef4aac1bedabefa14d3a67a29939be2ab67d417d3e6a68e9abc2f2fa07605bd2069d7242252f2d5a6e4ab14475f419a1650be7c897dd522e8cb97235f5f642fb317da578ab2b5ab94c97df1a4a281b235ea8bd9337b9ae604f4f4fbe600823046572b7da929f1ed449c35a50deead4f2fca9c9e77aff7bdd8912a48e958da7ff9c9f273a2c4d86292285865b181696234b84a0e39398b5ba7316205b18c5839d61bed5a711f3ed6910d6cf6ccbc5d15bd5d05e1edcac9b3a8b09078369ea68834de3ace2f504c1beb2858255f72405bd6ed8b692c514ab0a082de864f51b097d9b421c2e52a7cd16c315da657b36ab387e43688c9b24ee4ad97a31b6f7dcccd1ed14d66d4664f0955a55daf959ca4e5f369e9bad04db195951cd290d2eb79249729182c6d097c51d22dfba2a459d6f35e66cdb247d3c6ae6eed2a5147ab272d0e36e9185a6a2b1beac856d3ab5da8599b82b026222633324d6dda589a266fddd26cb1aaad6a2b919a51b25e658aa68df51cc3b35c9b36561cc75cf8f2dcbe5e1fe4340641e3f039b98f76c1db494610de5aeb224b9e3e5ecef1a73c7daee75dcf5bd55e682520dd75dd39786f786f7849b70464656b96756f7ca135cbd2d8deb2b897939c509e84e5766379f2843ccd6de5e4ad779e7b25202bdbe7a1c655adb6ac7ba3d39ca4c559cec02b5a254f6e7d3554c916b732b8d9e3cdde7acbcbac922c6e3fb71e440b5e3e27db79893a7acb320e5d582dc1a757d8e04a9d0a12674df9e5beba68a91de9d8b5afdfeb6d38df375975e6688bfaf7619cab7db963b5551d874abaa75234c6a2851441f4856a446f6295fcbc2aa10cbfecd8d73a2589af77a82a61f65c2ca64d185f7a754a279355b28b62b6c219bda95e7a3ee82285cb4ef6dd1ad1d7aee8ab979f135f85f01da22f7465cc1b2c664f37846953bd42c13cc0652783f2df87712ae5acead5e91426b9c3c4c0bca14e98148ee9ba1e8554cbabd104d24fca8514386ca07d3e22d012d39ca20aa83ad031d5ac6e6cabddc28509b8bd2484dbe910a747f5e87db419b4ab77c976f5eb4a5707dbace9795794f20a188a24af5792984c2b243104bbaf7a6c78d54b9d9589edbd21ce9ad2d21f9edb12907eeb627712c23850cfa15d9e53a7b64aae381d674f8a9b5126116afc740ccc172810bd699f1e7300abc367471dac579fefa3db367b3e0fa4229c84668bd4454ce958ef1dbf69535da7bebee2d8db75debd9ded3e4bf250289cc2e1ef23cd54aad4acd5eb122c36097f7f5a8245f13b09e1eba4fa460997502d67e0eb3eaaf515d26909163f13c954fa616dd7755d5723627c3ad56a6dd7755d576d2d77e868324ac459537ab16fcfbd198e02ba05dd823f3c07bf5b716a97564b5699a26f5c6a6405a64d6c5d9bb40bf4267d599bd0aeacd66f2c2ff831d5eddfa7b4da6a12a5d2e415a2e07869f549b39a666b56539cc7b49abab4dae4d23a2f6b93a15600e89676858666cf3c00e8d6c1397ed68b2d416449028316830104ec27182fa00fc04f2771e05e34d90fa522d1ceafb5947aff67c759e100290d4fa2210c3de8feb9f87a39caa1b63eea20a50ebae73935a220e8b4f4c31b273dfd1431aa4188ad259a03100dfac0247962ff19a1476a6e94b3a440641c36d0ac8f02cdfa7cda13a9599f5f9ef2fb523c40fc41075d6ccf61f6d4ff7c877655f7bc2424477dee9fe38cf3abf9af052aeba84044890ce130a91391688b298f52896060ead68e6577cddfea44d2ac1f3ef4ffa8de31751c6fe71e634a1e76f8f6fa22b9012ebb08973de4a78b42c0d46777d3234d71804bcae4a78b43c025a5fdc4b95e904185888c2136d54fa231841ecad04f2228b32fc136c6b8110961f6363f89ca38fa2ebc30d285174046be5de56178efbdf7ee8012c3201a5a3f9b05d4ac3a81aa9121df6e5f47dfd5f6f58975326dc42270d931dc4f9ad5b7ef2e9242058910d08bcf4ff97c1db47771234d86305152c4081328289fdfe2f3597cfec9e7639fff5324e857869ee4404d861c41f244e8419fdff97c5b7b2fe8effc1f22b4af4d2a
dfee2c46f0125c7e600c11e53fa376950da30294336aa0a26fb746ed2a3b189576953dfb1c7d61c68268959c959c3e4a3e9c2703f2663e969f9199b1fc8a5663f94591194b4f4835965e2d662cbf213063f9c1befd8708ae5df5093fff25ff10c1499142c4a85df58d9f9fca3f448c8850214245d6aeea849faf23ff14912141426b576dc2cfcf917f8ad0848460edaa6efc7c97fc53040604146b57cdfd7c54fe29123362a4d6aedac6cf6fc93f456a4d9abcda5599f0f359f24f91d7902143edaa4bf8f9a7fc536488099359bb2a959f8ff34f919912254582da55a7fc7c53fe291254e4489123b1765525fcfc95fc432466c4c850bb2a1b3fbf947f880c3161726b57357a2237285088ccda55d7f8f960fe213223a2848892218eda5593f0f3bbfc33c4d1105586a8026b5745c2cfb7f987080c08a8d6ae2ae5e77ff98748ad499357bbaa1a3fdfcb3f445e438604b5ab1ee1e787f98748d09123b276d5349e880c09125bbb2aee89d89e3cf921426b5735c213a11111fa59fa2122d4b58993ef235f763092c682bad90489942fbd21dfb72e08df5f67c6b711a474b76492d6a8b6524ef016577c54bebb37bebb2ade819efafcd611d0bf55f233b246956c98c964648d8cbe3b18ed6666befcc228e3db69478155b275305bed55f8e0db4b0d6ba26f582b992531c915bcc6cbae091a2fbb28c6f6cfa8922b8cdd10b3d55e53c4370d12e39cadb25ba24c4109fab28b75b0764d2228b16f0a9bfd682544e0ee3b980c87633977207d3e6b1a936fefa288eccbd4eb2802971dccbae81d4c0c8faa98affff0e0cf6943eb0ef4e887d02a4deb59b354b44a939702a9bc8405f6867840efc12ad91d75abcb0ed6c1603d5332be00973d83f950cf68cebb05385052dc1b0cfd007e767183dbd76c6dad9ded3ac1b1d6197078aceb945fcfafbfb1f4e122e01b93b84007d643e7c199e112e75b5c0397df9c2dfbd9b1acde7db9dce17740bd37e61063ddc6d8110728f8980187c785d9eada8e336dba099c7614d4f61cea0be422f73a7c5671798142d81f7584aae2f232b958f25aab98e9d7fc21e15746d584c30648788925d4511632f0bc786e9634c2c8d05638ab82dc8de524395cf5421854d472c4700eff3ce7d6aeb2e69523c37c30b45b04913484cbd66cd62e5149bb2cad5d422a17d8a7b2cabef8d52fe5242f5e731215955cb360eef3826b564ba1d67d7429b3688d050ea0bca4e0a3a314762a93c9888ba55269e54d4fca2dfcc9454a52651eb3bce3595ce431d4ac76d2c883d62c95cfe41895c2ae1d75e4e2f7a28e72847284da55f2f69ca192ca4b335e5a79a9542a5d6bb95883052210b95552e5365fe362938ea03981e6939a65bc910325a728e7d6ac761cc3b57e1219e731cbe9c29553e36153347b4a7ef3ca3cb769d39ed3454ead5ded28185a8b1f9546924d69ac79b1f062d15ab04ace8c2a8c6b0bf6a2c1d1e03e8c697032fc7959dec84abb0488dc823a6acd2e107f4b17a06f3f6153eee7c18447eddb7d340f5ab37808896ec799ed13f0fccaa3c9370f2673dee0c047074a8c1c5019329f940bf1c9b9b5ab426997a9a85d2b33540b9a05f50165427b40b1a057d021ca036a05ad820ad11dd025740aaa031a05858226a14ed026e81254097a039a044582d680d280ce801641654063408bd021280ca81034080a841016ac20052728010946e03df8600214a8e08716d01775011d425f4061940805a244d01835821aa147d0206a037a84e280ca281314097d82ce680ea8126fe729aa2495c2db797095a4b44a522abc9d27c773e4135403c6be84c9a9d55693343871e5e7f5566eb4c822f099108060071dca0fe4e0810e7060034e779035e0948abc72ba2467c0292d8f4ea7c818702a45a671aa838c835325b9c66914f9024e739057700a45b680d3590680d324b9024e9fc82c38752253c02992cc72da449e80532672cbe91259024e657987532572049ce220ab9cde20dfe0f4480e80d32432049cda20b7e01489fc00a741d9c6690db2039c1e917938a541b6c1a9917ce37406b9014e8dc8384e8bc80c701acb2e3895415e805322b2009cc62007710a94739c16c90a704a24eb381d2227c0292cef3885414680d31764975321720d4e8764d26910f9004e5d90799c02917f9cbef2005c48a6c15b907b9c057906ff21bfe02bc8405c0519064f4196c1519009e027c83fdc0439062f413680fb907b38097201bc87ecc347907ddcf37bcda62fe5166a2bd69a512d320f4eb3c82270ea831c02a74c32089cf620efe0148bac83d32b72e974287fc0290f720e4eadc81e705a45ee8053a1cc0197c92ba73bc81b704a451e9d2ec91a704acb344ea7c819702a45ae71aa838c01a74af20a4ea3c83838cd4106805328f2059cce320b4e93640b387d22b39c3a912be014496e396d2253c029137987d325f2049ccab2caa91259024e719003e0f40639024e8fe4169c26916f706a836ce31
4890c01a7419987d31ae407383d22df38a5417680532319c7e90cb20d4e8dc82e382d2237c0692c0bc0a90c32039c1291739cc6202fc02950d6715a2407714a24ef381d222bc0292cbb9cc22027c0e90b32e954888c00a743328fd320720d4e5d9007e014887c00a7afdce342f28fb720bfe02cc834f80f19065f419ec1559009e029c8401c0539063f4196c14d907b7809f20ff721fb70126403780ff97d04b900eed9c74b79c7b7809ad5add8d882d1d4be1d1781cb9c5bd982fd4d50dfbcf2cd907c93536ba7a1a9d1d8688a686a210dee5bb682c3250c92f2e6f53cb8083c040e026fcfc95572076fcf39aaa40edeaef3aa64e9ed3ab04a7ec0db756295ccc1db75822ae9016fd79155b203deae33ab2407bc5d8756c90d78bbce502535e0ed3ab54a66c0db756c95c480b7ebdc2a8983b7eb1455f202deae83aba405bc5dc7a89215f0769d5c2529e0ed3a47959c80b7efbc2a29016fdf81553202debe13abe40ddebe1354490878fb8eac920ff0f69d59251de0ed3bb44adae0ed3b43956c80b7efd42ac9006fdfb1557201debe73ab64106fdf29aaa402bc7d0757c90478fb8e512511e0ed3bb94ad6e0ed3b47953c80b7bb5e95fcf17617ac923478bb2b56c919bcdd15544920deee925552066f77cd2af9a39206f076d750250be0edae5a257dbcdd65abe47bbbeb56491fdeee2aaa640f6f77e12a1983b7bb8c2a49006f77e52a0983b7bb8e2af982b793af4af6783b09abe400bc9d8c5592c7dbc9a04a92de4eca2ae9f2767256c91d6f276995d4f17672a89239de4ed62a29006f276d9574c1dbc95b2571bc9d2caae48db793b84af2f076d2a89236de4ee62ad982b79347950c402555257fc1e793bd9d0756c91ddece13ab64cbdb79822ac9f2761e592559f0769e592501e0ed3cb44aaee0ed3c4395acf1769e5a2569bc9dc756c9d1db796e30486066f503a3e4a8744cd4775d2d19a219080000040500b314002028140e088542b15828cd634d103f14800d859e48724e1b8bc324c6619432c6184288218418002002404233890400b4da9ae535aa8c6f48eb256d04edf24b5683046973ff14018e9c1509b8e77cf75454177546c06ab77464d7dd28d08bdcd19337d3e21692abd83fd65acae42be53adcd5781fbbfaa934e4e92d09de665b6c6d656151f5f485b4a7aaf69b909d0386d7ec27005b5ccf6d8d2a3ec0bbab2297c07891a55ed25aa50dde50c49755e58d93380c31e45755fb3dee598b2c93a1d4b030b722e75023305a18a1d70ac080169a760f27d15046c0d716b39739996bd8f4c1f1f7980ba0538af89efa244b1a4db8d7858c1411facee574330be3045f65893eddb2655bbffc02b91d1513e9e08a031a38ac585b30e2ff911dec75c417733644cbbb82eed7615ee343f3ad640e2cfc70c5f53a7d81b757e980725347bf1583a35d3dc799ab4e9dbf6a493e4e017010a8bf80320b125db3c3cc95217957ad527578b2ff28812f56235a2f90f1e8dcf20a82f806042099b2c402c92fa09780d792e06468210c757eccb011219f2f0a437d5f70db4593999beadc4e7833ce7655a56ec54522a80b3599f15f941a13adbefc8d2b1bf4e82c68714ab197e3bb0c981092c1af084e3219b72de1c2f8d4a122ef6637c3cd77459b17b1e4d65a4d7f95cac5a37145c83d6458f492f918f161fbec1c2dca8ee53eff152c390e801190cffeaca7d6778f96bcbcae427d2dc3aa2ee19908c89eab65e70ac9d9e57e13331340e413701015bb13f33d6a5c41ce34f2413be85b65fb3fd9027790a1e0a566f20bd59a1daa695a6025a0b8ed00b1112836f328413aee2d365ad296049284e704064691597fd617dce76bb021709781f4d14ec7636583ca862e1d1f62e1c8d209660b8132fa9a10b149ac3c60aec98b8d21d4ccc326080876daa5b96457ae13257da62700b989a9cd3cbce83935ec32d24499498ba8ebad2e08446e0f1c79f0067c0a56a1aa84030b17539531756cdb3e452263850aa440cc7eb72ec5ca134595a3048feb5c30f60778a0a6fd402ef97383ac01ed6e68a47f8a2851b440fd3c8333b2269b53891010cc660f09988ff1ac030eac549089199d44c1ac5b01a62931cb8d331e105c423350ac869ec8b89f2c50f4d2db5707030e3aecfb68f689180aadcc90976f36a43421625af59197ac33f56b0a04e56ea0d68d52b734536a45ddef5b5424608120657f774b48bd100a875421a0ed16756ddcc0818bbc03ac4e4864b6c190ec0983e2309bacc9ca607c1e43bc5429b534ef371bf3005a6ce6581ae75bfc4a9b4609755ab7b3afca9f953338fe391f13198ffb05d6fb6e2f006bcc445dcecf4b9ad04178e06b8f748ca1b2205bb6d64b009b942db5a0533b989ad590640a39d1a19b61f22b57507a5400e25542a8b59a931855346bd2d2383ae264050ccf9e67241b89ddffeb7257830fb37089bb9a2c90badaf838cc40723439700b6d98f8d281776e18fd537ef4c472
b7bd4b6cb6c1671a392b54f05438f9fc3b37fc5f7b28082382fda123027badf058a19ddf6c79fed024e86ef011621354bb766ba09b0c54c0a9d997adf8b03ea6f4cf6b47f5c3c7e2308bea0f1881e7b13bca4edfedb7616b743fbde4ec9487b20d5b564eae043c0246ac94f3b20dee9f501dc0f1493b21afdea840bf4e7703c8ede7929574c03f15a3d2884c031e528e53e76235ae0f61fef7864261f370b289158b8396ffcc5ad2bcc2db0be6c4e2ddace88e4db195808ffd88db9bc4e453479e5ebaeccdf1eb39c6127496750b64b4b69d45cd80ecfb490ea544c533a49c457c9bd957df2a90549f918cde757478aaa3e3b9301eb39e9c6e8ea39a365bae17fb0931c39904e72ca2a9b28bbac7b5e8bebddda51347bec1292ecaf48e4446e33b4ede4666a01b082a9058515f5aaad11398f1ea121ec5cb97557b99503951ab2190db9fdcb960c86ec0c83e6904232263c1e1247609abbf8f91553f96172f1f8fe3054b394d95907669ea25c6c196a3102476127c68d1d4731f7fb99573aceadf347316b23d5151451fb0759186b3c13b679d617a40d7d222d3401639175a7afd38459b1c5744f3972e0f1684c1eaed528951c26a0aebe3200c99f7e624ff9142475b464f771165348226d9c6070f33adba54700e894d0f52924076a05c599c8061191b27be4f9e8a8a91d26185f86e896f01e0e364f797155879d8fd5b2c644a750bf3c02f752681dc39619d228a7e0cfb3210b4c35658df0b9ac05e4b3258e7f345a52a19a9d25c046069a649978c84cf8dd93039450259527a5f0cd07d42bda88156a57a71237d6d2327c536b1334042ee5847bb00525f5f03d850548df45a03c82752668cfd51a9ccab38c0ae4f22755d8293d8ae48e8275235a6cda345bab0ba136317f830498734eaef732bf94056d8de9f165314aea03eb823282a517bf07f8446c909de61c5c7a0541f0b469d3e415a1b095529344c78ef3c051e99ae10f77a1fe7d338363c09223de099b40a8ccfcf82d47a96a767cf32c872d63282b328ae9f2d61ed59100e9f6d37c52a607c7e16aa923dd55cebe6e5c29048d041d12ad851810a4c969cc5c0fdac4ac6675160ada7f2a22773693044aae641700b571ecc289b6668abad2f634c2e06fcb0181a1142109694e5f0209b4e030ae9341b3c473fd430ab7ef1eba95c076b22123ea09c2072a4840eb0d3d4c46158dd2fd09b41881d5c76fe0b40ef5ef8bbac64cfe735922eb9ec30f9840de17a27ba343c9094f0385d9e7c1fa39066920202aca2e5418bf01c85d9e0f3966278363bc8705e7e40d64b7cf65d6d99fb0730a95d9edd62f04eb6668a4d86ec3d7bda8d6b51a7abc8c0192ecc0b23b9926745cf5d7ad421c2b40882a6b28bc0541acec77fc86d904c600d16121f1db11ece5db4daa24b5d96933ea8b26070939e11832ea76d162454807b7bf5d63bfcac3a6ba9c779666e48d34214603356b3a76177d6788c34b43cc71bd61ab144528a8962d365e41a9f5f19c83649840429392d0c7e0c940c6a6086537514bccdd5f04747e5521ff2042be7384a865396375f3678e33af5de8c11255b3d85aefa79979cf9b5d38303e7f898780bb6ddd6df37d76390e72020625757c76f423e0e28a80123ff09b3ab77aadf08c3298b1a916fb5bee2cae65660319c61223697daad3e3828f8aa58eb162b998e76250ae80559edfbda85a7a72ed037c341e35ee1fe136344fd63e4b425421eeba3a5d66470671fe419c1f0d88b200852f4a5e962e494f376a6985a803ee098d2d9d380d42b67ac5dfa5ddc1c10f238b741c83d104600a099e072ad5ff8090231080a5c0a1ff0c17749c51ac1e65830dfae974e82ca896fd4adab911a1f2bc0b1482914dc5a21aedf8f29d098406ef2039334cd4a83a302518abaa59c125e74b2a4c28b914b42e6c32a2acaa2802baf0337881dba738f5e7f71517132e29eb3c28b5b16d84a31f196d5d8478f5062fa81791dd44f547d034a96330c41e68866044f1e1db6ba9d709c8519569449ca4accb933fd91605ef831e6c6382d707dc3e89ae549968420a0a69c2087a671ab8cd24c457a76e2258d21c44649a36ba76eba732b701928d211f1414968d801755521c50ef5679bce0050bfdb484f5c594495f4488d98bae3b24d57539c639bd007af8cfde1d6a00428a9e282840da547a44beefe5f87cd2b52809fb5d054dd7997913cfd5cbfc6a496ed407b89fdb3cbd03b1ad5c960414bdc3d6556e8b26bc65afde667b6a3a886f8ae989b03a5c7d90bce8e0ae464d591c4872810ca5111dc228042a38d2c08d020cdcad2956b439f6b217ef0ee46242a247a2fa58c5ef45c9b4c479d5846e657ed8a4f421c03adc7a9c8720c445814b05324ba6a7a0de126e6682dec2355c371e7d311384f6be97c9b85de7a9a89b01c6faeb2a53af7b71626a2148e0d26bb9f207e8099178aa46646e3402ddb410f0b82dc7780e4fb8f22b02e02158f20fae7f40f0f098
d660abe9a1926d92ff7eaa004118a6f416708ff472ef3041537d77c6e33493a2fafc0a39d9409f05b399ae5caf59859292ae817ae356c6be182b0df0582b25ddb8a9c178777f350fe5fbfe01d4c25c458151a41a6f76714f713b0645d781cf54af9ffb33b6c14c7eada325e005f13f23003091f8206a48d96d156daa0204efe110ce177faea6799823137809ad6bed3f1fae74570a4034a2a0432c10479b02660e992564ca961a93399229d68c98ecc94451782d4dbd2111e0ab635ab5d10aeca54c25917a319eee394ef10abfb39d38dadd492364a532074724119eca9b352ebb8006bd1324ba1b834469518eb612e607f24853eb1f7652d27c28accebd6680e41ca6282a784052ed42db1acca73cb607d4de27ef0d587570be7a2b0eb2e5a315d476c63dbecdbbcecb55783890dd09ddba625a69420ce71be10423f2388124317b76d2163a1e9615ff7ee40a32d5777d7c78bde179d9d94435628584a6e2aadca06f5565ed15c5deb7b94d00bfb6b847762dc3af3dec99d0a757ce9762f50ca72d2885a1648464232a86730d620a5f1609480473f561d89540e06e6075b6fa8455a917b98ede88a2d7e2ef75a0d93aefee180b0cf855775efb73def26a43e4b02f7e9bb9e189a5e37790d9d6f8c73fa155aede15c4cf50ccf35bca543da0c035861af2cfeafd6208075e81e9196bebf33733a9abe65d963b95fb69e02d148361169260ebf5b00b0762a014951eca8dfffcd296fc62d707f3ab2379bf6d745494e24dd4f9572c9983480e774019deb2fc98c72f79244fafc55107bdf7ed6932afc9648fe9412d37077a7fb6855caa6a3ca06f4ac4f02ef8a8c1f4b8e426ee3136536863dd344acee0048714b4fa9e96e063c770986f58489648510d51a722c94b23257d9b6d39608615631c8874e4dfa94c8dce619820cd572395ccad879fa84778b91aa4a92c8a02324866a11db4d4bf3047f475321919dcfabee03e02c1b5127e81ee8953220e8bf4cd32ffddc97f4e5cb04f7d255f492667999195efa769754b0cb18ead20f7449f9b94c5c2e5d1f973487cbec6fe965b7246b5be6c8969e5e4b8aaa659469e9236849db59a666966e2b4b3a90651263e901b1247d58660e96be7f25d5bd32f6aef4335752762b13d44a6a33abcee2981152cc0d2b838b44a8baac980956d27095a95ba5dbab92ce5499ec547a502a491d959942a58f4f49254e195b53fa2d2b298795095ca52bac920655999d2abd3e956497cadca3d2f31fa864fafe7cb1dca4003dee077f433cd15abf5728db083343e474704a90f827d3870afa095f128bd1a19ac05feb5f1d47c2a20332b3bee73f281d0084c639946eea18a235cd1be0ea220e40cc06601f2a0558cea0c90d6471298e5aa970feb60666787ff8509aa120cacd145aee4570ea13ba5045052f5f710ff4fa1c449984d9e6413be181dff6495e100903754a8448cc266839a477ac05a30553a4815e431a4284e40daa2c506bd994b4d1074324f3c17b58ea9376fa739f304edc1ce1569d1e9a2735e9d2564b903055f497f36a5bf5599bd9a6ecd151ecab0a4915a9d8a5e28c82edb1e12f5612143d41cf000eda898c5e0ce00ebee6ed0ccb7dab5d37b7d45f141d786eebdeb3d6036db9f1cb00a2e322417d7f3bb936ae085d6b34e1c3b1313a10eef43c7bbc378bc9cd2556e1a9c101100df63c3b3c3f9dcbe94655b2a4c9c1100c147c3bbc3f9ecddb54d63291c0c1720c16083b7cbf9ecfcd65e253e8232a8da02f58baffca2702e5ee6c058a1755ab022c3cb209809e2b845af1e0637b60f56e29aa4081373b80ea23452a083247ec0d8e3fcaa316e8b8b22b107c5b015530e2d486027b442dea80c37d7b01ee2b22219afab6b830ec98b2ab5f154d8bc76ebc2cd5ed4ba09c2d2219626fed0882ff380926425f600260066ea31d108ab2e5b2a6d7db66cad7fbe189b413801e22106f6d3858223a20c3e717ff31bc8ecdf08ebd7bb03b0a3a9b0c95d591950bc12f65c613920633e590b8b8cdaf863046db7b8c52af3769b21d6c08eb767b554c5d3ca533f49ad0987ef25567ee55942e240923101334f40ccf4abe966e9fc59dc96402402af6899858d34339cb22ad5c4aa4a14a00db0f4be7145d1b5fd97f6269de905ea5d4c21ad5da8869caf6e7cb9e8751b60b142202deb3ea42b164bd62f7e7a80c2e04118686a1c35c22a01f0b2654a353e7905e21ce6cde76a911bb905c5583fd150f324f0e7494241b435c94c68584eb135a0201549572594b59383637638e209ef16d2fdf73821abd6756196ff4207db87cf5c74fd03f5d36cee5bfc231e56e47b21a59c956feefdb3aa69e7a81c7ac60e39929c064dddc9192ef74e98e7b99dd6faff577f980d23f765f7543e8b17963bbef776016bd0951a1db218d406509b451184e01dfec9cf49263be3d12326e0a2c2da54cc29634a05d503bd41dfa2f42e0d205c12f5b56f975473a2e32972962cc8ccba359c44dbc
8834574568d387d815a6d97a109a7c14477492b2f8f69fe65f945400492b9694c32ac19db8d45d15b60751abcc455f9845c0447ccf7b1eedd012c29e6605087b2857ec263f0a1b30cfc7f3df269374ff5766b162cd38c46f265635ad154e9d915c25462fdde6703f755cd41c24f981f1c3e710c13d39dc99a126c62728587e0b4606929f044c18034c0c1cab3177303f8a1b1b35f1bb24db5a60e73a788f5a07816503790e39c0aef5c9270c9b4c882ed135eb8c4129eb3528ddbef978a69f4489699a5e705255f0e71e7f0f3e8f5514a527890bca2880797882a0d04e5cc6fd2afe3667376383a48b57365364d342301303cdac90b619e8ced40e5b67d6df0de953bceea180d050b7afbdf54620beca0589ec62586b0df9af741474e5ddcbd54417979ecd01583f3ecf5526757c7534f84005a98044eb04f0d2c0ef70abe482f35a4bb407cd146f8edd60a3b14b9588a3fa74cca877ab407daa318ce9c7ad5aab040a283f64da188f7e74ac70b68d87625770c9bfdbe1e21a1191d938137d7ac163f32f22f2c368f315560f0b40a16d34c905858cfae6592f1bb7a9ec28797f1ce2baf02c9524b0fe110601b3f290c23c52a2ecf00f92bca0c0a8d9c3cf863c8d4d57a7cf07a0b72ee00e21b85b040bf6603fb96ba7541b48f0b109cc942f24773ad4d1ea04837eea1df8996014ac939ab6c90f3cf011c3a2604d5e926304a5b2464d1a44e394e843d37d99c9c129f1c8e7c217be60b943980864dc6092843d44e93f77e8e53250d140d5f4340a61159026db200899111533b370c504638ccdbc4485f3b4d205c36e1e445427aa8d88cd9f558bdf73a4d188a2b6fbcfe4816c944b04910b83e9460ca6f11e46e0e04c7f2edc9b72b9822ae6081714be54fae7ee8a158dda15aea6beae1cbefccaae3eb6349119afe72e9ca902fd250e333ffaaec32aefbb7467f1142decbffe0f7bb9fe8b21d37f364de63ffd46601e7b5db7b1d35fe12c89c1b7030eb1fb5198d90cd70ddb52b987f82aaab57ec2a8aff9bfee90dfa527ec891046450ad1d8a1b3af7be114dc959f2fe7f4382b577273ff79080b73ecb2bc5aa93c347640197e9a12e932d3b5e01099b28fe4a3bf6be9703ce714e4e5eaaff590e1c63658d7f6c0a565b4f78882891affa366ae64dffcc613822712f36d781cd55f2742731ab2f3eea5aa3f32eeaf0146a7e242d405c10d83525ab40600a346ff35d1b52025a086b8708c00fa96078337de0b7f33cf2dea69be888f854e4cabd910f247a63e78a729c9b984e4849b0b61e39935b4193a85cb9ff5bc64c1c2c984d4e2d06ba8b80e2f04d0a84a55d272ddf3d23e904eca1981476843e5f9bec7c1086dc0570f3f99cfc102c8ca67d4fcdc695dfeb0f0b6be49da7a42d83764023c474ba16348e2b4f129d0b4a4609cf61fb5bee2a37bd7f241fba6bb3261543a3c525946d5e2c04f295a33884d6818c3af977f6b9fe83af57972cce9520913d285d3a3856275943aa6077d7077c1c9172374e5a630f2683f233c60e7a87401cbe1b152098146414c55ace14268b85c08d036ef684dbf6c1687bd88878245cbfad43c64e874e90f0597740cba47b9361614648932c576ce3e1ebeca1bf13c5277396f4b3017dd0b05a0c7e93a017112fe6596ccb34e38b023d39d4cb4ddd8be51ec48e7876fff7991a937a0166fb5cceea37eaab93ff1d613c86ccee776afe7b9ddf5c6fee61b98c61870dc79ed581998501b9487cd9d78ee5ca38f875774313d6c32dd617460611bf3e311e1f3a9c0c67bb20734b3a9ba672da842db03613d88bf750e0332905aaa28d6a17e5bb2abec2139749b05f762fe314ab5a336569ad9b24e80edde051e30b9ac40a0911846b580558264f3159c5aee8fcaf09dce919555f1fd544719404e271c62d6ef5c458405e7400bc09ac828996956e3a999fa1d723013dd0c5985ebe25a6ff6711d7d5781333499fd7819129af6905217aba5c83945b9a6cbef17800020c054d181958a47be789427c18124ce40691c0754e6fbbd85364c87826c6cce7817918ea75fd60883d856b6afcc070053afeb546d73cec46e75a05104cb6f5db4446b36565a80e28ec153eb4acddab58f156df8484f2653fe09a4ac94ce416c5ffc98ee783498f77c7c03dc0d76071f9f681c46f80449ffe8c2d270f6523bf8df8b3e7617e008576338f920703cfe56d89c4f2ee59aa8fab242ac635b6c2b365b738bdeb8b29fab4fd43a038cdf250b48b72f323f17fc123d14c04370dd6e0193972fdfbba5ba1284cb06925ec0d89a1c61cb0849939773516cb66ed5184a6ebb41f0b1d0f26655a364dca22459f12ae1c9d1b2f6dd430e90730f15effc5d4bbbb6c41d739cc519849efb168e16cde7d1921fe176df89df65210f7d309acf7a5b6bc46548e45203d7fe9957ec854565b05df14f24f72bf3ede44653088bdab28423fc74b107d9bccb10ab065426e704a62b42aaa21413a2e0cc516bb4e3cf260be
820163f97c0357f4fb7331afa6854e9b9ac65c98919d91a21a1bc42a30ceb19c1181ad8d4f0b5bd2f16f9f9acee37198c60c80f7dfa59d05966fd67c1994c0c15e649f51d52bc17b020da7f02aff219f52b3f4b51c430e548ab05054407952a5132dbd1d266f82c5c43c99221d082a777d4fb4e0e46fee04dee1e1ab8e5b5ff3bd54fa3fca94d53d8946b1f1e035c36c4b44723db98fc9326616081748dc1eafe05135e48b45b76aef068a3860cce9774b3edfbf52eb88acb4b54a6e298bc273eb8dee79ffcd3a5ec5f6f2529268b8305faefab4d17c2b78e3923cebf0ac5815b7797359fb124ce9fd9391316edb1bd2303f900d8c1ef196dfc27cb0ec4fe16a8247ed57bafbb6f80dd2e2c75ff3709e98d3e9e9466e77f2c56049c32d1757ee3a6a394419f6a6e3cd7cd88157bec7cc3fbdd2690ff370a217da9c9b6bab8ed6703290506326ab5ca08c5e416b30265d611c39c06a4583b1074d851763c3e0bc84a95baf0c278c611630102c54947e0e11828a5f7f8cd5fbb5f8528f919cf7bc7dc2d70ebc0568ad6964c0ad4e5ebb0b88f025928dbed4a300111152f1da31d0b5ea6f910459f19c5c3aa2d33b91d885fc93c4fb6dfbbb5573849392f1f847087c8fea4984af5980051e090d6c8865f0c44e874a98cc35178b4dfeb1ced72d2a486fe020077e106f5a0930758f388df8bd039d88e1738857cbfae3ca5bfe81940dffdbe9ff7204c2b6637bb1071c8f894b2866cc901e4c053f9cc62d7d153e54b25d6cb9634441c3620418ec2eb5974ab2444cc386a8becba7c6b2f14c2753bb91e5f4fc78a7886cb961208867ab94c282dd98579428e6e3c478f9195f55b229a1bddc0e883158b6401ce420c4d8d35726af9209198c17ed2a0229842f816bc849559213fc175ce69cb6d1ac4dd36cd6cf5b82cdbf9dfe93a3bcb59db774e8ffb1f3d4e69791ea189c2352422f45721d7d40dc5f285c0fb8eddca135a3c91c304913c53e09b5c554d76e280c10f6b52127681316a1c03c58bd63cd03a965715f32899956d681989d8085cd50ac9e9dda0779e7cb8924f0c048637083a1f6479f943cfc4b942b90e371134ceb44a5f4f7d613e4caf1d10958286eebe281738cdf1d195eafd8c3ad277176fc6d78b2a738f081654be976c9bca1e6a95c0e5d466ecf313f117b1148b2918ba7a4643356c3d304858f16c4b9e6fd7456151c0b7ad92cb90072e569c53e2e85da466059191163749e928e3bc7ca3293115131756361e6b32d7312b0fd6a841ea97d9290f26d470ba37ec5d736931a48f55d98b660ae09f77677ae582cf25bcfae82f7564810b62c3260feaf9bb15beddf8056ed05b31c3059a4b3e5b01d676bb1e271ee8c704c750120da0954f09958b26b582d0f45edcf9fa2d4962d14c4b53a19f19d9e7770ef7e8bd7124a22361c1d0787458d32fc1d6997e388948ed6659dfa58f40b8972a9fc342403948d33682816516f10e118205f36ab9edaf24e52b627700789440427e48773fc0312998ebe06680632959b678b3c8482222bfc2b758ec6b022b4af6cc53ff2cdf1de0093da3044585677629f1dd1286bd74f58739d30ea6fb54172de5bf4f76df76c33d2a2927927d598a93342e692c438cb532b90f335c686434db29f4ea10bae6cbc5689610ac91a5e945f1dc61713df488055f9656430e8bee3b98c605dd5a8e9dd8c1b956d4db68020e33b27417237b2b4b87e34ad6054cc648182634eb02c3a73500c20738f99907f650d1f7cb7fd246aee694ab823ac801d7928513059402ea1afd520ba3986fd31cc64d1034a8559e7d77b423a8da50fb748522684442839c5136f15da740bd5951e800e706d406eaa4941f4887158ede29067e6d9fa840ac32a71dd04c1d433c679adb8cf83e3a910f2dccd12e33a6f8bf2b7356a1e442b2adcf10b4184d90976597f5a8b2b748f39f2708f0054d65fcc1549776adc75d694450aa3ddd48356e211c2fd00c241a9f75441ab74ae4507688d7275aa173cfb6ac6ebea06dea728b88d432fbcb1044651f85582e63b43aaef762c621988f76115af2c7f93d81a033511554f31d20647a02833695780baca5a4ebf1bb0ad31605838ca1a2ebac5b05bf8e77081c32337d3e999420cbf38d4fc143ffcc6d951c5aa0f3cc9f73dc87ce48f308d8bdcbab52d424fc8d590e72b8753ec74b668a5540f6fe4a2ac1086bf9706eb46f44d7878918f6f2cebd54bd21e171cde851a4f23462d253314acb7e5b524c05099aa634ece8fb012838cd89a834798328a5360004d6c02e089df5399a2118e1415f4442f2ea0b0719b7c9ec3c8fd48a6877226cde9d51249adbdd7a72f8b314795653682d7f6f8ac17b2d326f96476e05d23ebb5a1150237b783e33561b000cf99d831387fe7451eb5dc54e6fe340cbead0278a2628d0ac02a5ade97232ca1218b93244dde52ccb58e8e7120ee707b133a3337e67e83eb434c7eb227fc74d55cbb2054b68c49cda847014867621
bdc48af7ee4303e3efaf7dcacf683978764cd412fc2088fdbd5f8496c67a4f3e832e1a80a112b7298dd372a32324170eabec731122a970e8d91315af01cb847a8b7f65bad5066f08471e28bc8efb18707195e820229ac5ae42ad93662536b2e17a832adf5c304485f9f47982f0050cd905cbfd28686a0650779f8263a7fcec9ebbc5c66c18351b58094ad81a23583a026ff735e88d0e854fc5898acf06e81b613ada22d25313fe41a359c845cca9decf52fb57a856e6c577e7b1ae2e5ad4c65045721ca26c49ef040f8764289f06ef5ef55b39057e264d3d45c527b3e1763e7942ea04aa570a5662bcaf16c66eb95fb2bea95062263f28c6ff0404e64d864cf8d981e2114fdcad7396911807e2f26e05983cbf95cf83d21bd107fabaa52c0029d90a0b42500c58fd8a133371248b282d4600c1c260e1ca6381ba95a393e3078eadbb1bf703b804a44fc008223e6405530c0b0a9497d3742613774731f3413973a43200bd3613d9d67caf84fbab78bb6351beb8909401853f27dda7c91b6a0dc7ece190051bcb3a808e83a690a19f1045090e2fa8ed33e55e7d38685aa4f087cf84d3684b60c6fa44fad383f87b2ae1587d9c1abc9545bce5bddb08ff3c7bfaec76206b91ebdf9ad78c46137deadba71aaf4ba0530f9fa89d8784fd606b1a66f7f15d652eb72f20e583eaeccc4c1448c5fb64ecc571e97b98222ca0aa30f1c04b5f1bc28b341bf0d0cb121e106e6d31c49fc2eb678df39e7d046c686a1b6ff089f3fb38166671c53ed8f058bb75281598e0886b3977993d054c31b3dfefffa1ede2c28b6315b584d6f3197b61ca680c9eb056181eaeaa39284ec3cc47f13d5b1aeb2e4afd0857e314cf32e337035a51645b2de16fb9e27193eba81f90adcb38d528ce5fc4d17ede182d7196495052ef73b1ec649c4b11e9264f5e1ed932114b2b80772712e046b39027f2189e540d8759de8cee878cd8c39d3121af7a7d4bfff3a8ca331291db1a008e64c09b13070542a797303a3dccd4705e6250a859fa0879e41e52a1574a54da91bf4718ce557224e7cedf01b535cdaa21b4a98766184b24bac9446ca35c5400c357de876f923e3d421feecf519241b46608bba1e58bfbb568e3fbf495acc1dca7ef866d5e5cbbf9bf0fa66cfe70eb2ce8c98f696a5200f2a14d44390d5423233275796addd90296a7ed3d3c157c38ca6f28c068310a1687b5f8cff9b0538dd35f300559c3fb7da159afecc431480b57d383aa6bedbec86659c812c34bdcb732851b535d482542e881f82bd1a83efc10c419eccd1f699d36e9b30740fca25f81f325992d240518c2c27e06d406802d1e0ea209f4eac62bacb6c143baa82c3faad02409b023533028e206d287e064844b1a0f17920b224e7405e1aaa8818ca09c5f0ae21499ef8f411e00b4fdb465f87c00e497e9d02d7300df7c77266788e155d19e6cf188dab87a05f1f5c32084803bd24428601a290c221568f7246249adceb821dd6034ea17e192b7c4c3909480d6f1077798ca9332b614b5fd08067688fe4ea4bb60ec5a4aebb324d00b07958da3c3ac0646b9f3561bf9ade75c580f60a9a6baa35e060fc632637f81088076e5ab1c3884200d1a0c0b2f3ef7d506f7c67ea83b778c760877e7699209d03e8557000493f9fea0ccab4f41da08a2ef125041d0f49e4e6d67c5d742aa9719ac70ece730c1e6892d9bac01ccd05e72c43e195425cc48fe001c6c4c469670b076e7909e4b0131b28ff46820c54767683a52f164a6e482d34dba1700cd881291c6e8300253cf10ff069c3a43cc74cf7be0db2f9b0c1195c052288baa51fac050d4bc69d253af5fd06cb475b42c14e082c98de1a318480e6b17e2f071af3322d102f6c923de75ebcc92a60fd4e55e4ac8b85d02008cee38b1443bfb58904a081e4145548921315345235dd4f48f4e1005133fc4b5fb5079704348bc902e012a79e741c08050c3fec80450b194e4f04fc5fcde32886061729bb4c796fd8c329327873113c18038205edce69f8d9d3adfc32771e2555dfe21e53dfa11283e7e06aa48e9222b31eb80e4b4f24a9dbbc06b3e1647f87924e6da01cc7be6504efd22e63eef23f0a7e4e8c85b33195ad5164fe750d952c801ebbe4b0c149570591f950306c9ef156cb6518eb1c49605533289abf8a82daa312b28d2503e926da103e99c82fa33e7ac44a72e6290943df4e2200ca3f363cb38f7b2e0749d5ef5fdb1dde03ac732e26d1b323c443914122c7b915c65861155cc593d56a937796abf263206f25fef5b29341c31304d89dfed2db9619bbf9ef2d04efc494ce5a324ab398575924dafd1b2e79ec0c6326604543639d89e4fd89b7f6ecc9e789a0182df15e54ccad3231986692aaa8fa46d052e183c0d85a74948f4b2cc4b1b4e8709289236f45f35f037d0820e743818bdb5a5f3c9c2d84116fa941aedfd8a194b29746c4c41b67d780f8542f005b1603d0e4d19fbe73bc2f2316a0f4be7684668756aa
d5fd72a8d2e8809d6e449fc25893e0a3f53e09042687c1f03140dd226f8badccb035b1e9f7c54cc3f4cd4620160411af70ba26276d5e4c22ca311e6b55a91d88f57ea6364361dda771be47912038f9c03007702b78be00460b9988ff187df882a2855e10e20501510b2370abed4e2644532acc72322cfad12b9644416076955c1e0a0db3174457b6a1a3dba4496460c85afb9d1d86e1241da26c0a647281013204a5fadbc0a544a746a277174392d4b89c479305367378db5bcbb82f47eed3c0e2f478992cec3a0578a6707cca8129373c24ec923c2fbc91dea8443d651fd2524b80a0e21e69406d1d1e7d751b035489d11a47cfc6c8645943a880fb705416351b30a2720c13b4f1b9bb90cf0d8c228f6c9c1b0b004b3b8941014ac121c12da4662de66b45d15161db0dd939afc1ab0efb6da1de92bfaa6f07b054af286fd8f9bf868ae0fa371defc3de6eba033b2c3b00d41d3338e0f044d265d06b37048dc63d54ffa3f4bd8cee44f30734cb6a83cf9ceeebc78a554cf1869451d35b3ac3193e50d0cbd4d1a32b2bdeb8fd3c6c09451d87d5f4108106ff20d273986e6000ebccd0263c1519544c26af64723650703901633e932eb3bf3ee2e8aebf614ea4d2eb1d21e46f0082d5fe22b415e1b8635bb3792c31d3425450d5ea0f3abe0e41be97bf70caea098bb9ed426a229fd6e69fdc6753b407bb5a1787ac229dddb859caaaf86c9cac4fde959c4a67532dbe0f409dfa480cd1d4f039b3cac909441a0a154a2b04d03ffe1f8c5f6bbaae1a1f896eadb7b9b91b48870ad1563e2a60860c7ce852822e96f68b3223d5e2722a9b32e332bfdf7de9bfa81ef21d320e9dcf415e4bfcf56f41c3b8af4df1d3c50a624d00da1758ec0387b79e3c27e40e737b38405c61f65d9b30c8031cbb796c5d6577d16992dd9e37774568c48e1c7c63bf60ff51a9e0ef7c8b40771030f903da419c9b8000daa056a8fff283cfd802ecfdb202631ba949d0acaf254a3e1eb4cafcb5d8764f462d918f255b19b679ae549ef2c546e01a97c3a23174236abca3961b86f8c51e4341a02738997a24707a5f872bbf5eb0090a7f1e9cacd5b0e3c0a9b9c16a2f74c74e67a842cd6009debe97a0dc4e514e59d85a2cdeddd4fe96238066d16ae273de221776f5f1482d7ebeab4a69acfd1feefead1967fe54bc35376aba2153f948a470ef71039d74b90d6bad011a95f2cbbbbd6826329169e205ee94458bef899a30c1734a6bb3114b19ee36c40602569681b4039a55967fd55b91e31a370fd4a922b5e485f5bdbd4602746e16d6240e3214393bfaf5e0a850a46a2af7cb288bc90f732c7b0fcecf26c11e08e365e638ebb4b840493be9a333b5392367c5f640951b9d2ff960529dc5420dda044b1dd3faeae7dc80a60b98eb84a126bfc0746a24bbf4fffad79144b0d596145681df62078729f23bfe4e5a2188b2bf80ab94d77f4b013b1bee4594111b77993da401b3728212e122cef8445855fb5e8a134550bc68e04de17868a73eb84f6ecca3cd44c99dd2d08a3934f1570aa573a3e7b2304a7aa80f8491b1d919763481cb977548b1ecd8e31977becb499670880090e08169259fbb1b30748e80494e4ee046c301fc3884faa021ec236182a3ecdce88002530c5560d621fc98bf3a794a20009d84912f8c06c98dfa6b85c4aeb0e6cbec58c237c441539ac4f2c3207e928327dd894470f7a7335663a88021703420c963bf1492a2cb9e35fc9d633a81e58c9ddf4b331749c24906643d8cec2f8aef494101610dc03b79be8a3d11f931d74caa258643d37cd4862ef7991564c507068b1bbb5d9159d6d899fc7772ff1335d28bde9e02af760fb00eec2626309a77da3a6ad00d48b78f708a1dd0568d6848965daa38956e4cdaae1b17ec12974371a8a1daf5141d2c5dabe9bd9d66658a81153744200e8517d9b5f29d78d42bb1ffb94668dae373694ac90823ad7d1240b72f8eb23ce1f832c115778ed5f089bc90ed2c1f40f3a86d5ffebfdba73a2495f40b327b3bd58e66fa4343285c85f4d8a0d914343fcd9ab1bb006c465161741b8dcf3fd762629732257ce69bd65889259ab42670d44121417838e7630b315d4c4b168108f49a7cf3e738e463dba617e718b5f39e0dd679c9e5110f3187527c24c9c45178bda2329656b7d19aef64d3493557b0995993e4733e9739b1944a254cc78dfd6a090351a419ae68e2c8b310c491e2d98ae90c3add382e94111047839f8569b4937df5169496047681e47e956540e4db43f725a0f621b6dea1ea7535f6d98c23da2b7f1784e76553674716020badcc87c4f3098573a98504a21f6255b570a0bdc8e9d1578ecace1cfdb205f9e88fd4b540ebd42630600201f3ee035f77952787646c53904ab340254d6c4c9b3e5653b7f7921380a99000a2e88451f2b36107967db80efa276961301a70179301c7a2c3a83e3e74890a7fbd5f8c0f57ea0d26545c24f57991d8f97c31aadf3ace3edc9496681
a3af0faa2699ac8236e7f5fb1e6c044416ac535634d2cffc865fa0f39d5f5bcb4aec5da5f6db0664e8b0809741ee0fd4e271afdca4831d625bbdcf8114c46a99e01cbf7a2c50bf82b156673861085baf976e5b1f0c1c20d2404c1243fd12cdef725a732c9d0761c6a93986eba1da5eded9ffc8f563248fb17500f488c2798fd7d61048623696c094dcd142a658bb5952783a13d6940d337df060100874bc75763151c2511394eb14ca104ea2b2a400d86d4f334a032be8e17e17758ec2541189c87f5da606b4c0361ebdf930ab475925bbf67bf8541e98d2b25afa978acd8922e681557bc6b57cd6a356fb83cd03e1162d384fdc4e53b0451c35528c10b148088142130d8d7056f51ce470bf61a134162ccced736549c570364966b044148b142d1076e603853aa48649f496aadfa0b187f351e238e495f9ed04681dd3cc9deaaa99f8ece0b8698c4949e13493adbefc60d0ca0f26975f3d58485cf0999120abb6b47a81374055cc00538258c0db034290ac3ef8ea7bd385672ac15a58b6989a14ddfd69559fc61d8c8ece4461601753d5f57007b6362ab389a4110033e3a7a4a4662ee62f50fc91a97f4b8e447dd5a2468e24eeee655a4e15a60a7252404ec7b30a915f058815c2173712fbf4e864affcc98930843cd49d650b083b3fe5752e0575912339473ffc40a7f1d66410e00befd0b42e07c3bd35635fa275a4563bdef57bd3516e5233e4a380489a1cc03721d44a1e724feb0495bee665502be87c6a9022bd02db19c8c8ad3f4c78277c06e2c5d2a458421f50a6d5df35b1f9856af84747e88a517efb75b436a45f1688111f1a7a5094b99a9bbc2517d63640ae3a79439618ba8354df91b290e7740478a2f91ff0f7ac5ef296f525ce2a775c1c4de3c5a821b762057ec754bd4168ed4ea6a97ce97a973dbdbd46723fbd9dc7141175ed03bed29f54eefd5e0de57528778e41b65797531b8bf41bb083c74eab1085010bb3b41f446e30e4adb1486cbab63c2414caeb84ec76142b095201f5523d8f7125e75ed5eb046e53c6692ff9a97638358d68147b025f34a398b1b61d4f1dd599cd306042ea615bbb53dc8840f156fff3e8c4282f9d48f31ac4cfe97822daad42eae5e00f556ff1d6ce25b13ac9cc40bc3605be19735e81761a573835471bbcd53253e0dc63ec18cedbb351eefc559f440418d79c80f3535db71df2304bb57c0f84f92fff41dbed85e8b78a88be289ced72549ec7d313132f3358d297eb7c6b31bab84fc7443eb50f8e738e969136bcce4876f2698bb4932f6e749f79349b99034520104b6f694078e10ef76c451f8455f221ef837040e2768989ef645d125c1ff73d35fe9f088b5779948d820dd2bf0f6536f731975c7c9bc3582cfc12e0fe3341be813303e27e24a0607f4f76f9bef81820edae99fcb476bfd71808f7ec4b3c1a84e6c08c76e92e8ed96531b75cfa68b9a4aba36ec1da1b65dbd1b18c2cfc71de7dd0913a98f52b25567eff38a120653582d18a069f40e2885d28fbbfd41ba2745cdd009e0e48fbaf79857441352bbec710a27e79500c6dba382957a91fbb71bca5fa2eb05af6d1c4368c936f43a1f9e7a4f2e913950f03dd06942372ee4f592dc80b273c467f549733f9802c8f8f94669e0147a18000771d71fb7729844e99b801df90fc90badf76434e324d3441687337b921dd3d8f4f0722e4a26a03d46b2a74054d3439455088e76c31c699a87783ee30ecc7365826207edda9fe97b3962567b64ac1dac3bb33ce86f831c2bdc7d99165ff2ac450a99bd34ce49d05ab253b6b938e459dac58eeb85caa8f7597beba853d89b0fdb201d173b2bb953b09552333505af9327a6654b9db445c5b38d0ca44ccd0145e76d387b8ab4af5241cf798a2e42e94dd5686da68f699eea7322ca219060b2d1da2a994b607d14f8d92ecb507a1653d1349517c0212d913de9988826da75343e3fe2270b171643cd4d03cb4f64b5794ab40e9af7c6f5ea53fc764e216c1c516b995352f2b1bc8327c7b8142e27c96601a78a5331bba91fab7ad50b8321d094d0dd8536b395e727dd816cb418bebd78c7141ba2077893574392ebe7719808ead7313266572cacd0a199ab3c1a7736bbff90911533c4955369bbb291ef9686c1151fb77a6399e80511957eef6f40efdae1c1c9fb5fbe423da347a93decf358a74668d67eb48e7ccfcce06ba8fe90dabf3f507a96cd141120f771352c2b1c5d43b11b7ace03d1b625f7973e2caa7d48dc86f0eff08c4bbe337a5622db7b9decb1abc60da5d501e86f37e26b0b1cd7610ef503f984731f62201ececa0b88a6af706c51522356921ced5e956ae3c0280036bb9d285ea0516377bf99758755c2c4ba04fa4ecfa56061339359cf092852cb3124f369737202fe2b9757f94d9865c02e1287d654ddb04b875eb9529b75c1a956239a02fb2c3a39158a16bf6546da2d3fa5b232cbb377563137648c8285af88fcd91debb686946024acbbb3af16
9cc9684608f102807f6dbf616be18300ea00a6042ef4165f36f248facb3cd50e5173194a77800acd61b201d3e6dea3cc1213a8faaa9e4fdb01e5960d00f31f60068736b7b1b1c57036c10d888aa58f458ea41fff53ef5f894ef6fecc32f6dec5d1ed20bfcf6cc71bb551133252f5cd2a4e7a57d59e84e61672992f9b692669b2430e0d0a76e92e9f8680a294b99bd1afb5e6574e9341cdbc2c96d71fe14853d47599b27e9db67a4e5ed712dd22324d542a6c37dded05021f16652397f20a0f92f1dfb311c824b80ed918cb921362a11e4c66adc33b2d2e06b2e2490f21f6e141f7263aff149e7527639975459d9046ab64e53a8e9724723d66d4c8df0f2c9ba32593c76c58a6d6edf215b4490425f1e8c202aba3632edf6b1d24290c453f3f34f930d3e8145bf8d21eb2db4b76c7d95dab06d5a650fefa80d91ccd1e7c499ee745d3da268a35613ee4d27d3a032bf120804ebe99a0b7dded54391ab57363fea65c9a539b333787fdcb52a9f8402fd7c87b4b136e45a37d569920d890e6403454973970d0b56e9c5c370b219f75b7f5278e4910ba7a4df97ec4a9d35116648d5b6955e554c6c639e55c0640562e605189900d9d871ba962614a616d3b1821c18bda07ee2630b775839b3493bc7c1ac139a6baf666a34c59f25d8d05a26da76ee7756df216294da2d978a21bd71afadb67a6997c270e632361942e8eae77fd4b51d071ad1fde0bcede7a6f2cedcec52709f7968d8ef2c6636746cd10d44d0b0192e8411af675399a22428be7fbc2519b8792f69d3df4dcfa24a421d99283e4db6a90f041f4e808e7f37a04db264182553169aacc151907a20549241a98051a5a4db60218256311fbd926a91754f6b54a8ae5a3752eb08b64bd83d682b5f7feb5a323f2975a038bf7b22753231fac952cd8c719196e48d2e1449b06019b51a6f5f9c1dbe68f136487c8988e199dcf79548dfab8fc91cc2844a8c68c9f709db270009a6f9056e91e4e60d36bb52109c622f26c509de240fc004410cc5c66f6bbe0cfa026bf5ae8ba3fa51532ac5275f009caeb1512e7e6f62fcf541c04a56183f582286f1efc004c65576516be8f1b7c21870e1c51c08416326bb80c34af0222d0e40bbc87f218e563f4de06b3675c5210fb4eab4f16062fa399c8c4109f3640559757d0308d5802c154c27d6e5ffd2507303bfe4804d9dca3316d6306b9f5e6596c6c033283b94b70251720744f2bdb132d950579992220df9a3620ffed00a26ca01d69ea089bc02822373b5b88a5b491287108c170d2e4f35cee5940b279543e4e17d3d12503c6454c8f07ad6010dcf60d37c30cdcfdaf5bf934a1921ba08049e8e92c04b4c44357755f024cb0935b930b46c7dff9c90d77b372b45c3689d9814d30292ceb004cc1e4d9b022ff4bb3491921cae4998298333c0c8d45883b2fe400ba0deed2a5971e85d71cc4f17600cfbdd37de551c4f891e5f167e4e00b90803b94cb7c4b673e5355a6c7f911125ef8dd68524fc0d466b4caf55b61b5ddf8da950431d38cec956a7ee7acd7b5ab2ad449a336ca35127781c39fba875d3d6eb3be2f830b096862fb0b47a7645daf2f830b113a1018d544721b1567f8d5db72ee5b7c748cdb2dcc74636ea2ff3c4cf2325851642a828e5c2759d62461d871188b6de395b500abba369806425a7e0c64cbe58b55ce7549aac7e61fe9283d6a32ad754ecbe9b3c595bd8f620e2f88c161bd35dfae0e4c5d63224dfed53fcb09d316db6e2178a73e0f88ca2dc04c6e9773aa3b60090b55288b21d78ec7a180b57e7feca9ba04043a19c9503a7d85c785fa123537f2d771fc1e9c495d37e445b9a8b0cfe3320630bff632dfb71143d9f1e11869993f05dd0dca0898625bd6084565125d9136ec605b9a0b0af941385ce30891c2a1699c42f508231703a08537b5d0ec81d8a221623429c4471e2b9c068a51c86d478769a1146b1e79223ade7b7ca464f52dd7e5cc16b54f8b90f72ce9e5a95a0452965eb2ad9a044bd64067c7dfd239f2c0575d7bc7178f32d98b2c55f45e0385dd2fcd64dfb8abaae1661caf4d52dc479bc2496205f80d04cebdcc55246e92fa13cb93159f35c8f5e5b4d875e8db266c21aa389c5e1f6502c45aea42383e52a371b07dc1e2715d2872732eed190184852cb3a3715cb752506475ac857ad66e87fe1870af34068ca3a367af3f36ee405d34b93928efdfef658ed390dff4c1349d2751e515d9e59c60e7958baf542a9aa227f96bede7da929e6d6f1bdd6a89aa67950fc52eac6da7d62d20b9393f831692320e9047b9a58e0ce27d558abcfa3745f17956262f57d145c0f9c51903f53938170032063e2b8ad50516e0e1710c12187f39ea33dcdb1e690dc770e56e36a8850ed72c9c7a1a99e224130d18886b335146c4b932c722a173f8b3bf60a2ec699f4792b88af02da448e9df718e7b6891ccf0661dcf7b40558c0818b1fa32cc413a11660a6661cb0d3485adb9f8c3e8e2a8260
684afc0f462a1e34cbe0aa8e106af9b77de307dbc8bf64a4baea295a7737436a4492332815450622d08e69e020bfe6c778787d253dbe5a5f176cb094eb9969f13dd2024aefbec2737054eb18b07e3e0ccfc6e4c63f6a51bc332e4c42e82b16c7cd219e1bc54f4d58ea9aa682c72f4b9a53422be502773d712e5d88e100457294eaabb34f414610364840095a8c2e74864c9bb1e8e4145db60b9ca3a34e32d3d9d1b06d61686eb313c595eb117ea1e06a7dfa591a040da6afaa6c29f1c4e126703f33c4d72f54f4dc5fc1ae520d9a0f2b5c31d94ca9bc74ab934dab151413b9285ff79f0724f313095f78b883a1e22cf3313ce23bee7718665cea65e67a555eb4e6ed915bbdcc01860bed5d3b90f77f115f0f4f5ce2fb97ddfdc10068e9c65ec1a38f02b77773aab8dc5bf0dadb6a02f8e9138a9cc14354beda95252704edd71a5f25fef38caee266dd9b4f69e3fd7420cce0d05f5ed106e80f9789bb8f4efbc13ae8d8e90bca90f1ff7c80386eb8c4c873d07ee094ea930f684c6c58aa3fae05c0a07e69bd2e0876cf444cd64dcd92f1ce4a2b8e994056ce3bbdf7347a5d00619dcf711588799f0bfc0511ce11bdcb607a5b995d3a6fca90f8423f4030b05a4a0b7580a062b11416e210c5c5622516e44041b07405167888a0b034450b7a8030b05a0a2c74b02260bd9658e0604132160bb4cf9ed2a24a1e8387cd73e2bcf1d8ca036b8793c34229b290038584855272410e15066b5772a18305016b5358b80305175868d07e2bb935081824705c3e2f613cbceec4f3960cb077c848c02e8eabe03d7850b00ad997278520b2901995c8af2c47f2e0c0c928175f598ee0e103132c955060ffcbc2ab2af8cc7c2bc75be75ee14e29b46d437b4d5f680e110cac66f6f5b93088320423cd02b7ed516824fe48b8ba301f57fa431bab1fbc1a10beaa9c17094537cd37605b0c195df3deba4f1de1079b1c41b01eeb6f912360ff21a5360159e971c958375f77aef3c39a77767f28d882c412fe0273e9ea78e97322d35eab4e923b055306d2f20a7f331ee9b294a098a59d3bf9202eafd1f77b813e9e5c6e44b99b1f82df243621ccaf3bb527d65783f814813728a70d7ff8213fbd2a8a039a6d787934c7a0fa7c433cbcb97d34a8ccd6b131c4f7dce9f7b84e763b5fb5005f1a61b8dfadb791a373963b50faf3e40d59a703282f3704340a7b2128d2b91ade11d1c2475c8dddc338b3fded1bd8052edb8940fa31a7794a7cb44612e2eb2ebd0ec2f875b8a590bf4b7fbb7c2e9a9ddc4cc94c27a2a62d2f0b8b21f09292935daf2315e70b1fdb64874aa8ffc3525d4efa48b672e08da347a1692169b39ba3358f19cc418416398ec8fa8b32ffecdbac1e50b96bfe16f4e209841622cd840f54347350e6236c1daba2ba29c62b6d1a18ddb0d281be9e39189eb87261f43dcbd7a79ad72f76641a4c60a9f6c56e5e971c35be68d627a8324b0c6ff74decb23a33282c3d15bef82e7ae641f3544f464efc61c2425afba4526c1dcf4b822c774a20a7063398337adf1b74731545988707e557f7f56691d001b06a3b8187ceb5c421199c3d8b13b6df59c82819b10869c334ca1a4ff686e97734433245deb067117a727be30367d23188f007ccd24e1d91798b82fb6eb3c113083adf27bb74ba5804bb2800d211ed4822a4143c7e85aef10f19b4316fb7d1561b4c55e4f39f993fe5b65debc13058d7f8d158d99536a3b801dcc84fe68b09f6d735a572cfb437074037c64309dac628785743def21b9505602dc2baa9d3120a4893712f90da1cd7677846713ca6b9fa4db94e8e3f7266d7b0730f431b599dd9841063e6ef6769bb7d35edc7bf6566b1948c1a9b30d288ebdd1e4593904a63769719c85ff3db2e09e1cb1acffa8fb96bb7bd54239a99774e7e2cc24bd7d3de891ea2af279beb9d578a1e3f637ed72de02962746010a94542a056954ac852a9217e03427dc7a41dcb781e3862391d392da4bea294ee41392777a5a9be28deb28c6da727c8c54bf5fcf572fda941b0b7ef361c83cd353d613bc8dab28026818031502a0bd17a96b40514ec06011c0407253b4134b6f42f833fe6ea45fcb458d080136a2c3bf1021aaa8422a795db22abfbe888f4aca70ab4352b2eaf52b249877ad8243a76164a36e0d5832575888200ab41f7cd6754446c9f4b45f1e23b1672217afd1932a83a0b5880428d4899a783f6ab018898529d7c0ec05f8e72a4d0acb5a61d8d9a4a6286f8503fc0c7b018967d2c3f1025050611a84819e0e5528736c7a1ad385ffdce9332d576da99fa72822142f85575215b257c5211c0665c642749d16e8949147c370b979c97c49a1a6ebe85622466ef4e22368211b960e4643eb39e4f7273efd28da15c93ce2e6f9e8da48f9ca8a6c80de41b6b19ca0c76064f921126ab1b5e7ca40c41c6816812d80777a5721ed52c6412e8d1bb057389656c3769cec8ab4e64d29382de904164ea3c1631c9db1a298ef30
532bd262097598db5a94db6e063d98cce17a1db06bd8db901ab7122ad4ac6a3eca7a42463840f6d3241d679848d3bfdc556740c4af3c3368dff54d5565b12bbc83ad425bf701c9c1b3ab061557457a8298c57abff242f889fe1db97d16eea4b849872cd6920440c396d78878f45637026601157551fc5598de794aae544887c5bb2dbda69b6ef4c0a3f26c930c8dcf004470fb265227877196ce82d0c9672d093626d9c0508146b5589858cfc69e5f7802ff53af9636abd9e873e005973762874e5dab069648068515939182ad93678fe205af4023f7e4d86c81cd68b8b38839333f7be815a5148cbeb40912120463f50949e8cd88bff7b1e23aaf47063a02099d738e9fcc8f19be19d8a05d3cfc2092306a36288a7b6c87fc13fb15a007c7a5723807ed8f3875676c72655ec28e544f085c95c33ab0b4e27f92ccc59c6059f20bce116fc756e88cb07d27f581e366d653e355300a5e5878597e6fbd91295292bf89bcfe670eb7c2768e0d7314db294a0791a460dc338123124813924762e523598e1a056a1b12a78b5116c3824f7b0851cf88b33ebff0415c5667e406d97a30f172b05cc6f09923c75fda1ee54b2cbeee5e816b960ccfc07fef47634ecec55d9ab93621a2290f6b56eb0386c5850dc4a66d8050b955614831dfc225e8275fcde5fc6f2d659047c76c3bca0164ee3c41a59db28c913b63ca6e8857d81a404c4ef92518fc226917ff84b42d1ca924ffdb3e7787db6bf4429b8b6290088b66e67376729f9bd05074cd4a4507021c7825c889e6da3e49c324fdbb0479e5b60303533d6061ac425ad336ec9827c59066e4be0d1cab0996f2c6993824e920445b6d25b6d70bf1f4514799cf59441f1573507a5e14c2d5996ea7cb94373b2e16141a74191d982b568d9d48eeef66b67761addf1deee9b6ffacbd172bb3c09b86318c5b0a3660c7103e64436879025a77938b4caf73ac424f0ec64dcf3985300b75c3aa6326e0efa2402758c10a890472e5e42213e8471fb28f980d5ee904fa5169b81087eddae0ba207bc4281c5d00281b9d2b2c02cb0082f3562f1c3adcd41069b7d1c0f76c36dcf50840ba75fdd1fa92f9813b362c5c377d8d85d4b62db9c0eb27abe95de1a9501961542750b63aaf3aaa91ac8da1fc4b487f8eb8cafe12e72c9f71397b137cc3b8450fb8b9582970e8b4e01002d854bd349306becb4edc2f06df708d4cf18f04f68fbef82752bab7bc8d75e56115554e30f384c98347f90a2984433b7031f1d6e01211e57d3c396ddffa1c99c42baa9a0cf2e6c790ef0d0c951b20d3e0048074270d70fd8cac60fa838e37f46083e603ba04a63b9242489830036d525d42e631b47282ff37d29c2297e094ebab49c95e8db64673a648948276fd2042e8e4a7b4a481f23b8ac45728f361d20f40cd03c7107ec057ef45b8865a1f3a818aac49c96049ba22c50425b31248bf141792fa6e21a42e1a17456a2ec95b9b148bf2da5236b31495ba0ecfcd1a91cb4c8b4b9af81b1305d031257e40dbae8db28ac4bb9a28e3542dd5ed8b0467b9244a786e85c7ebd4483ce4c3e35b60a3cbd8acec89a6bef44b72d539e3209101fcb28ec745a453f146b90733769bf96b9766f2751c3783639d7014977e60d3d2553bcf0276fab24c3277aa4a1d6237520fef0654080e8c40e7918eafc013cf8ad4f767cc278b0ca8c19029d844cccd841ee1bf3fa4e5aeb3f0c157c373bb03c7c3eb30b20efd62ed34c1a55a430685487bed8c631f164e1eb2491c3dd17898bfd3728ae7fe80bf22f9b24fcedc66c808d813ad1cd8b02e9e807ad2126ac4a05c04607763b740996be6ef94c0416300d70343b47b9d30adb4973de430653979821075227010cdbba100e46cfbb9df4932160987c30a8a2589df229bcd99cbd1b47d974c24196f7b2a1241fa358a7d4d201e15e4c3d052e88e6eaa778c4a31c3b7c058a89b3b8abc0b3c1d435ebde843d90ea4b78dee9a6703957e0e09d7a7a04395079094059f6922d56ad39b4a64e7423dcf3674f38c290b1b7d4c9a54015016c37039293fd2d7a721019ca5410f0d205f5f90230c7916ac16b6f7a467fd32ac07cad9bde3c65fc614194ce0bca06c4d16174b8513a7b20c9be20dae55c84b4b93ddfdcf8275939e77d7b353f0044f970120934144256698b865d265d6d3c24ccd7fac60d59559afdab11f3a0ece2a5897852cfe0af36db26f6d19834fba45f4532a8f0824c22e5a99752aeee04003888315899aaab9bc0cc7a742b06c250765f990e4e45aaebf0a4aa8da75854948e3f75c198ad2b927d964f08185e496f58fcd2ed53b4510df3458f80bb480ee038842028dff97b426095640c614f68c62c22953d52d2a16112a333500d0b26f50064ce6325854ca806bf76176e1809105c4c842d46ee2d159c1c16cf3e4ba8c1272f4b0f9bf66a6de99e7d048016fb81f59748868ed7ac9d2998ceee3b95a1a0aecd312d76318eb1beab073a5728e442004da4a0
8a89932bb73e19d22fabc5a035230868537160b48f31daa60a52848295f5376aef2bfcdb694e4e78d6ea143de79a4e32be80f8c9fb051bf23b6ce012e1b3955efb896c128e5339902f7850a425f5b39c0c47e5eb6777bb201f0e5f70128768ecbecc6c3430fbe950bb3a43e364c14b75309eb9f833d742e147c9f8fb99cd3121fcbf7f63b385751c0b48cf5ba85beea6bc7288f6b1c3d8ea3fbf093632c83cc08248f69677a696d08e16208645e580f56c4f29683e3a8f0636d6be20eadf1c909ab2ff26092b72de2edd60d2b2ffa34128594d6549873abfe7fc5446af924f73341dc7bb6701a344df6e4dfcd08b1cc43b8d4a50b5765aed4271a5e3b394d2b421419c5bd714f16392940e56c6f95840dcf7c57f18447d4322fa67fe3852b766ae6b18296f11cf16a9a2abc6c8df3c5ac3885edb6490076c8140377384d86584f3ddc74c323edee8a5690bf78458c7b3db23afdd2511465679181bac5504a8c8defc120155be93191fbf716c78004a8ccf0f505f3cb083c11ba8cb5bdc70ec951b53c13ef9a9ab9897c7be6cd8deba380593365046503845cdf9dbee18108ce31112ac9076892973fb98bab05db180a4dcf625aa5422814057b9d9886d1dcdc03871778571035c48248442602040c7e129a60ea0decca50b607921813304012f6f03702269eb1760e1c14fe5bb2f96f0abb0812a54c9268108a54d79d5c87c6c5254bf2698d1c2bc599afc1b6b527a38805a78f0e59b855444a1f1f31d16756e49b88f3e89a1baf2dcd2fb952bd5dd832b7ebf4d0ad7b61c79721e08beb8d66b21c0f1487d5abf68a49d140b61e9ee184944c727ceb67fcd0638698064d279333b6541803f52a905953420580c0482bbe8ea7cda262ce40aa223d10e2923e2bb51a6a6eee34b95a414647cb946c68f7388e87d0b55d2f5186e301be67719c2387ac1e18d7db8b85f09e59bd3411c1cb292d77ab8e976c4e2aa2a7e00b82e50e4956ea24a5297d9c8dd503028975b5a675b75f448bc25acbb5f35ad5839b5147ba1fee8267c1226326ee1f690ebd976494c996f84a4a8d171bea2d21f12d99044dad0cc1a18c9a28591033fb427fdfd375fce29bf32342040adb9367308f552c25935e33fea65c568cc2a818006fe1c34c4082c663afa62ffcfcc752f3eb9379c05962f53d605b35e6dce92cdd0ad5b849d50733b171402f25f75101676d6c1212cab899d35d5268f3e5660b177992198d784c2fd32da6fa643073f80fbc36f708c4c1facfa5ca6da45e66ed6d1716625df833b093ef9150468d446eadba73e997ea6bb9c698741212607eaadf046dcc7e69c2324362b16d99c7bf2ecab60d86042c60dd5a4b4f780c6022a6f3ff4677d0e9e9a5df32ab267cc8d6993efc337fae8693722ec05040a6d3cae7b48f4a89460b57836a9c2102150772309886507fd8255511a7c7cd3bd68506a2a71befef96bf06a7082a7e69f97a19063af0541f0744cb9fbd4b30f997f3ced07c494e47c6dc42cc38dc0ba5b34ab3090620dbbfb40cfbbacb45cde7e36bf2ad5ee8e5c33a53910202948acec0eb61a05e54bc11accc638b78a126dd99d2861111348289bfd9ff78599ec6b0d104aca220089154aa31a56fffae66e5a44b4f93af5b66a68ac48c42b7369757393b474a45fec3036d8a0718b26bc6015a248f6943d85a87ccd509bf41c6a18c520611a2a9be1f28b50bad5aaff0a33758e714d7b08cb5b47b8e1e44a938150534c272d530337a674a30152725dabdfefbb6bc462c88e2b8bf14cfa8119945b07bc0e0364900f7b66b0edd0ed61a803253a761db81a1ca0f7f569baa28493bcf3a8600c2892f6078f1825a3c758c7c71fbf05d9b3e71da4d4586bb54db65f4368e1ce8a298674c5c36c73ce69d5a691ceb5349b0d3fef6de03df33c1806eec22e19efea0176e38b1a912dbb2fd91d8a1d9af8fd25d2dae92b279db4c24d352ddac21bc3604eb484ba63c07a9e550f8568b80c49414460470f3488905e06ab680ed750751af431856a3e900296e4f4a29cad410b5cb3ab5f6b7f61660f8e4060b5b2ac977ec9b0d32f1a4401e287b74dea2f355dbdd52a1988a6576d7f1f4e6cb3d17f1c05504d40a8f56d76ccdfb91a87247db9a01aa11404452b4d58900ebc62afeb24271b10d73279590dd2fd0b13993c905fe3d8de43e8600a1ad24cf33cd18e53653c3868d24ae66fbd7648f184f60cae20cdf30519786207d0908f69b9e2a633cc9c80fdb9a55aba35a6fb621b636d5380885e8f6880de3a5708627ec7e2e0dc86b15a08847c1ed14344870a7b78ae67ab0964705691575b59080474fab4d40a0509a7347391215c139f8ae3908752cb99edbf88a3d00ba88b7ff1c2ec02ea16f01c222c84b6b946a7751842287e6410e8832f4b045929f27cbc089fa4cd7cae6cf3541dfcfa2a8d0ddcd34b741283096a466eae22d0fd67d640cdf962ba6f4438bea8b700f81f26d75387ad39f9e342a2c3038d9875c09
e08a82a7acad643990f47aab59458698e398555fcd44a110f4eb25abcacac137471e1cd320554e67b99ea87ef67e685631348f76da230cd0db0d2ff31803083fdd8f634683402560260856bef7db15620480ba797d055759b5ae348c682c89b098a2b73c48d63a55643355e189040c879c88ba01e7e82f759a4e0841786820ebca0322abab56ed4932379d537d115a2da3711a4705e55885348194158b0484099e288d4ec8bb4c6a9adf85b88ae2582c657c53c655c94e9e63dd57df60de4ade03731406020c1162d3b07e3c2da2e0b4b6c25ce089be7f2369df3fa4d1d0523b59a0421ea265e5377a79b454bf441c04fd3d8f6ca840fb5921055b2f750bc38343eb4c662b8c9711714b0b702f401bf4796c385c2ed534645876f4974444d5a83cafd853555863e09a1e8d314b5083c275183038515e4feb7ecd44dd31828cc9591e6196240f792c4262e5bfb6e33ebcd136f9cb9c34a2a9118c917af4552706ea1e8c46b256c4f24f83fd9e35483f4c5ea687b321ce8670ca22374a63bdc5c75bfdd55bc3619d06681c2d1fdbcac129b0e89616776054394c29124b73f10f3facd409b46cfdeadef872c051c1061dc59f71c3c739233a35cdcea6611fe28c163e678d85cb16117d5b24ff8721a0f357b86e07032b052f668c997481c487e42547accae00694c1c90d8a25723ca36abce32ac1c80d7706634195b70dfafe4fcbb2b913eadff4c0c695c5f540261b06ce33a89418a391191f6c89ed4434a66e2465e8e1bf5ba596a5e412e8dd2a3a4f4da1cc8ebc48d079ea734c34b9c72ecaf3b1e700d27b71163dd13891ac50accbc68258cda02eccab09bce8d8f75f8496a5daf61ce0beabd8a9b0af4ee901317510f8fdb82743bbbd60a4844033b8bf7add0b5b6b040301180e9b26e2aa536c4c23eeda2fc8d312eb3b511f80206ea4ccc2f51bd5f62f8f7b7d98736b04b4640f02816e677952eebf67a0d9304e865dc805eb6b51596a219aae1d66f9a061ec1ef2d62009b0c05923e4807f3f1232343004c60621a64c4c894f9f3cfff4a798f25399c7e25b34f935e261f7c5fed408100a47872ff0e6f69c60a791dfc322b2792bb6f152a3ea773ad3ebe5e6f75db01f09c80ac86275920e709140061f8808aeb327fd32a9b931ae2d58d974ba739305ac1012e0b3847cb426060d3cc6bb035fcd306178a5ceaa5f6fa8e3e8babacdfc0fdd53a93a3dd3196fd6f96651de4ad40eaf1613952666c2187c0587c84a01453bc2675ec6e57f846a05dd4fcfd8706a9c44125f2de110d30a454417398fdbb97484177e0faeb345ee2bf95e635d39a7a592a489cce1d320279f79ff2a954cccfd5e5bb6d87f54c2ccd6ba1ec10ad159e8bfddf04d884955554860962bb70579cf179d7b9680fa2ea55eac6539be5ed08b83bd80b144312e2b762ac0b4c5ba73497abc46c6da135bf101a2861a9efa3159add399b3d9e5d3e4b33cc3f2108d9612c0981f606ef82c51de59a5d1b8961b8ecde313936080671e0591ef1c5a37e9dd174d670d1b20e2545e79b5781d18adabb987bc415f9d07faae348b9432782dbfa146eac22113385b38fbbbfb8da3045536a0199c073bca5c4d9754c0e46f81a2ac2648bfe53c9019fffa477a03816d358f1c6ca061c02017ed30a0e1ba0f2341c17f8e0171df78f65f39f7d15fa6ea37f6dd19f63135af21b119ab5d9e4328cde6a386374d697e97912bfb36605619f065e91888e36005033373e26311dd1714f569e83ca371d3f421c0ccfc6b0a141f80040200280920240df7e00102882277c79225bc7175c08214d5af9ad867ca706009497586206c8d431d89dda027de81a1f7ccb04489dfda79151a059fd6c68619ed0e4c449b627505979bf49cc8dc4d52a43fd959e35f821330ad0a1f5878385a1e9e16718f46f6c15791d2ccaaa00b40e7c9d12a3a21a799c99994e787d8aa6c6aac0cafced4f67f44f93f9542781b97fe493a52edd5be71cf1c0270fc6a7a7ad8c073ab32f90aa7d16505079cdce77921cf38f63f87d34da61f87bf7d836098dcb712bdded4b41d78583e8a78da82d1db7dc26318debdfa36763c1860ea82d17a795e5a28002362cad398b7934a34f4eb90b16849875b766728aa568f87594726750d65978b10ac6cd2fefc752ccaf25146ac0a48b71ac1182a9bd59eabad692a5acdd289740d5ed9ba661b1dc3b03bc460013162b4d5b1cde8a1926505f3cd9c06524c9af2ac1251c0d006761f61f70b03a36c3d5310ce6d1d678e7483abe82f9d73424b04b760092b20ef9af454e4addda7e0e57214bd960d240c2d1683a4e127041039073d042ec9dda52966105cd14242a3e0b4783bf6b705a2f888b8fd183b7c3cc3a84562494ec6ba640ea731025e2ae92b07b6ee32b0eedc4d3503407b6ee137edaf7fb3851aad025898319c6539fe2da717ddce7c0f4626563b907e32c7ea543d7c2f9125faafcabf8f227d811fe300cc2a41be00cbb68f231aa11fa
dc5dc1141f125135f230b237d843a02202a161b96d16b3811ed7cb93d11962a39e93e4354969e9ebd7bf5121c9a8ad8bb629a83f73148f5eb33d6d0c3140510dd1b91feb48f58dcb1c5fa1d9159864c3885c414382c345445d34d16be3a4a765f7fb5b4f21cde4d63c641548baed20b7b066200c691ea7b0cf525e31f6be8dea9e308f07289157a0dd0e9839d838a4df452fbd61983ffd5da5f46b95219649e2a33ccd20ce5cd0997b3ba6360cdbfa1327a4924292ecb1b5468470758b158e841c0257f1d589186941a98f41681caa8bf847bd4ab48adaae1da065fe08b53c75f44ac9704020d216c015cbc6be1e3cd53f19e5c2ae87373c73bd0cc39206df8ffb1f270d0aed453ab8c4fbb97742a63e71ebd191e7a163086a9306049145d7620f7db99b0978805065be1dfaee8d28c0e9ca66941bb0657a01d3b24456ce88beaeaf307ce9bb93d0413bbf764e0a0bd5393bdb317e87c42ccae587fb8d2507422f6c4d8ff5748efc258918f457591a12020072f8eee85d25f7a3d2bfba5cf2cdc6438e41a3e2ca009c498b53844e8d3fda4238ed15e498d18bb36ff5bc3019ce8cc5680305d4a9495c360a722de08e3f9b17a643d08f452acfb6e685d195654f2b1eef8f1ce3560117a609c146f615a2fe4c1c8a474c023d5b78aaf61e8984308385ce832cb19127eccca8716bbfb430d547a872fa0b48e8e2c4b73610a1cc17646033217af5d90a0bf90f9e8449900c83fa7816f9d790f543837838cc230cc30d1b3b72a569562ed80998536fa0895b1756fbc4df006326dac7d6e4f9f0cdda0953f6d39204df12cf2cd4e68b823640372282ebea6ae634238dea608bd0b3215ab28855d3580bf1925667e3afc61e9d3d406b9cd6fa8fff164052f7247ccbf4eb478e07ab0b206772547eb00001f4f3035a2167a3592090ba9a0e4a6b8fc3bb00821d8c3722e0fbe229a0a7d2823c1c5dc1ab87032778e72abc0176d86882bbb4b519414496a30a84e93ac732479bdf10cf968f37187bc966863fb56cafebff4dcb6da485b4bdf7de726f29659232a009c109d40aae398d75608cf3b0471611aa5ff1d676af3b199aca57afe502ce5fe61981569e2a66ccf37df82595aeaf91ead4af75aa0d2608543cefe9914e57dda4e7d53d67a33eaf5ff7ac28bfbfdd9c734e94774ee46dd88db2b6bdade7899fd65ec7cd7fecec58bb791eb7ee1d8e73cd3d2edc197f8c9ad7cd6b0ebf47166cfd8a72c61d79b34a1debf7d6b7ab2738e7bccd197fb4abe6fab66ad5a55f3a779468b3edbd0e90dfdd6eaf36bda6e2f9176622ff39a59ba3e09b7719ff055d82d686297c0f8a6d8319c4f30ef394f29edf30abf09ef7d0cf8a5f8fd37a27d61eabcfffacdd66c0357cfab5eb3ae7de0d3f4eab1e78eb1c18e3eafdc2b762addb114fe6e46df696fb2a0f4dd6602c748f937db5d3642dff38d14718032c93c95a80f9f0113ee9444dd6a3f5cfa77b148ccdae0be5cfce27e7b20cc6ac321306599665589069320dc957cfca7038659e674dc6f9fc3eff44134ea8b56e3dd48413b416a436039cda85d3a775ace388b9d5c2b24ccfdeb2c0045725409a53c7af297570248744f5389e352538defd3459bb38d75ae8c06dbf756da85df7089ec1db0e3c6b38bcd5883427ffc1674df6851b185be101fe9d38f688e3db0cbf4e1461725ec112887c07619e8fe333a7471cf7e837fbc3f1f975da0fa8c9b45513ecb9e6d62b8e67311699f8f0a676b5030101010101edd8b1433261f93281fb9cf3ca55eeb34d9d533016ba091384cdad6b400a996e7d03d2d541e59cc63be07ce3bc721cc7e18419c7370e83280f460f28aa721cc7d5ef7395f7b4076e393e37299b518e9cb0fa84f5c78331eca4d6b1ee811e6aca52d38705163af04a548baa515d529d542955a803efe91ada0a5481aa921a245631a7f07d3d5c02f8fb5ca5a42101525265391368d727e6ad84766131c787ccefc037b1870fb8a773c2fad32e25ee615b815641f8ba8ddfd47ae3f6baad59f236ee59037a4dc95b173f89f489287c3e643e7614ea8fcfd4b41f06f4286ede475da57594a6122e9c40134995e5889bc8025019a07655dbe46d1ecb67dc36759b6de242f71ee1083c78545193b973a26b322964ba5bd790c894444950be4ce04206c444b1270cab92204d76b18b15c8c6b32653ca15483bf2b363478df6b3c3a77362c6f195f7509c503e8f50fe4d88593b7484b2cc0cd2649aac6ab230d75f85aa70c87c2c96d0a375136c4a544289a0a8f4d94ec458cc3b33b1c7313ccd3d4a55a0af1caed1f887c3bd1cd47150a7a1741e05693e9750a9e292d6b1cf78f5a979de164e501461f3e9db08f2379fa008558521ed42b9e6d38b7cb5c8f0345e23f39acf79af066eee6d61ed7187eecc803e2795aacdfbb16c3e47763eadfdc19a9999991155309f25e6cd51d81c55addd50b83d941f8846a29191278d3e39d279348dbc29e904fa9d0d3b057d8a2a709
46b5e02d137504385934a8f9aefd0d73c6a1dcdbdd5c2da34ea51f31513bce3d3add7294a9f5f669876d2ae3901aac3b481629db586702f991f7041485eeb1f1e3952975b8a82aa0861217ab004220fba94946e1a53bbf23432d27c52d17c6a5f5e73ef7e74b3af5dd734dfc4137ad47a9c3329c4a75d34e552a6664e08f3a4aa6ac4854a53964955da95271052bbf254f29a6b2a5c25ed5abd0a874c683ca5396a02a940fd3e8d78023701ea5d946a9c163cf7a4371d63d9ef790ace0212e4ab25b58c5abc97a597f75c5ecf2b23b74d49e9e5979756b2f8f2de046f8f33d7bfe236efbc5eff8a3906bf7957c5ea517369250ba5cf21bce73604cfe5f9089ed499ee7d9e98593f5b36d80d9c5bef794bd957f62d8113658fd72fd20852087dcf4b681775cfdbc4da9a9b7bce6ad775cf658ea7bde1c99cbc1dbdbd41f1d861bbc198bb7783e2c18ad9a6164bcc23f6dbba31ea567487a445f4e5c3adf70f7df910ed2c5f1dcf58479c511daa636753ca9b1b9f237de9b47cc6255db94e38a58e0f6e4cb27d339c4f8b78b8262382b2b396b50b25a66e7c8a2ab79a751e7e234ed16b9475afc7949b2f311e3ec31a1ea208d36ffc46ac28efa1a8e945ba44b79e6d186fc9b0f16ccfa8b156cc5b32281f3e6a9c87a3aa0af7d843d6fb6156cbc673acf66156cbc6b55b7b941bca51dd3f6c6a6a6a6ac41b1751b871d6ac5fe6b45fb5a951dd8cda86592d9beda34bf3e94c21e7068542b9ca270f478937cef29ea64b3d5a47dd8819255aa75c3c4a83c0791443967b3ceae43159218f1ca7a7aa4d8deac64728a501988cee23fc71d8e81e760367ca3429538f764e8f4acad42e144b0c41ca717a4d28e5a80aa51c2f580291bfb911658f284739eb83da8309ce55690ba6a74c4c4ea4952d666fbfcc9b7923eef43845ba454e5b694b0d554382966390e7a750356e673eed9aa24b29e2175d4ad15161ac46dce91125d22d3d5a2caf0abbb4b28593e64cedca63987665bae4449bda555b9eefcc2ae596d5630f57854c7cb818ce86cce721522e4b237dd51faa4367c48cadaf44eb43a45bda95abac7fe8abd5c2788237620fa73ea90af728e5ad33f1e1b27b316eb5dcc7b1ca86889072e3f35574438ffec665e39b1b94dff8d7aee95edbc5c36f7a3aec7183dba3b5aff1e5c329137d8de20583719c164d106cdc3a8d228518215d3ddcdea7c14fe3b9f5342ee56b933a374ee33d422ac71bb709a50c72e33c70fec6c71c7fe33ec21b9f375e79784fd784b9c66d3ab8c9717f2b5c347dcaa723fd747f2f853df0c0030fb40e3a700f7272341b158f9b9b9b0a4a20e537a20ae6f310f374140a36a2cf4d49bb7ab8f52da85f3edc7a0ebf6112431b1f3a4e3d5af7a083b0653d5aabaa2f8cd548e5411ed51bde7ae6a949de7adea9b2ea4295d9d854994d95d9800df0cfa9396e437bf4f09bb0f67873e3de2e94bf4c0ab13c44d9e34dcd0af351ae82fad35748fd1443cf76268114cab1f7340e1eeeba0963459b7036e58433ccf679bb5062b671540a05d187ccefe136620f1f30ed11522ee1e843a7cae84b47b463481debd63e058d637c8868bcb5b319c4da2f2410ebb9ce6edcdef88ddf78ad9fdf501e22a0bc870f38b794de8a28a07cc8fc94a340675ce49267cbf4d6c67b84f34b8f2dcffdd313028f92288a60df461c7bb437552655aa195f396562d950a69b10c7c3704798e7efe021dad90583a7e338e8d9ceec4c0a99cfd42e1cb76ebfb46bea68bd0e97adc3c3ead3e34f8fb6036737331ceb5546c6f429811b9f5e67ede2e1d6ed9265baf169e33761b511738dd784f277e0cf76d6b29e23cc1f8e30b768c2136c3376f6d657b93e0e87cc4f89ad0fc29da2e9166fdb11252236ac890402b4026205c40a08f0225131c0fb616154c1fb71e1c99c96215127ab4475a62b29cd664c734ef1a756a00fe3d4236d9240a8046afbf448eb8f156d52fbfc3cf5f6f96924edd24aa0febed393335650569155fef64859b75de23067d7be1e208032c8f49e93480e4f3590f1a50c26908c01cb334fa339eb0b9be1f384f2791695f179067dfb2c7d3b0b5288fc124cef122aacd798f2edc6df6e5222cc154d987e189a02cbf889001d91899245173048a101d63ced8373bd2ee08f4bf5f9e9d489be6a119d8ec1702d4162d2012d3262e2ad4d7aeb9ef363c7566942c953bf4288af316b048d826982c0998281737dee0b4c0304deba4c679d319d6608ac1283573354f9e92b24d3596470aed3594e38483afdbc768a799bf4c65befadb5d6da1a14f4d36b501502c2b9de24f84a8159ac2db227e21306df0de6134c4cb78d8a8a8a6812464646950a12927dd29ee60409e9b3c9d9ae242561a256911b51ca73a370578a204fe893791d144e4eb4e6eb25616b0496521c296a57fe903c75fa216957fe649fac5d3f7da9ce744b5f1f50cac8ecfab5d634b0f6ab25dc5e5b02696b451662009354a788c7c99003
8803ceb7d28a59a8478c8151318629aa6882d11b85438273fd9ae9328d34b42082c92735ac28c1a4df1b21d84d14181238df9fac27720a238a90c1e41530a99208f039da98a4a459c2f9cea65f247d5518a5b5d64a29a54568b705e224b556b356b3b6d65ae79c937bdca239f4b8019e64bce504d671394a29edeeee246ea5f47edd73ce595b522a29a5d4c234b7bea359211b387753f31ada9f09d3441a3c9993eb4f8de3446dfe24d1b10f058a85222d94998f23f9b2409dd8f2f1d94291393e5b28df9e568d5a2d8c9b7c4a396adeee69de9b675592bf4d9c735cc36a137d59200bd40464819aa64b70851813cf2d40619ec8420c609df774e6b2ce919f6e8168f763671b51aa5b76d3296bb5b581249e4f449fbfef3aa990d708f03a15abf4e538e7a4c1a97a295bebcbca1bb3a6b52d509dd3ce7e4a87820da097508499c9d94b28c410f114480aa147463aba601fe9ab0be3c6182c32d86c6201ea8a155200c9fa480644b56cb1a28a0c367d0ad1974c92c960b3dda002d442f4d546d35bd63faba41b96644df4d549494e4c4af4d5b3d94b583735fdf449344b405fd4a809842c518e60d34d80491805a23ab307771bfd04b2b3ba85d7d56a74db366c851356b0f8cdbf760de037f76ddb729ae763d974d9f2395e5a2903c68b2d6af082a5d79756ca28b59555162f15c2a4e1cc15b8dd862b2abaf10a0b96e29c1207b3e79cf36bf90a0b6e1a30d27b686fbeac276800fd3b834c6fc06a1279edae9a22d8bf1248871d4e568fb78b3480f5b2f3a4a6fa7aac3d4ad6eaa595325c6a5e5e31b265ce99adb763b93ac299f5b3e71ca2e8a994b329f5588725a05134512d987a2490a01e6513c94ee50024403d4ae93df73ea0656fc5117aa4def3a9211d37c104a6dad434143ace5bd0b9ea56ceb59a2b4ea013e55b401ec1a415f3cecb59c40de1c99c8c7f08f0cd743e2ce3454254557171b83178b0d0a83868a2e039596fe0e91df77db5d4c9f2625bf25afb6a359a1dac1dbc7a0ffc5029568d9c416e1ee73d6c673e2f38df2f0be66a53e71e7c6fcef980f9f3b99fe25525e17c2dfd2a0ee0bd1bbed77d3354d9f7be6f88056da514047b4a8ef58465f79c7d8e7bafc394cb146b48c8757adf9c29b4aad57e93f7bed771120459f05202914efb3a35fecb049eab25b09c4be634d2658a29ed9a330bcad6a673d24dd3a20ccd6673ce2237340d088d8a396717271e8abbd588e84a491b7561f32333298501ac87e69cb8262e4c8f9a8812929eb926291bd5b03b01fae29aeec549568ed6b5b0b926fae2a48e0d5bb2efa66f173b50a79c54d69eb45619a29a369fd994c2d0b40b5aa5a8f61e21e7d4364aefc5b8d5721f4715151bea6bbb346fd6497fa3f3d32c2a1399f3723fb29cd699b3199e79ab3567d669756a9deb282da2f372e0d51a04b5f403ab0029a55c5039c8906eabdc64adb01a7ef806580aa9a4c0d2430b648bda02b56b4e60e92da577426a7832e7b26e91cbcd64913446d2099f172ebb7b319e413ea4ce9fc6a0712fc6dee395e2791d98dae13a6fce3eafe3ae0a0b28b67ae4c468a15293c928d32193c95e90af7bc539c4ff30759835c8e07d29f3edb27d907c3b9814cfce00461d0f75dc3bc5dc377492ce731b79cff37cfacff34403bcf7c3d86bb53a776e9cb29d9da7818267124819dcfda52f4c25139c42ad66bc98138c9be433929b499d31e48bf31727e71839d1b8efac2ef0f4ec1f0a447d1eaae350774369165551b46701503805c61ac7174f4a9ff40524d8b730ad653f3d22a140c3ae68555360e90dabe10936b7b2b6b22aab0f69fdf5964cf62d2d09e15bf7b42927456da29440aaed69abb5324401c9a5af6f9de8d1b757f7b44faeccc0216051f7c49c1870ce413281bec61e81eebcb06c7d314158fabad3b29139638720a753f8c08a9b0df004ab5bb001f3b515c4e0e745157aec09c27c2a73a89db55a73ced1fb521d25b077c3a8b7f03d9dded9c0f2732b962381708167fbe079b26badd652960b98f39c734df08fd61c1adedad3d87b3aa75d2807fdf3ba9477cb06f4d5aaae5cae1cb0f29eee52989589bcca3dfae108592e41957b54ac19413ef506c8a7620d8da3405da3a9751566fc2b6785793ecb694249fb6359d60cf68a7d1566151e7b0f5585b8471952d7422ab2f65887502bfef85ed64e0a9c3ff8f60f1860019e106a0955b49d109eccc9a9a2edfb2e087ea8540ac4df55a9524b292fa9d9a76630b9fc74d91dd10ea9c342bba44ea9e6d85ccd7c33b576429da705b68944602d262e38c1ae68856c10d5e92d2402d360c03945249752433d36ada1d406f58b6b12729222c2f26d87be2d157b6491ac2ba7869c74cd9dd0bb7227d4d177d16f39ce1d33f8e99f74559f73d2f8fca441f2d3a7cf1d403fab745197f549d3c4cfe93b98264bc993261aeee2148ebba55039be1b360dd6e04209b6853661c0bcd677c34e82e20b2798a7e3bbe1006c6
043b6e3bbe107537610826da1847935dfc6c20b826057bc54c7e6bbdfe6cdac6ebe2d2402f33cd01281799d1676414bbebd7190633b53d33c2a09e806492022877483a4103b6512c80a8cd5397fe884baa1d669171a0a8104d27289da212d045288fcac0d92610f58a318f3f09854916106dbc29a22604502b832e790946c17c7755ded681fe139403e95d75a2ebc3e5d52df42222f35ea967a0fada96fbe04671694669e11e8eb6b9e49e9ac736c1d4a7dce397b7adce9970a94eb6cb779e75e9889bc576badd5af57bf0e3ae7447e4efc6d5fe7b3732dcc2c7ce73dd4ebba5bab73e2b853ada5b5d6b1e69d70a49556bfde56fb1d5be92d12f4b5485271e707538a652a88596ef9985ec39aa60a7bd6af0fb48e75cd8609c27cad067d1f08410952d0dc02edfadc3a03fa85dd7eaf290ba15ff3873a7d617102511deb188b9585eb1e0d59f09b0f71807cea7d0e8699887b538441d15fcfd451298ec873e110fb29b1da30837c8e0a7327fde7dbe73d14fcc46fcae84b7ab30be77bbd3d6a200db2f933b2bf150609420d9241a6cb1c8cbcecc0c7088a0c4328312981491baadc9084448733bac0f214aa620333828470420664c8797482242f1871831f6a304a029667d2b3f04445941bac74896209589e4a532416926e21ab14e957ba942dc9f3d277e408d23f207dfa4821f5a509781e05674105e926b48b6af1d27ba48fd2b12c92c96435c8304732990c897439c4dbceadb77c87b7ee79fcf1f6839ea4b7d762915b3e60c532c92fee24c45b1cf607090ecf53c914b2f2760ebd2572f2764ab176569958deb6acf5390b43c3db251d9e27d39719a66561ded2d8fe41a264c95b6f21283a3ef7d05bdb4ede4a796bbd8f7278fbb9abe446c2f2b6b7f46c8db728ebb93bccdb76fa725480573909604cc5d4c4af56abd50ac757ab2f527ef5ad8c58d9f86aa55ab95cad7cd5060e9c8882892ec818b372e2c409105a7a38b2058995ab58afb279d58f313e7ff0aa2d5ec5c5abfc9231e5556508bdca8c23af52f98e19595ee5238031158f8d8d8d8d4b262cdec61b00c66c70588f63f3384f3c0e8e18621e670c2f8f234694c77111c0184e0c8cad6ed458fa1b670018bbb938be26cad7a8bca666e535355e903166062e5e5889620b58cd54238991262d638e5ca106155fe32bd6afb2cdafd238fad56a95c618bf5263e857be0030b672954aa552390c8ca91e8cd588f1e23fffbecf737eb4cbc63fdf614387ff707c45dfe7277c37dff77d5f8d7f1f8feffbbe0fc7bfcfbfeffbbeef63238d7777ef01637e71707070705c01600c87071b509e8727008cf1b835353535358e00207f03e66f3c043076a352a9542a954aa5523908604c65c5db8899e16d5c0818b3b9956eb11273e4577e0030b6e201635f18265f2b94afee393fc240f95a8b4c68d7caab9f2097c2207dad2a254bbe7af93a9db86a8d579f58e696af5cbe963942060b143460e9426691bd01c30c435869830a8cc8cf91197c9106132f2015c1b24462e9500485173253a80cc1b23c92559967d157afe1e26b6a6abc55e32f3056e32a6fa954619c7895bbc0986ab55aad56abd56ab5720380b155ce7f3c35ccd07f7ebf303dfce71f80b1af0060accefc77aca1c6ef700280b11d3a74e8d0a1c37774e8f0305ec7949dff7c0060ecbbdb1a447cf51d30563d0818d3c1d3f2e2e85b2e0030d6ea420a8db76be528cff981f2e2c9a35045a0548e427d2814ca3f70c51723311cf142880825d3e842250c12228ec09205e5afcfac2e923ce8828dff1c0818fb5a2a95b7547e555d44f12aff01c654ee44ea87fcc0ca57bf2e7c755ce5abb77cb4d285ec2b9725be7a00c058e5915a0c71431128b844a961cc8a885f9df1c5144bda68120a0307d88a4b12bf720080b195eb8031548e9993cfe12318cb7159de625567b158bec3f2cbdae1595e98f11f01fe731f60ecf3008c55ef008cb1787064f1383c078ce1b8ac9f79f233339e3333e33bfd9a403299ec086ce68717cc989521660c1b60335a8c82a490624411266019ff8ccb277e466af19965f3f5470e41e7b3ac72460d3bce78f2d55b608a7c750793c5571f85f8ea3b6844f1d57b9a72f8ea3dc05845eaccf3dfe7324ba51c4e44d080c549163328f2f39dff5c7edfe7e43f7723ff79cf91ffc2e880c49536c83879c202f69dd1c27f2e82b16f1682b1191a30c8781a773046e3858ba7a9a7433c75cfa1be43bff0798aa2730ca5fe0115a55f90f1748692a12ba7339ee712a22d9e3a792ac4e789d4aeead42716caf23cb73c551a40114dac70f1a4882d5a60b406a52c57e45006cc124ec032965306a4c90a19182423ac48817951450ba1209af890052c4f23306078965796e38031566d226b05a385afb53a0f3056abf8992f72f819bf0163334af8500324dc001a028827b08c7fb56af22b3396c6a8024b0c5ed4e802b6fa82865fb90d185bed789597325ee535604c753f9799f5df
57f3df376be33f2f48ffcdbcf8cf7780b10f3f8a7b94d3a3fcd2570086d8e8a2240a192ce1039b61f128d7e12d304667b80cd063cf01c630992a0f8293060f8220ca413007353c38035622d04d00fd0490822ec11c3c3da8022b13f0c2409f41a0cb3c85884c5922090d969c601ae20d18e845063e7431c6491642b08c1f9481794187a5229834c10496e5112acc0f58e040e9c91169802e68f265c90f42489cfc00baa4af00741163f4f4440d2bba007d4706f1b5d61c54f1d571b49af8cceaf2d946878c98a76d1cf114cb53a70163d4bfeffbfc731618fb7a3c8a4c0f8ff21930868a8112e9534e577cca55602c75c1bcb7bd17c47b30cf47cf777864f05e089e9b7002e879ae8295f776f07c02b54b8987f2a6d07b6480926278328214260bcc2b23c30e425c0145861e60191f49a7f7ac38e9f028946330869264ae50a2063146639079025681f8eab77a11868e07c53ce828300612a15286873596ba7029c20936f43b9230b9e1872f6080a10618fda2ccfb37468afffc03631f18f304f09d4b37bef30e8c75177cce5bedaa3f3ce73efed8e19c478d109e83f226d0d78571cf719c5ffae27152c68c2e4a181515c1b8304ed020071e7c68c18a1d6032cba3e7241213614cf1d56bbb7c7c75ee46f19995431d8b79baa4e5a9df18d7f431bba447ea23649edf7c5a31051e29d4e3ac414ae04a8355a5c18a009f57356855835679b55a2dad5647dae75592cf2bd9f47975e4f30a4989234e5a34518394187a955742df2ba20cb45143194a487c710325b0bc3a32620a19d9186060f102cb2b312a312a9dcf2a27954aa5ca2aa6efdbaec2d2afeaa3daf2edaa2b2a1a3eab2693cfaa1e3eab905860d43484041a4b68b102cb2a1e25cad0d045173184aac0b22aab84be55420c2d51250c27b4e882042cab8cbe55475eb0173c7ec64b780987f9764c0596f219cb373e63233e63a46fc749780be682976040465cb81206c992256019bb8ee84209175dc46872029671c642df3863a36f3c450699f2f49dca29a6ef14354aa5b8a496be3d85a55f34a975a6b7a7aef83ea7728a87cfa99c42caa91982948cc40c214142039653a99c324ac1922a8a986a785a4207584ed11d50740754f81945855054084585504edf8ef2829a427c46fd7c4615a150285446fd7ca3866a28f2451644450f38806594d0378a1e01e911d03f831409f805c94fcf60986f0765a00f199fc1221004c10c0a7d8344626410314fdf4ead333d7f5fbebf353e99c6e76ffa7cfe66f0f9fbbe6ff63181c1431351a87439c30d58fe7ebebffc097d7ff933fafe8eb2c820599ebebd248fe9dbeb24cfe3e261e9572fb5b7e5db3dd91b5e93cf1e119fbdd9670f297b41404368a1618d1cc8b2c0b2973da16f2f7b46dfded1946e4ad7fadc1975469d51d719e9baaeebba6e07196487a76f4e8813e284384e06f533e71354c4711c37fb86e1baf1303c7ddfbef75e1aee0d9f2f189f2f129fefbdb37c7fac88210743376021032260f95ea2efa122486ab860c50917b07c8d6270e1a88a29d45842062cdfa3336490339ebeb7a6ad696bda9cbe7dc3619b417cdea4199fb7213e6fdbb6e5ede77b038a011136a8c187256568f981e54de87bcb9b91511833a254b1a288953260793bba42bb42d39434254d49d384e83e6b9aa669daec897d62bdcfb6c816d9226badad5245667d8ef86cad259a30c820b3270c3fbb96993f95e9a7d6caa52e7d7bc5d2afb924d72ddf5e6557be73f5c9354806317cae483e7c11439227597061052cd79fef0ae4c2d3122764084262062860958886369e7491c61145389002ab62cc0c47302591e4852cb05c8f5240c53c65ca02da44653e414594d219159f2913fd020119c40d99e5a91b969b4b63e957087a8b017e82600019647a9e6e7cf3b43717df9fdba7831ae81b06323b160787b58ec10778a0de8b71ab3537279d42f1f8703c0ef86dad83cb6c5a2d0e8d6667d4f1b0a503ab9a0592887c75de89b3c8a99827131e6e48826a7156349b45334f5480a557aa92341204ebf26f18820fcc529860096e688109249d6042f7943042083c3564418ff7a9bf40baa838a4471528a5b7a7ec0eb5d53a13524ed257373271aad3a49b38be7b797674421dac1458dcdd348b43e725eabbbb69b6e29048473c79d474a8ea148b8a7c7c9eb3a2f6168c8e1cfd0c26120edf425988e03d15592081b43b4d0e8ca1beafedfa54429d04c251a6e3faaa9c6c55cb524a29a594524a0effec26a5b4fb5a4d3aa54dabd534b9c94d6e72939bd4a4ada0bd5ce7e9a03eaf4ecca695521a84d5b6dbdee898bad4a9d425a99c9fac4d34afbbdb5f2adb53b5b0c417a75050e9504d9942c9f7561e0acc81a543d45a6b9dad16c6d28ad95a6ba7b5d65a2b42b5a20b6c10563ea1bda54dbde9cfcf3dc6e8a64e29ed6e9fb52705a2a86eb789192443fa631c00b8c2fc9d1d6dbb07005798cf2386bb9d57b534413483b61a5630f
65168e1f19a107d794452a7fd5e8c85fe096b1105911015534a5f10e990405a5f4fe95554156da59cf739211222121aea97e784e80bae44d587af8863a25e6eabb572439748d3aaf57343f5524a2bf5aa795aad5ad5aad68303db52d3b730d0aa38e79c738aa1183090568dc26f276f5a6aa3694deb64611ace2eecd7442daca0f64d3a289aa7699a56692a89263f9ad10c5739e5565154b476bb120ace9accfb917948bc1fefe701ac17c25cd26c5aad5f4dd65aeba4b5d65a899e8be2c955cd0438976f8196c1ba9e7914ec76439f223c304edcfc7a0fbda1165ee7c22ef4297261d77f78609c7bb55a8d7eadab8e03356fd3a8773f09e342cd2f0d57687df74d5002dba320b5c2f65444c16e9ab76073bc614df3c231f0744ba4f6d644db6af77dde6dfe795fe77d1cf7b33201feab82fbf77edff711f51537f7baadca64b21ac66d2e0acf6a3389cc9379324fd65346376fab7793636d323bae89d4421e3f659a97b6295995a410b749542d5e6a3fdd5167d421f5d8de4d61445f5d1252bbbad914d2d581318d26e0e339e71d765ee4c2bc8ea99bb5ab73eac090ae2e07e1342275dabb7026910368ef98e86b67caba1c48d77ca1639a4aa60ddf3ee7cf0e7d7937c824d24bf2edf1f0edf5697a3f14b432197d79b2eac9a6ac5d9eacc7fa06fe3ccf6712f9f2bc7d1af13421362a5e4fa1b4f73aeedecfabe2b58ef347c13abd29289eac53d9e36e25c13ac5b24e8b37e7ff4820d4d62a6dad545aafb536a07ac5f1745299e3a9d8493dda2d56a3b5e5e30e0f1228428ea7a26d0a3384b4642ce34249dbd0156a92401a5731444e3610c9d9281c5fa9bd169800511291937ed1212126fcb58ab422e80fa594524a6d108e9f74db28a5500871160d6eb6bafe48211473692df997718727c757d1ce7ab44bad8407c98ee63396f1305f80807a6c2595822434bd4c333704511c3fc1586dd51c0ce8d182be97f48b2aa14060806a8b53d25ebae3588c53f56ef4b66bca91aa9e4c0d65bdd6fa445a51de76d5b1c7990a82e829ed92acd5b4ed72db7dc207cea98aeffad44e5686333da23aa06d01d3a31ebbbe8133e5388e13c24eee6e42d0b661c342b7cb6d5749887b6abaf3a84eb15615be72de2ea58ee65d398ee39a50716ecfddad49d0769308a1c7991c70e6f9a63f3d3ab50b07e7452eacf38c7fb4248d8baa21511de9d2947ad4c6489d762ed49ee400da3524fad26654a7bdb5a61eb9902e6d0dcdd3c6480ac5373d924fc8d7a6a4f32694e914df4a3ee851c94e370b90379fadc8b76fb23640d32ca44e3fc99453537b31fb3e3a22c19d68b77a6b02fc5a137db5d4b156054f9bdad572b4de5da83dc9d718f972aad32d8fb31baa8ab30b7f2e8c13893c7ac40374039e9ee9d15684c54eb7965609d4d05a2b64b5af9328a594523ac35a654229b5d5bd1d3da8604a2b15eb50b5343564c578322753a0efdb6934cf810fa0aef9d5ee262f9571e1b2b4f46576999f62cf34db49edc2612b05aa4f9ea43c74736a3fe871e6b48b32f97cfa8feb93ebbcce1351e8bc76e28f1d2e0fa92f5ba7d63076c4d323ddf95ae6d059ab76ad6356b8827c7a7a7ae3ababa0fe1551d844d0dc829bf843dce971ce1c7ca36c3102223750a20a2565b4604450935a05182ede68034b1958c86861461211464c68204619546c408d2b4b1871449719d090a34b154e599e00c38a922c537650020d32a6f039a30809a6d308b48c4d92d278428d21a236b0b0d0a5880e59b6d4c0098b1108118416306c20f1e4f3c21420a892440e3a1cd1258619a638a306305811050e5dbea8521421838b37c448a282a549137106155a64b0c14b17522061c486293dfc60858731b04461021ab64c41220a1c94b8300489a7309c4872440c512a12ea061db4300289a21d86461084103d249948d0452558a206295e80ca54114412287650220a9331538049625602ca850444dc600a15a02d455edeb8b20366498a2c080a258848b08217605a50624c0f489ca856522d8011c3125d8a08228911c83370808030a3081fac8011a60a91236180d1428c2e46907e20010696a0d480072c42a041a53ea105b14412d2154cf040c50888467000e95204ed840492098aeee1a7cbe9777a0b8b58111ab428356da14110d5898fc00f3f465cc001104f52f440f1e488238a1083861bc0a6b7ee11027c5fe87c49e335128942bc84abb4e074b7b48ec3833a15bb698ca05925f168649d15787a7777b3b2c0d3bbbbbb93c1bb3160e9de6ca51fd228598624e3bdd47e29e6157628c132092cfdca8f931fd22fadff1489bc0549e8cf177839c3506505966e432def75de07a25278d36cadb46713342d60e921a594524aa978a9cea452632981b6a250db03c2af4e1e17b8bebcf25344c918578cb4f1794c4ac3c347be89b16d5bddaa6b229316abcb1eb5276aad95fa7c265464210631e8ee31530ab1ff06e65e5e3182e4a5905bad94519a52c836bd
b6cb2b459aa614527b64c1530aa115906b44513a430c19a038f1054c4a1d29a47b64c1530a993db2607ae59d77bbb7ab762d0a9559f8761b78d4a91878beb4c2c5e83f252cdd7b630c972db6262c545e74c1378b2e30288517587aeae595224a7cbe345ce02b45a6f415de3ca2aa5e5e29326446edd9b56bad52a7bd2211ea51e6b4cbfea0b56bcd914d5508d3dd4d810e55e8714a2729354dd33a05d64b294a287ee4509752cacb72ea529453f0dca9c12bb264ce090393ec9e934a4afb004d577cc2b0baa90c180fe8ca0b495760f051d13e82ca2bbd25951288d604cd12f84e6f8595150596be61819bc02c29300b09e7f17fb4e49dde61093d4aff7e79c5a7c9b38e3095914104be57ccc01b15b505fc5d21f3526578a3010bacc2c18c4e63961143917b6f1b583bfae00abe5786c01b1541e0ef8a0e035f2f530ddc91d157b8cca5c9066d03afa4341bf85ea14b2dc4f7f2ca8f0d57bcdeaad5e8f6f2ca8f0f9f731a8c9784bb5a8de27b6fbb81b5ffa0057cafacf144059d017f2fad3c5d417180ef4b2b4f5edec056603a03bcd1409bc02a1ca8c0f7cab401e63e633138f5940cb6699de9b37b46c02dbf62048dcf6352118f3b8f1fdd16cf45fafab3bf7e5b4d34a2d9a884acf7a44d0d11d98c040002001315002028140c080443c150308b5355ec3d140010789248665e1d89a45914c428658c31c6184200000090191920198940007f6f21895ecbcc46d3cf5b489557de69cbc3c44ad1b182b1cc51440e66d7a51017bb7d4008e6f5bd9baae9779099aa35f75dec4e4e1f88f1129378c1d661e18794e0aa3716674eeb7ce1fbf420c255e390ebec55538dc8c55b5d42c3138767dd2e1b65898525a84df9d8e71f095cc6c92e12396c2179d3172c142c0e2b5a4c952527f29231308e9bcc08cbcd3acee9261487cfd2da56434859f0b7084b43ed82b067bb92e50e80ba1fc25a08db83fa34a443bcdbf84081cdc07908ff89902a9dabdf0f912c11ac1eb4ead06193d2bb9943078056852f230925aaea927d05c1e5cd49abe74fc4bb7661926a188b0fe3a2b2a85d0c66ad9e5f9bc8d235b936a0e0ee4f316afdf9c2c25374393a83220c5e81670b8a852759729dc765b675b31034e26d190bce86944cd438fe9f9df823f536bba2957096cbed4358955bfa494eb666d8260bc8f0003490b9f2e796ced3e71e22cc535b9b6144d67fe1b40cea680e05b6fc4a30df386b02001d86cc1de11d1a9fa33315fe5ffbd0f2c2bf445c75ecedac5aa9d3ff374b1ac1f2a1ebfe156002f2965ee02a0c8be3e6ed68d5af4c169714ba2bacdf9c0e325ae86c14c3168c1f6206f32c9db222a6488a870cb99fceb5938d9cf23875427521f45d171e5ad23e80ea31f928050c7e690736ae7089dbe3ee93f6322e77f63c5a20220308c830542fb46d6e441da7515208935e7b50ef28105505a07d3cd9ccc287aa676070c7055c8f9b2fc2879101872a6e690fd703cd00017ae63cc096c2a7f26fe00d9932296795db71e38efac4cbad590f81d2dfed8a51ec4c4e1d5b2193baaf2601113303fb196c139517c2ca32709b152f447b43235691b2e38110ee432a4771d4061842d81f4339e78c760dcc45b71ede2e632730441abbf0b50fd55ae757ce9c23b29e40d461846b498de0b46acf6ba5a8f27afab7a27a3a34d86c2d5d3b4621fc080474d9c54f3c4df308cf50283ba00f8f4cf585dad841e44ba429ba871d09f906fc5c4fbc79c5647c6981627ca96bbbc7ec250b22adfa6df9ecfcaf09c3dc11984da08c7b8b2ddf4627fd17b4d4d6185f03c93aac012a7feb8ef002bc1673b16183a0e79652df7531c9cb9901ae3d7653a988928d3eb4c28a51a011255803b382132041337be2bab5c52f06b5a63877985715284a76c0001d3e79fcee073f40f3ad01b71ce584ef08271e71b8414951f41db5418c067fc2e16fda60c917963ea8a4e3c065660b556f9bb32b08b25e187271ffef16bdd7befd0e04fb217978097553a77004b0043fcc044652e0debdbfc4f00ff10f8bb8c821b628c0a74a0e094da19e4a5cccb51bede4ae92ace4bc4c5ec793b71f53e5129afded961aa7528fd593d81182b5ee66db7331d14bf3b1a58c94f53728325419020e94c90049671b05c8ab379f88acc53840ce62fa2822a4048a6357d6d81913b072349e876f12589b11645f50f2b324697409ed474229a779bec5550490e3a30fabef7cfedf8642ecbfa46b4c87c5e8afd80204d9c4a974af1aac43f0f73a28e821dbd187a82e34b23f29467030dccf8ae60d7dc1f9a39b1b4927934955886366cbbe19d547c2b3765795ce27be71fd59b4e04a16fce5a1fdebd10fcf40dae713eaaa1205832b2a06134153fa9c26d6e64a37b3d5d4bcd460794cac36430bc80a51a85ac087655e798ec58fcec801ded5efb37fd91a608e969bad9facff42ebc12066c65d362008e54800a0d970f6d8291c1102650b25fbf454fc610e7b4760dde2d0a
c4306a626cc8fb2b998b82040f88824488a2f7211f07938c6e20f860bc9d50aa0b9753587ba30975d3770150a7c97ac74bb52a75125244d4795632cc03e4950b59623518e99c1580fc44908e60bcbbb4e54dcf513a4690758295400c0c3fc5f0be2612cbf054e713efe3092167fa5f8ab65e07dc861426359d18d1723a88b447c7849f35483814d0b992ecb78b29d6407d049dbe96bcb1a4551170895cc2888148da33dc313081e084132bb03fb0155ec472a94ba3c52740aff10eab23c14cfcef3843614ec532b3acb79e57a328c41aa1c395b400657492ae3d28a929e90cb589b25ba09dc8bf0ca9e9236117c5f01a71e245fc22c2b704004368c01deaf1eb2f97d2345c3fd2e4d61418d0cf4fe577cc12d43e921cda1f1a3abc5fc6fcf89a40ab11a308a6d53d993a330c0a19c5311d6a845c151deca1690200a6d2c82a833ffd8fc86243f7a7d3f54209a905328c8a42a2ee9f147fd15fc7be772907526c06961e38a360fc2dcfd62c1850c5487db3a56890e5b835452d75bbc717e673700a4a03ec801fc063478ceb014fdd4c9c3cb6c3c42e9ad5b4a5ced7cfb46bf0441ea437ea456dc03021589bebdbc9942f62674fdc842ed4a0071bc2a5da5bc457af6632ad3c6a000a6a7cd38419c855aa6d8d97d5d155a434b5e83bca7ad5055a0bbfdcfa5b3e51c4c22480285caf9bd82881a4c625d78b4a1c3ee03c9fee7d3011552cc25844585ff9065421430804361b2e4bbcff5bad04e1a85a0053bf0a9f226ae26f6222451e744df2d117b960fa517d3a861d7fdf44db5ea4b7e02f27acaeb3a4a97f12103859a0160e1698454e4556aeb6ec102246c36d394ba5e1e1f9d83416bdb1eda6ef0105de8512d71e34b2fc303a162e9bfb0f960bca02707db3ca4d0477af8ddaeded5d3d6dd451736a3fa097ab5c7047f126fdbec93e216968558f222f5e2742d91b3567e71e54e8cb51ef450ec9beb74f40e2e9b7c100795c862c1ce65fb39ac481262ce35f1b074e64510c78f998e05f4277c8ad1e817c03a5d4a04d45071f076b8559eb9a928e68d55ec24cccf3501691b3760d1d779e1830ef3ebd1483195b9e95847621b0d78f6132db6a23c21201e4a664b47c64ef420eaf545cf23fab388a015ab3c0c4383034ff00891cf6431310d2383351241304404b33a357069125338109ceebb1afa9255ba992c8dcaf7ece03e61ea810f181768f9962c17d5bd94dfc832566efbc263b87dc421a029a919fb39feb3e10241a2c6cd427d85a9bc293fd1dbfd5ac62bfe283f333ebdc0eada35c257a779676d0da8e09435e7920b811054d8db02a02dc8d86cc9aabc9f107b48bbb4ba82eb1d68b0dd36a663878e4c1e59a5b261892035b71bbf7247061441f19b622b1b67cf5191e2670b8e25b5a391ac4f709e2a21285dbde21598243bb5dca17fb75925f41b61e935790ad946776e2719aa1f6f4aacd5f233181ffefb4b7501572e03e3c37579a93d18ff35b623dca8e263455c42196473c77415ada3dfa44e67a7ede8bdd028049b443549c8d69213b2121f18169c22c6ed4b4b0175d9198d37cc11fa9d616b17a7ac7cf4801d5b3beafd69a526007622b3dd7f0de3a81f1e19e4e5a6691fa6351ba8ec7eb8a6ae5c45f03761e8eebf440339ff498d502dbbdfa3913a8fa1f371275825a0761d40a9cf47820cdab853bae80e17213172f00181ff7daf8400dada4c18ce001c4c4025f8ba1ddc07257198053214164a12e4498233cd81cbef34503e6d06073d7a3ea6a75a0e806e9f89d17960a5e09654013927d7f88da7517d4bbb1076d1f99935c37c74023cedc73c9cc0460cfdcb7d34cbc3a2407ac46bc1f486238dce96623660e72ba53d5a898fd105bbe40f73cabf52777321adf6a151524a26b1f1926a03ef67404880f8873de5a1925b9b804057f730328d21cac8227d721271e9250da423463ccba439a05ed7fa1439091c67bb745b400d87e374979f2219379dd82a8dbb57fcb864d0d7d5bf15b192e936c781373a2ff33d4dff54c8abf574424be4f4439fc3d621bc94536f4351a1e11c5541b323f50fde39cc2a288dabbb5f4f73a36c017a13fa7e37656f8e0aa56f15d2d51f7d693c3c640aa74267ce33e8cb3c8b64cb2fc6381b46c3241598b98f4f20253502c871a848a878815e6a1f5aaa5408e1b45c47989ae0ccd35e5b4235a9fa2ada449d8597f89d65b65bbe9740b3550ea4980a7b83cd5b7c914e7fb11978b9d81dbed3eadd3764262b0ad76dbca36a0763a36eed0b76bf5dcc8a32f421c78b4a00665bd870a014a87c17c7d07a4ed61b3763389f8f10b59db15e5df5c17106fa97190d3ae6e6e48073b54bf548f642c2cf869764bdce8e40469c71910043b0ff044b5300905f7492736ba46835b8cab4da3a67f50b49dba639c96e740542481246110f96314532b1e1ba7a6d2d3024609d74015c708d73d846c3c49a9514937316a9c9bac6b8101534f7f9f6b6259420dd63f3ea70d7d094
9c2ce0d66cf0006c074ee0807c29fe9135ee895fdcaa12006081d9c273e03d5229fabf4ac82558944ba4184ae626f8f8aefa51fba2b44a4538477656de43562e2f0341bb2dbf689ca37af9549eb5c0894a3c107e0cf6f753b882bb21ff3caa33d8d0921610d454ba2620e2bacd28d73f4e59197f29b123990f199565b3c7f43ec8f64ef863dfc59e04987e2b7da70459af68a15cbc20f041475d40586fd771198831651b2c3b2e8dd245f866a7a4477b22f0a1bc87cfe0c3c7c7284e97e54fc1896e146f613a7e405be0b4d39b3e01e50536c3a92b41cfdca5b3609be2ee195c15aefc3b958b479f77c91656e2aea29f09368df17c03dbf50e52cd4a91b1265cb8fa1de934f453f360b8d348a8c5eabd8316d2b39f81b841fc4b49ce1b56a70dad6093ef38b321635e7ff751f0445329441bd3aedfaf8f32c0f1712b6870551ccf02d0b82a70c88ffa399bba5405d5e9029bed2262df63c8d20ca2b4b07b480d3a6ba3ea93953c0bf6207ee49a71de0ea72072f66ffb3725374db68c22ecc80e635e4f20b3b264244b5d9dc55f8104bfcdc3729cb84ef675f3613448b32757b2a9d632459fc9e111ce49c8dc248a9f5f2d39626a848ddff7147508ac7f29a49428891659e325b868cbe8a66f2324e056c75e8677b7fd28b0e79e8298927249fc198f2e695ddf5ced483ef36acbb99209b04553ff71204a4e045d0a7ba2ea972b772bee42f8605ac528c43df51a493f68763f0fc0b4a0204b81413635b3a6c2059cfd3dc06f66c7cbcadc9f433f894559613bfa876cb32f64bc695a6344905b3f1fedf35a8b4d1cf103ab334c44dd91d4ccb477466f06ef7f30f3b8dd64574cbd4818db16f679b83842ed5dc84485cc1e9e75a40abdac24c6df3706643c537dd421d1114db7f4232c725bb908688e1c00d5401892596fd05eeb63890e4b1e2189774b896ca897f4edc2c664fa88a13269c8396957b9260d6759967196cec7563e1b22d84c73bee6df1484d357bad043c86c488291aba6ac76732cc99bb023fcfcff0f78ece8080ab027af79aebe87c6ac3783c0aaee4744876e3f4be1c303f91ccc4eb89678cb24f7d3827cd9b05ca74a9e9a50e047f822fa25c6eee65df609bad0fa0bfa9ae32a46711bec3231fdb74334f443a18097ddad8960fc678a59e43aaf5f52054cbc76ec2b42cdfe48b1581ef6369f72f880ccb76a477bd23d64317a67a612b4dab6dab7575cb77fc46d1652a3bd470265f8fa2407b4576698870ba20382bcb001ce4ec4b4c43710fdb6cece7926ccc0c366366954f091f496cb615556d5a1a137a15cc1a18c937bf0ff23271b64bfc874ec321904e43f961a63bde615e89762c9fea3a4c29ea0f318e59a334cc21b29ccb1a23560c1e19167d0948dbf07c2ef35e4acd5ce1dc0d3a11e5c4fbe195efc1bd895f8bf9128b04c735036d2d544c64a5333551ce4e6681d33f177e9b25da472a00977b1de5e42fb156c31f91ccc5e7f5b1c520555d12055a3efe4e810937bad9ca1263222a8577142a5c7f93ae29aa4841e290991cc7fe3dc0ef4ef8beb4cd03dd6b12118bdcea83d305f3f9ebd3b832549c025d61b649127153f486e7e8b154bb1becd613d4e8860c87a9bf37512da6cd1390afb6dbecebff0938a3040dde53c59b5e1b4f6e9542461c99b5bd8da91108d42940d2a6b5051d59bed6d127b677f4b9973d99f26af34127f2b2116979bb9c5c9e025baa1089eb1188465881925be695ece3ed9658b714076c2295f723bed3a3bfaa71a6e7dd15af68f0644217c705161bfd6780d413dfcdf3dfa92c73e7bcbbacbe08c6772c6de136e2f83513950e63384d837f215277d05847d7263388c5515c253524ece7ee2b6a008347c357d0895a3c73284aa032a33527dd5ac2ab1c1c25a445a83dcf5c19fac120aab02e0e995ac3c25b2653184bff1858a5241692610e81ed6961d1a74dbaed82d25775c9e6b78ddd7b84e6e347ac7b0aac6417276d0fb8cdcd2316ebda64c743bfc234d2e67320ece132b857e0e446b860ce7c4768800e22f49f2ba2ab08f3dd5f4269d91de9c177142819b3b972cf87d1a46f11d4115a13e5c8340724c87bb8b2eb45f3f3f5f352fd4bcfc39790fac86bfe030ed642224d03a10f44f514ddf3f9a137635844e1d75c131e7b6190aacaf3f4ea5e0aaf79445bff2afd9f3e71191ef290dea290b6d1655c7564cbfa572f8d8b4715ebe650818250a2eddcf30b506d93b99fdf6c168b3b6e629498cd28abcc6b6574d4fa5dabf172ae21fd61439d8a58d9447ddd3561fe8ba88a2d163a0e22e2175bc263361de152709aa72c76a01b7f2f5b74814cb47008a72d07b16ce6f68698301328217426c16e629461bbf60472d443ffd6935457b4c8b18af9a0679effc3d3ed87cb391b34201cf052fdea5bba42dc9a2a5044b5032a09d32b6a5038262d9f5a75b98f02e936bc5ec6c2c190bfb2558ccaddff6b56ee62cee54818181430980582123e31dbe975202360
acc713ee0dbfe8b08b816c2201a735827c329cc302a3817fbcec0e4505ea8bcbfa8c9e79fbb388c2b9b7e91d3c512a97d8e482ac2dc2bccc3d74a96b220da9449ecd4f796873258e7bde67aa019fe0c104405ab9691aeb12a4c7e6fe257391a6263b5139183f14b190a19bdf4cdceb3c13924d706f35eb1ccd3c12018bc99b1793381629bdd6e5c0287aa77ff077f8df351301b6ae2e2f21e73a81d750fd5b71ecc0aea9ed8a5a46af9825958a534a259cd3fb3e486b5f799eacd25907345aceb21059022b6a840953edf4224b454b7f8fbd6313daa46051ec74ed58300cc0e2201f5fcca2cb9565712859e198059d38ac6af79910cac4b54a2a5b1155ee4e437793a29d65d566d3b9f999da86a5c315f916c175d95c4ae14cf97fbe7fd397abe149fc315d14b8bcf7d68895e5a9bc3bfee4b0b4ff2eab5fbd2d160bdfc3a2d18592a726406d36d401de45a6c5499387300a30214aa19ecac49815e9ebd7a63fa801cdad6d0fb80748a58aa73165e78b774d548766b5d572fa8f549d40aef6fa73752b4f907d7729e3e6bbaae24b92cac5257df5591189568f2b2f6a1e7c1b99bbc309fe1a72e9883d318ab92576b4f8842a02c6397bb278d0d0b596317b67f30f3574259023213628a5718326ee5209b3520c20779145ca955fc2e7857044c521afc2fd5464dda97e6eb41084cb08dcf73347eb6484d4fd05419b845265d1b27a44bad904c86fffdda81114901ef43cf2f6bfab47c2780809f89d4b21aacf4bc4b16a39e34478273a735d8f01f3b71728b6e1b73fc38a3edb951115acf7ea07cb92304b3270c99d7610301f40dbf61a73ed45ff6920b5167ed459d7b0532c04ebd184a01d9293b341c8a96cc1c801f8c1fead721144b541db2020ce9eb3bc5b5401b9947fe36740a282bf29f1c8f8297a83429ac12502707b74358afaa3bc487fb020e533d12c436af04a736559a720b7e1db587ac427bc853d802ceefabbaa302b411e03716d6ed8946daa55605f62ccbb9d7538fc7ead977adb1253f0f797cb9c4d22be7585f028ddb67c653a5d3c858b72756e75dac69aebdaff8eeaeb7b396cf6db14836f094cd1a58f0332ab342bf81af752afb2649eeba049dd486157247d262a4b8b3f0cd0e31ac945fb841ff5acc529c8e2ca5e5a52e463caec935b57c71c55c54e2015c1a4d29b76dee955c10b1378a1742a5d345284d3c125fc7fbb4269de2c0b807f31050819406985408acc28ba99a4efd79082da9b407a400b5e5dd289aaa40c6071b0360077239b62291f645934d674ad12f11ce4209bdd0a03f8c57d6d336aa033276c0130cd22ccff5ed29890101272f08bf41b022f374ecda94d92981242cdc47479a73136c922e652e38a81b96ba6b6d078ae868f4626ed442d61fd8522444959e7019f4a71dadfaed8bfb4e80e1cead015d4ab11ddf82e60a9050e8336717f5087aec804401f531ff105dde440169a6812d43e3236bd3ae34c1c4bff0a5c8f07685729fb2ab97ef0a888e04073233d953dd6d55095bcbfb345c5478381372b2e2a9fb77ad36471af6967f6f7cf3cecefe0b33f70e67081ca6c548056d9a20e9b3daaf91b3c7d21fe3654f6426b12114b173ccd3c2365a2fe09effc4519ea6c51ccc8f711ea6324154b460ae244844770297b5cec22dc177e7dedbf9c338fb3ea936afd74cc5ed3faaf5f5538235a6368494ae03d11e385d81375d2711cb6ae0e6cb268751d115aec2ae8195ada2718f4188adf61291ca99bd31dbed7562af75c24370a32a9f2a2daa2af19a99cae499fb49da2771a8631fbd0651ebdce79cbe0c4e212c50d1f2a9f1f65b6cad7a6181465a3f182a525a36c950f4dbae788be5671b946fd77ae9777101a04f5420960b54097b6222fa58e8c32b14636485943c9bb0a40a417b4a0a95635ec7b5053b7b2a1ecd1c51fe1bb60e9e150582f906f01f578360c7bd406f347a15f59f087903e044ce36d9da2a42679369c1da23fac27f0f5e14a95ca0f004de28d7e519dcf23c9d79bcfda71711601b98f200987c9d9ee6e9654c3b871e5eedd2d50c07a7daaec41abb6d849e067fe1f5cfcb96e52ce813a802d2050f806dfa59d5b7159110211c49b099a07a036c0f3bc2d7d3be823bd1c3fd4bcc16c89d94ab3d00412ffe7583d0baf4766c74f079d74c0d694a4988a45f2f0f4e45ed1c03fd669b802b89b0a1362011932d86174e428bdda65fcf975cc4ff6cd5b04e10a30dcbbbde028d257399be8a5946226c0d75c16437aa3bd6911c63bfd765d9d4b1c3f63e766dd89b3266811d241cc971f420cd02c110f683ca11b010386beee552f814fe8999269f0e1e4bf7be844c687f7dd4860ce6848fd12c7f973ae198b87d60a273e4cb2c436f8cc2e00d0161b4f94e3031388e23b60e1d30ee9ddc3a0cc33dd74fd2d71c075e33ba1f548cddc5be4d48dfb14cc75bf9ab02cad62347b4add62687c182052742486afa0ded65c1ac639f4b63cdd531ced98f53cd0694
3a7710d170b9f18e7e775f3a8ea5825bb2aa74c2bd26a41ce23a48f6fbee660a1c66080d90bbc948bc820cf6d815a6cc5801214ac09561062f09a78e12f87850323e442dd2cc482f82be061e0f6431575a9a2c9006d45cb0cd017e61f0b769c567af73f1ca35b77512f5afc773a16ef67c52c65e71e03e6e363ee205a2cefeadae257a00ab4c6757aa8c159eb1bc094dc6b43da2698b2a50b43ed745ef97808d71046ac50d5239ff2bf3c47ce02d0d9ee12a237334c04b3bb30cbd2d4865d3ff104baf0f324a66ef59e6ce9702da6b5a6e90befe756940f0c05473e80611577b9ed73050223c2008bc0d4e5f0102662010539fdc66bd6480404b0f3da220dd98a8a4218ca4e31fdf870bc56d71ec32f7b76c4aa5bb1ee7580944115afb0c7aba4379b11b220d6cf3f741bc5ab075accbd2b85d688d5baa8bdac1078fd9b2d06f12a7db36b565891b91497e497446af4607b53a6f4870e34e40a2e241374d73ff1b274d088a45fe88c3ffa9344367b705a8a49d8613b4ea882f07775bb886235a0c186bf8766675a7c0086ca04fd420c6362d4a1b0b52eed9258d14245dd24eb7af4994fb6a9d63d79b26c31169582e89d007a9ca10363d6c9b60016096cd687a7ec51dcc2814ac148a7fa5d3236281bc3e73205f59ea4d83c595bb8df7559671a389cd340e85d26520b869f4dc6aca0f651c97ade62fad446073d86039713072a341287c8c1bd82025038d50fd3a1661cc980c0b0aa1a18c90764762f12b76b26850b331ca98726507c0bddf1d11b7e912c21340a71cca6e1254c815726efce08f9e10d65284f0d69f68534acc9090d27f196db48ba6c3570df76e922e15f44b07a938c5e8b078028ce98b761d3830866bd806e2eb5e7e046fbbb0065310756670889019cb2719d33bc9f4c5f45ab11a94523c958a677f12f426eb99f2c169c6a38c2b545049109adf24c57c7b459be4f4a03d56f863f1bff6a7a2a8cea57c45c38c1b37f84c5b3010025d45cc70b7705bdf865bace14f001e1c8e1af41b0d8bf2ab40d972c125253f93a10ec253442e5d93ac13b4bb15cac6517d028cba72da37e5c51a77e8784539fc0b84c02122536be71745300e6ef98b2b180784000dacec7daef61cda04c26615bac9c1058b70e90a80080143b2449838701e090a34fd80d6ff5f686580a85df1898f24f0f3845d007044be389ce5901f2ef991f93ff95ba38022622544c2630a14f09f5ca903686dde391de793631d0d53cbe6580386a8736240e0e198ef8799d8d98337b2ad15e5c88f3996f1a331cd3c665486f96292abd51ffa86071cc7013fcf6e4e8d77f6395ac7ee1934d21d4bb79ff63e30902a3a57f0ac6b5cf5647acb935ef92faae056787103727b8e806b2c8f6710746c2ae2dfc8da01e67c644174cdb818009d3343340b0416192aed994218ad07369d3c91ea60a3fce0a74f3a0d4c05a101add9f7523dec3758d77acf8ea556a7af695144195f74ebc50abf0da780de94dbeebbf583b8d56b785791d270502bb0b22f7fd6b98f114a76f5ad5aba532bdc7054352439b4ae96377d4798f142b5ef5d87f9fe12d625686dadad628518266a5ddc958f63e0a4a90b71d93a633b45fe9ecadf47114816a94829a9a7181f0a92fc0d1247b2a3d92a7bbacbdeca5d5488f1b3f3bc714388aff9df5f540c104e6b195e990c6434c7e020c16a560b69220fa514a617d38771408fce1e14b0858ed8d1bad49239ec2d0e27b1e1354ca7353dbd1a88decdf74caee281e9c932711a8f050b8320b831a9c2c56d8660f5c2fd9fa471d76410a566cbf23e3f8b0e75c99d68399a3adbaec70968a566dcfff4b0151cb51b928e743058f96024159bebf55b4de6e616818acf817184bd446aa0e5d22ade5909811f175828780e204e39538266c8a3b7b2801ca3210e1e3cfe66ba8bd69acc8eea1d1ba9a1d2534d77d76368bc0b78472f7e3ede57ff288b3f5eb0491fba77638a4683a88dfbfe0c36b3cf27d89409e0188459a24036859fce5a222574aa8f0dfd115b88ca629024517817cf42fe2fb385521910124c06a55c87d78024c913d1ff3dc55415a53dabfaf876239c841674ab961a939b1505b384ce947a1f15088904c717e69d464daf81690126f5ee1bec93cc9eb95806cbaafa0d3f57a4491bd53a44b349ef08d58a534939349beae768f85ba92771c3d9a5ca6b0a765e77709ec848f199056e4bead241428d0dcf6368d85a4ad54662ab48225430f9c4b08a210460343f703c0674258b5dc70b500ddb173c897ef3335b0b905ffae9abf13be4ccaa37d4603ef3352c4d9402af68c982be3ab50e96883a51fadfd03f040704f1ba50a878607f2b371390ee731602682484f64f6011987334cca158344d3680621c30abfd3a88c6a54846edee5cc693309086e02074d7a75714632f5bbceda5dcbbfc0fc3d25395e6af480516a10abc4ce7771eaffb321a71ebc9821a878206af62877a3151b1479796965403f42b539
ddaeb264aa7603b256557240cc540792b9c3ff790d6091c8e9061528a7006260011650f7d7f1632177d48d490fc5211f0592dac820f792115377ffea8528e9bc14a11c830a095b36a430f8985363c1a68e317d32f07a92c267d8e956388fc264e4f9a15971ff54fad3791cecb46879431029ef8890cea582458c40273e76618ff0a5e0958077ab474712be83a7b8222d38104a099429a70294a55e462bda0b29440c0a784d76a40912057a7099417ee661ee8c691580085e086d39d7dfb77ab1755df15c01fe5837ba1bde981a13c171330eb5f737959a05d2bed48f86cae68502a75c3032a2949994470f703022a8d97042e4c4151de600a3a15787c1a35cbf3bdba237d2e728f9602bf56637cceab181bb92cd4688bd9d0e951c1de305239e5aa0292517397490d61adb8cab2ebdc29422b61a623db3fc13bbb99f9ca040961709b9b04d04364f62bacc36e9269097f38a049e45b5e3ae2e8f8fee2ef391121204362f102ef0eb429495de6117f6ae1391eac944e9540a6e07afba3890caeca9ddacab74cb85f499bed564936db3f9f451bbd275fe30ae6f75e8d1fcc967c52751cdd6fc9cae806c60b81a0ef6f15a88f45fc662c0040b2aeaea46b05d6c4d67be14ba87bec083b727184fdcd73f7352e52c5aae6f1934ba422a362040b0d921abe65bf55dffb1e2a063e04aa8612ecfcf60d85199cf1d02b55776ad78ee6d3112fe4d3d14a85f771bad7f94674aff37d7a0ca3271c9788d589f262dbc2df89b0762764343fdd44630d32392cdf11878c7424b57340039ae741b253c278fc9f36af6cbb8f525016487622e0f166c69fdf8f3115e67144d2637cd568bf53b2ddf50aba805626c5039cac6e9240185ca31def69d7dba42c8330ed8094754f647fd4d3917c5860a75157602f5f173dd5d76936480cfcc54713f1040fce62dbd27b6cb3c32950f5fa4274c29c88a62e7f0fe0a062d4969448d657b4513698a28d8ec054c0334cd8488cc14f62bb1368fcabceb5c362c12e2e01c827f0ed1258fa3abfa4a80471989c18aaa8dbcdce2d8db5ab33f68514ce67a68376bbc9413b2ff54c8bf7e41ef0b227f76c84157b8e2006121b4e4fe227341e253bccea8d284fbf4b39bc627164b22893101e663e2e2c72017d22f757b4c24e835b0909dc8d20994b910e42bf77ac3fbfb7f0edf985bfbd52f9faed032aa0c2d331a32c68333bb8ea81191842807b99d3135a818230b1484c1fc410544d765576a70b99f1c040438fa064884baa081f26a6fdf151a918fb5e91625570a01530d07ba1aa7d1fbefa6eecb7d729cbd09ff72e49ae73b62b59118c5c4f94f108a17b0399241dba28511d993fb66d9cd0c06894a1b6bd3938eb6bb6dec32a479d0f2b827c39556226d97c547f8f4a774ee034cc7f113102d8bdfa4b980bc136c0a97b7bc8e3e18e300a3472845144eac256acb223a0873a6dfa97d1f8c6d3ce9f73338133dca9da3b069b9c1bbfd3d07b43938cefeb23083d9ad854e53a56a5c2f5c9c602d7ff0b48507406dc1ec3bd38e4c9de04f59a5d0e448094857bdeed766b48a27348ddee614930e1a615e5e902a11a227158453978fda90ea9ea8478cf80bb479f21c0ab1479ed038daaa95d392b62ad1a3f2e30c0253a0f739272631590954580afb727efc59c54990b39322fa6ca9ef3b61a63db1eeccefd97080f726f97540d0f751235d206d13971558af6caa8aff30cf486292e70515463916c61903806206636fa7a9fbccf6c04a046d8c65a2799906d48dcc17539bdf2c6a4626acc9b570218a5b7bdf0abb0be75f4a7d83852b2ea21eb1804c521f7954b25eae932fb3cf7ee12c871db10e23575fae2097865e1c4d7b5fa281ce0e9c92d4cf64bb6e312351c0c11d57181a42d704c6d93f13dc7025f4460b51b0014828061c1863f8fddbc5de684c876a7dccace1122bb39e2984a1374ef2f63ee920f74af2a42b4d6dc7e7f7d833a58de62cf8298efb25331ffa5cd10aaa143735e041e6558f141ee8571206048971f1b66d4856df0da7aba716ca17062f5b8bc4f430213de2ee0b1b664c75f10d29b2a8d0d411d2e499012ef595a1bc78ae36d21f22fd49d1ace9abf9c5de7c79d3d7bc5f79570d7a5bb3c39b25f3cda372b3ada3741eb5a8552fdb38f55116bf7b5424e6506973b710c6e0f55ff3a1df97c2822f760463b074ead5570b064c3614be14d1c54646691183d6e7fa128224c0468353666c5c2fdf78a6054d601a20b0920fd79b8d045e6f24e982d0a11455c682bb7bbacc4397787ee54cc93f0895aa5a12c73f5da07146b262678a5ae19b842b9771407611794af6da30786d169752a208c93eb009d92198edec0059b2aab2fc275f40c0c070d0af74512ca07d5a12bf5a18a5061261cf1e9f25a78f77f440c945eaeb411a1698e6d833671217fc2eca6d8da04043b6dfa45bc7c4985072ff93f257019e7f7390cd7f7bf36304543660a56ba23a46d4bb5b9bc68f4af5581d7
444a7252b8b8862ca7cbe8b23d15e5f05aee5142d15da231712aac6806c04b73240fdc605b812e6d344956e4fd37f8cc90785bdc94d07888854c7123a096763ce99e5fddd047eb7099e441c8b0d8c71de9386e1e6f6c32971fbe2a1923d06c01abfc8afd49f94688e69123fa6f828c369fbc4f7a4f962ad1c07d894785f976a6342fa22e3e90ff58ab0f0870350883ed5acd826453ae971e6b0d73d6fee37917698a6f85b6b4174a10583900dafd9511ca06c764b6240d4e07ef01ac0184b3811fede2a6b531f95a4830be05c311d2e468cea3593b8e76381082808f702b0d6818a850bc5f9b8440f262a3ee5fb1cc2a087984642f3a70c805e254996a2e64891e0a58b26913baa5b50f468aa9be48f47bc5a73845f2f5707893de5f7e3c5fbc51ac5bd3ca6f35989e8aaa4c017d60c0bb3cf8742ab1b1e202b81c98bd55a78973a04c35a210a1bb33da312329393b314e4f25c5a83596cb68df090a752a278f977f2a4c330517a7f6414e3cb5c23d8aa87525e023f3903be88c77fd3b2f893c2179e21ccc05d9372190de5656cd1386ab88983cc09e02c0be68521ea1cf2f57b1c0433615df2e40209e024aff287939279faa29488d29ea2e59ae74ceb3fefa65af282027f8e9a0f0227b15573a4281dc07eeda0069271008574d11b21add39eee1ebe78937fa0fff89dc3d72530a84001be39e0814427ffadde7e1c44f5758eec2c91af5a00df720c6246f486a0dd9f8e5be9b80b5df89ae8e27aa407e85200ed8d614ca32be91633309e08e992514cd98b6d1c46ba9ab9603458b3d1d717a7979bf01f1d109eb8eabb5869b232c44974a08060eed5ab7e44d1dbbfa6cbd88089effe1a7b54e968d09c7a62c81305cefbd45822f6e513ccf85435dd208d62382bddb009d21023f3a1880f2013b02c88b8694fdb32ab957faac4989196abd3a735e466296808e1ea066479bf4efc48b32b21b3104b58f6707fcd93bd45252a476ade7e512175d38cf91b06328d9185952789019439e2e7d36af845abb131206f6011119d7ab60e868a930d14bfea041e83eae11a1c2b92de349b55631528e81889f07f9e5ee08b0e1fd22396945410a4eec7644ff76060187bbe6b3fba8db83a1dfe7dabda9a534b2ba266a7a8991bace93d12fea8e02b9e43921161006cad50b532b4212b3be6e8c5c5c9ac46684adb136bf1a04b3e3882ed30b7c90be40ab55dd43bae0f7f1930c299a1167b860a4c6fbe0caf495195b92d22e180a6c01b5188c0da77bc7798cf2d7d41405bef5c1d6a6b19535d8d4cfc26b8e4c57e14ecfbb07a2fd3315384f6d0301099dd54963ef8b9a5d0a015b4e67aa666b6653f19cdf58343949b1e6c6c832e4904c1831d2a0e34c429d88effa68dde2d4142f0abb83693393b1d4ac4954164f94a48156d80f4c7c3d0c1810edfdf9c95d95799cf76b14c629faefc5b9330a6b655b77bd6c58ab335edf2a287a414fd511a5116ca513a9c92636a77ed8576aa647653c6c9286d831c8db2f8f34dd73f961eb32c6b7d40587dc31358dd8e41127eb0d642a86882af769a99d5946cc563b0227af8aba18046df1b06ab4d321679c8e75dc58e029873a39de62b240e4b74694235eaf73b09fc87884f24598ac981980c0cfe4348c788b5a1fffc0e3e7e7631a4e34c8c22a8cd86a120dc88597064d5941ad6ecde5e71612167027c062a0e9d712e74717070f408d7a56b3a0a8c97c6bdf246568183aeb044056e9fa16d338e372aaecfcb8b0de7e0e11f238e0b1c6aa5c743ac6b309a9bcc3813770821722dcf51dee457235156d7bf3d3c0cd32324f07564db64dcb662c1ac46719dad281e235872d1b5a198e2c798c8b3c39d04455822d44ef70bd60635997e7b2252bb36507642b02e6bd9e3be4482aa8d302980a0f83f5a01afc3cdeac08b7069e8737025810647e31dbadd8c05ba5eefc1b2b15044f59dd5a80721570633831229d2129fd97d6deadd44e3be2bea44e44b2b0127a9bfdb9d16da0a051385f1900a36085be1d4397c4107923a9b843538899eaf5d7ccfdb5e444fcfd12e4126d570fadaf65cd4e9f3811725c10da0104b1bbdf2a9f0f49d4980732d30991cb4564d7787e9dd94148d14fd9a84af8a9e1f2aab6dec8f6b18ba8c31f91bfe162643887e549012e5694a428095a6f6f5899e3bb7b9c2b8eeb25e82aeccbd7b697103a9355341ffaaee34b2f017bcd2e052da6a8fa3e1f9f134d6577eb55f201a5701bfd2ffd1d3ac34f35b2c7cc28c2f279fdf26c1d838bb135f7a97c51c521a58a76af85a0499c4f38261387a2c9e3cc254d5d6dd8a45777bbbadaf2cf30fb3765d27afb05e73847777081eb6aa1afb55293c5a05b3efaecd779e98948c843c82eeef074d74d514a1a4c669d679bbe5b0466a5a0e13f25a16c0550f8c50efcc82a77ddd584a98717099f5e2102580a6edcbd12a8d23443932b33aae8a0013a90984fbcde63a1654ddf48d89838e72fa991e840a2c643d1805eb84
060dbd0c4265313d22e4c3c82a51d2102dbffc21f5697486aadc63b550816bcc151945e04e66fd7d31d897419c8ad6e70d0f5edab0b671ca999e47f851c251a1e15d2c35d2d460172a7c2422cea981dc84206d6feac7d049a29e74b3384338acc5dc150bb50cd9debcc920769612823513f552691c088a6d0bf24b69581b4fa826f2a00d1c61c85541580861adf7824b2dbeedd0290d711e6a921babe288f8069c1331282fdfa9f7c6dd0f5f0b0159253135420323a23c47c3fbd6945648d83fb1a5ec87914a8134c2cad430e0419df8703018a61602430abb5b0fa73d842cee0e1acc78c26f46a96b3f98a3e7db3531554500a7f6e465feba40912f917f5c5bef59806febc4c767742ccb0d3303e8e54d29ff1681b436561a861c6d00518a89ee18b7a99d7b79964f5da023d7c8422806740e47e508ac0fdc8e6f5ae2ab4c66303be0217c81b2aebbcf80c00d6d6a38e4f0875e7dda471e89ce5e03344f0b3fc4053006bd105804e375cf518d0c389382c79dc7ce133db6b4f98434356a4608c1d82f52c859a50edb18044ca3301e50b2664a061692c22c465b4ec0026c28a2fcb95e8351190569f74a203e654066fa22b8c98edc6c7fbe04084fa9a9054c04ed7e2c2dc8545134b058c959cacd288f016f22309c21a68eeaa1bbac71540462038b51d0f14e903f5c6e6234a6112dc4ce30815763074990d67d74d779eb2366fd9f80e93785ae7cc06a6be763ffdebaa0f1ec6b6b84492b11ca5e2a474330f025b717db4ef18bcb2896c78c8f8a9e661f5b0986ccb2572f3668223ef2fdf4bb79d3a9394d21f6cf118ad5f84a06ddc1f9f9fcd95173969415bb47fcf39cd8db086d05b097f270054d38e501bb3d8dff727ce38cd3a21e9a4eadec211dd984b14bab3c9b8ba43ead392feb5c6c7fcb590583d6dc6ce89398a4ce9809498b7f08d08953fa76d1dd529cbacd382e3412e4bc79e3045d51bd304e92cd425953ab04efcf896c8aa80e92a2dd4f4ccce91037eb2ef7d70eb3f431092afee17cad74e28cc099bfcd80db4b30f01421ee27758cd5a9c872937a78b7ec3c6c84dfcaa81adab8fd550c65f68fcd49f4d012dee8f5a2f0bc3daba3c7f8a0d87a9d97af99e9ac94214d280ee913cd1b6b7f7ce498fd6afe3ace43d7292eacb5d8b5233593e3f451c25b5ea057cd0491681f335356d5891dddc0efb7a4a5efd25062d5502acfcce4a9a8dad83a04b759e6aec266fc266ad3c3a8b2eddacfd9c79a3ca37ea57a1db4ad3885fc511fcea2aa7bbec1ef2c32ce997acf4e249005c14e8ff54a424275f0993cbcccd03ee955a611eead8e0d0fb1211b3acc23b95f4ec92d198e0ddc65c632a3d9f1dabc1b8272d9e9980bae1de6b8fc5f70d1ebb9b7b5f7dbdd0951ce8fdcfe066879de8343d8bc08efc29df08f7450064f8448a5d2c3ca94af02347407443b88325ae908e097db3460db17c9c74f4f836f57a12a391da6be1186d30e743f6c6b35702999a591386081c8c2abc52d914de36bda2608c9f2efb0fe22657af991746e5cf621f5184fdfd329a096962443224bc016fa5e2beb6998298ee62015a6cd45341fc3e744fdf7a728cb1d81ac13e289e9d30109df1e637308a5c0b007d00a7f8b3f8749a25b5aed799181d521b37b637dda4de9bac446a4f13413139ba9d2e745caf12f4683323b092135fe5c9035121b75315c77d38d330717701e0d3141b2f40b313dd4d9c08310ad4b882f8428d29728cd7779b0d5091d964708b85a405dc0da1d3e7c269b12c8cf03b2c0864dac83bc44c15f50321a57d6d10714d481e4c16520e19c60884accbe8449c5ffae5871ba1c73aa525d8d7988e88b64f85bccc63f4bc9c621bb9c47310ab5fb6f65d7821797540aeb52b4cca0dcdfd96c1617f04605439eebb067ebee7284df7065486b62f0d9378edbb5d6941f7e4d67b74a43dc3af52d15ca72f8eb91b84eacafd9377684f67e0a86bbf6f3cbd83e137a8318541319aac81107880cf670e42a7a9620b7c1b1cdf9dac1550b0d18659276618ef73281200fe1f8223329e921ce0cc40287137f9173bee1b5a427288c81c5240a76d3c667fb12960680b15a60852dea0eedacd7093e5c4557102b242bb825ba5148f3a89fa513056de64259dc399f406c3796e1407f0f65de8b99dcadcf9e53d94929e02dddd6f5091ea21d05cb628490c8a067a81fdc91e6ef60bb92cfdc11a9440ee241d8c3237eaa7e20f6519fea7e432231d20112d0ef83f4c6e233cf57305f4866b1facd484826f08cd5035a2daee373f312e1054d40c0a75fa63ef042323d02b26ce4ca70e7afe79980084d99062619495f92d69bd05678376b8592399e5e525b95c46f2d2b14891ed612ea71de94f536f565e713c3198a1580b136ac5bc7f7b90a415c7a2be91ecd5669510ffbac7da309c6ee91470c34c1417cb584ae889e56dbc50e01501492a5d15b607a7a370aef044e698bec0af649ee04b7cec81344a8d9a7f87cdc5b5af
7d7aa64ad45cabf92bcec09f5b7ac9f3f0d5d8f3d7956c629115510add214eeb2a39180f4a33d8f01ce270b383fde354161284438507148403e15b75a799e77d21522c7169835e13aed63fdb1538fe16dc069c2d7e886dee8428621b432e1381c1aee36cb6a70a4d81485a0336cd513b811c7c611195f7a20e499c8b6dce3e5599dffd1dbe3d02d475132209dfd83ee90e0dce6f25fab18328091f6681ebaa5df904356ad89e3ac880187fedd0a3cb547911ff7a3b631ee2c69e45033115740dac55d3a5abcd7fdfc3075c3b29eebe705f7fd22d276a8124fae2d8e0f6d55e8f70dc08a122a64cd29a89fbebbe499ae5a6b1548d5d6604265d2db426ad62223997f8f0386c7add7de7ecdc4d44222a15f2a7c8903dc4986fc1c03d084312028cd6f3a69a07a65ce8b2a577026a12b9bbb43e0c264f0a171ccdb2778c84400ce2b660dfb6db5136f8f3864dcb55183a981213cd6fb3aecd70cbe269d8155dca66e6993311257eb4515c6b2680e18cb02fc671239ce75fedc91cc75d42f01ff923320ab004d76d91bf6cab198bc50bcaf72a323e3848b7cd6bb984561f12fe8d42253de7d3b636d5f9814fbc724c650621a2b576ce5d6c186f2d8a875f970f1a1f0ffaf4badf0f70f1531047e4bd17e0638359960c1d7ddae812962b1ecb840447c2bb9d91ff85e37c9e7a160514ed55ea8638ecf90a95c07d9968a03453488ffc3097c09044ba1cb20716c8a5b10834f0cc3e264eaab23109a82554e19ba07b04608882140a96282b1a30b9587ead9dff8b5a393da68e73ac88c666a8d807ab5c93761de8acf8cd13903bab6d3dee5712339a583ba7c1dc7fc454f15bb92a6239416f980e04a0c32f9002b89bb91f7c3f3deecb0b12b959ba828b446111d846c85d53548b1b8a1164c47da7610adb8de75f74054879315cef0a12d477b468f1c95b7664da1e9c771f63d471746b344cba52cbf18e917f9d38c0e1f49215d39e94fafc9654c9ebe4e8db8806898852ab6a561acd28a907b10320e246fe4c7efabce8ccade6b8068c27cd1c1b479a55792d75eb3373920e3d41e24acadadbdbb675b3c2c70d32ab9784dc0a2fb4506ff9944b8520f8f4fd94f268d2fc468e840f24cf83b0189507a500a02113109456016f950f7e5c7ba7db0862fe53e8ddc41265078a1f83b0bb06f308dd86d9b427392d2f378d149a231a72dad3dbfc04df690f0e01b390a354675c9353653a674a1d03197e97553ff3867d8ee45bddaba03ef0188b2b12b1ea0bfe28359a4479ffa7ea51e273208ed1fc995208b847250589d7aa5ff9e8217096ee9fe6487fd014dafcc9316867748ca8c16855af9a947e77dfb1fdc28fea2161767bdb40270837a75ef129e1b8e091f60818633f5783e6eb4307e49cf8efb152eb98b63889feacba0b15768f1a1d03b1e7092db46d6ce84f3f65f77eaedd2e4dfa84d14e43460e02a7263971087b086883cfb23ae2747af3cb2bdc33c6b807190e8c32cf10fdb9a3bf7b9adefa108fdb09d8b35ede8e939fc9f6275292873bb34f8bdcf7e36f0f33b322c9cab9809255bab01589a277ccdc037bea8dae76ac730308b5cd313245af4cd5238cf303351c1361a7933bb412d13bba105a66f2756bdee8b4b0b7d9484d75f2720908a94d72b7cb8ffb8462805a8916cb3edcf595830b8bdc6c1d299db7d34753e1890b2fd6233ed42692eb5a1c9ede7c599441ac2815ad0b9cab3cf98f4811331099391b874c8b381e2b20051b7d400da701ade95fee340a1059bb8c434131b06e0d249fb0878092a296c05177b48f2b997a865331441a6cde7e1213834400e32120aa926b609a8a7ac4bec91edf4a4d5c07549c4b9c6aa7b8aa918ab66d0c3692836b3cd27e382f7a041b3be3cca203aabaf4d933554a82d824bd0e4238afa625cc0e2634884af1f0b919fe904c9859a714fb5b38c31807c496f0e8306509e45f48db0766d0e5c1c6c819d988539484d80947435f515c7355fa646643db2f7d0f96a3c70f731d6efd5a624af083a2f7d509c4d3d618a27442360765498c76a30c099f87ec7d2120fa4c109f76ba67f662d5fcefa5bdec83798b450d7bf6c823ed9535dc3450749c3ac83caa4e6cf83b99ce0ac3ef09657a83d293e204bf8c2f50593d6ea6641ade6a43ead6ed5113b5cfe7513928dc24c35e28dafa177b9db5736d6769d3acec12a23d080c48110009328fc09fca054c3a2646552926bd5000ab9edbc1f8d104c69d85a0cd83c47b19e6286fb0f390ae74544396fb52e0853ac00a31bff11383d0bc619c648a7bf3b230fd2e19bba4c4f0522c47640a9abf54994fa2bc2daa627d127ea332ef8b0ff9dea288f2407e4c81a0256cd871d55f410829ab8615f99802eb605302444d6f41049be8de2ec066dadaa3600b68370387c58cfb8ddc3b0fd6c0625f71f45e024f000898afa01cd72c50de3ed9f4a43483174de84742b319536cef386129b86dbc74858c6405837e0d41097d290e2bcd5
a6c076102bf28857f5dd361123219ffa8c38f7d157fe21f3cbd2159c3a54dc35803bbe23c13e3e9ecac09f845097e6c5dcf923daacf00918504ae6efd4a6e7259881e7fa90d8ad8cc169144a4ecfbe4c25f11ae2b7c7a69a28d535d7d01a8940e4e988f91aceb3a880f7ef9ab58a62a00fcea6002dd43f0f6c33408cd9123649fccc2669151d4e2f983547625ad795b18b406b53fb48b172880d4e6525c586e064a73460de5c516694cfbb8fecb5f24a5ac009a07613572cfd59782fc691a078a05ee17b88cb4f8a12d016279ba8e94df0b6a6f7bc5b03e62d3af0251776922bdd6f05e8664a188ce548725002b85aba6ecccbeecd1b8738607ac0c4cba717011d8c4c4124231cffb7c5000444efed47ea09b74e54affbf9a356e18161cc84971462824e59e3a7b077b637541497ca970377f0a2b5c60e3a1cf1b224619a5163707cf190b508bd4b8ca1306d5e1cf38164974c361207bb10b7f8c21590e876fa74a45a015003e77dc14dff53e3e7add51b88b8e248f6d139d5ccc6255b98feacdf3309588a6805f28e208ea513df6623db4f95b43ffe3b3fafce19a98634c9130c246cbb54f6f710c52252356b1018cf0906ec67c84c606d7452ac9ff94406218019d450257e8d0f8db0a67ded8df7aad957e27630453cc51c8f4a9222642bdf704b3b30704a590032a615b95e202538db3435d670df5f99f167b4dac1c763111192151fd4cef948254a5d8027efbc9fb2dbd846aafdb009bf2de9aee5b19e4300dd331d70912313dc2f91aab8b6360ef708b84c86d88220daa21a4a36e319d22bd0ab22a7b7bf2d527f56d8ab423ade49c0a34df061cc6dbf025ad7e3f340df58b159c6a790b2c1f10a827dedd9d6ec3bc818d33121133d5eec9210328a9eadba0e9f661313522990f6e33228183dcd97c8d2a74e2e9bd48075fa58b173314f7c65ae5ce7c67db60463902acce76c84da3a52d217f5d96b392da931919a6be53821c10e4dd047685b68303ac03c2d82b87c0703d8cdb4e2b76e7878b458e44fe6ae1827e8bb440ca6ee0dd0c9952c5382f1f0618427efe3c968a9c6abf06a141106ec884d447108060ba34033d003b0b25714aadf1099288842db2f522809169f360442016c46c24a5418c51b504454ddac904851627c313fbc575b7f68f3027ab680e5340e6516620890eb8f22df46694dfa603bf299a450e1808824a496bc5f606630f0a012ccfee7497923398ee5f1091b8c0631b041f395d1b9bdbc2c42d7e3aad7898695f52b019d97871cb292f5e86282b7ed2c16d4aef3706570ad400a5b8fac88d30e4498344f3547a84c1a4e204bd5de172d245b8e70eebd86b06be32534a22b04b12c7503920f306f2f560624106ed8bc444544234b05906cc00e22a68188a9bfb9ed515db4d232a6c9556deae4c0414e3ce2d1e8d3936d30dfb53b9ea4e69a7c83130c57e667a57062477f24134f5c35e969073f452beac9fe2ccb98b720a937971741b11af950b0d7d92f9f832f6859d9dcc26d94190d89254cadb3eee1c8bf395582be08fbcd0bb40fe435009dce25022cca8de347b7dc0df316927c344132c783a334ecf8e90dabb1dffa6f5717f3c2a9a1a507c88277ff54feed9ca8a6dfa0f60aa6b4ef963fe21ef5653bc58c628a6307e3b79585674192cd3339315ee297f0416141316bd48b60f993760c53f5a05f45bb002854703d9bbb629888c824e9963e1430f392a15776695ce1ea0e8f3d6d12697936255738ec23fa0c28d60b9d98c3b0ffed06f04d4310c570713a6ea1322e3300011e885e950969a616b6f591275a18e96e4bbeccff4ee4840036d61dd88b11e006443394267d3b16a3f4238cba631b143d2a6493166defb1e9a19dc47daa472443b987a2ab0a832c9b44d106d320db701bc04bdd9ab909e3976af38e9902c89fddba45b4a6de5b335e82152bde2c15aa68f48b6018ac2f0ebfe7c37331db31803012ca9e09bb583365159c3c7deccb39c56615171d9b859e61f642caaee9882a439d1f3864388b14735f563c6e78ecdbd615271aa58773a7cfa74d9d5646988708b609d644866a504d17d89c868231fe920670c78507a4ed26db3ed33bd66510a6d61d17b7e99bdf07a8aa2aa13f72c161374b24c151e356f99aca67e58d1d59675e4193a475e40c27296b4164fa3148e701a5bebe3fdf1f6e881ae68f5a0a7d20417ba92646c36e85db329a19b5c09c2ba46de3ab3dd3edd46b43656e77b93a29bc663f3644396b710e616f2627361ff89e25641e173c87ce510dbcfaa6e7015279f659801fa8c1ca2d42b36154ae48d1c05554871b02c31f1556cea0949d99d30b9d3ed14bced86e0f6013366e80db18d404027e3635d486f89f6f420131cf326d162b8f80759862d8524bef4984039351522227f988ba88b571fd23ff528fa12c44d59cabe2c1e6792a55ad21e766744777ab3fef5277023bd6afa55987c3c548213b7a1923200f11d696e24f93379aeb5191c527f
5080e1952ba6efc2a019ab17e4721debbdcff1c9a403d192eff709e070b21661d8200380bc07228496103361c8d26034145344170cf4106b889d7037e8d37c0b1d6c5a1234c8216b9368b157df3bd1ed5b496637ac35e5bae43940bde655699868060871e986a26eaf634fbb74580bc76379072e72c2f6955e8ce7ed6eb8d29c95ee6940d0dac83af01e928c5015c2c5156cd2d8f20b6be4f5ddde1f325b639bdaa451a66017742d04fe3a772df5d7acb764c7745d2ce9c923b69d1790ecf2311a391c2013fedccb7b13b87f7fb49a48903050459702ab0010a83a8307f0fa5d8998c82ef0553498b1445e825097165eeba997a3f259304861d27b39f52f40245ff0f3846c48e1d7f9e58bf2089c76769fff71b6610bf1189486bc19621796777a65e7e931016bd2386aeb7aa3d90dcd1ee9e44efac272ad5c9804ed0d3334da4beb1eceecb0a4a567f4bb9008fd92ded3ac8bc685fd839a84162d35693e0bcb48858951a2d714741d3e109d6e39a02a26966769b6a5105c69e4026c93c9c15ff78630b0d03d06776c04771ef50e31afd6501cf2877c5224e16db49baceb844c462cf81764142adc6fba3060895132942f2b32225314d5eade927bc611575e30c8c93dbe4b87be3a55705dbae61159371c6b3f2d6f0dd352a62e3ee7279d42b63864f67ddf7afac3b1a6dc5874ba893bfa4b5042195578ab1665835943aac7419af336b8207f39e8b3b0b5358da2879c9cb6157ac1fb2baeca87f4df2ebd6c8ef1cfaa4a863eaddb1087947509e69ae04cee052087b4ca1b1ec2188339b22390e73e5e508c08bd3f51a477417c45938350b5c2a6e81ad3e286aa006e6a14885b2586915c28015b32a70b9efd42c28ccb4aa71272ed952b70d52586f3e51b5009d7da572604582820cded25405bd063e78d3983b9b75f4744d8569364c94126aa69938faf469a04a0b13283a3bf8409262e85960755ec65411b1e262cd1da5c0fdde02467016235dbb95ae73c8acbea5bce763eb302c660e922e89d2d6b6a068d42b8a1e01ff15f93cf600f3aa350c6412a916906ebe28aea34b474569b0e5bedf9b95539a82824546191f460f4bd552a6c898a9ba989bc29dcd2bccc8645e754cdef6db2b8a598133a5ef2dcbb2074ba053c36d90717dc1bf5305169caa81acd0b0a251465c2be78fd1fc3067f03d5341fea152a6d4a365657dac8eaa1acd0839d22f2da3554fe4278351db692fce3cdecb3d3b90e4b8f17e81298a945c395b6f42a456bd7198f5209d00b9147b216912606d685c91508e80080572b843b3e90ffd51247b1acb4f381ba1ddf43248f900cea7b60f09271a14ebadaa533f5577c9e01870111e128e4ba0e5c05dff37e5dadf8bfda129392c85f80504e3a6a17c7a7e2f8d4ebe5598aa4f84ee3f5bfc5bab6bdedcda5673f886bfd518156a4963ef8dd0fcfcce8e8256d59220a1a75a2deabb6de905a1519381236c85a72b192d772e6363d6d09c7b38c5d23f1622bb989326d3811ab9860dab22fd2bfa460141e3a436ab4ab2e638191dc4de409d696b329cf9bcd83cc2df55a9bf36c5bafcfed4154cd89ed5aef4046ce1f7e2e1307d94111d9af9116a7fbd274b44fd2d46e2d80e4361341dc26e55cef893c1b275e92f72c31d30620b48b8645fab2c12e9653fafdab5871d89e04337aab6908153cf8ae0fda99d333e633ff3b782a125f5775d98fd05222dd2431091f44faafd2830f2cad1e8a1b73f947384506da1f1cdca5ad61a5f30a4a052605bbcead06814e26756b143d1274f7bd050b1c4b017c3819e21f86b047f8f599fc66e16a763aa8484de8e1ec20013f41b8bea204db769104715a8339c44b739f424b67d4bbf7be1ac8ea485aa5a1fe0ad558e66e091d1c20fb5de1a47ae2441a10748260279fee8330d73859f6e1a200c55c4d21c3bca2d2595951bd7105d0289c821f5dc2b1bc7fe82e1e2e3df14e1e9f8fa2ac3cd178fcbfa4ddc5cea18b1e63cb42adef0e968b28c1747ff9efc985732bb4b0b9e57ce4fae2b5a5603e0b4cc5520b8b18d2f331fd387c2c8c9a79ed07cfc95710fa67fba3f5e9663e80390fb0a3bdefb4f76d741fb1fbab86ffbe8a4ff7df7e845b69e40c74099c18a62280d61fef30fe204fbab260da4aad13e714b6e973297c06fd30d94a2d3030a156b5614b83abc40f62ae727fad165ff140a3d51cd0378711ebe2227c6c2cc04f00ac4b575f5b3576d5e68e696c00370562a5b27c59194804f6b3f1b36edb26b0ac273f436bda18b8b14ea0ead880c219283d88af684de335a94d2d3fe51be5a9510850b128bb72604213b2c09f50d247c52a6a0268e739b123e81896909786576cace5852c24fe3c95dd84c27a443a5597952eeb0c33700ec5c300a0896fe2502aba217df125160a8f6bd5dd7183f985cafb77fca0c93b78485c18c75aada5796f3fa44bb5d46f115ee44be2361ba17184d97cc867393c366a17918ac274471271f86d902d4131d463dce35
6bb20b16f6cb8b75981271c5190125318e1a9d97be20f6f050714d03286954f686622d90f912fea4634392fe7327f59776b02c8cfe4bc1573a56a88e5f26a9fa959005d6439b5e277ddbec33b6d4fb4d22c07e3fa193117dd336802579fb61b599a8f8ffeb483abf6959653959918da72442a6b4771179280399f1c242e173af16a94cd073ed007302bfd59e655f97a0316cf6090d8686ea608d93e4b3fa417a1002cd624040e42b96904b5a303f1e77f6e6b565a3a1eea45b4c6431f60ea2172e2b4ef714834507732c74d05cc5186bd8ac3323f8d2b83734670f4d65ba3020dcfc8d2484e394f7be92279325a707cb910375f90671e377ac5138870eff386046d0d16560ca3c679d511ad792dc26b4d0664c0fa0c736ead7c8f7c50703545dd3074adf326d78ed420b4ee3e8443db3efad0919c2139a746d7524f17355ac75fbce01422b1314b989866656f90209cd47a0a70c49c5e4c1138cc82c8af962aca4a806d1fea5b50aeff1ea7097e81aa0340ab23b91ac4209057df47efb11436956e65c23e970141e948754ce7a170c606c24cbdbc9f790822933d67c8a08ec193683294648a6da37ce7e5522dde96d0f2ab0a4fd28bbc6ade5e5ccca9fa9ff67c4cce67b14577751a181014dbd91daa932e6eef48329a58beb70d606124d23cf490b1ef92f8215e4929db67750648fac893c87ee204460a008a9b92b2ceb6bc46e18b7ed20fefe67b47c514563d310834a3807e80b19ada9c29f90b10bd101ca88ab92b036b6a9f47826df90dfd0a60f2d326a7faaf1906dc2002ad48a6a111a8d18747438d1cca726a4dcb363a02d3d3210b9531c61653e30a00d5ff93221a634fe1aa4a14f10475a96c6fb7891a9bdb4cf665889a36e0c2630b510604178000514d2cec6f980d0558fc618046be1b13b05d51b043ceb03ba09c934cf0451a44ed5399c4678105ad6671ac28c343e08a98d6da6971dee07bc6955fba8776e565a918ef5dbe19be6193f585fd9e3fa5b9d2115a5fddfbf897ee95a16211a12941010fc079c824bae1d7b1bb9b8acd374391d9dfb6e2eb4e58f5fa34ea95ba1090cfa8bf8a3d2ad4a5a71707cfc1cd6a8de11654d60b37c53674f64d8739adf7ae10c9dd532b7145c1f6bc81a7b2345ff5596382a64ef560e9523f94b3a1b15299271231a9530f56d425e718a975f57cff528c9c6bb3a86dd48ea4609efb376be40612f934d46b126ee7cbad6c119845a5d8f0bd02a2e4e483fd1d92401f277e2c468606186bfa1558da9a4967db9d694fce8bf4ae3fc0174f0c19d05c24090acc0f49900866eca90b05b9c436cf889570b5d33d23e67f43ed359229acd7cce4be3b02323771d4054089aa27fd303e41e06a603253de9d1a6136e02eb4d81116e4a7923a9463dc809f51bd8678fb8404ae5a3438001bee957e39a99879745e16cf55e19816b79c38bdd46519b04f8bf4c6ec985fc15fbed50cc40724d247628d7effb75de73c664a62060c1e2cad62bea4741e1f37c92410fb27bfe3dc66f1040f9dac3bf2d875f5714d49cf949151996c85e03b04856e0346d3c1cd9a50c983125b0a51f8cf4225861f3d50da5b8f0a819d4fbb8c8742ab5c4361882925de488c91c2c8f46c773b9c63e79d36488c72e4279c5f7bc95967e29015b2b426c36cd06140c430ce842918c7685293137ad9311a8d441ee9ea7f79e30582e62792daa00bb0c102309164e0921ff9f49028e7a64d176f845db73610e545720f4399ec992f6dfef3c49f9e5f9f339fdc0ac9cde5bb20661c68121900bb2a8d0838e9563a6d004677422890a62fb0550a2e1e7c318758c3a4c4074b22f7e8ac093f3f5903c9d25be949faa5b34fa0bbaf49a903edd515ee9dabec67f3f11838c913f7c1c1300b04323fa1680b869a54ac3a854bcc2572313a25b9482999d5d25bf2f52149ad0a254505da9d88ed1b402dffdf84aea755b2ec24f7da64aa1a2413b8cab7ae23c018545cd4eca3396517bc79423227307921e8623cebf2e3b5f83c3c34206b2674de67447fc1331bd9949a3167ca559b8b2a4118843affb19a1ed07937de861db72a57cb7282b4ee3b5ce66ac961c4d117a9f4ed9258fea59ceccadc29108016a45272ba3cb76982a3c3166bbb26d6b4f3a9362efa04f6d9f2f05fe72357752e680496e9da97ec50172bc271252b86dead575a24d8480672ea62b2e813dd31bac658b8df6de2f79bbed94876486b8ebc0051f213c15214372d61dade93b0738f15a502856419b202fcbd51af4d52642e6973a1cb061c03c83ba4dbd080187e05ceae4f238589a5cfadb6ead6ce239c90d9947810b8151469c247c213fb1929fc31ae0b5a445e7492f47fb5e4649a4ec2a8e5f62c54aa9560c29cc835aa5807ccbd6296ca2fd25a1ccdeb94f3394ff2751820a8426c256bba1d1dac35f55647d4793df239f6835856d6b4dc2e01fdd3bb051c7504ce8c476f4666e369cd84bdd10e08657fb105f32e0f44f3dd6f5004b1
ccfe41b4948163c659cebc8511320212ca49db072a475d8d98e9b3f3c0076b4704d9d10de6f1944aad40b8a255a0e10209a76964a03e6882bfd71d9d07fc4feea328982ec9c1591dbbb5c8c70e81f20b364288b56d62fe0e1ea0ba5f29ea54f02c7814b0a5b6a940de2630aa2f90bef860b59f8f5e14f94297a5abf3d19572b1263ee87bb39f6a41bb406c950ef57af18aeaaeb231cbfc8db086c90f6bb698d1b9e042724c7290b40202ce1964d402548491b40e05ba1449532cdf6cfe901d131edd6eb1b9e4eb826763f86428b7e26dfd06ce9d6e65a87d5e283acc7392f0b8eb24140b32510c0cf58d970be241ae19536e9de301231a7e72e2e3e08cd17ff07e4cd85c1e6ff0ce83e6e7fb98cce96cc06dd34c63a2043a282420afcf0418875067a245fd75acc30402727e9dc2fa40f1f4a850a4915ef11e0f844dca7a3679cb2706ad06348c27b0dcaa286d5d70503ed9a2ea8f987448de531d8fba3fd5ea7b8aafde4b5995835b7b421fa96cdd70dfb778f9f60c72a4ed5a085a6945845f7f7cb3f464b6cafd86979c79178f0a1f5447a1a58463ae096e06a1406622600b86a1517099b2871d0294f4ba396a5328ed14c7a3a289d72d85e9609936840771fb98b2f132ee81885de24ed3c27972a936965065fe1f7fb740dbc61b27c6add1b928dbf668463dcb32a45ffce6bb3545a8226c0b78da2bde476e79352d9ad38a0c57ebaaae6dbfcd445f02814f29d272e44b0ce3f98c6d28e55178b9f1b8f273a0a2c75278dde43077c87ea1ccae27b55ec4d765b7c227bf5cc720cd0693c4d2eb9a4d7b881df25102f6b281f09c19c231b07497bcbc3d90f0ee943a97a254ed6ec06dc7c5526ac740e2344b07b5af154bfc19952ea6b5813e8433f37a0bfb2a78c2cbc9b97e0e21a07ff254f09a0a145109e89f2aa15e6ce318ebc7cb861d6ccdff559712d33f73cbba06a6eee298d963e5d3c7ee88401df7afe80e2ee04a39432ab042fd3938c53d9cd1b06c112257771e04380857185d98a8a0f28b2fd04af26cca1939141ed7eca06673153884ba907f627f7c3bdfded82ab07de9b616410717874b8a234aabf53db2d0f7c7a55d6d9538a89b20c52e6c55d37a3701b098327e1c4e16950f51784d28fc01a5c32a7ff177221e0cdf4e7fbaec8ebf869a7a4df52fc295675e5c742db30aba8d37b0bfa3253cbc8ee7c730a11d2f7f4cc1fc85e2ed81173f52ef0eb11e30185479eba66e94bd8e0a9c348a0946c82ceb31d18dbf1910c90048ebe5ee793e849c1ae620cafd80c4482fd5034f038320389b93b87b27e3a89b26b7dbc39d884f2c7b4d90d0693c9d7a2fd68330d5d05cbd2c6e203589da7344c80c24d4009b4e0d538ecb15288f25ad9bbf533f418e479c598a06e3c57cf43ba6b3da8e28a0ee434692f167b91078220364ce46fb91a297812499d0f1b7d6069596dbe74ca10e9afb28883ff86f3c68045a3d2fa35930d2aa4a072a7a5c4566b9ece8b80e53c0974c69fbc188273d35a781af4fa31b067cd55f00ad9a8d3d18268e7ea130f70841300006a8de636ef18aa115563b3bc960854888dc3e7440360e8d404d12b2f7de5bca2da54c29059509e109850a73ce6f4ea7d8743aafcba3f7b55f92b368ccddddcd41acb8bb9f4018a31cd20142c2133541b7ba2d5dbd94524ad95d5787ba3a6ddb789359f66245625ad434d81a6b3b5a5e7a9f05e8d2ac94527a9f908fded1afa7fdc0dccde26ed720076485d5c7a5815e2d428ddc26e8b05bb502fd851c8136eec6f6b88629ec849d2c10b6196c6368758ebadd2e54d14eda1199b3a04dc6d21690f97a0eb4d1bc223dc3ceb44812f55ba4214cd7665b3cb1ed1ae8c3a887643e9642f58ab9758dd947d77c3dddf42241a8dfb69e8f5ef10d2bab41fdb69899f2a257945f0e73605ca25e434b7d2cc2dd1d00aaa8cc8c843f3e9c736592c1fc4ba13ed2b845992ea31e3dcb280540155573ea9a96a5daaf23ab6f1879c5a34a1a2ef8de1ac62007d473e6d8b209d0e51154e48d4c044934c4a552208332679327a5a42253b035f205bb6a9752a8b4cb17f847dbdba5c1fc723944e5105311032a68b4613f143e145e16c0c5bf9917ff3a14fded2bead97b71693975a68edfddbf94a9dbcbb50ed2eee3f197cfee080a9021139d3aa2e0e4fc92b7675eef3b759b631d7c6d6b005045ddfce418a7c3d4db5e716319d6dae2649a89ab99899fd3b293633898b767d44dbeb9464fde42cf9f3c53b74bb375fc1a3bd6b133b782f4095da2e25fdd722df40839c00f55d278e85107f4b513fc3222a02af76727999312d267338878131943c5612966c15488a0d020c926250bcd0347cfd288da72a80636f3a02d4fa95b54821fedb4321a6f3e36b0f741a74e170b1f1bbe3d4abf4637029d5e3efda30f9d7a4e7f8cf351ef6ba78e2dc7427ff751c7983d7a9f8cec291cf4918962627cf48a81641ae31cf63eea31397c313e2f08b1d0cec7c30e855eade
02ac637e97da998975eb4c98133ed32e88bf012458f8b40207fa054aa0201f5aa39242aaa57fe31506b3c7f106484509011428b80da3e1d844211d1c140affa884b0fc2d5c31b257026c9a57157a443e95039a504c6e89cf7b136ef5b69dee7917a5fdd8a744d154b83834a051e694fedc28d2bf75a7dc6fa2ccb7c986867da976eb0ac929052476ec494abc041fd76a98a6f57c2bbb44bbbc4de11a7f6e99a86de5783180155f3f8104595bcb6e4355f3ec1e49af74de75ce378a81f3f0ca8a72e35cdc4ccccccccec5b26514fbb6f089dce2764cef5c742360c24c258c16b9e4a087ccde16b1e0ca837c2de874202b3287de61d9147ff65a0a59e96467a064cdca66519a04b233d03189b8039e6d56d20f0a7346215c483081ecc2d565c30644451ea77115d448bc3ca8846b896c888629ebd1d434b2354bf1192867e8424d65f4419d145b46368157455e1e3f24dd3bc2c03423da723cfe5a79863584e7f2d2d2e425418aea13722fd72da5ae78ea85f961d2d4ecc6bd2fbaea1023c5751c37cf63a2515d44f45a4f42aa2efece8ca64e0ff3aa5df7c0cc92c9af3f650dd086480b608a8a88f2a1660962ccf0eb3645913a3b6a8392d3ee172e82a38f4163087bed3352af654ec9fed9f9a2cce5af1d08724c5f40a42cf3a19acf36e2f6614fe4aad4dceceaca00de770b378e80c58c8aad1e2e1cc4e6f635fce01ec97abd0d8520d22bd9c94f409d9841af8849eef69458f31ca295afca32e343abd16070975c6fc257394e63a3687e17c071293a74ecea37a0f6c693aca23d55d3b3a09d3451da8ae5fd8c5fbaac9b46d5987edd2b4ac91b802ea63f66b648393ffae6fbf68fb3c793b5d1c24d14dde8e691f9df3f68cfbe89ab76f99b773a78f8e79bba97e3c2d0e92be4ea6ae3bb834976f5dece4d25c9e75d7d25c8e7573692ea7ddecb0a5e978b084e364e4a36b2d6f5a56cfa985d97499de32fd34ddc46dd791977e71bd3cf3b67adb19890e8b46b0f43b40df2318dae9a1a0269799a329011f0bf071e663f7d1b58c7e73f4e47d372cd66af33eff8fa6326f8174e0af5eb592bfe67e34a985690ee874ba73d26ee7941df57637095c6683f570a76b7bed42deb973e77a09df11278a7c745affa2bede77f9755dbbeb73d7e591d4eff60f6f1db7c8d22133cb206edc8dbbbb31c6dd5d2c9a94de56b5b8ad8792d225f4f95dbebe55548c512a4549e39c54db328de368e550105a81dddddd500ced54b99977bbb7b7b77765bc68dc82ea2e2fa51bad2f66ec0b10288e7d0342e6b4f2515bb6bb216cd8b0e1f2b6084163df165ec2e8ee6e9d8cc8ca3273d00b121948dd5d77f7a58b3a467d11a376d3f66294d73527a5bb59c6190beab760a0d0ab4d12bda250d0c2a89a0ef3b7c3c6b6850c6dfb250ce855e6f6beb8e9c0f5e8d2ac9457bc68f4395fcca85fa574a35252503efa8b192f40c41720e24b187537468e93b2fa4b8c326ab5b6b448cdc565777777a38c715d6cd01a42d8d11b220aa528ae781a382019e1a00347d7100e58ae21765ede664005d00ca8f879c30bcd895c065e10beab001983dace4150b823f478e4a0a611638c8c86b67bc2fa079bb5dd9aeeee2b76941e73777777770ea28c32464ae794f3dae8d7a5236394310ab5317ffdfa8d8b93f9c24dfa9a9c21e79b6bbe3142d6893242de3a2debe2d248c7a4cf4bb24061147679397548bd9dccbe3f9927b317a54f87d2f901127a101418e6d3a337e6ec52ceee9b1e1de3845c8e39e558988e791ff4a94323a4107a716e47bf3821d0a7f4f5282f2923ec2242086137e4dddddd85e1e6c80d1030dccc2093c821289d41320c326f04fad6abc84764be671075fafac76d9fb907cce0c120f317e526811629dc1daaf4f6f560c8883e7a46d88db4f78138a41481f41f4dc5b8cc85bd11bea60bbce6456937e78451c6abe70b0b322d4a2937e811426edb2f46d4f572fec7e4a557972663cdd3b26db9e8d1167621e1e0163d23f2ba28a53e2f6a425d9d0afae5d467a7824d6e9a0557d1af8ececbb5d3363bea57379bd4d6e5266e85f9db81cb2fc738caf1f0e46e96059557e896d8cf15f1b2048d31c698c4964412b569121263da135e51ca085b92e8c846658f0f45651a23ec96665d46e9314614eae506c218a316638cd1bb8991b27e5f96a8ecbc506329e5b60c31048505d4a99af65112c3005ee86c86ec2166434bfb0071930978715e50463aaff851480a452129242485f8a5509cc1862345268d291e1463c61c4ba5584a8a3d42c85ebce609423aaf285733dea578d416d60debf112df7cc4b711f38a12c20cb50f642ab37da00d1fb5cf6c9ff6396a9fa3d57fedb3c2f64c8350d924741ad2a182e9316e281d308c619c691bc76d5a862901b323d26b4a18e3cf4e024390925a08721dfd84512efa83158545bd0c14591a21c018230c17032771524aa25e74ec987e495f0afcf4d8ed743859980edd65f260c61c891ebc38
fbe202f39a2a6652d6cc52c7e3061b01c509318242bdb8c48c002516b123e8bc608d2dd1c49d606efa8655fbc0bbacc5e9bfdcb4b438367ff90db4b92e47e9a0725ffed1c75ffec1c8d8c4f0cc05f3b2f91217df3c55a6c12dd21b5e831b9ddf9c8e6a07c6c521a7f992e89b6b1e0f8cbb784ba0c338d6d1a77c42e25499dfc0992bf3764e564e60b02ed79a9b5dbde1b5b9c5aec96e885dd34de834cdae6f782deb668b6f1e8116df3c158d407515f5542e6c0ee33c9bc3784ba6505f728231398cf7b466275f083f75fedd71f1acbb3e739934dbd91c02dbda3c2653a83389bef90bac451460b4603c26d0b9f930ce131dc653650ee3dbe251b9f0e230fec28bc3784ca6f0c0b88b33d9e12ffe828ba31ce53181c969984e85440616faa7efa01c0990fdcc21b02d94c3b86a6787ab20b0ad1daa17601c4375d9e5f3726ce7e705a653cd1d9d6a7a2a1776b80e7f01c6511e1394c3b80e8fc9141e94c338131dbec3b7e5f282d201b34347b72dd4b65c2e1ee89ccf6efed5ddc099cbb7ee32facb4fb5db56edb675dad6723a3b2c3873c9c4c4a85c88be394ff4cd534ddf160f74d5f45a517282b06e5cd8d64f74e85bcfd55c1747bf478bd34b24f8810fab8916eb24e9157f95ae813128e9156f4dd9636057c2be036dea51d5504bd4fe26dc92fe2382145ad29b706bba4b6f494f6bc9141ee9cfd3445112aa9d4b8b28c26849cf691e257086bd9d18e00cfbb6a41b7588b7d32b96d212a6cb3f171a3a9ce1835da7b45e27d016dac0364a2208a4d6120ccb154b2d1af68a2d94c25832cf68d1804aa2053d192c2925b516690b368e5aeb45174a492d3ec11167486975c76aa2a55db30c20908c5addc58ca04543334308571c83da5b7d8b8914956a126444f5a32b944cd7ecb624e1e3db9be188a153a27c40e5c0907a239807e95573a742af52bd62e620bd6a15bc6bfae5952dd5c9a64f4fc9fab7c7bcbb3bb69132842a16e20743ea8d40eff9e67460841e23ed764a991522dca48491f6114d828162dc3ceb3e99cf7c8b98f765fc51bbe2859580cd0957bfa19e6e3615ce3403615e26f349e0e796f0d49330482263c851fd52b173b627375bcd69229c1d21c9e8537a75ce3863242325026eb20d91407b44f6a8f6201503b46d62a060163738f9afe1a0f29bc50d3f5fc3f0677b72c339275346f9a2cc57f521b0c5d39ceb25443e3b7b3d4465c835e7c65870b79f3a9e1db2abcbdb591a4ee2a8c74730f5b1c322846fd7bacda8888d28ad35957238d3303580309008438ccade4bbd6a8a3aa27edc5524dc2b5d237b09361155a8857aa544afa4f4aa877a25843d6108b71b8b23e3b75fb46fffb1510b0de1135c5813a0f717483c3b6457d0bd9da52922c4474645b0f26614639c744ecc352c46dee815310ca35d08cde964db7cf4aa97fc75fde8194ec7874c0aef23c6f883d52b1f2b48c107519cd717bd8ffb8bbd75a99e81ee655ec552bfe8b433d28cc45fad12f2de514058c627f3d13766e620583aec2c7d73e61a139f7220947953c74ab2d65aafeb07f5be94fff5e3ba2e99182a4140e3a5a712f2d25349ef79a99551d97fdc503f5e03e8e335dac97f47560ed41029a3c76ef46a937e8d5e7556a5b27fbc0634f14482a8838e75289a699bcc627aa639cd4b71513dcb328fde7133ac4b3d959b4b1923932aa26e267c3bb34610e30fcee42621aacae43d6ffae2c35dedaa4899ff44dae2452f7ad1ebe5a1537a5dfc7d4defe9a984fce5a92eef1922bd8a5a1b09f40cb4f9f1f9d88d79e8a986d0b7257af3e788b56dd897e4e105f93da7a24e7f6cab94fa7694752938d3ec3915754220562ae517a6c1940fa918de684c7d968a99988dd69827b345452df5e945f6a1b5eb9bced333272afb919b87e1e68db0b731a8476e7e52df1873d9c170f36dde77f39b47cf6838ff7ebcf40fa5e3e5082f0350519b6b8eea6af7ec2d3a9ed94dec9c9f50cfdeb779ccb8ee13c0b7cf2c59b204a18bcf30dcbcf48c44cf63491f94bee96fec11a90dde1aa9c49bc713d42f7ef3c7d3add02fbd763f7ab5791ff315a364864a6db06b3cb4340c39f34f2ed5aee9ae91dc955da1026d4c5e8694212951bfcc4809970020ce35de60f490cce762d835860968d22b3658bd6218a19725f52a43fa2ad2f7e3ab752e19d323d2cccc9848d329b439f99c91fec68f1df68a5f2e2d19d7c9fb92f4925ca2a799e5a10c29c6e8d1bf2523caa1bf7ce4d21615fa279764aa16952f2f4936f9e8655a286d7d2a431e6d2793e459c9cf181faa5f9f6a1e493df59c9e9e10f9975f9e9298e76efb56abd5eac322dd300c764b9344212964c992257ef42e4fa2ea143598335e73d25d41df55fcebcae215af897a8d7aec1acda95316dd15e6b16b78d384c0a78cf9621da9ec4d8d1b0ad5c57c4b68c850a8d4d64d61bfb0eba25714aaafc52ccba8cf0c939c0e866d5ea4d35b8f368855767882b49428c01f2c593206245
06b209cf3f2f8a97ea010e43fe681a8efc14a95e71a21f4f58ea0fef23ed60154c9a86e66886f845180324208218c3ea1cf107f3d23d12364c9789fd47aa19c5764a5aa510f271ac4b841a740953fc6b8462ee67428c591d18b73681af5ac63af99f7928fbe1ce567e656d8b86e0ac7cdbbb408b5e95d9f76738b10d5ab36a3b26b0de55082ea1a2b12e899de7ea870ba9c9e0a37554702ab14434da66449d750fe41616b6eca6bf3723c15e854d2ab66d46bddb7607ca914a5b5a652eeab158b75f39c7a0cc6d033ed1816b118552ca858e0372257d009c5d8a8283ceb47ccca870ce6c940381d807994af264ea0675cd4de5873a2ed07ea160981e5657e8b8488c15fdd8dc1cd9b17581ab841e7cde31fad07157a0ea59957597a7797cfee724ec249dc33c88f9e11f68e60689a8af39ee7345fdf9cd6b2932637ae53b1f0999c33bd66720ec8fed6795cb7c2f7bc389dabc8044acfe3b728079fd7b66deba0675d7f732b4ccec89176cea5731e0cedd2374ee7e280c0cf9ce38078b4579ec990243ed1f373163a0cfd669af7657e49cf48e69ccee5996f33ebcf3a8df7bf9ea71867f91a9e3a53cf16a79fa3e7346b1e12ce92f9d2ad468eaf38500fafccafcbb3c5d97038935fce2dcec92f372d4ef5cb4f8bd3e297d7c571f1cb5b16e7c52f77e113a85fde7704f597ef0f95ffba3c0a641b2e8d34c9a5917eba96467a9d4b23bdc5e5c56b599aae9e4cdcd2403775dbd240e7baadcb9606bad6617469a0635d74611320e4cd856110346d3a952e670edcd983f4ab6bc9484e2f7d937432c7a1758fcf4befa9e2a3cb90b39c0e2f8d11197d011f777721094ec2b7939042347dbb0cd5f4eda612be87b37c771f0d35b0a0d30b3d22e12c6f64bd1d762a210bf87619542c2ce0db17f0edc52cafc595a6e903d5e5f24299a0b253ff28cc4a28a184ac3e12565ac11f09273d75e613ba066da967ae719734c9787280f4cc618779d6c58c39ee0130852f85a71e92149eba56396b184d1be89fe19a9727e43ae3729590cb4f0eb83e861f67799ae5b1e9394d9d76a7bfbcd3470fc9e9598a2a4144519e100a6389a0a28d92a3f4d3afab7d2ad5e8462edf5ec1cb67425db3454247cf1ad0b30cf3d6fab4a167e8444a3f79feba817876ee209e7b8c67ffe44f15cffe4d379e3fba045d627184ba2696d1c41432557efcc74156aeac6103b6f21fcbf1d5ab634b990f1d427af6e946d7cca5b93497e612f5a14d28d05ca2411cc4434c85a5c82a58449a67437f519144462f7fbae693506493264d18e8b9891c1ae388cd376f07ceb003a0e889c01976005051b7b5575adbcafcdba41736cf3cd54ee69a6f6bb99dcd53b552ab8bfa35d2538142744d10b4a1545a4aaa7a8a0e394d0a8236db923244a58d9ee9109c61292591340a2286244a402c0e0c4dd4443e3e0f3d959225bbf4c3e24087e1c85ef9a8e4e3f3d153b1cfcf95ef6eafbccf93264e9a003d69275da36228bd12a257ec74e8999d1251233a44a96844ee84b6bb1cfbfa6fd0a6a70624d2b8018d10c051030c5a2e52b28c20c907263892a2b55e531a0b989d398676254da46bd66b15d495b23808f8aeb0026be36383a07c51c3371671d420ec0e18b026ac37b7cd97c2dbb017433e3e3e352491cd4429e0e2bfed77290544df0408cafa68551966f8f8a4189410292281ae593fc00957744b2e0e907d19524b13bd656958c573794f6b7a4f8b1be8db2eeab749db4dea45bf3ad5bca2f47c606340686d25e9d7615bb9f2eb3149aef1bb8dd4ab7528e325e745e5bce8c46836319a615ab67191e360338769d9a6719be9144f27d87cd2b8cdc49d4c753b29514f4afc7a4bed2980f0b78f1cfdd33e72b4936eb27454058ca32a56908c384808160a6221580996c250d67b0d3794760d39961a1241147e3d6e95df538b93161797971729bf8e52e29a93520ccb58a3589669dab671dac67126d3e9545b624b0b6c6e319d6a6d697171796971797941a174e880d91177ec80cd3b503a606076ec48a578700f1e3d625423c4903042091edd61b36f5b772a0e08bf6aeb2e860312b375370207849f84480209b0998411a80e0e0809f1934ac0d01ddcf60af553dbdb6a775feda152b18af66a535b775fd6c42fb395a11e3d7ac41e3d60f7e01eecddf0a35b77ddcd087b4ede1e102eecf6d8434010405cd48da252f92f75f45c8293672fc149d7f489e884e2a2ae2fd77b4a5a01fe1319a141a50a8e4e49272ca7a3135209403188b9f2d29606e0d084df7766590b592bc4b3f755d623e5208e2fb480e38d211c5a7b2444124b6fd8d002164091b31e649d7503add8d51184daae230815fe7aa45a368331fef206400deae955de1c90d363de1dbfe63ebaa10a7d69240152af16bac0738274f77e2c8fd1bbcca8febd82d0dddb9c20df5694029fdfdaa81feb210bcb43ffc18a9ed3399d033d081945c851841b5e54768d3126aaffe7acf60a6d36d77cab
(large hex-encoded binary blob omitted; content is not human-readable)
bdd73df7d1c34084fd6caf512077cddfb84e1a4d91321943f27c66a366e034e003a3afca92e357a7a408e4f5455704a301b294b0edbe7ee46ac99b0b214922c9341724f9282d1229c618a7f86b4e3c021b38c66fe54931296755c6643905cb8d95a01c0639f28e533ae789ce9936c8ddd1dbe95e48fbcaed57ed6f0622ea09ae5af993ca295132d414d4942c93341fef2e86eb03bb3f3feefcd479234f992735ca3747cf280be99d22beb217ecb9160c8106b8be0585d8a6ebb8cb5ffb847c3d5178ea5bb0c7871654da8055b0e52e7f7f1a0697b0a3433f6ce553161cce13b69dce913b9dc3224176676fb06711f75a241998702e15489c8c220ac50c4d06d30ca289fca1530691bbe27d428720328edc152f95a66d8bff80b71c6736c486849a6ee4385312832b638c3fbda3de63541ba38e4dbf8488a05dfd57856d5652043ec8e8ac74ce590be3196737868a120e8dd04616267bf714822907a8596a2ea8907d4686c32eb77f33c7d2e9b35bd64e195a4add4970d5961dcc6959fd4110abbaeb564f240ed033677a7bcd9daa73fa43f7cefeb8a50fd9a93015d9f1f4963ee4ff0643bd5dcb0478d9c113baecd37e051dd32d9ff45e2bdf3b2a439f6e7977e7d3d8ada965ce5d7af4767b9a609419c3c0671759ad40950a34cf4ec7adb5f54178211d5fdb6b6058ffaa2c88bbb581d57de6facd6e65af95a63033180699a04f7e0a44f4973fc12b25d55cd896c0e1f49ed9b1a2bbfb649fec5e486bdf3e756bc1209186383f8e3f8f1a5345e070660f326fe794fa14eec3f8f4b948e7830d26e0fe002f8165f7f4de6212a1e141274b19ae5654a630f3093a59ce2f73ced9d57afaee546bd7ddaebb9752c9f2de48f7feddfb7bdfdbf6f73ef77575dbbe13baac7dfdc229c4ad34ead6fc227b4918cd1aaa9595b65a7bdf6a1a1806b1945efaaad58db4b652ef4236cd974cf3a5987973af04c355103894319510388c5d7cc01c57c10f041db0c4b982d1b6bac3aa9b6f7e1f750e9646eadb4842fd538588e44f9064e2a4736a0fd55bfb3b70420f92cc454460c21115c0c672d4ade9b15a4bb7e64ba358965f5762f0d21118a60ec4973c72d7944ccfc5949c801c675cc0903919a45df3b5acc26be5de8bf16ad5c2619a63da359fc92a08fb8f875fed3fe592c33e3af2570bb56b4aa676cd47c2396ac9be8b5638b875af0c1f39c8347f1b839fe4f95c942c9116304eb0207549a25fccf289146945fbac94750e05af7ce256f00a71229c86cd328360039e6f739c6d3126c725987efc9bae73e8af7228edd93d26b6e2085bb4115b31876efd6f68d3c0d58f37a1cbba446f080d1b58e64e9ae1b05beaa456ead6120ff832c947dd925878c0da735ae0b6d2044b314c5a4b73e968c660f98aaef8da1c7cf905e6933cbf8b662b758bfb429b375013c324cbb7db0cf3c09d1367a6a01cfae4500569c66099dba91ce3b293b2fccef13cc7748e3718a5ce11e3ed323b869b344dd8feed1ced6bd7c7aaae4fdddca929a5947ea55f51f3f485329f1ef5c5d3e7bd4f768ffbc9759cf6515a2badd7fa0ffc83d6203fe6a4334e6f269cff6c8125c719173139f4e10df168d8c0b13555373694d28cc111a7d2a871ddbd65a5f324eb50444af28c6446b249063735dd26dc249beec5d84129b301fdb9ae7b0095c98e64324a69cc1f6394c980443221b2bbbdef5abdee1ef2ba3b7753f9343360d9b128de26e5774bca93ec38aea35f97ba4565d352a62f9b8086b8da3f4c3a2776500be55aa5103926fc7c7a8fab6078023b06caa66ed19843d9c4819e9782cc2f9bfa2561f43bd6395126f94bd55f74d1775124fa15fcd883fed605ee7a623f40f5e59074e2757fbfee455f722b027f91ebaedf5f334fef87f865becb33bfd8926e5ff0fdc18fe461188ce2795fbf9f3b7943daf3bcf6da7b8fe36aed34289c56eb7b5feffdae7b8e6beefae4ab7dc76d9ee7717fc22a7baf79433c8ed3f276fff4b6d6e7b83f79f57d729ddab7f69cf6427cd34069a4b2e9167d82fb6d6d22ae73a051b93a63a573822d24949b49f3e8eef7aeaa0631f6b7ffb1fce7576fefb22af74f9b150f1e4126d03a2e87ba78d32d592bf8607cacfaf3bb3e954f6469254b5928ade4ea848b37343fc0da6fde12ac5ad9cc493bea0db98ffff4797af8e77bf8bd3f7da14feed41bb9a9e7de27d78a535f44e1cf275394f7d53b7de7442f86b2be7d6e5ebf17bc55e6374de6bb9d80a7b5de1ba4ce695f665b55ab9b396ba532fb43b412a4821084bb975f73b5a9b97e77afb5d65afbfe2779faa2bc2fefbda95e03984825c71917a2cc4528c799164f72d8000e05812894e30fe5f8ad2502692d5d151cae261081c81daf82fd069c3f343be692074a298d5cf522fe13c5dfed3f9dbc3f61954f2fa44f20abfbdbf350f7bae76d9d4fee2aea51ef7da1fcc9a82d766b935df5429facf5e9747a08c47c3ad947fd552177bf7dde873a711fbdce277
7df095de6408d08e9a33ec99528d76ed9c6f50a8e3e8d1a9d032399910c95db27fb27b3ed6e69feeae91cacf67602b4a75eec56c4376afe3e40b9fe09abec43dc863e41924bdadcdd883fce1cf771868314d9276b5fa802e79c17e9630a71fa49a3e6127be026a4298248ca710a20b4e4104f0184159a1fe0ae5bf16966c091557572224eb7242cbaab95e3fdee536a6d901fddf2f0477e996d280585742bbeccc3080905f9d774cb80bb6590ec30eef65f4929696b15039e1050ad50905987873636b1876f3360bc3ae2e20dfd39e79cd3dd3dca562b9f784e5be79c3e2794ffd12d2a4abcd0238f024950e5fa73ffed549d96e3ec0649aa0ce2141d9da41d9c1c4185e905313c2a5696d820e689142a62a8a2c5802584f9853ba1cd90a429a00c1e82c869395b633684506c0621b69831a071c307da4a6605e4d811694bd29630b95fc81633726f5923770c95e36c8b0df214395c216d32c0f3ca2d4db856b97901145894701a7035422e11448b417434d4658bd00d5b9224d9e242165cd71012d34be0bbd448d87ee14558260b79650a1fa86cc15b8e332d4ccc60c4137c739c6911c297b0cd71a68548cb100fba2a3cd0a071e1aa2809ba69146d5560a8c112694d2fa8190da5fba509cd3e28b2516899410d24254d445118b1a4e9280a1d6e4843298a201a9a9adc156f164c9ffa81bcc19226404b928851d2d44ae02ec799922559059ec971a6640b07434915320bacca71a68409a15ab57e2c331c7aa030703d208a331ca79a69194f356e547777f7c9eb9adb5abbb65bba8344dc1591c0ae74ba8eb74bd9dddd957ef7ae56ddada239b58ff700d2e054a0194fbae3aeee1fdd8a3de21b99fe2c989f7256047772cb5df169f247bfcccd29dba7fbd670ddbd489e3152804a778c51eeee3fa7d35494f7a4446b91ecd5bc21f735dfdc5e6eab74725bd1b5954eeb486cdcb66979c2715c47afad742261af4bb7f75acff386dcf7fce4f6725ba5333660f9fe37e6cc77f78eabf958cfcd2f94994b6e252eb9959e34141ed6eff8e85d80f53b408ce4aedec1472c58cd5330383aee1a0118317658e2c5121148ba5fc727c4e6234225c7e7535c2689349b0aead60b58e6fe8908a5148100c93d1481e0d794a89e4e2889266bd57dfdd5c7aaf111ba9f49bdf7c97c37f3712e6b9669a38d36b28c0c28853e09249b8881437c24899ac8275c3a27c7dfbdefc7484e9a442010889cccfe1ec6c2f22eef7d32267fc2ce8e8f3a070556a1021adc608a22cec0420a176fc40e48a0c430f4c20b5090b9fe6f2f8dfc53a166ceddbd53a1e6eeeffcc28ec918972624a6232559928c8bec8f5a7eef6ae591822ad0cc3d0762a46e612cddb22fb305f15112fe23aa24440f5c68a103106cc480c9077230424c521959d8f0037dc52c42526458c4e4fea3dcf289a02ca7cc600c9d7e846585ce61ed482269e42f16b070a1c5c8852f4b66c09a044c3451eac1884a1a6360fd92a85f16f5911350967fe32b22a384e633c15dfdaaef88bbfaef0ce32499aed1a2ca95a55c656829cb2a57c064f93c9d233914ce0f9c12381cf784702962c24e09a931b0bffcfb4f6660566445f0dd2591524b388c485149cadf915f82fc6b2532cd2eb813b0fc9bb0933e1074b072e0a09949a13c4e7b0ab2787aecd39bf78105e07d60adf781b9de0706fe0e9b9ad50d950c3e755febfbc042781f1800de072680f8ae0f73815bdf0be05be10007e003c0d7ea826f3e219f8e120e75c00ffc9ac66e0c62f2ff23e22ef9207c3806bce32bc15df2757c45bce0307231c15df259df11ad081c46a59a1c35396c929ac47e2be0f864cc6a81c3d87483e6064db44a7a28b13c2471aa19d54c2a87ce288794cc2781529f14fa02e34f0ea13e49940a83433974f24e9eccdd7786c403528b99dd27dddd45ee320a0387b2284f71977ced93b22e389453f291bb90c4a02fc74811fae598e8724ccb5a26bd481ff2e51839a65f3984dab3fc2eea9c381b6a6aa3ae9934513f692859ca9fde2d13d35ea418d9d4561c029e5d7a91227843e6876005c6b9fba75b32d62df963c62835007c660b2089949528168bcd506369e47e192a148bc56ab82e20c9fd291acb626fc8fd7812c5623126764bf743ee474da1582c36f4e53291fb4f338ced8adcef31f519dc97dcdf25c562b11c7ea0b91026f7739ec4fee4fe8d29168bbd8086b943eed79262b1180d53890d22f7df1c3c879494dc6f5b168bc56c2062b322f757a3582ca68314192fb99fd630bb2fd090c54b92fb5da80bed86dcdf3dc462605040e45e4d67f2ea60fd2a18ace4fe3875ecc0c52a937766b3c46e4bdf2f710b437267b84d32698856894ce5b0111165a4c83bf3b244ad8b8ea81c7e98e3bf14215230bc31c67facc526f688f2afcdf6d176ad937695b405d9fda65f0f68574f1e39d18a033a506587a70407cc18bfd894c0aa04f40040d3c4978ebbfa9d3582ccb43d4aa97339faf997
e0ab15c67ebf3cab4dbf74dad5df11482b81439d213e527018b988a08d223e1148ecbaf564527ffa91f6cc255a01b0203aa3082c64e850440849e80006144fc04051c2891f4e4e1cf1f2050d6aa71c676300592e20dc1d463d80e2878a0e7ab0d48415ab26be681954435021a58b51184bf800021e9811450717b25061a589254e513461652e19c0892fc2ec00040d5a5c11c412a429bc3062430a2c804e4ee0e0cd3057dec8d51849722320c7d99296252c3a98ff4d2743c0ba9b495a7a2262993b664fa0738680cddf9982c54341b69161fe4558ad957e484b4295ba4b11b606324129d22745980813d53c8d3a87be3090101b1d0022ea9c3076c14f3a678a1126499b4074cd97ef58667cefad3a92c21cc55b79740be7fa8bc274c4c248a1b686280cd1a45d80c20cd94a29a5ac9dfe067b24cc07e632601d77cd6f4ce4aef93992c41043840329a5f4578338ee9a53a85b524a217f455f252159973dc9914ca29452b69c1fb3a4418eb33064799a20869325ade5534a69900c107e0026ead6fc1b62fc60224a29c5449d23411edd9aafca121990e77338a85b748787c6071c62a0880503758ecc4268624b7169879455b66aad2ad904029a8541b42357aa12ea9cb035d439a14e50e74c2e661840532987d8499e524aa9e32f2c34a403e42e2ee22b2e31edc08399184b106981cd5f21be3e276860a2862a6e40220b6cb6fc8581dc35315004ead6a45f00e5f947e6af30bfc87c13a69321600ec60f725004ebff9f6f8443e5afc0010f4db0306a52031734f11c7097e3acc90f5d609a1c674d689001873165e00006130e5b92582a5600b9190ab85e8e33a5286550cd75e8eab6b44eeb9ce08aceeab62fddb42d65c34a4ab75ddda3a494d289ddabbbbbbb76a3c63581a4f9681c9ce8eaef6d898f93874add7cf2646cf6c0f7331ef85e4573c3bdce93c009527876dcaa8b32c7473c800a88ea553450f99043eb3c12b419504aa99d77be9d749290ba36e7f83cf0554ab5d40952b405544a69ad945a9e68b4895bc5578caf5519f19179f39cf1089e965659e4a7e77804cfd933716eb6b8d1dc6d92145c93e34c69490e71945e88616cc615acf222b1c027a41c7620731db02ac7d90ea074163bc8216807405f30ce71064653ad5aef7067bc682284655024e3451221541145a92f6730391911c35f7420e41519a1be50914337c4d0e90b1433705088bc2f4430d860454add971876d090b8a43270b830238cb626164a939654438d4274ad584289e0629b60328928aa51825c46936020d430284733094913394493bbe2ed810d4570c15f8e331ebc60e100658453a91397084401d91f472076e5fa7264dcf98043140a65248ff0aa757348e5dcc0065e0bdd58315201c028fa428dac52d9804acaa76ac7173000d346ee951a60bc640f83c34e15de7811860b1d4891fdf14a075b64ff07b3446b55461a62289181c30dc428e2882f4640d9819229603ebfc8028b078517ad55b155e460a98772f0c57390050d97485673e0440e5688ba0bf08d1c6739783225074adccb1a4de6bc319838ed92ef65a906ffa1d3c394247097e3cc4b1219f8e638f3c2a58a198880ee9d915b62a643d92afb6b55e1aecb9dcae6739c3969f22ab8ad5bebbe31da18bb76bffc5e7bdd45638c31c6d8dd2d2377ba52384ef64402cf974f811bcbd3de596db7e0f3f3fa71051a9df5230d5b90fdb3fffc6c8e99b360a520922b0129623df0c00a1ebef01a6ac04653161fc03862ee400b1fa0fca65b94c6071c622f42cc6e4c12eb33c41429bc2c01a28b180fd2c8c105a2a0202492c802eb098616655ce97fb1171770504576ef2e9dc39b2e8c00d0c5962e47e00028772afa455760c0d2840887a6ed8a0cb2dc6f99b27d2e4a2277d99fd1c2252645a627ac348cbb466c30a13236ae6c3136ce6043cacc932223590482982e4061b688c25449e0d00ed927554bb764ae606ad72a7bc8666d1855d96090948ec048a456f2173d9a1c11118f1dca2ee42e7f1f60555a929eb36ebe2100642f141d2921dd7a7465cf599b90c061451acaf6ed90bfac91bbec5b3b8528ab96c06155ca76a932758e98ce710974beb2bdd3a8531609347bc5f2d523ad48b345fdba49d6c83e618fac1627b68b95592c16c972b149d9be76bd3659891d0109754e58c1645b9b82ecc7af56b42f02414c478d54b980a9b5a873641bd529b2cea9a06432edd34ffab742b7245889ba655bb6c85df695c06125b25f9bea98d84f2887beb6b56b953448576420aab2a3181556e891310924992c815923ecc848315fa46e59cd0e699f75d2ad01c4aebcb0c4c2e9bdbfdc777f026dcc5df63df0baeb7ef478b8efaad2579bdcb5dd6dd32891934dbb15c90ed5a6aa74bfaf4876c83ab1317f29f1748ebffd123a50c484239d533140611c2887dc65a5e0b03665fbb6a873da4aa94d9de360f7f49b1f17ba72cd5165bc3fbffbf4bbcf23c7d8e382e1093e9647b
4b6b26ab5419244d6beb4d239f18a0b5d4289658a1c76760d3072288dd6782287f2896ca5cc2265fb55daa2ce093ba3ce09f194ce0957b2ce097958289d23a358c31665db440eed13d9aa91436b255bfbf4adb5f6c60b1729edcb29a9ac7e6f6bd3aec4949adaf935f6e8d73ef975e8724e307410cb96445c8905e1000607a41b6dc46e48623ac018304cb83091c520a4ec032b58d228ca5c6c018eb33486720839ce9aa6642e72b1eb2f3c21f74735ace4b09970e7d08de24c0d296e149b649669e4d09fc8328da6dc53e22c0da62cbfa5f4133d36c0611b492fca5b2a024161839fc316a2407c196b293da5bd291814e28528730758febcdd1f4374c99f01e9062e30c832943f4a519894688af2674607386ca336ea9c50aee02ff9237fe44f1bf5142697d9a2f8a13b95ec2f8db8cf39a7bc17e3d5eabf1571a2ce9d321b99df94078ebe047c68100509914a36422bd5361c03bc4f0d32a592e76f465a46f71c81785d8cdc16a3d6ddb6d21867d7ddefdd2d638c31c678d2d030ca71e683a02a729ca121cbaacef1550472bfdfe6505543baa44bf68f7f454953765085ce2958e57085582c96555865994f1984cc6d1f7a39851450906de44bdf46d9b187ffecc29bc31b5a107b9639f4c96e05176fc2ebdefddd2dce5fad77178c8e8185c4978f1042637608224b920d67c0e413892f1f1acc7c40c49822807062062c0a176fa408ac115a8ad01f710ef1f4afbf8d0718e710e3f8aac01749c260e1e209248660f257f1a55a2243121548aa3cb1f29452dade5182904429a54999524a29a554524a9f521aa3048188524ac54cc994524a299594524a29a5de9436a59476777737edbe5204cf947e677be0eed4d61a77773141d9ddddddbd4e9f734e67595b6bdcddcfd092dddddddddfd639e79ceeee3edfdd27f5e9ee2effb670ee0c3784ce7042836406184923cb42da7fa60b8edf48d28c224e8a9c3921559a63b89454c95f449a8bf52bc501714c38220e4a17a61ee4900bca92e3010e39e48698c0e4907b92e28cb2e4a4642967cf07c41e7213b389e99ca61a59f6908ba184707ca5ed46f691e3cf295468934911ea1152dec0e4f8344fa214418a4023b35370d15ff3234e7d3d2e069822580b5c118da48908d64290a02396e88951c05a8833d804a3f80e3024c15af0810694177aa4121c9a602de0b88005170bf891c1fc8bb09e58a5890db016dc098cde784f6334118612cc3fce0a587f32b70bdd5dc23089e506636c85fefd4d411266366203a594d26ea794524a2995d16bc06d2fb8525c28e87334ce19c110273ba54e69a4f1dddd3dc4c1691a2d70fcfb1fcda2893ce73d82b35ea40c3836e29cb37be0e29c73c6ef720eba53992ba24c524a29f5f994524aa3534a29a551504a299d94524ae7a494d24ae99c9a125ccc35fef62d058d506b29a53146ea5e3f7efe9552f95d657b05434a7dcbe1116290f1d91bac75778f11888b99c648bb4569b6c0fe5fe4b09b5f7094f67c016423149cddea984d884052c0c9426c8f93c318c6fbb4d073ff6ea006fe20e901d31e6806767172a8caf353e5991ce2504a3f9c60d441a6a010f1e31ad97f033eb4a0d206ac274f23274430612ad3b71514828516621621580463318b100c952918aaf2478f03318b104c823df157eef217d2b9074e98e88109a3a4ab7d1176bfd82c82861f5ef0010ad80f921e30f94033b0f883a461f781180093af7d400c80dd9e283cf27b4a7097bffc9e283c9a06f644e1b92fc19e96bbfc2f7814d148328510637041c4172c6011167d6ec002050f3c50e182c419301d9e8fc5605db8f833b371ebc7783f5aa08246ea6fd9c8bdb5ce5a6758ebec961fbf7e595dcec8a0c27bcb598472dc72d79aca71d64592eeeefe68b4c0b15b40d4e011711087c4af79d48f8645b20a3437b8bd00d3efe2a7aa5b36998b31e21a4072582b186b5434c0fd7246209dc3d07eec56bd018373041971362a292e04b8739c9171c4c59736c7d8ddb2a59473c6e9b2bbbb6577572a63c72a29a89a5bb8faf503237b3576165985826c235b1a2a7e74cb8be022e3d05081439b223864f5b3a2a728181466b6919ea2604ff00a47730596fff9f4029ab0f9fef160d8740751406521a6bbb4c8820b019e39ca6e90220498e628bba15b01f0978c1f731825206331cf025805334f30468fdf391a60cb7794517094d9a025cb588edf07b091f93296c319653680c932bef424206339fac8ae5f5148bf9a553ffe68b605ed43a203b32a98993e8b044994a983610bd9bfedc7cc8361f7630412d66791b0e50aa280ca1414e27ed422e3e4d027fb0bf1f8c958be9ffd3810dd15a7135c1c798e474818cdb8d8e206424732009add2006493535355f03f6c42b557e88c174e4781688e36924962a5c60345ff33eb01def03fbf7815d90c5c362812c9ed50a64f1ac6c7eb5fa15d8b37a15c8e251fd0dd5b3de0706c27f310afcad767cb80aeb6bd57c1f
a2a2a8a4ec588a72542a2211000000001316002020100c060422a1401646aaa8a70714800e73a4446a54184c83498ee3288a8120638c210018030040c68019231a1a713775e5adb8ee838bd69b5c12223ec7b0f2b1b33b704e1a02018fc1a47eca94c278246d10259f1cc1697da292bfa925383ba3e1b581140246c2de27c729861f721122d01fd2e8fe331d9787137f6dfdd01af0c6a5ffc550aa69fbb2d1061721fd950407906af3e7bd6ea5c1b64c3460c6d6d62276ef5c66b9046df9d706898d718814e0b127ba928fec284b80d513ac362795e8f43cc9c2ce9622106a9cdab4eecac3cb670dd5a62dbe56c300ab45898808beb4796d0dc3658e28d998de2ab1b9fc0e31d0c947a5437567ca79c6f28c42c987925e113e272be4b1e98322d3e44dde6a528f080999f4863225bc435a28f5da3ca0a34b7cd6745050e13e0fc437569a55f4ad0df8ca75d50dca4ca1b9902bfa988772f4224cf0fd46dced5ef905f47a31130d9d6784cf765d3cbd576d4849d0ca30e97aaa06f1015b3b6218cb2b001d7b63e049193cc026c1fec2f2388296786172aa4c4ce2ba08bbe53424828dc40770e040021aad555ee35ef90caf7e5ca3ea1121a973ce2cece9252a103037a092afdad24d99ead92e343b18d674129073e6b2a7af66bb041695cb33ff31fffdd53139c9ad01d69594823759b4f875c8b3d3afd08d33ea86a5dd94370b3631e13845544e904bba35433bcf27b66781457cf06df2cb86492eee8f46c53efe8517729acf0caeb124674156e9de4e33407a456b7dae972d98f97dbb04b3f959e36d32e7c1c7cf34130508b157ba698827194499dc88b3fe3eaeef871ac3e375d68c600bfc5fe56ce0d253adbfbfcbca32c3e45701f39cd81fff913824c6776c2601ef7dc7c3f76fd1c66d2dd45a9a8c56843ecc4204145f219472747cb4a15c3d8e51c6ce15db1b90d08242d289b566d05dec78af279c80b704cb92bdf1f1f9744520f06f96b808c6ded4da04a33f8212271a0286787f105a12d30911fe9cbf01e91479a23da383ad10cfeb99d6bfd27b5098891c8aa4fe7b06c1a14d4ba5ac9be678c7cd728e26401ec207d6c3ec4c836937d2ecef567d1a8e948ebaeaac895eb70a2fef5bd498de875b95a51aced40b34248c722971c00c92fd69cfab2fdf2974d75c3462b945cd631223578bb1e6f58d69ccbe620c087f05749689686a821d63aca11af8f00df9188c971564e7b016078214219f3447cd7b93b730531b889acc04124c25e3f6173a1787c0cb55ceadd71dd4132a4712377a2c872048ee4dc925944d048e999e1088d753d30723a3b3cf96cc94f9e6d1272922c4945213f981c01c26ad5d814a88a4948d07ec4134638475ada57fbd3cd56a349e0ad2fb2429950ff1e4084c297bb446984833f3d6ade5544478c30e38a5525c1f6f62febcf838947dbc909e57a6c297a478dc878bdf47d2188520ea78e92109138fee5aa121086974a66920eb9c5381835cfd223eac5efe9e6d042a1b0914f5730a5412b7bc1b73244393e2aaa4a6c08fbf6c780bdbd8dc3c3b1a3a2e77253f2c0aa149a41a8244931d5774dcfd46110db07da63bebfd6acc05f8c8227b5dd74e0952d4d9cda3a80665afb5a58b9828011e37511d293bed110581d70568da796e24cd123c8512ac7b8f9c7b5ed1f6315939f4c98ee88feb6c255571ef636444a2619fc81d6e6fb567549f9ec85532cc7df71d116b0b0364d9bfc45a6f7876b5c1e89e6ce34c10c91171c9182b7bd460a760f63450a99a86e951bff7561182a7601ebe4f687dcc11b05c5c4943629cbec4ff265fb6a46e95bbf935c1dcca6c293cebc7adb0e0b662d7d6febd93c4514e2ac982969f6d4b04a1fd532a23e21119ee1e63ed4d1fb76dc93e6f3da719f522afaf99445500400db50a4b69a00c3978f4ef94923f3a3d08b0de2d942abc59c87c8bd4a796970ac80d9c81cbe8a5b199045685b0af5cdc29d0a7829b680b7ae4de88d74cd611303b1b3fcb9cf5649bcfbae03a1edd54704fb23035453a4cda90cf3ab7245f63a365e7d95bf34af7e152ce808e6a129916faa5d6c9b3a9668c442c2d7f6eff45dd53de51c1de4d9c2f5457653a688a2e37766adcbc8733e04c772f5241b363504ac131006da4011195bd06a0a51e8f1653eaa9a11042fd06295b17d65d6acc1d0310643e070edf629d9a537aec51e7ea2ef0541377f79905211df3d5e4ceee3d1faca5ac8b2ea10e628197d711ca63fc8e24970250435e3a7ce1f2b8fc9a037a15a63ba4b25c5627772dc11032bf0be4f7983b75be7038571510a4bb8d1a678096dbfc3d814d63f5f3045208d01010372db557a3330b87dc9e51cd2f0e638254cf112dbb41f8f63a73e936c6fa017203919a3d687f56441528dda327cb82aa3b986db2cf7e05b287d7be361eaa0d96b194367a64b36bc1af4609698c48a8f91cc0b52772febd344f3e5b00c273587aa8c29eb7d87d2998f0bfaff0b0cc3b986
c05a32898e20f6b75aeb8d8e088e193040223bcaba8e7efadb3de2501f7a751824bec140b7a08dfe50e0eb3513fecd914558e5670f3438a3206bf860495c9eae5d2f151af1964a13438155fb0324f652aba560e79ebae44416dcfe1ef24d2fdb46b240cfaf142ec9421283f45f397e803603c8b16ccbba9d0b0570e4e249f96c22e54908ab79727558558bf043285326faea4d99873fc8339476e9e7678160078753e2b1c2f48d4ac78c936a038b8324edad329fdf6f20d6f5a6ebd5b687d51f04f64bbc5131248148ef69b339864470a9b31dac8137301c80d2d8bd670acdb2ac5122a370f0da9fd63d9590cf45faabaed68f7a456a0e868561e15aeb0856000795abdac581db022d48cbe56c6715a2143aaa404b2c6906ab966d21cf6b1344ca89eeee3c0d7a02261497968bf8ef8df9f580c7d85e8e460ee23430f04dd2756b5fe06cdeaa69c793aa56e4ff0d7a0b46f8ed1243b85509f64826af1d1662c37b5b286e871e3f34da117c62430fe114b91fdd07df6eb8c84b7e6389f39fb9ee50991ff4f308155e45ccb23bd3564b2e6dd041db9ecedeb392232113e5faec4f665043cb1312d3ccc0f316d63e71e6bf832c2b9db3e895197c71a33b6cad7052c748acd1fb8b96e5381b73dd425782dbcd7c33afd884f6e6c9ff50c1af4bdd8fcdec5f2070bbba24e0d1b551696414cfe344bac276d2945d836040c8795c18c46b0d7cd63f5e8a297aa903ef9c70d6516ee556f30a7b935ba9a0737cb6ca958a074bcbed18b3a0dddee18e30adf5f8c1d3d01c239c59cb406d0756e4455fe6b2e959c1701d06a278ada478180fe48bd6816eca798a18a88ad5f9010e9caa3b6783248cd99453a4f546612be1b757d5bd818dc6d732926ab0685866046123b305956bba81149f2b7e03c0ca84aa0fbc9367b9df309abe8b1c571a5a0b256dd8f82fc403ea575418706782b9c857cf12b7a2514d58b56dd43793a6dfa96e9f8490b7dfccdb052bd53439d129cc2d88235fba2fa342d4e58289a79791eb9da26ee06af192ba1a3f247d78243499fadb3af111688da7b127338b9ed2acdde7d56a15a0981ca505d3d8f1524d7bc944f4b7658a136bdc0d39d4438447081678d4416cc10a594be5f5ee58bcf967360a17866b01b44e70f8325281c42eedc6ca1cbd8007acda6eefb1720fbcf76666c2d16aefb0bd3ff9bcc043c85cd93e266dd2610441ddc33b3d2ae08a11b62be35e5eb953989c8b23c465486b279503ff07d2bb73fd228580b9e7cc83ede80277ba2927a64e882ca34cb21b377ca84e08a1c020a16d50a1bbca48e686d51ccde48b923a57a5936b103787c3518220737942fed2e376d0a909154407135374e89bb934cc1d2a74647f7ff7b16d4953459d07244623985b64ea29917eb89f0b7b61546cc91d8476d58f2b4f1e3b12ea04254bfa45f88472bb92087b8148c9387e0e6bb343a4224f50904ece71484e80ba9130aa8166de743e1e3ff73a182d7541f895cd105be5faf088a41ebca7956b2608749b5e71be27d58b37806f70624627853ff0d85b9d39472826e67e6b4bfdca164c373216f550f74fb0c0f9a468f55952f898755a73b52adac0978dda3b0e65ec28d2b2b96aa66138b43c86379d2b8e69cda1b2108477e934f2556df449cf68e7cad2d147b1bac4f5be34861d7943620dcc3048e665de7ab5eb39a5a9095c0dadd01da0b9354425531997e22900e2da4de2ce910d97a104e6d7b6ffdebebeceb228446c8d0ffb4ae4484d2f9b2127740d1229f8a51b66c7610235194462f562bae1a9a72087e82f2dc8533faeb029bfcb2e5f7a29b515dcc91b549395261d3ff4fe12fc956b7398fcbac56e250fd0634a9f2fc8f5b9839d9a75ed09744c1771ed293d47e78f4df3dcc8fa797a59e60bfd316c4758353316bc105809e82405dc5e673c3a2f1faad54235d66b3620a880288733b87c9995c1c4808e18e9a6e41974fba45a12a6bcc5bb2d8ee2fb716d4f97aa4ac87bfb8941771d00a4130befe3e646ab8b1f7ce94ec9eca7030f3e4f013e264e98da1987c54e383e7dff6603b842e6826963254dbbc45917d8b1f2402469d2e3df1f380b8b94c01895f63bf4b008a4b00c422263a9df1c83daedc3d3ca114260820db83d2cd8c3ecbe8e5415ad202d1ce87c91a7200899da2797746cb2b0f16fdcaf4e751351df23bca609289e0d85838c9f73b4095df5f9fbb080cac3cf6ebb410b13dd6232ffde6a3a61484d32baa01ebfc64bf19555248b159f3a66aa8d242ef3785a40a63de57073d15952610a8c81aa10b2f51c51d8a980d0ae5d638ae6887126983e43d5b84b2a14270e43834fcba018026d8fb1ab6c9529b3631b02bee6c7217ebb2c52100a3431f5dbb86179c8022a0a293559ba0729bac5d43ba4939e13994e606be7decd78b3047685a6b1f8956e59082bde03dfe21d7c644ee295b0dc7e981e678a8c5097c84aa8b4dd3d054ca398e856b1f4cd3d229d190798c8f5c8efa
c6216bc50ae85bbc686b0f103bca0b3557c94ac6770451de9be12ae976bdf6d1738a21166c4c0809891e63f4e311e66fe193a66eb1404190c78d5a6fed5165cc162af60f1052986709239c30b4679b968871b7dd7f5798841ffef06dad64c08f6eca102f82b7f4280c2c803fe889a11f45ae066fa3942047e175adda48ea7999cbb7f3507049974cdeacef27d5769e15c26c3e06ec2b5f53249080d1db3cbda7797288c1cdb4b39850107cca7d8798e01b348076537ddf48c48b9df3886607366d7365508d9ea96dc3d3a0917ae2b2e824fddb16dee1b97cd927a153a7abdf6a61b3c0d626924c568549dab48791a55532131390e9fe43560aad58f8ff4143304a62303fb27cc2abce995f6f675815a915bf51d4d39ee7275efc7116c383a6db73c7403a0147622305adb131d768ec887560192f584bcc0eb1be404996b6d9f744871651d58a9d9d7ffff7668deeb08e1454dba5953cdaf24c2f7357fd9d128fcc69c7e76f951a051907d0c867fb539560bbbb93810d8ecf5477b6c7179f751421217fdee630650560dc8a2bf40a806639fa831081d10c368dbbf04d4cee731adb5d1c8cd471b6dac9b5f3b78ee5312562b6b0d536c2a53fb53a5036c2f3731368d1bbc747d7d9bb6d30b48b7c7f9ac85ea3254170e15ba9dc8b5d249f92e23984a176053001a8553260745dd121246311aa19613e6f37117eeb3152cd08ca996341f62f640c646640e8438e735298a1e04057da2e09b1f17ff556ef1160941bcc2e2ddc4a581c015beb62f3408b6e2a582daec5d6caff9b0a0cf60ee05dec8d02459eaf09327c70e1de87da024cbc075ef8ad8954fe2ef09393bc1b1f0ca255f8c71eb9663864f2f1f34a48444b621bbd5381a41e61090114ba8b08883dc8623d734f76455be16f5b09dad4d24666b56f7e68ec5141c5c1e1067325c525bad23cf50b1121f90c1754b94419cd974d221b056abe0362d0a28d3c701a3f98118fc370b24f5e9b0413372110b9fd3667cc3ec5686897791880c6e13d41aac2923a3077a71df950ef13d8fc20889864d14dc35bf5cbcd3e211f0924f343868808684972200b972663334dcdb11e81d053f7d61f484b75a7282c66ae1a683bc76f53c15bb872e5712d7b326ea18573fbf19822a1d4b9944fe75722e15e15cd607253af059373495a554f8a45d0826cf52eac753bc3d9bdead2fc171bc7bd218e99bb8e30a7d49af846270b67d0d2f11638449844e4b3ea0424d48b5d82466442a091be2583db8d29ceb22007bb9e0cd622a29522aa46b6d806ecdbfb137608cfda340b59d96241a69a70355f4e7c1461383d1db96796773d872a36c3e83b44ea085f06d13d19e83f45132c77fff9abf7152b07db9d2a1b27d050a15b8eac9496257f0452c2f85034a122afc37aa9944ab41c69259831b715d2047259870e8bdea6880b55dfc82746a4a5cf0aa27fe2d96105c5d762ea42c85df21ada2575a6d88ddf6d4256d251f72553ee569da715fe1109d52712f4e8ce94197d1fb19172d35d59cb89ff79c451e4fba5a3822c2ca9d28a7d25918de3bdf247dc2e89fbef338d4f69805e3b3be00eaa8a5190c96726cb1bf28f750180bb859ef1085f015dae79f810ac97d50a4a1af9b51d30a7fd0bf1108b3ec8efc7ce0fa6bdae87cdcad27bca41290d215040b1fa3aa1fc7056285b7cfbad4d4d266961c87615485d97fca36a9685f5c5a8bfa0c0d041107a369d611e4d9a4b5c5658861218e2bb70ca9b5fc12b74b8315ea044d487f28700ca2423061007d9e43504b32362da4596b866418dd750a0c7a8b6221579443a90b06139912069a583a2db63afa1553b186c41e03f9880e8f0d71a692a3cdf59acf19368d2485c7e3eb9346015a566ff34cdb8e5fcdffc43fcdbc80a41c04d771e5c4f49bce826bbcb87f4f082b8d54061b8c414030567b000c147ca77b8f19048accbce8d726a20f559bb880fccad33ae12906f5d23e039261c3b81cc4a968772c4e2902e57929622c6e602d608cc11e88a62b459c349266482b399fb9b8812e18058ac04604f168dce4e2990e37d5f1523859caa12c661af48428589f1a1226134b32c87b191e1c1ec3d9f41e37aa7f1bdd24dc357fd529d77fe011329207d5a559698b1fcc816a843e6ff0547f4a4eedb79b2b90d237971c5017b12cd0c29d19c7d381db9e181580a5b12204a76a0de818212282346f114a63d1e02193703d7864e2c7278e562f19f0595951d84ed0eddb98f6f047eb0a6b9d96e6dbc048ef1a88ffb1e346ca5723135858fa032fb0764c0f0e72518c6ff7839f02c900b8041ece1f7ab056fe804d82355892767efab74c9bb9e01fd5ef418479a8ca23ccf6a7d0c7826b1e5680f02c8180662fa283b6daa5057532666ae34d214ae9da0e58cd361c78860d276bcac4d55e7bebef26a8fde6e5f06420814900ac0ba90b7b4682be4941616835e453ffa733ebe78ce59a1ad55c4a612989003d2aca07969830b716e04f69ef
cd5e48b19581d8eebb30ec56dd710296315b2acdb3897c59d59ea7b2aaaa6eadb13ecc96cb12f3260c1bf2573aba3a107bef1f5c2cadc4e6cd79d4f41e256f21ae1973f63dd2ac20a90bf28e14a9d6e41934b191ccf0929e68b7b01008394d6beb0297875ae21c4777af831131da1931e0545a60cac800bc4c39557a18519e2f4010e18eb4da001b3a259f600761afe02d6025d61440eb604278c5895960a69fa4bb3989331f29ffc6cd9c754aafa25d25bd32f6011f8e29a8c1f6602f147a09b26ddb06e4a55f1f847f36965dcea0261401bac0f9dd313baf8dc9b6f8d27c3ec6a96733ac3de512c90a55dc51ffc160e128bca6ecf53d717d29f83a897b527edd15666c6e10fffafcba642ad71410c7c6cd8c2a80372f12ffa0316ea63a470fb10559198c62125c94f5d058f1f4c7c9e83a6447e7479facc818e0f0f58cb87b64488484366fbc0c05a1427860dd2de976f2310dc38c6ee9de18db038434c67b4c2b85baacae318a6f38358d1d04a5edb2d9fa7699e8fe387426f7fe3071ec0d7c15b6707cd6eee6313acd9f5489a7299e1bc54866d2360e27941edb5e876ed6083d2b5a6275600fd7e6ed53931fc9129291f53cc0edc136a63ffdfbb07303e7200937c28dab3d1a2fc05935c7bea02052d5173a65e1ad1f26c9019eac688696539234b0f47c9d1267eb187ae2bf99a336dc0654bb1c5768e380167359e2299f77a5c8cd2f14dd93c0be04fc44c9c19c6e0fe5d55f73e410cc43b3da3d23c92d0205089e62aa656965254133921d4672b77224e133f88a40d77c807c9a8112376a85ed1479c67415108c8e58839eedf1849825b37373127ec361e31c152dcc18237810aab5402e5350287e041fd422c0aca4dbde1ee097aacc5d76fc2cba3c94933b48ee59d30d450ffb088fac6edd778a2bb2956ab61d01d1912ac500ed81eb983eebe26884122b01c3c8e376411d8e06a7045be5c116da2ad34ecce6d938f73cbd6774c4d4bc1542964210316c2a817fbebff1748cba56b38a00dd1a07aec5c47bd4b1ed8f343c9cdaf3a505a83d71be3e38de66e4cf50833fc80e5dc6b44271d94186345b6b53db2231c1d7fae092a72c2c08cb2fe9ea658650112abc5afa9dea72857798051ac3900a3ac7900a6280b5807b84aa26089cb455d5b9f2751aa5980e46271d7ebdfd3944b1600b9b5e8f5f2f4f4a070600f048c73a96e4801a54ba5548f2ee64bc3d7796421824477ad224179fe51179619aa010a6cf90789e9955bbd865f213e3661dc1708b1011578f82be4237addea553fa3f3971353028f673b3e230aebef7dfc51d61867d1e2157741b813be10b575f61d7ff5620c7dd4a0290b64951900ea94f0597711917110776a50c2f1f566035af313b2da190b575b4091cc17711838becd304e0eb805b7bca836f713b4c3f3d9ed03648f2242b2c3b18c34502ca2ed7bdc81f906debd4a8b7ee9e1e29e69f8d3966378ff83ca21afa4d116fa464c820c8994d8af2b33336e10315e0c07f51b59e2ef2c9974cfc5f5706854e6939fa17ffc689444edaa2bc16eeab3a08b45a83f7ab32a6217e0ca6cb0d97b3cfaa4532700f1863ba05d84ec0611c4536083205b0e208c94a2dca1d469d3844f99ce2a40b7f2607b476c7a73c2cfa6da74f12ecab0eea76c10d6847b04da53ca7054370b553348a1e70854cc04dcc3bc88fe7fac1b5653983f3d020e05843248e5b1b88cb2971630dc28acc38650cd9f8872c998759442907b7aae448cacc971b62d4587be2c52e8ba436c7ed0b806345a5753cb6d110aa2eda1acb73f51e25f305ac2a5770f90705c2a6b93ca4533cdd1a44cd8cb47585081c8b988792f46a32c3a2cc5a090b13b4efbca808e7aea15383bfec7481aad69e2b7eba883df79bb870533ca569f23bda30ef0d1ee59c0d6577adbdddd3a713c04158480306e4a6318cd858c6e8d62a7e240ffe9471fbd3a0e0ace43af034b9ef0fdf24c2d90a3487a3b44dd49ff3d990e9a87a1fa26e0d4227f130e6727047bb62eb0b22bb7226b990436f996569bc58ddd195901d7a9ec43c6b08e0ec70ca83c3147f1f10ee73af2b41ddf811239e3e18a03f2f7aa2a3fc54e1e2e0f2fe1cc58601b8f1c9650a7ec283e250fd5215d095717897f6fabc5604c0605fd5db4222b7168e187ac2179eb72148cdb481639fd3d31d81bc743cf5006c1117213332656674c0c6574bffdffff4821fb0d5c9cde36a6c6d6a1627dff1b3023924830b622cb1f2697a36575d88621bcb9a3ff4d30944ad0d2fde779f56198f130940abcec2323b8419bd18c1f0e01d7e7b7d240fe1ec1e3332a3666726e581bb7d2cfa92db6e78dd089935ea7580f32a390950e58a2b94b96f886372fbd1681856cdbca2ecca420a405449b7418e8734bb84101b9f484d00390c4548217f60d9084fbd2861d9bb0981fe16d2fccbcf407c93563e6532d421c3ade8ef2a01cbc32301067a23f737d5e635928df69733701a1797236714a26
336766b44b95fb5d03e76c4b5ccafc55971c1938d684659b66ae09d49f2fa4fc1d934d0fb51005e82743364d6910d1f278d65bd6746a6deaba1c07546681f6cb4724a792ff7a7e51acfc71d265c234274041e56013da3a344d25df54c4946fe162faceba592f4e3e49435a46938e57b69159adabc60c1ff30dfda907e02fe70bd337f0328128634dd75a7c65dab00b4a76a26259fbd17c47e1f9956cb4a0207f6be82af6a4535d685dd62790f1acb0f4e0003ca124e00c83683a74b8211b643e87e2e1da77087feb07a151d4b097c9167491d568342779ba30e2bf55bd79e51d6949ee3ea7f1776c3e233d437e2ca9f9c0d10bfa94595a8fa9bed48a71320f3ef531e71994ff6dd10776bf5a1c6af4a41ae82c3a135c95983c900b6b8f067384595fa6039c93b063f45f267c338fab83d32e3d524f432efca3edd49e7edc6be5c34680dc7a3cef32850fc4faabe50e28bbd0cfd44c4df37b67d004aa403d1368b9a54f5bf86386c9953a12773c6e66af81375b390fe605b71b6573b67e7d445b9a010a5011a088bc381bc211e1e3d1c1a9e681b159cb03e4f513c5d73d9efb716371096296e1caf209e9058c0624ad01db62eebd091f4e21e0c100ba3c006a5577cb5560563f16bbf5470219d1336fd76cfceaae276a11cd2114eface2869315e38ee842113980777b552e0d74090dd1bfa668884365727da465ffe705cc672b00c39fee8a6808a0d78b000d5866e32dc8138a03fc57e3b96c93fffc176d53593ce070a90e77b6eab7e7a5716e04af182c9bf905195a8b71ea1a253250c0c7350ceb256080dd4a0d7cbb1a985717f82e619dbc1a320b5f5c7854a5c2017d4fa1392468e5204bb422ad05b84215391d6e2d2c7c67ca33e3d514af992b5dc232cec362890a4b1b43ba099bb88207a9a8e7142edaed497517df257d094af944587afada495a6e2a0daa3ddad00f59fba51b7505b2c934451d305611f7b70322730795f5960e97a6b330886fb3b1ee5264c31fd94a9000ade96f702adb1736beae0d4a1a66c36a4cda1ac08ed0204c0d7335bfccb50b944936114ac7d62c63a2d4869f4538bbb54ba8c71822de9e3d30cd48c16a1319062e2f323fc6aa5df4aab55dbfdd3a429e2c59e204d4a21658c0599602db63c91f288b7af146e705353457c1a8459ded1523bd452d2ad98a5eab0950b481f11c6f90b237646d3d7ce8c3913c9ceba9aa03b22ef176a34328f9ef92b1ff341e6130ad311608069c313bce3ab874e156a14c9a4bbed05a8a75818c54341ec99a6d1b08ce83a290228337e1d8972892cb0d2b5231a3a1ce97b41a0278512bc63b51970da3234fdafbd758b1cb3589df78b3ce6b2c1be7aa3f851625612e6bf8bb438456f4bf9e06554b1c870c51a320627da886018de9b250a1584b007058f47069f997809d908ed0544e97553535fc12d61444796b91cef3ceda301b20bf9c8647d3fa416111de2d679664c492de3e34a8fe784197119f0c9672467dd15cd6f4d3016b66aaa41e2e3276644f7f9735741283ae29627a57cad2ec561e9b0c952298a08e88da267a95f9809658fd2c24be7577ec001ff55ef64f32942df0cedc745432ccc7345dc32894123b07556918a9a57f8b58fc611d80caa76ed725807647a0f2915c0facf64c1329f232a4905b5c444fe2f0ee65cd74b0d68b6981a76567c9877b00f2ea06d5e7c28e844cb3902b38592bfaffb456ce08f9bb821e736f2bf8524655f9e5900d1712b18f991581451fee2af668894da8af9af8dc5659dfe5ad0f6a2bfa385980bd3a6d191fb8330b120ed147fe804201ec5adabbd8b04a56711e8cd46292f783ccaaa0da2c4a2efff7028fd650995541c175baf02c26b758fd29ce250295d91510ae60da475de3660c812a867bc82e8685b06522d33104c0ff2b154cad9545c616e3147dafe30208502d07eefb1c1fc920c30bbf0682744064290db29eff14b07b874c59840779041f29eab4b7b8cfb2bf8b980eea6e6c36fe5cdc087ee9b986c89c4838bdc07420bf6ad83bc8d4180d0a5b572a1aa1c161d555e5463668583a54d9c80d0a4bf72a1ab5c161d7a18a63ecb1dce4c1b283981bd76061eb4a5b1e586a5de0b0d74d64373ad10018ec76d294b1c1c0a27bc946161656dd6b307290b0eb4c937107879dbab775a091a7f79eb861d749443696bccfff52a53ec7886b4b9cced6760cfca8bc3e42130c7959e575c3cdcf02f1196d98609e2c60c0e653cb4a5f4bb46c2499f4ccf342bd91040c4a4b961cb2f72602bbbb6277e5f303b61306ba14821dbb94330ffb1b95bb064d6c1110f765e0b1bdf06d5f1e1ca48bcd5966f168aa87c82e53c0dd80fcd288ddda43cfe2139f95c61e3239376dd482552d58b2aa6512d97cdefd83d71d936e613a2c3c40e1902005b443ff946a499f6706b91312fbe4e923190928219776afcb8ddd03f0e078ac996a0286afa54643c30ec27b793ac0be8ad982b88a9df9d8465665b3703338f
2c7d723c0c3e8f067f835e94c565add4067a2a5e8f78c5948a959c302a3e5ac55044f80eb774c9859e86fd5202d1de0fa3e02aef7e15c2a6ba30c37a4665c3976d07db2c942206b3871e2af1a732bb64ff7dbe29f65faba605383f6c7f19fa5fb332cacd38936c12076c4b7bb20fd0a7135f8fb6e3305b46b5fcee82b9fd3c058ebce4398a1e99c6fa073d3af1877f54511a4dd639a5b04485b49279f9faabec2499cbc0d2211bbacc4312ff624b9582493df1424ab8dc7f17748308357c3fc9cb3484f0848f128f1d7690bd38f86db1285df6a2f09455586275e148ff09ebfbddf2e91b1667fc1b01bc59dad6e22872675a825846acfe3679275836c73c1e0d182127c8c997d2588b87b43c8286234ca73f023556f705a0052bc19ee94bf9448d91b02c747debe7cf1f4c2fa973aacb9d05f73249f2bb6c531efb9cd24c5aaff47a3fd96d14122181c98410f1ecb2352fc13042133d1a4b6a728fa065895717f62dc1da01cf22fa7f4c588bc2b1d34310973f2dd22df56a171a8cfb5bbe76dd18fbb8c5b498f26c4f3853fc68757690e3e599fd63783bf75b9f566c05ad0fa204ba8cb56f7db4f0236234cb70a67c433cfa9523496e4ca2c4864146907d8ecaca7ec30d26d6c49bf0a2962bca479fcb2437568a1c57d7a368ed03c161df06d63ad32b6ff643115395849809911df22bf0f0a592125f5dde492805ccbc49d9f9df9089255d6a27be43aefbef09ce0436d8cb614346ae822b172926f5f3311237c8916377838ddd53ca0c13ab22d58fb76acb5c0362b1bd8c92b3a6e6e30de82385b5cc9845336bcea4c62d584f3d921fc24b1e2a404f60f7f80180342f6f23bdbc8c712f76ba7375d31d64f0d26f6a90f7acd15fe93f55668296dbd386475ee38f77f58b60d1abeb7cc050e15de047402f6c23f26ea33ad4738a5472f31d1798b0e5975a1e87e388c883b490a6bec628c9dbc1dc0d35f4a15dc75b0c9f8d2c740c2dcab5ca55a4ef99d7f6eaed50c3e3766b05c65b7c70044a73c5d31a0f6fc7341814af889a228199b2ad45a1013f1259c74e502e4d43fe3736df154b757d4c2dba30e2f450a65e3d17b3b5ec8c196d9fba434ccb85ae6ee2eab3d147ac95dc16430497b7e2b0283a938e045aab91694a70caf39a6976c5cb65bc86fba383651d9bb2ac1394769c0a5f30039120d503a782423888a607dc8caa4f847d0e3b931920d97618431f83b0856a554ab5ac48df1e600c856a58326e2d0815d9ff4058abf8661e4078ab01cd8a56401f845abb9d45d2afc03a86664f8e9c81853e0c4737bd896f705b27eeeff76354814a5a0f0836d89901622d0cfc1744e8cd4c2c903cd169a9989328bb205e5068de76deda56eeaf406780dbce9e0bad220e56df892149ea4a042fb622106c47ca7089639cfae6caaeb2708a62873c61ad60ba4dc6f999dc89adeed9403ab0199567d9b7e799a54b693df1cdc10ca48b1f151ecd2ee9f0dae0a13ba6d4f5cd0144c8e40db398ca1f8f040f5638088b0ac24ee589a64d5a4de4933ed0a08afdcd40c91d091836c6bb966b632623ec6c774f23c1400ff2ec17cfab18214a17721e370f811ce725a2f8735e723690b92d3070bfcd5aba435c7a6367be97d9d83928bab14adea2637d54524f54b95c8362c060cb59d397a6c89baf82dba06725c06a9605c49fbd26ccdc20bff7c1571047ac2f04ce43e97875f7c875374dacf62ffc007d442f592c6cff6c36fbcc8ebda74452740ea790fb657b4bd856d9037bd7cfbec45d88c6582a3f7b96b7dceeefa7d39a2055b826a33a2e8ee4519129fdae48c4b0b080cc47d31bb2f6851b64b52188a645156ccc2f27440ed7d2555f9682981cd957ba79a7ce49a8d0479cd1fb0ab37b11529ac18e88000f719b443d01f9cb2c2f38007eca9f95fbe0a5c37a401562591cc100bada8cd7b5d443fd6daa05e4ad8db4fa52763e4675a45e9e9a31ac8b720c0c18ef8ed0a6c067acc7d9fc3e7b50203eda036f18bf2d63e842945241445f068d73d941b23c9b5098e30b8b981493a15079fd4434c9de8d0bc737c9de96a4f154329870ca06462105b6931e2dbc83e07ed61dba7f2e698ed45a40fda8782009c740725f3374175f47bd98c67e8585b52f680c339552def8b29bfad3440e4534f191c88dfa1bb6e51457161e5c5fefc72b94afe86baae7a2dcc8b6e353e7b2440b15e6d3c9056ec062eb2faa64d18ed9484ec98c0475c74ddf4e37df84b00d21df259af076808b6856681f55eeb26c7116cb15e1d447c778d034d12cb09603e921cd5e8f63d3eb6e6dcc9d330378d01b5265ee2b22bf3d47eec4fc6c43a3858fbc4fed8dfc0b66403330984ea49cd7f3d192daee34788b5dcf4cd8c024cece1c6b83582ae581f1a88b06c896b770df3dcbd312b30083aac98af8386afa60b3add96a3cce4121789d81831cf1c590c7af98edb63d455fa67a1e1ee40975b52462a1d7b22b7c7ac72d502a2853569a8288374ac752e
bad443712d7326d8500121c4be5cafc4994c76f0a54517d426442e9670183e9668543820cce05c1cd273109c3040384b4d58910186619021cd3b4c391d63b8fd7046ae3a0fa22f6a477ef6dbae94d35470105fa7f0abc629d43bebc0854b533e87cd461d3ad7d8a1d8c3dce412904b600824650b89bfb581c519b481e55dd64f894efc3b31055c2f32bc27016e3be362a2c31db6833729baf668b64c785dad0bf3011441b94dc86f1146b2a688592128962dafe0ce45054afa43fea12071e2f5e618b6d25fb9ebdbd5df5da355c37b0d117e71074abc66fe8e5c8c27a27d23bae46f7856101102484de3825490384d8545ce3e76e8404908e7a98bd8d59eacd0019a72464dc33387c4bb99d6d067cb52b2ed8ca08cb7d969aab9120559043f4d34ee603f24d31509b3ca86a36d5ddf94d18aed155c80f2ea0e7feab6dde2679b745ee2f51dbdf91ad016cbf7c6a852744e55dacd62993d6e4f059960870c57fb4263d2d7c31eba4b559ddd0562130581c3c73580b422f6863446883923bb2a395fc65e690158f82d5836cf3d367d9f349e5b42e1b911e1812a92af59d5da83c06069d4f9201f70849b400ceb0dd1ef725f0b031f5d84ea49e4e64fc3e37d8cc23569f668b530b15341c60e503d3c7c17ace183ca437df17b349ab8f212ef08f6a3ffaa82c24d7208d6196fe14a1e5d0ac65d08223a20b4ed3e54e7249c06212f1d67d75fbeb04b8f132f45f4cde70cec1c8407ccf9a5d8840c254345341b2b8651bf9e7e905264a08a754e71dfac73e760d45217d64dc461e0c7a90d98b0a1604fa6dc7091159e7210c0ee6fc0da560abb824eaea8a1633c69bc4c74e059c08e808546a1b5278920e40290cd596d00e0f538390b751d1eac270e2864ce3f2db6304992047029d38529414ad9b9c0478ec067cc71bcb63ab9c8f0819a348eee270bf2c8a98c2b4cbc06c4f8919a71d0a0d447cac32ab43463a56dce467daa60b786ab398a78868b781c61e93ec48315c9b6bda4d2197c36ee6d921d34b846d053c47649d7cee3e3a771cf35499b940d66f451952b559ab52910bfdd049e040bd0b965911615e07a48e4333083bb7420b7bf7956264cfbb7b64ad87e05dcc197245415378ff8ab5970876d633ad58fd7c34b1917d1ecd24a709775c85b145f2542da3c39f7034c0b1923688ba4e2f6eac86aeaa2f383e188c084c9a48757249700280dc183296f58c2abe0cbef0b84fc138527845ea2970bab50ed8c37f5d0304b2ba811f0bfb0d94e39b3ecf8b699cd4eef4bb85ba2aafb95e87b5ca3c601e8d189b0f162843a92dc0852e5b1eae9f93f7f91a41b8690d17c6e2a613df87984523412af658a6aa08408a5de93b30de04062eff41e0ca8d7fb11b17970421945c6607294d722624631d640cd78e2284763b329554d872d32584f841ca2cb0a1bcca07a2e3997ccc9a908b418d170c673857489918c67b6532cb6ee47df2c33f606f5a2813ba22fc23b8e2bab43115750f5ee09b897643aab35137db6e9fa279ae387ab40051006e39923a88d7ee3a23bb64b4dd7859b363638d1725bcf7d20aa081bf229c29e24bd8879bdf0d26af0a16cbbd9d4519f18554700c0fd3decf01b00f7112a423c0c1cca4d3e721327888a30c695463e215ef5d01a6dc21ed3524dc2c2749a3f494ddc208f4b401890a851aa4e4242f1b86d3fde781c216146c1a5593c278bf2202cf2ca7603aa5fbd811323dc11e1b81515761fd0c1aab5549a9f738f2ea9b54992889078a8e5242f33f807ae1b33207439cc184eb2bc8648f68f46cfc0714f6815356bd411b3c9213fb7beb674d5d07cbed45a5b33bedc35a194b0c5ecd66e6e3ddf68de77a2699e179c8586a16eb82fce20e2d0e1a134e3028d00ff372a33efd032246176cd6da4fe6dbcf43ca9fb85c7ab3e7883bb56ccd259b6a209443aeb576ee38d3adb2b632689778aa8262ff6fe7c0fdcd90e9d5e276e4871ec92abb1781af73902c34e4c30453c023874cc00e1981c28102e16c28a8e5484d0adad7956d16ed7861e43aa270fed2d5728b35da529a0ba43420396165ed3ac7459ccbdfaf3d80d56fb8a17e318253c0f2c14b0cc54697b78a0f8da9f37e51244202cd62551a1d3037ba688023989b8de1a6db86e636787f5e883122ae39ff40566908db1086dfcfec44da8b1ea24ca2930ffa8cd242a3aaf23af5329f2603cb92d3fc0e3e410d7f32cf59a1ac5fc02ebf68807f2cf81773cf07d290328747d5929e3101e21a4644c51554379f0cc72f1413442905a0c917040770334a458815e9beb9b58b0b2fe200369c93ea80a609babc465545e37053d109892fea5e269420d5547c6b0b4b7aa192aad90e2ab1d40515c7053529985d8691df9b00fa97e2721680229c2e03af9bf30f02988a170ee3b21736641b96fbbab86320141e52ad0163d8deaa9e244702b760499618a14a7bb0d9cb65e263ce5108ac70cef37bfb1ccf8a76ce09bcc2a347c50a2c30cc34
17630b7570bbc63e8a3b79ad035603c6645b8894c670495547cf5c9086005bc0a12f27ffe16473d44de248966a1c003cd4661731c6a5a2a8766fd55a6ab2b557ca8075e088de8a6f49afc42603e370009670a70025bdb8910b054042f673f88e3aab760fc9ddaf487f5d35269c58494ce423d54e289403c697c47808d143a0cb93fc548071d95072291b2cd9494b36c83248bdd4216d90cb25800b4d647afa2940fff88fcd1ec3d346aa4a0882ba47371a4bb0b3a5e17fb56b912459b6091110b929d0677cbd227c307b1e4d047601752dcc8eff9ebcec376bc75b8766c8ee0c6641b6fa74c6be2b4d3b2bce4be99fab1f22dc32dff798ba47911be3b0eef24b10e066e8321c9331487f41cc5546dd1b73cea4e3ad59387a26296590f25066f2ea9ecb87bb8922786ca482de9517f1624541cacaf4f3432d58e83f67c30fc97793155e74d9e1db4c23263c721a90d6ec8dc17ccfd0e4c83f3522d4ee2170249e03a616308bb5330ee67d7d7d710695a78cbb3629dcdab246012707d7c72261b5902dbd9412c647e938916190392aeeab92c1c7a93ee24a4a032295c31e9e6eb5506feb844554ab952d82272deb0ab4c2e22b6e2b157a60515e0dc898a327ae10fa2ff3ea658267d51aeea1446dc73b12cb609404f5b3015bc9001c9f99a22966b17ee3b2139daf7b68ced48c2c8466f60a06817f8ede0ea174272ee307711df53b3f961435c33fb6e42ab5efd0eaca1d09d0d6face4a8f82463dd06d3ad33aba36a255232c33c54df7f5b3cf52eadad847d005de9a501c1dbe28c2534837b897f903f57508643d7b49e04eb9457cfefcf0f6013f008610b2c661a7dd8e6732560ac69414d5abda0f7827d114b00e954acdb5f5bf0b4edbff669f333faeb0038a408c74f76067b76958192d987a73510bd1f80f0e41473783ecab7c81a9a2a69717305531f4afdd368c4963e2660925edeaa9cc84e727d8ee6f3e3e98c6b12eb8c586d9a8f3e9390ea7a2636cf8d39010e9b79a01906b86387b84019e3e13e6d93acf592b3f7188a560cdc0f927bc335016dcbf9680472b4ffa383a8663b3f49d96e38b7eed77a22ef57258ce3459ae37f687d57c0822374b3702df0e25c48d12dda477f71081d24e564fe565dd1a18e3b5a4865cdee547567034b62af1504d770b51d0cba46c7e1aeaf1278e70a7a9ca91a03793226faca4196d46776c85ce3de108765b039acdb7bdb1f60646345f511eb718ace56720074db28a21d857e200e71ea9d45a10a02310e42930c7e8fe77674df3b0e37ff5134442a05ceb090ac8b94115860355e4d030523a066f1944380a744a11e44eff575558d29a35b74fcea0a3587e7804118ab715b8381ddabc798faeeaf1b317f71d847e5e1fa510f9d4d8997df6c5ddff8d38da1126da02992076919925fc0f820e4d5819f206a6d80a11cc8e808586d9a6e52b4d98e863660d952ac1003d3d6eb78ff6cfe59adc2d570f3a63a1195da7ef6ba0e5a6015cf10ccf1c09753f0ce49b290ad09d7366ee3ac9c6461bcd0014fc7e337c74b6a3590a18f5c2ceca8fe861e530f04293dd6090cc15ba122dbd20ed9bdbca4b6ab778b097cb44bba10706ff88aba5a6ae01dce54fc97a3924db401a331fea74093951a89e14dbfd339b9f1ec64d88a617492ad584371ac10e0382448a026c63c7044b6b5c2d16918ad7ddc8c3b400e7f6fe371dbb4575a14923eee12709fab5daea7d86fb32e1c0e92267aa6f54bdf69faf5814d22626ba23b7a351fb8437e8d411369ee27a2aff99bba1e3b7877c83438fc469e35312a0caf4d33fe6fb7a88c01bffbb0a289c8daaa808e8b992179dca9274288267ccbadb5646b446990dd0edd1755df12b6afd3d46538dc3dd6d2d18bd3b4aa5c8404f28db9d6658174c5065c076ba295a9f9543f348f6c25b40abd39ac9969cc948a22456b312f8bb5f757350718c1520d3fd6a48e547900a05fe3c71ae06e864975760494c96f2ec7ed6073fe532bbf22eaaba3b4694378c5575b7cd9e47bebedf809a12645198027434231ba05b5f20b3dccbf1f84a94ecb584d934270f48a4892600302e76585d21b885ef59e420f2ea7f4c6d7a06426425541b57091a82c0f6efbc8480ebc20295205163e4b1935aa47525747e9d305472eec0f6ce7c6d8790dba6122693d021bf3f614db6c0db393fc8c20c38cd9a7d075fa9a77340a5232e382b82292863eb29c0e8e83b5484f8258a251431d0bb0b190c03f67c49ee01eee57e398b75ac5a4b9dab1e5b1723a4926fcf3865b75329deac911f5571d32ec2d99405b8971c418f3c5b2d6aa011243ff0355fc0feba0dc129fe86e749fb6489bf8d28210315ae0936cc58291d59ee1e49beb44a0d64391431df4287b17512ea3693c0f1dcd6f0fad1257b40bf697c9849a2c3ad55992ffae2f5905273249296ab518cb8ccc169054bb0ecc22610ff9f20cb2a1da14d768ef260e7ef0884861a1b677d95c72ff0ee62617e56a9c09
45c0514154bcdf3761efa0648c2762a2c56a1e779011802e50d08074cac2b19a656c479f8ecb89c59d878adec662bc9c70b054a42c09759d83c0a6cc83c0ef12d6a5d3fdc73fe71307c76dd9646254f736aef9265eda9efa5c2e3b465f5be4276ffe719a77a54647f8b88b086067bddc1c99bf8d6d6a5de737c77a4d83f1c9f929130f7691055f13d520fda64b1d5a7675b2f887a717b4ef20a4bd2706ee1d90d4104aaa60b70e81430a3d0253fdf0d1c32a3536c7cc398da865173905ee6c0e616b845054d075c9208387ab6e64b53af60d756771e6ad2d3513534d8b14970514904c8d586c98423d1ed02d10a7c7d068159b6bcea6890319e075a529ff0871e9997870c032457deb83b1e5923240dd246deb5bdef2d64bdbec55e3b4f0a008eb2a351b0648b6090340595f7321c8ad4e29127a08393cb568623e60b41e7b339fe626fafb5a154248d34b26f649877403e8c7124d280b97e2adf569aefd94bfcc0393ba46e5f01d93844613b6683e976e67ee9a58693a80db216eb012d85267e8795445dbaf1390cf51b7c928b742b0fb872fbb12ef7277c7cb6f0d69eb37fea8c0586fec4f68e2a98e07bcb4089848c1a62eadd93542e0da984173eb0c98fe1e2622e68595ff6c3f44e03e048dccdfa9569d07944564259765a1a32101910bb7e1a51d56c505a5d4a7ed9f9209653b758230fdf64e307e455a556091c539fb78cec6b7a8d6865fadca4134aff2622a1ab27cc38c492de732ddfdcc0d90ea8c3a946a7d671fc21516fa116c74cb4343d9fb67e813817f2b688f1b88af313578558304e7c84d559d454c377049c1875c6f22120c190d46a50b1b8c43c1412a5a663ab252ba06121a31e7266d80d20a89bd5e422235295d833cb193129957c5c27bb84f0c95fb2f3e9c0d6290a997f6e17ce4200fb42ea89ad599029ffe8f9a1932911363884dda9ed77d8dc1769dc9aa994806106825e01c2e67bee81837538b1a910773b55536bb2b85b5319600dda497c2a1986c2548685d9634f0e0d546b2f1a4226f4f0904d99901ff8e176432055f001a6522d4386fff8dd66106de2b3344b651c199e12f83c1374b883daa73d423b9f0fd70b5a1d5d65ffdaff78722e9e5c6238c9d684d3a908120d142e97e9500b454e2f741883e07bed8796dd19e83423b8e74dcba05960a65788882c32d1f928e50d362c19f6299ca54e0f956e98c769d6dfd020b4f6f5cc7dfa95cf52d9c9459a417b4f700402b6d453a244ea2e36f3e25d6143abdd134a70707c7bbb7fdb365f55b3f14e6c0493fceee15e923e496f2a8911a550440b7737cb3c4f61e119f4a5d8f9c15a3a9c235414a88a948b3843ae49ce0b35d386a8218d62f60c3c5de0dc5ecae6ce36ad9a698239d2000d52c3b11e89ef89bc681af057194e4a29531bc8e15fa64fe94a11aa59b8ad2ddaf99b226945afdafb45fea2517a12be729097665a4cbe587b8ce74dea19b55c6408a853933f4ab5aac72200d5935489a8eac9d45590cdc0090535ef9e1694c45a09a5769671ac9889d051cb65d91eb017deda27fb553699753366a7a5eb654ea7ca6336abf14f3e3051bb2c416da27773d96c48efece519482e4008228d1a10e234421305bea0228615e928ff98de4c929ca779d49c350d1018d88749f2e0f9161c56064f20b79a7276e4cd1539743e5033c320dc9ef0c3d5419520488dda7d7bb2c9da81314e01cc3c9bd90eb90c008023da6ec578d89781591ab190ad807a9cd2fc7ce4dd63360a05b06d77ddc462af68fe97182c10addb9da1e50c2789270c428145df86893286c0a9c7a98dac77c78d5253191f4ff3d7ff7364e701474d0fc273d36525861c7fd1ffb8f5420903d3a662c48d9394704e82840c850ad69d9f9a02ddefa682faf4a98736f80fb8ca721c57d62ea8a09fe7b7fe87ad7bf8d65fb9947ed6ea56d608ddd9dec3aece89e0301de9808e56e4fcd3e7a2cf900f9bf414a583a653dd907723939ca62e95869fd60141f2c7b6c5acb8e597358846d80fd722c483d8ba5164c4c0ddec516c0def803946025c23a88e2b4b9249c6c1544c4c829e853681b600a1a7ca1487835c523038e7ee847a7c0db4dad6309e5acf157352a5d58eb830ee90ad34fd8dc41f37c05ab25bfeee3e72abf89863db83ae58ecae63b8f9651dc4f848cb68384eb4692b0946876b63911b807470ff866287bf8da14d0d1ba5f7bd46c2bbbca241b44af51b89ba869570f17f21352a0d6d84302eadc9fe7dcb5032518debcede7463bd4a1e96dbfc66538a0acd7600f834d351ea04bc7593b640130209a50651b2dfa37957958a06ea42c3a330b967a1ee678f7a236f81204e92b1fd6b639f32f822349c457e666f913f6ec1a529232e55142c0fc4f3d0ae0165b891322b737af444b473640e9f48fae32481771b4414b0c9a93d259cc329fc8ac4e6029168b4b97e7a603474ad305bbd8b73d19e6e2f7415fa49950741fadad0d58db35640ea093de94b07
eb3dc4f1d0b02fe57ef1263b53fdf5ed5e8844416a038014124c58c25e22588bff34126062ed7ff8c4d1578469b91a4246f48c8fa619cc11938668e0c335385fd36f0906335999e38cc24354d7b61e5db08a3577a077e0beaa07ccc88740a7a2a0458fa9b77b409cf76768c18e62a5dd7172c3cfe69247fc5450128fd1afc61bd4f7f260dae24bd0beb91211198ad5edda17a69d12ca707a0e37100afbf59ceab8a5b3492946c110b4c2517ac15f116a6fb0565aa8b21dc87524e0fa77cc005483a8d5ca7b04668f1b41c5a4c0d8ec31b9d4674c4c82cd5a570428feaa2a854450b17025d765732fd9b75ba163c10f8fedeb3c44bb4746465245b331451807ce305667436f22be0830c6fa98d31206c161281984c0e7bc58de83e8f44bb2a301a9d135a510b141e3ae3f5cf94225e2add6288c3469586e9d45853f3ae4ac49bb536ffcb1d365268e431fd0705c9a6194724beb4b771752a65ce75e2436a47283cd23036c4de85daf57f09af015c9fea49990b56e91b9d5bb4076a07aff0c7f96aace05e810437aab950a71408d806192d288f01f46818c0eb37db9c051d8754acc0323e7ae1f6b0e22a1ee90b163edf68545fc583009a66cd7d31f5a13f5307be2022b33a7c67742b85ad5d944b8544ee821ee947e8c13a229b0b98418d060099beb3dc8f1a7ed02e0849506cec2b942a78da9cfe59e0e760618a95ab18a5480b2b7a81fe58c6c9eebe3793f1424c2d1b4af8f04315699e7d9786ccbc32b04f3adb2ccee77adedb48fcfb237587e7d5007a5c9a57147a43d686932a6023477a23db6f59f8dfc41ba145862b5c6b545d1c211e0258349b6a919183b40dca891aca397ee2455249b801d79f943ff815fd2a23fc0dfff1fb2f13bf3433d75ad9346940323889f20083a5d40d69b83b010835832e7276e8d74826e15de3281c74c33c14a30a755eb836eca84f201f619a66875a03c5ee1df766129a8f47666d4c41b1021b611d19f38824fd1a235dc7d17e471c94afdcf3ebc7f92755a44b753dd48c57e4162aae88e4191908c076ba0fbd7f158420c4a151f499aeab21af06b0477d2bdeccf414d37402f1728369162cf4a1a68b4c20810a8e773a31b12a528f269034272cd619b015266f29dd0d5f5194751509b103c4fe86bfd007817a7e8f2e2eca96cbc2113be75beb10088ea7fa37e67750f58d1f606be96c26b31abd4535dde4c73ee6da0b2413e034c3ca07c88a9630a998f17f67fef22614412e91e5b231c96420f13980008088203dad2169b626a151a572564e4dba19fd401da95b9ea7c30a3fcd7588c29184577331a9ca948d7bda475cef0942aac924a49c40b22c1474550eb13728143815c5606b4c85789cfac1f91cc10d575ff4b409970bc92467e9749cb008bd5f52f4d00bea84090663b5bbed9962de326578ae9b87b3f5af8243ed62295ab293ac2a6758b6b891f075f9c5b3e58b2d23558e675daa106081ba9d5655d9220a4ecb8a591f64e887b43775fe687aa372fcda0afe60ac8c8fe29c22cbf71c95edeea6f3f8fdfc7c4824f5fbeec0c89043ea2e5e44659ee8bce57d1db8891eb1a287f42a504d03e1c26009ad8e9792c5d3f4dc323b7060e75a4b2f85650b649337ed7da48e14c9d7140df45c9ce63dbc158725e3c3d4e6cfe6b746e1327c7eb05265e8c2d6ae9e5f967076c96dad1d82652b1780713b538c81698a271f4b2e4fa935257b45c7c45600481056a6ed2dc426386c581f41229a366f9f0a1651b18dfbf038d7f6f59623bdab25ab845ac98c15819bb743785634e09e5395bff3bd4c38109e3bf6a4097d9f33e38cbad79c8b65cd37fefc6a61be32640e3b24e942ddb7d966fc60fb8deef1b092fe722e3787bf6d486653a0cd04e33dbd9d480d3b725bc541fe23ce5c53ea00dc502af5bd804463f496daa2a93cf74676b369549f9843e4db2625a06b7ede86fb1151378183077aafa482eba8c0ce4735647bd4a6ca3c8ad1102e2b35bbf1776a89302e037ce37d431b477982a6786052a5d819603cea940a97a62fd294b2c466981d4a15ae5644535ee5acfdca492f24006fa04807783b4b1de09fa0e0aa3104cd8484c6e21d467b516075f6a5b5ed4640098df4ef217de03c5d2ddccf4252d83acc304683dcae0f923e24c0ff12bba81f4b99cd05b0bec84262358620c7e1dfca43f6733c1f0508380609a6561066db0e05c0201f8b7454ea4df46d5840a7459918176669f09b8d8859bfd50f9a9f06d7acc7656d7ad9d9dc38ad22e164f47dc17c64c20a3af57d6042c78b548351406322b73d84cc05910927b6499fdb58449017383e2c549020f66231934f195f7888efe837830052ec231b2b3b7cf9f1b157bb9d5333b7320a8be269718311f6fa031ced330069300dc01187bc3529b1e7353c79273f38ed20c46394009c0c77c4ca15062151cf874af74d69775df6d68b424582b43dc61a4bb2390865b90ebe5600f2c31083731e2270a176d93038f5f1ce
386b1a7106fe842592b46a54c7c88503730334f8590b1efd4b817fb6410e08bcfb3c50d7834e7c0330be4d88bae799b53e919601cfdd622708fc275e505c4bb7a7a0af8f9ff6437359faf50a3cb0df1c509ffdb6c954782c17c5f4b988b981c68b4dd4fc9594c389df1a28ea660d4e33c0be82479fea34658bd64f455ca00ef7363c7a2110d7bab6f2587b2efa2552d86e19d398fa14cadc218cb83a531ddbef50eb079a50a4a354be8f05c39f44b5ded9e6ce3bec4cfde8c6c69bc5451c7cbcce69a478d904be02565737bd3922c65baa12f1f564722fdbb50b346c34153513a622fb041e48a8d79819bfe779a3d7f0ba04e4ec9f9ddf3950b5f0065781f626b8f14daa04d9b61623952f8eb7bf3043c2056b6bd798e15ef1e3bc3bb3108486a9faabf48930e9bb8c6ad261539b7812c2d600acafdfe3b98c06ace4d1870c1f0381e5f41d44d243d30edb3f3098320f273cb3664efb858a24d275c98c6067f4e6685f10d5729c79d6284ddcdcf37072c8de4ab83dd35d6fc47beeef3143bacc27be3b9e5c6c238f125c90b0e64dcb4b269b20407cb34322f697077dc3af559de06c760ce06a87319b123b36cce42992bc5e87693673b0c262af2efd248689071e5edb45c0af0e6cb0ef907be0415cb039c1b703b8b84f857944b170ef85094090275518056a398dfddcff8b210f6a505c2a41103a21c555ecff481f6ca9ee978705e54c72f50e52cf0379c24501aca2c3df95b1771ee9867ae24e11faba13196d723ae9c24c1629f69d0dfb8d15fe41264632eeb0ef05c6b0433d7463dafa5b68338e4a1464caadf0cbead0cbb7efee0164d36b1475381801c6084a4c1345f3a56f1eccefc40ad219f02e2a263ff35ed5f011f994de8cd5af2e31d2161dff8843cf9954818795da3f161054f7c537728e65fcbca703f4a5dc8b3a8684af7210f1ba31ba91e652411e0a66e7151edc16af02bed577faccc398771ba9e397b998145571c3d0fd94f231dfdbfd9f46a7488767178c7338df3e0bc9629e3be0acfe87217276ce88ae1bf3023ec20ce6edc1604efba6470e2e54eba518e6a9955edad3c1918209ff3903a46cd044484b2b48a8fa36a9cb3f231692db75d795755bc879f073c9bb634f8ea91a6e9062a42706446d0fdc5df4e41d6f6c093114df2f59bf54d4561110d91f41473d069f287902782a01e48e698ad06c27581fbf6ecd5fb96ed9a3db0ff4844b80ff3988959500ec486fd1566ba9945b5c66a8458aea8f5c2614efecf54816498b2c1ea83d3889121e8f5b8cef16a978249c717e1bbb250d6eff6724789a19ec1e89961d98751cec963dba86af60839a587af6b4f84e2c433e509c87423eb941dc3745ad791191ab2a80ab0df2d0d244e20b7b430cf8efbfc2c0667065fa6a8b290702e090411c9beaa7b0184c07f2e8610baa841339b224f6e80525204be402cf66f7a204d22f002c0be4c2cf0a3c09a7f2348ed3f601338d01a9e366402039fbe704208697412e901fc2909e7310aa0c786e1cc27a907a729c2abe9c5039de2cabf75e49742d7850bc61f435f1a29cf57bbd68ded601f34716af6b9f05b6277c0799cdc6fe0e0954e814907eb14c98944c3adcfc4142b14ce5c0a06cd29bf6602a484300638c58afca5882b9267694ce92574b904b8846aea0d97043ba61676ff43224480f579381726fd59b6691439c5b24eab7eb027f0e00e007571e10fb7b89aa4ae460af869a8d563713970748961f240d41d55b8a30f972a2f060b3187c2fd1492b6495f691248dd956333e173eb0147c767e7aea0bef5d146a57a3b0a57626d4b3d590d33ade2ec563df8e775a0bd00dd2d3eb58b438bb32bf755e1c83aa9803212f69348a57cf6a19bdfb13ae790aa0ec9c0b06894491a7e39bc0cc6f227fb91ba49b6965e58915edebd62f5ceda6f0b1ac00b5fc43ba9f112fcdc8712d6fe6b92ab2243cbf4d9d7c6d62e1f3d8011a48adebc620b12072518d03d27f7f100e4560269f8bcd76d51b1ca4e5f2c2041cc2372365fe1f9c3a9ebab2f97b8004bbc084edd97603e4c1ffeaef623205016d8c009111f595204013f9653903bccf59c21afa653a56b250b40d076a06bd58636a737882a89078a856383819e1e0e674c92e11c0c345f5629b322c9bace8de812aa277240163e8035cf777660ed7038e7dd31910b62b5b73d4e9a13aa3362812bb4f6303e1430eb6e34f08710692f9cf7661c46fa82509380994e6a8809468eb74984312aa0397342f2445e59af558642594e79f9242bc062c989f92ade3bbff1f1bc93964a32541b5decce94c06aff36adaaee3e1b8d15d34ace774a59692bb4e5c9fa929891744c1b933f7431f678d1a8d1ef76cbb6383dcd605d52c64d7d416ea18dd34ade365dcc1cac1c04818b14d6d5b5d6a6f86a4565b346a7baa45ed8d89c3e39d7c80d53c0adb3d9082aa86dd5fee826d4a98b284030aafac82cc68ee27da74957d8095b5e1d9bd89d673c625306113b
980b38afc0db357922ff7a5d929e9a7f6ffa61749bfad0367a2c55b68f3c43970c0eb93c31cf304dc38948ca6348b5069a9630ac5aaf89ce73a535d73e9f857f21ce9160351f939cbeecd9c2e14c8dcd6bc8176979f4176158332245dbe692b31a2101415af70e6b3160a9f762d485f1d615d0130c923331f1fde731bd2b4e9b72860b24415b2c8af17144a795c233fe623cb530a2580b03d8d2e19a5345e20c1648fbaeddb00bde2f04f4ae7acc1309ddac8e3dd40ddac24f0fae668b0aeba8b650076f68f33205ea0ca43ef839d8dd7d4705802d554e03eb4d4ca30fe247113b01a090d998cfabb453a69b44cbc38842ed78c54e900ee10944ac4c568380700066bbe7922c73aac462d255069884357fc538823df05ca8040ee6224417827e42e75fd9fa63194442c806d84837b3cb98fc4a8fca624245f9ec6ed96d1749a7adfc386d8f8df6035363a9fe1a1226df880ae63286e07b78c7d283954253ab7b90f379eac8aab74abdda146e8552b5bb1317f6563dd6f2434cda1ad4219d21d8ed0b823fb2c29dbab542c80459dc05df77740d23c5548e2d6ddd8dab711abf28cdaf21c596e0f560e13c5c59f1794a4c7c874d634d42abd2d726fb8dab569b8303371b172f5d5faef99e924bfbc9518009a882690287a05afa8f095471355c228316dc00b4a02b4cc7069b8b0556f2154e7f90a75938d767139561c158aa7b04a5bead15102d335f3936a4188429865fa35a6b18a4daa25bc415bd0e4d762e9d497df106b3aa812753cc72d49beecb19c48d052288a4f9dc0adfbbae6f2acf60f49c6c86965e5d5d70f1bdac346e009622394e47596e5954bc76ec8e61ed6843364ab9587f9e378c88328e1a2a1233b2ba4f6eb8d9db8ee72ee88e231cbcebc10b8980cab232ead62f0a36f4ce6b32e23c234b20287fae93af070900c1746913f5a1615661a438a3f0c5e0140014f19c2de28f93dd90f21e7bfad9385da22e2925dedfeeb5e997142ea7553b225c61f90fcf2e0d7c9f6cb212b36b1d71889942e736187607a6d12e1490bd1cc6a1df0c7ffa46004bc5d8f4c4c8a436a0ad3ef97115feb22e6083d9cc605a5af7503389f0371cee42f41730d7f696c43f35a094ad4571edd9a5355b8a94a71d1e135b8c89d673169f987e8988c623d62ce1aac489b35b1de69afa9ee26cf55ff86330ff636d8c43b451ae37a9391dc00c3a6dc8f11e50d9633bac86c5b695696c9a42b066f998149e1e3967f437f0659adb4fed03ba449fbf9937f2050ee9c4e535ffdb151030b27928bfa3f12d0a278430db6e9f74bcf1c6c9d808dcd26eae2901d1d3fd35a790b065af4c80655f6f90944d7937a0377d4fa7bd859b87dfc158298814456b232d1f41ad0dd910db4aaf07805d0390f919e979d349577429957de2b1e78a9e20bcb6002767000baf441a045f208257bf97d07af50aa9f606ed5a2d2217fcc00e41cfccae0dc1ca3db338e5783fe6b540dab97263d122bc453e0c72e6789b31f9517c9c2c507167057cff2c430a0e26f33399997e1d3d82aa5eb3e6cee1e0959ea27dba0fd49590a2c551a3fd39970f156a43f5db106ce60dbc06f521032a0a17a73c56c5980395c8a39fd63d751f212b5caee22f79850cbbf4b3b6f51a6baceb82a5b397b8acbcf161baabc9e4ce8ecf9a4ce033afba29159409ad9241d4dd8a615a51da94bed885d6a3a34dbb0724d5e665a5bb1f1352072aed8b34a0dc0e163333d036b53b274c328778a742301e8c167571ead1d00aeab2ac6fb63131db3feb6b5d6486b641342f6268410b2f7de7b07b008dd08b6089c0ce7dab72e07074f4c46cba1fdbe6ac768aced23204bbc3f98af3448ac3104534a77501baba5c2790357e8a74c436d3240a5f33b0db5b9a1364d5816e757e7d7e71f967bce85ecb55bdbb580e4ab37ad7cf5a66df9c5ac7d66bfe16c7b4b97ebdf598ed96e63794cd4b6ace5daad15613bea370dc33255fbeb0d5ce9fb5c3796968de51b9e6139866740d7682e4b86417f0f71a04fc7c37157e757e7ea5c97bd965d57e7ba2e7b69bf1ae7c21cce75ef75ef75fafbeabad7d584655996655996a5d9adb39734d4a62dcba236990669ec1abea1365db1f577eb36d5652dc906ae501b6ad34f069010b452b8753e8bfe613b8868ea93f52afa2b8e28fa8912716540a3041d0e7450cb3ff784ce92fd661d76db69f73e147279115dd3eeb31d1c6b2d8665389add2e6a79cdbaad2567dfb0c57e9f759988ce3e6140d86b1b16d55f9c453b38dfee359c4515cb3411ad655bcb6f97efb18b49b3b76b2ddff04ccb2b9e0971eeb52e672da0dbd0df593cf7119309e1194720fc8fc7a3691cc769b7af96b3d9bde5b4dfcc5eb39d4ea763adb5d69640d3b45b6fa67599081b30ced138783b8cee45187b6bb16bd9bdf73e266610db71efddae8cf60dcbdbc5b657e62c11f3409c64a69920f7aaa216a54dc3f1a0e1e2173e0290bdc5f2177a07dbae7df33cd3b46395
e56e65b9cfef1131daa37ddb9e7599eb2ccb38d80720edd97e7fbf3de3fee932e7eff59eade36adece6dcf3cd7260c88ec97cbb7378efbc59986d38c00d21c9c8118cde14cd3f799a6b56bcf1c4ebb47c3345c73f51c9ea9f7e019ecdaedf28669b8ae19ce345c631d0d63b1a7d7700e753ad6dacc6acf36ed99d5b62c038140a02ccbb22c1b8176d89aa671cd7deb324d6f17e7dbf7d93dd9effd7c70ae3ac3b9f3ed1e0fcef6dc6324d7bd6fddf44e9769b8d66ecfd1344dcb340d67bfdd7679bfe1ccf58d7d351946b85563cf5cd3701c0d0dd747c050c48c60a50bd0af5482255db53b519f26227ace37bb76d5e4039016599f3985297e31bf93268c8942802202d890c9165d9d04539a00fd4a24b0e95ab91619595c92ec6c04905c75e821eca07fde32c3b9ad9caef3adbbf01c12d434e72c574a23a0d2970f2969292526d3f9c5b9c7ed23449dfd7ec276d873b4735edc441af6014867338e38f77174cf79a7cbdadf7dc03e594ec7f9c31e001b7b4ce47cfb04a273cbf98401b1fd6acf5573342cf201486bbff5f7d608205d710662b456afd56baff51ad6fee2a8e24c445f22dade9e93b54f9c534c076cecb6cbf61d2c038b6b31760e9ec15ef10ccb39a4d99c5f0cbf380a7d63f97b4b97677f2eb11fe0398f980c0b9e7114c2a7e7e00cc22f8e3e9f71f4b9405c9c617b80f0dcf2741989b65eb7d7ad93916d1d88ccf92644db1db32d20f23de79b07e70eb465ad1db2ad7b0e116d71a627c9b6f28ca3ed17bf38e230777ac971f7db863349b6b5bd9381c525c9b64ebffdea48b02dd26ceb39ea470457f403ec8b676ac42219f87bf88bf3d5b55482246d43bf1209bce899faea95535f554e97c1d208a274f56a7106d2b7cb44b8d810fd6638577dafdda57b1166bbdf2eea5e84d9b0df4f4c2636e79c5738d7731e133388f900e4613d707086cd7976adebfc76f65b27357bcdfe6ed7dec1db6324a78bbd75d99e7a3c47c76b11e733dbef39dbef373ce308fbc5d8357ce34826768667eae1b3c5a4d915577fbf6d75afdffbc470d01ed4175f7c71a54952c6d33014b32bea8b41e695524e729292ce39e59453ce49279d724e3ae99473d249a79c934e3aa59c723e34e5e794724e396708cbc0e242221dff847c8d7172211f956839a750d9c28a74f2440b264b5a3e2945cb237124dfc37148912612616ce6df9390c8a3714817cd11e978646aa2d44494214d9eb8f2441428022b4f0c51c1138f902a2a50a20226350821a2029b9b2bd8d253ca8542f024daf0dcac1e26aff06c31ab6e49842188d75eabaaa2d5b16714cdb0aa7a55853a0885524a29b59834a94e2b4a2badaaaed1eab24e69762dfbf59a75ef57471d62d6ad4a25d334ed395e6b58743fa351bf55750cbb9d56651866af8982544545998af078e870c03459c0c9132da250a184c9929b39a30586201131a53ef12eb67134a7684ea2897fa0ed238e249672424a29a4d246c00118ac2abd5e51143d453f435d22ca1242923a5f1716517c2128a532230ca5ac57224aa58f38aa37373f2a94c636bc10f14faa2794268ee22d104733c9ad784618f9c7c521f2bb8c40a50a8dece55cfeb25151e7dfcb4d102e1a32a15f2904455e556dbc89a308a588a323f3471cc51a38040691161882444474000482e011470988231c7888a3181a456ec6a49c7046f892dc8ce2e848b4ec18fd8b01c585a797f4529262432ca2190e2b7233c240111441fc03a5584451188eeac4a5b13d5f44125b54469fd1cf53202d3b084f51d2fe61105a495524daa976aa5592c6106efcb990a4f422f909484f14cb078403e2e85df4039d107837d8387a50c9f68425874da57fa0d707b42fbef8e28b1d382f42fb269fbd6edd6badf394e17ebaf537bd723a25385fe23b70f00cee7d76ef611547d95619b6717445d5ad3f1358e95732814dd3e8576a820a5da3ad3ad13dec7b4e27bad8861da6298e463d75b0b5ee35c4b40d8b2a1f71643dc3daa8a7fbecda696c571fc1514fd9bdbff7d671288ea6df0e4271c2c4cda125499cf474b80432a95c3e6b767816ce30c8fd9c7e5fa70075807def2586e3562af088bdd3749758a3c61adaa720f555ebde6b571d6257c470540dde7befbdaf7eafaccba4cebe752f8eac9126817456a70067cc695993f5325954ea542f7779ca70e7a516dd55315ad785b0c010b1463dbded1e86e3aaba1caf61ad15c25334a18ac24ff92ea3a6c7017609e081de061a3762e8cf451252ac318adc1ee6332976164dd33b9c6e2b055c2bab8a840bfb89a8d889a8479df6f48a7a5df4ecf7697aa70935756ac63b93984a4b3c29294145576dfebde0aaf394c13af57923605727aef5eaf634b6eb653b0bff40dbba85b9387a9781c59da82ec76b6a5e94e5833e27378724157a840375b1819a4c2851e4a626054f60be08246a3f83e10c4fb11db6e1a10d45713602485bd76e338a3d0cb33e390175786d3f311c76de4cdad5d
49506faca1ce8ebfac364685fb365ea0423c260522a424584624df86e70d2356da48a7eb75e0e9d1058bda6a6c6036fa2e8e5e2084e17f4bca54bf49c93762f731ab941c9080e3a1a31415ffd4a4682482e5e666c1997255fa8f2e2844f0d2a39e13109b6c433c3e8848997a851e895b4fb8c22091353144964948302288cc4e089223da0f2bc68c217a0cca00b49684195284a305921003dbfb0a28209c5142029a1f881270a2ac8c6414a288a9420863ee515ecce391f13473ae673f04075a1389a73ce5751e839e7841f0acd39e79ca12969bc5f290a2cda06f6a6c25bfd4a51248111c6286ea67ea5284a508a22891959acc829a7144fbf12143f284131051445e83491e0be121443d0d0b9c24a45ae34174779f674656112411128f1c1521126464a4556f058504489defa955660838197ce2f4d730302f27de240c348cb4f59c9d3c041ca9858c1a657fe529e060e4df147c924576003d6d810ce342f1f1bd8d9bde09986982a342105095c30411228a5224fbc27566070062bd4c00455ba08451b7387978a1029428df5148bf71e0c7c8b0eee7bef1d3f5f84d2c411cf95fb3adbd0d361bf924d94ce978a3be667e236c5f4a94311a43963cfbf083f495d11f0a21747ddeb13dc7c595e0e229978eb455cb7af9f30979703f6e95cac3173dd108c9743f61e48c47703ec013bfe76ed62f4d565fdae5d03faea6ae81a9b7c21aded604dd70e62d157c780165518b688c2b367d3d841ad2d32b0b82478adcaa48867f08340483c33130f2710335317176268821def11cfbcc7c3535956599a85af8e26d3de8ef88767e25ff53a69a5799e9a99fedafab46d2b6c76d527fc343b7bae300da9e34c8f1fd7c434b2e31f56a78a696cbef094ba269e79a7b48ab7aadb55b02926131b56d56f476997e3e9e7e9df74a58d87b1bce261177f759986d44fcebcbf5ccdb7c8c8e2d2d87ea740c4bfd7164d2c13ff1e82472e2e6c156b90dab21465bb1947f533d7abd7ebd5af212e187fc002cbbaae236268b71c0e8dad2a1289c6d25816589665599665599b6559d687f5f12c089410f4ef519cabeac2f9be0b41a231240a03233524ba08747b96ce764d6be9f13ea265bba63dcbb2ec9f2e737f079da375daa609d15a55ddcb714ea25988bedf32a781be7579d340d7f00ce81c3cd3c275244aa22da21d961651c8b2b08442a150a8d65a8d5c88deb20ce7189e8b738c1a6a8001c3c545246a696161098540a0cfc7e3e1b84e87c3d9b67b352dcb30ccda5aafcbb24020ecf9a77bef06cf33d5d945cfb46f4884f3d4d959ded2bd77030bce53df7feedfd3e1bd19219c67df7783a786e7d9bda77b6f44a2ff9fdfb8baef424cae646d541445a2e6842191682c8df00b5058d67589ea6da7dd1b7279c1ae551adbe96051c544ef882aa943ea90482451a5b1bd5d137dc3332216dbf79a6d9165f9b0843ca0cfc7e3f1783c1cc7715058966559966559965483f30ea77b716433cbdaacb56d376bad7743360db5a2a85a2945294a7d9214fd857365b9846c104d4ff312c48b7cf215542cb7eaad63a1b7dc8ab2733ed7ba8d9b3edf383b3cefbc5455be9d439ddf1a167db08c07bf38e2bca3dd83baccb9a1776ee86a41f4fd8401a17de3702ee7f97d3be764df3a98134410bd9d7343a15f3c137a07cf70bf2f39c3342f1dc22f8eb097e6fe2eb21c565bacadb5d66dabdbc6c2c2c2b26ddbb66d2be874cee9f24b739e753988ce3cf7746fcdc5a53750a8b57b42996b6ddb42f87350e7e93617eed93d77d1707ecfbae9a539dbc6f9d6e597de38dbf642f3f29e04f1c2c48c7452f847fc11a1115400e23c74ced362afe1dc63f61130fae3f101487fb06fdf8efd7ec3ba1747d572381e1f8034961901a4379c81188de18ce3e1705a385c4b87e33a9d4ea7a302ec8d8de1d6b05e3c455911dbf12296e3ba86e8eb154df3e80b67eaaf62a72c8b5568e5a817ceb7ad8a8693d28914d82996b242bfae96b7b0fc9375aebdf3fbdae940bfdd06fae6f9f688edd0dee9fcde7b7f5fb96ebb87eb3a3d703ab9f3774fa7e3b8df4f8e7b06fad479ae9acbb7b9ceb9d74eb779dee9346c046ccf5f6bdf663cb887cc8ba31e38f7fcf5b683d3c11eaecb1d0e0361e7f00c760f9ec9cebdd3d16858cb6ff677d187cb5a583e9f6dbbec66b95f8dfbb5db396dbba15028b4d93a3f29b822d89d6f78c6de7a768ac9d8edf09bddba19dbf1e00ca43dd7baccb576aecb347dbbbb2e20d0e7dc3977e9369cb1bf73ddf44fe7d83ddd11b03b8f1293e9dcc23188defef86222f517133b5d7ced745a47fbedb8733acfb72e73bdbd4a4c26c7f5741193b9e76e077ba5bdd3c5ae1293e1fc6139362e8eb2474ce6e2ec1a9eb118a6e1da62abbb9d4719070d8a395fe3dc41e1d9c94f14fe117d440805059675717ed987b8b7d4eef3db6959764ef364af751efb86e187fdd3e5edef3ed87b1e13319beb317c3f3fcf5ebb49e47986b368c7
762cf36099edf58fd65e372cfb27c3a4d8983d8667ec3d78863b29574c8a6d2f977139d4e970389cec35e36435e3542d038140204dd3344d4301b65676f82cb32ccbb2300c887dc470f488adc5c6ce3deb72957df4fd7c709e9d7dbba78b9d8d90d839effcdd6f96eb7a501cceb1ee758783332952cbea3088edc0a64cf60dc7c81b1bbb66316cbd5bfdf0a249108b6227fd0575b0a718a6f431fb9c32f4f5599783d2f4f43a444f38577f10eb60cb1eaa5397a188a05d264df0823368f569fec2958cf5e955974366ca319d9a38d3bcb98213783edc4307b55ca49db37557a6feeee05c3302b6bdb665228b4545746eb55770c6bdbd76dbc96419b6b693b1cfecb317479af6da65fb771f384dfbf6d9426acd6251764e76fb3b93615111f59ccb3c3823ffe8fb076770fe2a8783399c4e267be7b5cb91bd93753029bbd96adc395de668bf9f9fd80eee1a9ee19ee1191acbc134b6b90d671adb2d7f17815a3e20d0e7f3f96cd738dbb5cfc6d1348fc7e3d1344dd3b4136c87bd6dd93b9daa63a4466339db566dafb60d623960cbc86dc319f6f64ceb24262aa2f37b194efb3dd7f590f576325c0eee6a9d2ec7bdb5b13519d9d9375111dbb5cb547006760e7e70460e10092d7f75af9fb484522428623ce4173f94226d5435a0ce106750ff31b1524ad207435dbaa4c0d3d4a909cc9ae3176126a8444d1cc9f31047f213df906f71c2cd504acbc3293555628d2d628d4c03ca29751ca1e2c1bc2a1ecceb0933b1d171c4c983799de14d0d050223e22244d007eef4116ba0e1e55083bb28319932e19239ef0dfda35195e1121dd0a22879206dbbfcb69794ec48b42fea1675b9389a8f4a5c90cdcd70c912497154b285fa5eaa129112c9477c6ace88490a76f6dd403f754b543b6c4d3b4d8f3d05d07fa8706708c70351e1121910e43b50f845981d402099a82f8708b917baa11f00fac168d99c67a2acf8f76e989fba69e39b31e79fb4e19973f6a4a32b468ed11409764465718580acaae7694923e6de0df2be917cd5d957bdf94652d2f4bcb68ab8c78f9e7f188e0004e288cac7bfa0be5852c245c90635a5294f3e4f3e43a0c1421257a670c114514a408710e1022174010c53a2502386181f4fe325fd8b2d6fa89c5415c2a5cff38da6391bc7a4e8f4c5175f7cd1d3346574427d714ef5d65ec7307ae714f4d2e29043c4915585a6d70ac33151d39dd317e644371eb39af595c270544ae7bc756be77cb5d534b5604e3aabd0e9f114758384144e25c849527a6392d39ce67cefbdf7e87bf3c91f9c9a66171fa5a7ae08d8d4a9575da45e5355931138e9374d737296b396ea01a546b812e99defdaa494345249a5947146196394f10834ce18a50c228eaac75ba7fea6c7d36b9a8feaa494524a2752ca28df7c51c6189f7cd311e6a4265a55d50c6550e8a4949a14a526a5739a74ce4f483851a880516e1163947ff14126734e8afadc28a594a2e89c9356749a74ce49e711a6890a8d1242086b053251135228e18432ca9c5397e3f544c591945bcf49c6b67642e8c44e0921847036843680559b8f9f9d11b02710b8c5f888c168630d6cc61821845c7d4f3e18e183efbdf75e7c5008395e3ea574e3f17c4394d218294adf7b2fc770f2de7befe518efbdf7de7bef139cac7802155032101b6bcc470515d1c6576dd59c739aa016fd4a533069f9dbaf340515eda434832afd4a5430294d41844a21d1d7d4d490e8e707b32316fd00f18ca3cb27dc8835353535a428e50738402613937342f82982d82283c98587f108ceaa80f4a09aca484ccbe2f1e2d2c5eed3e4ce471b3f8858e23b40211001f0f3f6076e1a3b198f4467204dee7bd30537ddf52b49c1c5112f32d798e501219e10bf2999c06ac18f0faec43e3db833d469e777fdcb489e06ec349cad07675c3ce208fe306324c55917ea5c422eb1c6f4266532e98ba31ef20bfbb9e24e879dffe9c1859dff32ba2e4f23013874869da11038230329495145ca9214535a4a2804bbf0fc0e8a33104a67bee88bc4a085c5e9b483c910261367890376d3344df44c268603d2898794b5a1908e471cc9bf0923d99e6daf967ad5a6299468967e251958e92a2d3d328bc89b48a429e60183404bb33dc514eb01f6ac2edfa98d6ac7e0332c3bbdc454be344cc9cfc6f04c952da6684f15cf54c72a3c234f1bc31167208deda02df1d543ad70b63d4dafddf629465669abdbe590e5bceaa8df8e83314e37234c55dd635d7d85580fd42b7bb9501cd51f180a813324c53d64cf87840482450c0d75ce3993a7cb2331a7395f7bc88e8919ad2c5b6badd64971e6fa7b58add65ab53e2662d775ea62b7cfda312bc70d6bc9198b17676bc55f14ce0db538414106527a00fd4a32a04206451a36d1af1483205db5272f8a7f758273827382738273ba9941a2a7675dd79ca679526c6a5179f6564121daaf2ccb32ad9bc1fe1aded77a5d9af6646748f40c3c96c5cd9cdf963c6
776ae25b655fd5edcc1a4d8d9330793625f38d33e8f75b6f685b355610e0a813328460202b9919a01122f223d2976922a920c49d2451fa05f2989931881fcc0e8b1782c62947a0fd943cd0fe7394dd83b3661efd8846113f68ee11839c5cb6e66ca9e0ea7494e973652367c7dc6fefee8310ceb6464715b646481bd853ec76986a44d384f99d661facbaa1e67b1485101832b3078d255ab339e141b49921640bf1212245b3402fa9590cc4751ef3ca8275b527ffdba49bd3cdbfa7b86981427ca4edbf3f498eb2b182ca3d9624c923dc9aacb17ce6f4a8e0c6446e2f86861526c1967e02719f88962fb436d197116e90e300a32f9db5e6bd5309c022f0ac372609f9f5410c8e4c2efea300adfbe20f5799d07d5d59168ea8a3caecbcfe17b4809e32b6a87c43976d591a270f59a49125213cfc457a70ee4c7454d4d1ddecc6e461e8986543dc583fa8436f0f24c137bae7e5d38c321bf30a4320c9362574c8a4d5d17bea08c98143b831f3c211655b5cacc5f8741a00ef01784f9f3e9190ed1b01cd8219ee949cdfcc120cc417d66e20c2d45c163b54226748a10c24ac220d8af4f1dbce7bed369cf3a7beaf5766dcf2488c16342324992e48c9549aff08278e28ada0e3b6cc322ead3612765ceeab5ee7533c3f94de9c90a8142e08ce9f53cea90cba2535b96655150c8063969700194161c916248d3e85772c1946e818d112daeb4ed187fd213062518d4f40ba2f4a85fe90549baf4022a5d1b9edeeaafab95b7351286c21833ec87ab2186e568b9fcb45e5b363716c31171585808f9a8b43cf4f1bc5321c6e2b48197a4887533da9168e872abcb5dc6ac6351ba60b71e13238cb1437b0c9c63538f89b0cbf3065688a1c433d4b5c71c713e901f48743cb47207bae194a2f374529c6939121dbb2c5b36f60a2fe32cd299147bc8ce70478edde232619685f3bce9e9a4d8d1b22c4b524b524b520b831d2b472a16b5ac4709bb0cfbf37c3df11686118bb44b215840dd338e732064e0e30e1b225087784bc6c4185b3a0dcb11e311cfc4b73cc3760801e3451ba277f8b1066961568c5b2b3e460d3070661d44a22cf54953ad4d548c4d75b1e35b88b41cfbd4c5bb74f6b09bea5bbad7a22edf268e261a12869e05e72a847305c2f97e70be19ce210f8d3707e7516f388feea5aec73b735d27a00ed3845d462b3d45da1a9ec14eddba855595750faf5f2caa4e1d63003c1f3f87643cdf970300439b97aa92712f7e925a9f3e1b3ea3b00c2c6e0e527250b5f70861045571631510468aada8a01c5c8aa2288afe68f9ebb1bb9e4fc9a7c31070064581aab8972a3d79d2b7271dd2463504d4e105ce80178bd84aa761d0c20821d430cdbb01121147f023781ea00eefcdb0309c436d23b49cc4063171238c17ce16767e5b6c61453fdca6bf4d1f1086948450a45f2908454a4118f22983175b4c8143eebc2c39814217008a41116265e7070a375399eb39a5942f076aa226ea73849bdf14c5d9409da228ea2197699a268a464ca287f861021422179af82de1663be7b41756d1e2726f524a871047d384e33385fe32b29d0c2c280a420a4e4ea0d1155521b94ecd508fcf1416c9e0174f7d9eea5e4cacaaaaaaaaaaaa2ccba29145451ffd7bc4707c88305f2a1ba718a117b2b0e901543142132d9a24e91a5f84326bc2d5a126e1d650e78b228129575a0a4c39f1fb2801563f6afc20e166f972788915a850f0e70917fed1a81f29d99e477dd8270a777e7e560f9862c90756faa55fe98a2a5da185148bf84cb148266224f27d9c400426a64c708a2a8ea61a54c1628a0dee2b35b9914d6cea0329b9af844592d7d5919eb7252c9274c470e0db2a4251b5f7e68c93c6ac3f2a941493103f0b6dacb819da34a1a2c4fd93d286761885bab18915273ecf5543100c751586036e8f89b00974126bd4d733814bb6c7439b58a322126d846cb0fc03ba75960e5a01c54fb76de77038af14c3300ca3ef9c7b9671db3bdb6597e3940ddf388a423c98f88b737561af1786c3be669761845b4f8fa73c8458cd99eb0c6d22910f1a6e4c9422791df8e10901ee0a283d710e35176d0c401fb8d9e6780d4570e15f0f1524821b5fc189561dec4a5555655b0ebbaa2084a1072faf40a3d09a1df2143e02367d922b9834fdfb8b282da90e2e280438b8202668cd65c3928239ab299212e929d39454ca2614e7919cd2491cc5432c47d513cb2671140f1272ab2a5545610f9f8ad4aa819680968096643ca81514e43e3c8b78d185142daa50226de4902dac3c7122453ce2c514294baa8036576874bc5450900be183b407a025208448ea7ba9a0256e8c50ca99c9d0a638ffc055b6a01b5cf97923019ccb2b5d11a5330e4f3c4a694563274ae95bc1341d899e37313a41a7cd7482d27933e3675751233db061c28409553dcbd3393cac47eb767e626ae10cb11fae9ef3950714326138a274f9d65d07548f91199c62271e3ce0044aa2ea3c93
64ec742fc2d48e907a53e2f429059f0d63841f2c26a510423340ee076ae6c9a9c3341662e046d368a2be9796d5278b9b674c6ade0803abea735aaf15565575247aa28a78715455a7f083334e75d485e43acca4219130ddcf152e50a4075e882207566270a546154498e8c28b3070c1891d2a7ad331463b7ae1ae7da14853106f9085174c810329b26841941648611304193881620b3b641a5650b1a20a1d4b5648e1c11c3244481021438e947aa0a4648515a6132b9e587143c5198a2c81b284c90eae50d90195283b68d2a4a4832567b022c892293b50526794a62f2f2fafa02537bf2e5eba19541ff5d761c3ab9b47780431a5c8f366faf80195a87c5cb3887d7e5d4cf0c81c725d780e99367348ac6169e0d9b8494026ee336ca2ce2311867e2eb963b8714a1c8de8eda7cbf310e2e5f06cc00773ff2c25b6212c7d7dfcc0d700bd8f9743912a89cb44d3cf336ca26986473294e27571d50bbbab5ebf3ef7dec31a1f3fbab8f6d3739472ed90a6f83111c9cb678d12b1867c55dd1b0afd510a8c2cb7586eb1fc8ae1639ae6cd55e7cdbc9949a4949b396faa6bde54d01577dea2b0ba20b4ae0f10ee9bb35ed44b07009d4ee75405c08b96635bf5bab75e27aa920180d955d4bcae0b7e067e2b72c4a55a918aa8c483ab8c2e3e89237a0074110ae5626311193264decc9b214d3f6f620dea365e4e3f95cc25b3c99462563199cc245450135a37f3a6c811970b869e5f17107ee30e6190213e248638d743cb05535691ebc8cd4d1c7de15687f11a2e68038fc41a550b115824880b9c810fd6d447262f5d6cf2813643ae0e06c13132469761c7a8d8471c51eb17dde53e5e0ed0028140a03f0c87cb6362d7cd22f3481cd14f0cfa9055756f603cbf2e6a789e3749dc47acd142b11d2cb73e613fb0dc6a39fd8f9a97c3fb81a5073c02f615136374af6be860745c4bc7d2853a242f0775f97eba0c8964cb755d1441757e52ec74bd6068a76b46891103e0454907443e6328e5e048a98a2ab489520e6e4a381852ba8117a51bd8946ce0056849849d24437db2896c426514584536914d2cd9a441426e9e1da77486542015d015d7e2a022a535e3a115b885a602a9c02ad08abc3231a99f30946cb08594524a29a5945262ef5db592324449c993cefd4a4a78d0f033c6182d565222c59c736299921164769aa629d3b0cacad06c55559576957ce0daebbaaebb4d68d8acb5d66e1c4e173836cbb28cd3c9a4d0b1f7dedbe1e61338cbe170389c677be2b11c8683e3382c47f5b112625b8325f1d58006cd3d7e6ca9062db0f5c2322e0bd5464c65c908bd845c1e0ab9fc2546070f717485864920839085138ca0408b333841870c9e28031186484983169c88e03a59012641e814355981c81523d4a00a5a486149a48207d2c005941d2c71842c0400042b32e80862745452582028d9bc3005a19aee52c9d7244647e8a34fbf520d865451545068ca1f10c9e2c8e70ad4445109882327321a718403455113450121044d51a51fcca0292a08f6c5863e56c0e1d1c0092da35f890655bc22d0392195e3c12db8f082054ad477f8727813c2f76e9811b8f845181ae7112847b43832e5481523f5bd5c25aa539f761e49f7dae519209eea3acae878184f013144e5e61b3762a6c7df98a24b1cc51b1d9f6d0ef1e08088837c14551eccdbe2c184b878302f0a2f1e4c4b2f6a5abe8a188e7b498ba3432301daad91b5df336d18cf5345a9c5e4dd209f271b7a39501dae1caceaef38dc381c126ddc1040022e008e69c07836ae849110e76aeb7cce030f3778b811c383ecc203825d4cd4a669f04817a4d89c1bd1c6e3e166384f1c626403f484e9f9f758de0c7d9ebe1c6af8fc047580f179d1ff79a3e769e050ba53ca1a8ae54f56ea31708ed135c0e87c8033e45d3a07c491bce85d04b01e62b7e020de0df2a14e87166ee8f3445e0e1136b9f1f15c99f3efbd1ad3618db4f172c982eb9fb4d13dfeda43a0d72f9d0e23501ecc6b199d0e235a3c98d7b8d36164ca83795dbbaed361a4ca83791dbb1c6a5047c97f3a4f97abe63a526c8ac6c7c97d4d75381b9ea623438e103952440ac8248e9ac49186e1928f957b5813611048054a89a31b34f20d1a5b70f1605e7b7144091b3ad64ffcc39cc93e31f77a4acdbaf946b8c7f41f644f184990432a500a94d23582d001011e42dcb871c388124686182162a448d5af524bfbbd523bcd378a184990cb1a1008ae0d3da578308f8aaa8aabc3c8e8d5a06e434bb844c2c8df254b96c8253207a38fa8144dd7b4410b55d1080000004315000028100a87c342d160304c3455f30e14800d8ea84864441589b3248761140519a510320610620c8000c8c8c84c1b000f6eff59562f8ebf03b9c4206dd1c7ce10fc0e811883deaf1502b08f5a9794817ecd2e9483e6da6e2903f91add2909e2dadda10ce235baa604e4b576c19432d01b9f40ac489e516fa4bc7435e89a12d0afd98df2205
cdb2d25205fa37b4a415c73179441bc56d794807ecd2e9852067a468c5c39d8c52540d7693a30866868172f1f269eca1ce3a9e2c1f3c95443330595994200e86dcd55af29868e848d4952d43404c7e77be91c332e3fe161ffae83e9a968bc4f9bd0570424333c93a2017eab7471380b679d2ad9a3b3b74ee8aac821bc46253f4fa402eff556e77f7c380297972fb15fbf035434c8c86e352a1bb5e4dd7df0a7ca91ce3927c4ee6a445b306662c9abdd3d99c41cc79593e99d09f3fc4c0766c80246b047f31684e0dbacf3191c3e06357de2c61f23c2a649b01c08f42a25181da6f4f8a69731a16857237ce23c2fa0d496a5a40793e9eba0eddbbb12c74324cfb7eb5578022b82ebd05b4771a41529524f2214bd98d639f502399fb7ac5595897600a2ba14807f94874abe5204b7523e8d4f1d54f7b960a0245a963dd4cb6984b6779c6f1015278fd7f008b764f41e2fd40690c31d09e4e6f6f428c0750b53a75e46eac67e63428dc157c04bf0f005df58018290dd226f31c42b427b09c47411bdd5e20d67ef6f7a712d92439f2b1284439f6d7695f1f7d757a32ec3d8c15852863b27065e576ce1702809a1106ee334bd6b45236c41420685600079fd1bde719e0a19f9a11d05fafd9f7b078453c6265ecda2e3aa2a02c0b8aa3e492b30a74625db1a3f2b1fc387e00cc20d28164396264a19596afb891296c57bbcd7609b22da18e145cb9dfd7ec880f6dda1034728b19851b07d2e87730e691b2d08579c8e3fea154f3c7af6832106c1a3f7e152273ae5a500f1e4178ca437fdbe86b8a2f1a392dc2b9cf974809043207c0be4cc0e65ded699341140751a95322f22aae3198ca5bb41039c8e302a19b697ab01b61ecdf6768f83bc41b748529fa28fd09fd005bafcf391f308493503d62611b0bcf8bed02a530ad72b994c3b1c038d5fa89a772465798c8eea596661ea1abcb646d0d5cc485538f9bb5c0bfebb490ca6bb29fe85b955d517f5980ea1c91a620f090a138340745e8530f801862e1b93864e87b4f913121e87da93de646ff4af1a42e3df7aa4ae9ac70f2260b72815945c5913398bedeb7f2d3e894a492fc56acc9e0b7c031d06106c118ee81757ac4a836895dfe15cd1b53a37f761e1e373afc6a6f5d4bc286d113669a38627324dba5578ff8b836020dbac21d2812d80771aa164f530ffabde65dc508da574a90d96c03cf68d86f1137a1083df9a9d9f46859958ab46b23aea65fe98327a6ebb8a40febba401cc4b046d7273fa4ad42509bd6dd9e16f597d82f307d6d0bf11950692663f50b1a2c81166ba2943fe49c14077dd768614abbce9a5997199ef647bc86421c082cb0b58bd4992f0df71deb40d45274d5265eb54b181308b88d982906682c5ba17a648ec9d1f9640cb0f77c8c7bf1e61d15be9b480e6bf53e06db2739b521f9511f2da17dc094538a32c11365d9f1b12eb1085d0d324be009f2cd537f35c5a4f2c689c7cf839b46482ccafe719c1650ecfd91eee2c1eb1df21c5087165e7f2a28a244af4dc22bd16641d55e55ca513e45e56b4f14d2b5db0a5486af471320d0a6ca12e546f5a5e7b5e9dd26f1a9413bfe8a8980963614d454778f7e1880d2a33730180cbab596938e020f3cae4b38ef65925785563debb8628f4a77b4bbac0dd5b74a9e685e5a5511d680c63280f4c7b1145b147fd1428d90827e917e9449232e53ad3b2519259ef9bf3402ceeb530098aaffca4ccb533317ceff3defec8cf47ee6e4e07e4e0ec0b34c154439487bd3b8ab5c93c63c49df3a686c2c78f8a690acf1bdc778f286736c382c5941170a834f2e3a89d35a80d02c4a7d939bbc3748cd65a8697bfb6b28aaa8829f3076a483f0f3df2fbc3f3f32660f0a52ab046239d91fb33b3a41b8409cbb6813046a6ae68d8dc17231ada49c7c8df80dd325aff615702bcd94c187332d9a6e2c4ce87e834baa063425c493d47141c0140bb0858907b3f04b25f7db342f8640562c52d201c5760efa4b65302afe9bd4af587e962840d749b7c5548a7eb361151376e0fe19ed4f118be16dbcf23c492e4fbd01a721858356fabda7ab1acabeb6614318d0fea1a4bcd9dae492b3936e455d1b7330178e74a54c6b2c061f14f21bf3411483fed9cbe49d77700ec8964fd8bebf6713dcdb946683ccfd4c89f7a95db1ee5a173e33c3e2ce7d5c61d1c1b8576518f380d644c55d48e091d878f7cc94ddd169c638f1e67d480b9381511bed0a96376db75b7b57994f13a0d7c137460537b84c9d35a1b48498e33dad43d3aea7273fee9780ca9f55270f0569144abb06205c3ad15ddd3c129f35a383d8c64c3bf53cd287ac4ef49b6b3b6c27238f94bb915f7afa72bad864a98f99ac4c5c3ecbc7d212aa42cd3d278b15c3df5bf348913f72ef71d0fb0b718cfb43c9206ae0a4e2fa012a8894670cbbe4afc146d26115386ed46a142bc570784268957e70be4f2a5b5e59ab619bf4dc13800247aa5296d3a3bb24d5f0e6705
991437a14049341606686d2bcb2fca7be857cd3ffc5947dc1682f6f2ea080005227f0706ac8ee14405cd2a7dfafce4c0e69f81bb300d250b64aef46ecc0b6957288283d2661920d2e616962448cc68d42b17a32789b46da66e3db1532730c357313e3d4e055fc878fc2f649da06c196e23e234b3d6ad2f95b3f2555b04ac33eaa8adc09664dd254105b54ceb0772208244394f841107da49386dff4afed038c1722d294e6eed0896912406c47af1dedcf4dd60bb00795e7caf4b61861dfbec3114bd8c40614f7a3ae0cc617f5fa711acc04cc51425a8e99367658d2bcb057beff420d1ea8dfb67e5e8996727fa278674f1b88378acf497e93f2beb9990d85ca8456b0ba529c668e57e8842a9b6b625175bb78cf285041506adcc0e29e64cecd5dcfc5d959cc51e160512352cb60cbefe99fc2adebd0e27e7ecaaf635d65ebe2ba91bc3fc8fec213391c9fb0f8ddd0806aa15f8be2b0e4513ddef4a46f0aa03a21130f3b8229e6d004358ac954d06781f816eab94e70e5719c20600138720e4d53d38acc41b37453f27d077aef8e1b0b44d448bd5c5b02c92a5a259299ec522ac178765b1ac15cd4271561661b938ac8b64a168ca2a15810e057511feaa03dc19ad65b538968b61bd4816c5b3a2288bc5b15e048b45b22a9a2545592d8ee522581777b04845a1ccf0fe2e5c5cf6ea0f2c554a4008ad7f5a24f46c2738993d216d4fe0b0c4f47d818f075709d946e8b684b78deee83ea0eda06d05fc395078e86b2192c30e126f852d64463785710401433e9270a8202577247ec35bfd4154556f650d40668f8623206540a5004b49d634daf6aba1977986f27dcad93eea4e605e75a3b65ae32f826587ec6e3a703b435c9e8aa40a558f3f590283f1944f3808e409757581e0499469344054bb796e870a33ce33183197eb2ee74dfa7df6cf466ca496dbe54be41f819e1074c837ae466dbe858b47a18e9a0ee90dee177b45c263e50960c0ec4f5002fd2e8c7465ccfea0444246d232e75b16181bf59f9b261908e8250233e558284f971a9122d636e8f3584cc80cd5aea6ae0eb5c242aa4b6ad6528db4a8cb102c012261219589e83d7a2e136addb54b0356134db6854b58aeb6af810185ffbf38d336c1e50cf3f455002a1d8e573d77208a8f5a5c926cc299e56d09fe80f001ab0a46bad487bd4c3b197b62b5783cd2b452a03c2b0a403b144f450bcdaef654ae91fab2b0a84f2d1ff54bfe129baab6487e5540e132bf1842bcf2191495947e07fd459f11748e2805912ef588f0487d93c75925b04e65171817249b0c358fb03e587e6c6c19d28c5b6a1fd3a2335088c8ee64516d5978b9633df8c1be1ff6c848f73c53390ed5f5c68d565163c0122aaa38e01706d5a74f5853a147fa7259afd13a7c2e38b7737ec1c69b92c2c3b7eca9ecf79ff254d78c86d6171dcdc3b0d35af82490972e48941f600983c68639df9acfe0561af38055e3ea3bf14ca1848eeef684fdead62b256668c27e98616df7e7f8807ab41a8307329a2e090745e71f6b1e0989f0f9b3b7db7bdbc113d0536607a93381d2b568ab469db240502623cab6cc81238f983dbe2e5050b93e955cc0f29ebad38a380f778d95ff7819f74b5b44788be49a82b1c965641d04d253ac1370926b26264aa90111b25c6ca7e45689a444680c5ee0790166d31ff769d1ececb8a49721bc1affbca7d1db395711a882d8560420447c34a1860ee6ffd9a0c07860e82ec072050fbfa2ca98bdf9f7a2466b977693af8d9fb1e7c613a186839744334ed18c7e26c73a0bc504a077cb1d111c42c822503c94da7c5a9422d901155695939e102d4ea6ec27e048644c7a749db2030c9fa6c03cea93043d3bd9aa351934ac942de6805ac1c39aa4774e1dd107c9d9907c6a2e6df7feb5ebd3ab410057ac1cab8ed02b5af1cadd5e94cd95af53402792460f53dfd7a0fc81eb49a55f9681f4d40a17231e00c7a27c1ff6fc9231c4cad1dbabff65e6137b6e97cea9283d9cd48df529165fe82e8202a4ddf112ba8f6c7269ed16ba2f87e04d8ee29e73765a287a3f63d89f407fc1a5dc023fb8e8ff97390c0dff3d6e0be8d7c58847ffc434c2ade0e177aa937573cc79258021c54bfe9dbb4051896478ff7497b9979f53670867dc2843f9c8aadef6f7ea17698466d79124a1962bd0515bbc04e2c2606251b5b8831dc82c8ab6a5d2245ba5bb4d1eded6100da13619497bde3190606714ad201b855da7241d318adede493f59617c51b474648c037bd77133047875bd3a209089de72517479d2b90aa3680e66095ec7349c87279095b45d6eac46d193b30dbe3fafceb65134bbb0a14b1e1d37ffa594fc59f4cca2e19337c326d33e9e51347207e9848389bfa27d5661ea10afcb287a83136713b2962ec725aace0eafa15cfb7944ce8da2ab95b54b383c6b08f657a123d52e29e407a813259338b4098223b44ed7433790f2e8394e5bf39391b97551fa1efb449fb84c4a56e
9785d25f33c021e1fadb7660bad4a31647ad203298e4955985f7490d4c79d4137839538549d2729b12145db78d6a6b8b4fec4cf527f47c08682d97554a88614fd52d327a224143951b35dd85d996d46444443ab31f0990cd391a2f9ad15bddc4081c6fce92d9b3e10609263c92baed43cd475132c4e094bf899665a09e6831ab0817e06ec6f35948a6276455dff3e0322942fa293a29f70c819677f7231f68ca1747e69d0e568046143b5b167d0a299565fe3792fdb12def53237102da3616445733314b844e3cf324521655c59a72e867d9660fa96ef95c2ac43ff8f5734b74f15f49180e1a713ab8a579e9c9fca3e6dec6ee69e98db52212aa0f75bfc7c098e13c3a0a585bf0e8c097725de79ff485451fce3636972b9044034d52b691289648af8d0a0898b13a57b1c46bc05ac0543afa59d834ab627ec461b175271e41b6653d36786a223f82dd4b53a353ac82da4886ac6e4208d1809c17772235df54349b57c150bd3beddcf8f0391f51cd229ef5116ffd0d080081e4481ee6629119a7905b87f3aa24f27cf4b9ddb029f358c447c01cf7229d664a3e4ac5f6936196a5bdfdc84f2e2af126f010ba9843112ffd0e1364bc8c12f1cc63254de0b7ae83184057abf7a9f73d7c279b027283c473db213b8ef7b15595799b87fe8490a6b25ea117a93be517c71bd5bd092300af07fe847884252d48440c8adba63d12cfbdca40ebaad8cf792eb1985999b6d82d7b74067d166a48ae08adc0b20ba81d78442d37d4634556f1da8ec7480e84c55e7ad42c2e985c04cf44a874ce181d23010fc5b2d68c50250d8dc1eb34e62d09345d66856b587c8ede9495201d1845278bd13bc53299cd7aa9a386f7ef5776e40bc66548f664541e047f4e0ff1fbc3ee0345f6e165cd2ecd9c43aa34d8b9f79fe9db5db3b33e3016b1d582c49425f5e24fad164a9e6b9b62833386f231255690d6c16e7b71e206f5e2e7135c9bbcd623eb6c85a22b1ca447cb89d0a69fd67ebabd12e0710123dfdf5ed2d050a9e439f3010b10382814a54b62c15890635b4a33499eda6cc06b26a7f845450a277d427376eb41f3c891e249fac2111492cb4003d96ec3f93b91b4be5dea8235c8ef35dbf74485ff6d9b77b40eec58490c4bc2ce94b831beea244fb66eb25e4d6e0ef04d867ec643962f88bd69111553b3d2446454bbba9e8cbe58ac13d7a17a9156f503475357c06022f8bd07fb991c0ed7d2fb7dea73ced98157fa459ac41076c5b198baa2e4abb22c916d5a46cd55235b85170f6b5f54c36a1a7df6493ef86296f8daf7805905b06e93c786fa7c5f78fc9106ade198921963224e955b26075779c623f732b813194d1e925a707c49ebfdee4d790256119cad5b26033a45afc21b9b6cf29d86d10ba9f7aaac5f165e57bda6a7160d1ef05fbd478aa72f2311ff4411ff3711fe2431ff411f43102c0eeb3af5280af888ff6a228143b93dc9f5e567acb084773e4da58af21a5b4f1315bc575be1d54bcc17f7412c03c31e8a42151704ae94728200e11e4017364e821c5f59ad7a95038196eddb02ae5ca9409eb291b88c04c2984d4888afa8cbd5ebc2408aaecef5a159aa7877582a73d6a584171665c4776da058d94d44f06eb8463bbd6888272e6beaef0b4158d2a53ce4ceb45e7b60fa31a62933a036e422f6cd4e20f05b777520646194a815e2545568f5b208095b06c38ec9d4409b35f6792b319f97739ac8c5cec867fb54f80475943a74122e8361bdcd31d20b87d57bcfb038e34cb3f280bbe78986f3230449fab39c2d6756ed5e29d2020213c5dda388049e45a7e508ae7746cd27aecfe3cc0c5c2f3622523d83989913e9cd8b2047c41ac411f6ec5932f18520e7f2a4091196880cf158cba7b381c06cc8f2480b3ad608b8006dc86eda44129cd2bda7a0dca465a2d7f4bd9e159112b701d33cce24c9d1bee5832d49a6c86482eb42617f43c084123395528174a2ba4a6325abf61f401b4aab39b0edad9ab0d5677b9ed8b62e13038eb68b7e61f8423b6e23a8354e943b7887073bcb3565ff2ff5173ffe36a4ade982ecdf1c1fbda551c81e61a5586d6be5216f354bc188086cb22b4df43326fa68ee12a6e329ffe7c35d89c9628872a8ad994544ed235aecb4e79d84cba2da88e289fa91ca3a4de6601fb93ab9ed48214a46ec05f8986db36c7cc450b65096dd8940d7a100ce7176c74e16039ae192470b49a27789816ea6131d60066888fc96917193b3ce4e8c5a6dba498d43c4cd6a1d5af82ccc05a47df17dbc2eff39d8d0196488ab2333f29b5d5cf24b5702f95f180fe08244245c8dd0438fdf74391420441178380372b141b3065eb3f1910792f1d3e53f102b476cb348372c44d424847b8edd037098f153e1900f31d72e7f6a01742e062487320292d9743cbea81ce8b008a292a405cd0a26213775e987046031bb1d8d7985ac580988f8e09c44f69e642308586d63dd8c4f0c990df2266c754b8b1a0da603322b2c0
7dc2e312e319a5f12cb242462050a17ad79f08cba646fd38e941aa116bc02a5e0e46350b35d4c8adb0005ba9630df9fd8bea960ae8d5ab8ff3de7c2f34c7f2b28bf014c184d7e024a736891d717a20c911ddc6b32d83f2c0f5ce1d2a322c199d7963b7a46add4e6d31dbc3a5b7ed569a0a20baf10c27cbdc01190de1a63f7531233e8957348ee0b2a41aebae71943e80ae6f02b143a457811f9606d352cd9dc62f8d16576181f5016185fe45f05a9b515f15c872bac2abc81257f026e170e084e93018625dfd4c5df3be4ec4098371c0406ce7165b43a50ad7c1432461a32934d85f8ed5d56704dbf860424133615b85dd7a08ccf039e8094e157add34b05d549d047c08e149c30f53a8ec0763321e00c76ad3a51b003d542fdc8c700116a5f2e8cdfc27ec429b6b09b844e6d9b32f63db5cc6915fe07cd13f44c81696179cc35fd4a585bfc13d66fbb8cc3549d82f4075394b4d273792ac651018fef49029060eeeed9e8f131580fba8b4b82fe9d005edefdb9ac1279e9353383747342dfb3c5545a06ff46c7a9cce307f9a30c08179a1c2d3eaac4d0a54023b6134e408a3a068d0cc8372d55d332b3ce8f20bb23ccee76f0d2999cd8ba625e3cc80fa8277573fa367f9b58e442af9643211c819931113b5db132c82c25b23b9f389e1744a0e25ddd2bed2851a809cb57a431b251a0a40221bffe2afe156fcf70238bc71cf7d64b45769d8b662fb57bbb63096722c77cc718e5c7cf3e601bf3c8a6e2942aa204dd3d5f9cecedf66b669913701e2d7a1b7cd291b6e23a4109a1f9cb5395810289fd401a8c1874f247db8ca4fd76554fe45653b679e908df3c87bbf46b262eae85643af0185d195bbe05d494ba64d963f9ed5f3e0e97f6652b2aaabecf4596733fe3f0329c31ab7fbb2bd13904b88bc0a25bb1c345f2ea3cf9268090fcf9caecf00af3d6f12e9ef4cce94144a66dc8e8faec3268208fc21fa9c1b0bebc18b3c5c1d8fff5476ba28fe5e66fbf68fa05cd6d0ed6f83f6403f1f11b80f80f38b289abe118d7e3c657283f6238e78d3db561c62d7983cb508721a4fb2b62c73d552ba7c09bfc0579987fdab477de859da033011dcf5855db7cac8494a479801927010953968a48a492268b5130645efc4a1969306d1f594f1d05c9b7e9e4f0a88d42400752b0541ebf0402626f9250f4c151b6a52c7aa9fc0401001d8e4914a0b23dfaf01673002d571491764b6d3d0212304407b6a168f82829c1be2409cad59881fa26cefa4120ca3bf3b751219c7aedbef83b76c8094abb6e7dd85887e6a8f9df026c7afe987192f656f81aa49474d4a0fb62d1c4b8683d8119995039011b16e9bb3a22dfa06a3f60a26448f5524ca6bcee92408faa941a01dc9730030b805c95525c0e174e58ccdcf94c73ec49187cc5adecbc293af3042f0a0cb0893818fec437a89a5ca8857d0539f31f9d787456b27e9c948975eef49e0fe2a23aa7769dbde78a1fe537f6b4840fd7fea89aa3bbc953fb5e0c67d62eda6adc4c2223e26005c2ba228df0aa69a22821a08516e0902da3069e58ac43b914f4d1c61adc16e69c167a28db03de296b7c6d03316abfc16fc2f4c7808003cab53ea82738057215cb9db44089867f1c61320f123c4ebaf1eef5462052e7b7e1e8cc633585441196bffedfa91dc61522944748a456e83a896b36fde63392302848b4d740c5d1b600d548e3aac10c6666ab078e75067eed294bec280c51cbd85c53c079eefa310ce74cc9a1bd3465b2a7d8568ded081974a5f1745d52bb7b8f34a4ea2ccffd489711a3e21ea2f43d0c441216e488b0944611399840cf5ffc95c329214b57d82d7ea3f2b8d0d7c348b841968c0fc89214a489e475674b78ae09465094d58447669a760a8324fefc100f3ac11cf65ae432a1d3589540aac26fee63864ea7ff116525fd94426ee8b9a20b827a3dbaacac01f5863fc35c780b55e7104a0bfd127ed28186b0fec705870548cfa27ddf645619abeeac8e461e6e879982ceec7ceabbc149ff4864206a0dbbe57bae4d71892645cf210192a0ad093fb90282630c1c242bc13b46877b9c1214fd49c0dbc1b018f7c101433f7dea91fb337dc126e48ff853737d572285a6796bcece43ea6bd9ddcc1c01b3ecc78feef2d72c3b01a52591664d0d44859a87ace37cec8a2127834a9f10bcdff56f8a5d0f0b4ce77bbc824a024bfe7804d94f420f8c419b2091ee65816f2909edb47e9fb4246e681a2ee54900b15a9f6eed01ad25d9a827de0558d83a0ccf518b42c0af3cbc6c98f319af46ab30d1ccfb8fbb37c54b63512d52ab4c09323decc038b11ad3a7e8d114f959f223a457cf1a837bffc44ea963fef412afe4d5fbf024ea8a1431cc883a0998b406d963d62c9578613c9b4c64835edd7fb54d73ba939a90d9e12c0c8e8c1b5072ecc795c4265571d884b1418455ff38d2354afab2d22b1f30a754b5808f06638fb0fc62d4d8835587a235eb6a12c46ce7e494b262e6587d
9fad550a58a69afed6fec50c070cb5c31761236d435b9729e780edc128e5ca213c3a2ffdf64accfdcf24145c407b75623b9264c735fb02529d0c55314f57c5a31d882c5eaf9de6cc272b7390858438a4384fb2991cc22e0df553472305ad677bb63df06adf32dfcdfd612b4bda1423f6a212991e25042da04040e0be5f4f9c3c0ecbf51b41fa0ec93d69b3bd41e212a9de9d9f7475e53bdb48fb1c7e9151085822187b1d35142bc5f69f55fa688c56ee89c90ff5a3f6506dd426354f4c9b800880d29436ad4cfa7f4d56789adc0ca274f9419c6c5aa22f6b39c620a153d97176a66d9681d6d72c9f8921c6f77a400f2fcd11ae10ea55cd69786dbbe905f70bd10954a89d3a6b455d9c932833936161a2dc6c4844b3564cc0eb711a77d6386b3d87dee5c34de5c7082290f1706ecef8ee2addaa73fe101fe50943d98acc266b2e596662657ddc3e030dc03a2c484cde068a8024daa572f69076a295bf18145213a488ca87b7c86f1a1a6dbb1c1d843140be9b9e7b4e9f253d384b5410fdee2c061e613571fcc449d25050aed2de4c1c2495555cfc19d9c6e2a3b4e1193e0302be7be6d501a97a334d12120385a15216833ff939256dee4e08826417c9297f94183a402f6e035a80b5a0a41ff02923b6677b8bbba22ddb91902b97323c9c575933dcaf908c80e1b2ea43dd147dd943cf81a5ce1dbbb5b20fafd14ea2cf8e16f6badbfcf7b5c2180a7f965c57874ea55de0985107ae850c4dc48f42f2d53d0263fca492d22d1f851c59851e498d388bfc302e7cda532b678f48bd774e55c9af520a3a3cbfd36fe9eae4b085de2e50267d0c29b8982a0ad776cc4805d41f70b0d561977d6a03c7aa652636fbc8687267bd4570ac067336be1d2c3a35a5b7fddd3d24d8f14362e25c2d9df9c47e742362d5c3e7942a8dacd4602a247cc5d8d8012972d75277619ba00fd78c2baf6d6576a323b1f2855cae939294435a85274e61dbf4670827e3ec646554c3bae2f3112f05ff0055893ae307da79f3897ac593268d56bb6198b210c68af67214e2bbf1097d82f7b9ce1282f1ecd78eeaeae8075da89c5cfa01816f4d5b8139fa3f957d5aeb9241c32afc0b2469010960325a99ccd7ca9997f1a17b407643fa89b20cd2a8b52912d5caa243260ee8f425db57229f6338823e413b984461b9b993b09e6500f4fcc1da62405a7aea561329e434de45337d8105b1a695d179fce5d75703f2a6f12be744fe2d860aa7920c9644dd019443108a0f451fdef1acb733553da005fcee45b90a76eb5f10da2d29e25bb7ab7b65e3b77561bf92c6e4fdb6f4d62713a98072806d7a9b00d69e6464a6cb6d22fc27e77f6812c7238ea552cc77a732c79b27229d5e686c6820c547cb2fe487d6e8cde65a0dedac81c7b5623dfd80fc50d39bb4908e0a5df8dc87cea7be27b8f52ccbd1b429cb2573b5a30d0d974909847865cb978b641d486c94de96cf49bd077ad35f19287c68be1183b04c92d92a4d9aa36e8b6afccba20e589e847095253ec52e0987e7eb590da5d8f34e40cfae282724de904d7fc659ec3dd5c0eb66a5eda9afd81d53864f21aeb6d9e3674e4c4a240299f558d8c6aeac97e77d1c2d0e5614e83656ebd49b2fc0a28ebd164aac0e148ea2e7b12276d79eb95ac37c62744d5cc8073b7a40365c741ff2876c656c4e113d85d6dc28223db45d6e882ea01bdc0ecbab522cb91c4e8a970a746fc5dd3906796edeab1ceccc6a4c674f68f56240b7ea26f306e407da7d414f8a300469ec8921fdeda1668c87376183b71d067fdb5ac5ecd75797a8c39f18e4330d8e5f2fbb5b1cb8d478434e4be176d507509009322851fd8ec2a62999fad44804e28a978cd908fe65f9509ef245773299dd4bcbc572a2340ccb51b9823c19851fe82c9a286a31e0e052fddb7413ba321ba17f23b3f0cb4357ab539bfb6c2c8d49dd9fe0b71e77e5c0cfcfd5225a415b38678ac378ea3639b03f7775ad98e4023da22e754e86afeaf19edb4c7af9435568d484f4d9108e6ba08c7251f8cab2dc95c170a4a1432ce7dad4a2420f92604800e5089a591ae3cb1907de75cb55735ed29cee7f5b79d06b7e56577de3f4745db8936663e51ff314d7d84152ed74b35b058ad91f4d4725f7041c862a16540deaa39a02dc3e6bc3204db0cc3f82541a4415ae47d1615141334ec2a6b051815f83b066339b6179159ccfd40bd189560a4e57b47874db74663ac8566d2f2ad5d76ecbae0e8bc081d06b37d5f3ea3ee992d5622c82a528577417d012539f7e27c61f4e8737e3838ca538937477e63366daa600ea5cee9be006cdd2d6f8391bdd3a519e45d5850308cbcd9ca7364b25774a3195ddad025fd7c938b4eb9d55083c23e20eb243e31300ce9ee979cbc23852180c1c8ae5ee7035b949cc0b25fea043d45a0d582d0142b00a51c8fef3a62b41085490543e4368eba1b104b04ae284bba3afd9f16ddf3e0a71ee1068b67591d0b2bbe6d4ca891b7776c78
395509453ceadce44ded72c9a3a5ec20fbaade4b1d5b87a119b68384c161d367a207eefa07bcef4d7828a3febfab14a886969638b1bd30312bd794524b1a5c91a08a18da35e83353e1083dc17530af253109e8d75b079d6059f25365f33fb3cdacea28c9efd54c9f731ab638f0d829de1d56b473701b5f934fea9443ad40fe4ac5b7988ac99ecb2ddc604563139e22664d9dceb1b4e0e553617abb128b5f967405da49ed1da8a5d3dac170f89fc11a8f6efe31202fe0e229bb493302b6535ebcd2d327fb3a9f48ec0d5b6e4cc0e3b7983b72b48922f793b39d4f109cea2e8040e948d9538a8a033f869a4a695954b89a40c19e4058f860b9fb94365d0c49a428fe2a451d2cf76cfb7d9596a2674b5d22bb0c7cd01ab5281aa9ab255fe3191a16b9738c18830e148eb9878101b2ed9b45784c0569682b4a0c666c1706df0d2acb752ac21acff757de1852bcbeabdfe0a1687c05afa8f1989cacaddd3d5586417ec0342f180ce02c0341208b07338646a91e11ec19c3c5af21922b6b098f6cf0598dd798233a391a251f15d361fec0c5e6c7a431e2f1cf42e8e7114119ded89666a96cb810016b38f0095595ccad1822f62d97b8d9a0703c2646f611c0526c5485dae759948b29c0f53b686a74500d08d9309f0a3fc26ba82bb430aa004675f7a2b7a91baea5f0686241a6c83cbc629d26c90c385bfff8a9f911ef3b3c73ffe4fad35c404407417ff0c79b8d93123f70113784c4b2ba48eee54c472152984b74fcc260c2a6a4bcf6483517522a270e3a702fa78a05a2a67fb74f831d0035943ac15cce78bc77b30e8d1aecca8906e62ccd55b5f4e6a344538567e4401c29a52f5742443b25d7cde55258888bd022300697dec7212eb76656ac8091488800605ea4f65ab4b1ba0252835359e5ed69907758c03104308939ea0ade137d330886e56c4856c3bec75a29b3fa81063df96575831ff105e10839d9de7d9b1f0f6af8452f6d6bfd49261ff2e5f15fb65b3648c2294f20ea74514d3ed85eadb431248b7ecebc2c89f682be272fad096c2a1fb339238e540fb9d687824d48cda6655617d35888eea8753d4579761d5d133a744ff595d20442bb2f018fa7ae21601510b582751a429740c87e7122299eb975f77b5dedd5cf4535e050481127521e32d18bf03e45be8cfe0f6da52d945c8b2a9a953e57f404e6aabb49f3162178ad752a4fecab289d91d3c5eb6c39f9f55035f2ffaf1a8f27dd821f42bd07dd299c85a4e7d2dca9a8ec7f48e8332d555237ca261c6d90ac1c1e70b08c3e9ac5deb945f11156d91d3a85cd03df0fbb81304bd2fce36584bebf0e75f4dc1e02c961c7ffb2675321d64e1b411884867f43f27de6a6fcf649402ea7352f4d71343cef0395db56830aac1fb107f46c5e447583f1a3bd2bfc46722da4253b1809953ce525ba967194e0ab4a31226f978675d5ca701c294865c0ab9a7c45a3623d50d1345e38215ba43ea33ecea5862ab023c1960409d3e800fc87c60834e3ee778e4f9e4762802cb4094702339831d23221a02b46c1b04580b9c5417bd15ccaa34bb24e906ff583619fdbaeef60edf363574fd8624c5d2a7c75c554301c1a464fd8529635183767a57fae35b5f02e8a1987d3a7b6b76894a65f91f531dfa9156cc7011378299a56dae0be8b30ff7bec165742510a193864d97705198d3120d7b21f8678353173165151998562a46b8187b74d6dd19ebb03113ada5980abb4cf6d59f7b438a4957ebd4db77384b930a36c56e5dc5bd7f1145dc60d8c0d42cf1b2b901c62922a48400323b086944c914ddd8fe668dc99a72eeb5e4a4cc84e0898c2e99775367d1416b0bc6584222b6233dce186abc113e787ace60d860d42d3dc50e4d13b3688387336186157b618a6826df4617ac743d37f4e59eb0868a1882df720d3888724aa7b819e1251342c82cedbcbb33fa087356745168503df6c8560d3cb2f938e230e3af59e360420fe9b33c6523d7eaacd064ac8880ac46af59390c22a74e704c3354d79a4a63a7419043160bba48bea8530db1ea53bb6f3d4b8d57a5cd5d6e371634d0930d02fbaa1eaf1cb6a35e8083101bb66057f750bcfa3f8e0ca24eedc0cc0d235fc5f4d2a0ba82f069660ab6190302959c01bd8baa8fac70e4fe983cbafc7e8dcb78fcf58df8538cdee12f70ab236f0772ad9966b47450b299fceb334c2762d7c2e9aa53cf5eb530bd18b6609cb7c849130f5c367189c57bfb4b9ac83996968ccb2080e94d22e8131aba4816dfe88ce40f9ae1a4b0775484b35e5bf31709162da95cd26b070ba6d0a2d1d438d1e4009a9822620a785974113a5d31380a286a88d8daa8e636339a80e180615f28fcd77f6c1775400289f040e87af9d9808a3ea3630e489b2beac919409444218d201f1f281124b51f4e806e53c04f279e25bc7ba10ad930bf0a700243036d9c7b7e4a3e930150065aa7b5f33390b635be313b8e6d528f6fd233e55bd8ab9a77186d8292a557dc
f1a38ae55361c435057268303821a391a163a5afc67acddfa677597ef807a7ab80959deba7e258ff4d9e6eb55a717750dde7687262ed12ccb2bf967b7c2337b93873cf63125828256061c5cbb52b16e643ea3e8ed39ea5013a028c74b24b1a7ae94cf4cc90a043d8adb442e233cc75f1f52b84377bc7416354a92225d42e81f42c3497fda5de01664ada2ac29c06db3d003bc7f29400f67699e154a4c88646e1cd4f22fc46775136bf38af5ecacc8f22a74b9c72122ceffe59ce4780aacc8d530520336f9712665752a25e584518c9d5de7a998d0e18a2c2f347500ee00ad9cb4dcfff145e9c03b848f49c4174ae08388b109d6aeac1a569497600217356cf45b4976972e5e0963aec01dd8ceba9fa33b3af9a121d3dddee190e85bbb2136f54d310ff30c4f87544c8dbc93573f41c35a9607e2e60acdbe69ff7bf4f2c770420216eb667590141bce53349153d12862fd59eb2ea7ecfcf7d55298e5f7e6a851b79950a71389287d48e57c79ab19e3229d69e403c3614497aea70f580512ced212cbec9894f4c707af838f09ebba6c3aacccb040e160704ea530580e7cf5706fe86e1a6e35846ec9ddfe1866a2ec5d244f1f88c75f52a04bb6dbceede39c8d2782b6a8eef3e8dc01d44f934d0f1cd87d9597e763d2f628ee4daa792b111eb20dc84c34144ed33606de49f44105e81eff0bd08d122d29bee93256b2869833fbde0f642620d1e9e97c42f653f04a59c74f0caed59bd616ff0574dcd033c084c5998c396138e76309799fd9141e6ad162504fcf37e19a4f5e7f31b30913543c8d742745de9a30edcbe6163017bb933ef97ba56c26032ac8282243397458fca626f6b69c6ca61d29c5c0268bcb4597f9dd5ec5a525883654e8e1eabaa7c1253ee9e106a1fa78a4a849e9d1beacadf13e4cbbc72b2d14cbbeeac9a7a60496ede0d570f4dfce82efbee76b98610734e3da3f7500f3b58726d0da2b05624bbe41acc494d710e4a393322cc17b8bb34b310e52032f9aec3daaeac690cffcd268cc9698f4d406101571a7f1266b6565b0c3e83b902874817eb460486a3586baa33ed32840e925875a53cbc498f0321015e52a77947aa405838457205cfebf03d0d44dd66b22f66ef6d2d900eab71b37e3fb0f8e60991ca1d206352d0adfb9898426ccf26d49829aa9a0f6da87a358ffa386c06418ce4a417bbddd013c445c25aea4f60759d6fd946715af430f90d0a4105db71a2216fc7108a840e287196623ccc8697a9ca2267d43e84d0e04b43a9fa87f5c97ce3d9a7852279e299d436c2353c47707f3f351d27f5d1cc389ab7c7f16fbb18c00b2da42072db67a658c5ba94127aa3a126376b5cb0a42e5dc8a8b2b0fe9ecfd340e55e6822fdb273f8193597d12748a8f9584e27dd492c8ce43240adb11ae0fea478fe830dd6cd8ce974afa90262e1dd59409b545b7bb3e5852d30cb0d2bb9a403360ba865603e300fff32257192e4262071da104a2574beb006dc4cd83bfaef5183ab7e78651249540dd012f01ed7a438eadb3a4d66f5fd096ea8cebfe0abd313c99766a322e725826a11ab1c56572d478c5a34b29dbd77b9cad06edb70f739b62659a0e9717a1daf3351d2d49bd314fbd47d75c0293a25cc524596c1ebb4be29e705feed1bbb9cdde31e880f6b65e58fffe4b44dc30e4a2d382ac8d73f5c5baaa40642384622be2aa12a93cf0b73fe31a9f8d002df1db59ae4f5a8b3488c2256d5f8550c070bc32de2343d8b729047d0d7794f38f2bc0eac3601568c4bf3518c9639b2fa3e71b01bca454f8bdede47864cd258ffd60e9718a22b78b20079d5bed961e9eed5414460c1f50fd41c5f9832ed57daa271b60b1606229e28abcf1e059f4adf0eafdda38f93b417644991219c0df48374f4c6e08e6dc152b1f78d84b343274ace896607d10a18f359c3d1450fbbbdd10fd47c5455774ae183f95a1b1c5c47dea9d1608c50a7cc2819ed2ca6e723199a7c9b953187803ec5264d0a34bbcd141ce28ef4ff9428c33b076836a5502bb4766fca6dd51351cdc3933b4c749e6a8a8a77cfa5f224a9f79f45b225d88dc2cbc5b0a48edc2b71a822846241a892010b297e2edbab9cdecb483565bab70b5b5b0b605ca3832613271b6715cd788049c8c6492767c76f523126b6b29ce5a72d5b9a069baeb975e5e6877d5433cd781c49625ccb0fcbba6f1e9a04928dd67c9e2ebc8031f089665d0792f783d2400d9299a63960dcae8651fb9a1e55a6747e7bad9a9d0a8173cb56fc5b0969992f2fd3fb6f4071582b53daeaacf624d95d1753117cefb2567311d8f6552199c40536315c012d1b7522c35afb91eb38183a5ab9722d16b7464256bb95f04275b2e4d8b140ea2066ebdef9ba074e2599264bf9aa0031086a9dcddac3003cb2742b14a32828a04015e5cdccd8be1d08296d5e9d619db911ca30c1aef231b4b0bc3da110444f89662bd833ab14e9896aaa80c2c60453e52c689dc2674a378fa203ddbacb9de
8e6c56fabd5de9e5ade8e63ea4a2256aa2d17b75f360ea9cd9638d7be8a8a0c624a0dbf481b801655b89cd8ffae55eadcaa8bddde9b5fe280b22325885c1b866f1328ce8a8a07ea42fc50ef96f1c92f154ea7600af599393df4260f368d8dcfe253022c830fbf134229e33abfc2f72dfc7f03a5fe5b034b37a44b629c4ba5a8be9df3c2154093b5cc6b60f5407422b87dacc0d6f59fa3f2bd6c4ec969005fe2710170016949023fa05e078c63883f7218cb72e85d47d262b3c8d27429dde01695251bcbb3867afd67a9a1c4e1cbd26f90283d8c1840c3bb8b7598748ee0cae46a163748221ac34e488c7a10911ac2f36686f4c6f3309cedcacaa48163142ed41d8944eeb6a4e6b2514f4f2553f8cfc9e48da71ce0173d2d43057cdeaf8a95f373a2dddabfc6ff4482504f0f8cd3101cc86f41ce2e0000885fb2c59aadcabae486067290b8af3ee9ad5b885d2b04e65002ce489eac40016b5b9e0c6dc426070fc65098dfba97a4517eb0b59ee48dd6cdc771d224e6fb8b350aee4fab61389d848868f10771d930f3ef210f7a050f6e5197ef22debfc1a7aa524d042c496b198034d58e3a834ba8bbc511612cae83c736cb9ac85c1659c12ee111a26a64d9010328b7fb9e8a43a5ceb1422870519e713e4235baa78fc9ddfac59c718d75636289ccdc29f8f205115b85fd34ece391a0034cab385e1fc010742db498ea7fb4e910f9f400d22f395577a57113aa807d6b02235cb1c604a5bcfa63e3e8d0a43a0b888ca836d40ef3aaa9dcebd873101635246d11f69500e43fa9858a6a310ba61526d798d61f06da3cacc78f13d01e08b08081c04eac97a2f2f11468ad849621be524ee23839862f4782658ebc337da7339373cd5a68bbe6c3ac7405a96ef35798f6325c666b6fbfa6b726485c987b16c4f0ad24416157265e899bc6fa92094f06e185e34b6aba161cc0dc5815f2973e24c0f836bbb970b01081ed84b6f28f686228aaf46936c322a21adcbab27c8814640116e3c89f6e8439b3378180595a8af4e26230ac31da6b03541ce3ed09f267c7e04fe753f0006d83dbdb6e0ed296df75f87aa4f14cf2252391772babcb02a37b463302b04407329a75a50d04352877ac427c5286d7a0d1af878435acac6b11dad37b103ef0cf549181c53faa76154a0d4c150c374116a7470c6dbde080a1fb105e97d345e7cd44bc2d7085120d346ba52592aab5ef1202070a53ed86e9bb158823af49725b9be6b031f8caa9f57b53ef7ee5a5e41dbc860d0728898e935f08ad566ef9bdb4e2529105fa0faea6be8bed67288d0271bb03e0b779ca32e16a85ef837e3a5ea77457b8416ac0edb01f70ad50bff36e215d1af23843fefb5f38295bd03d048aa754ea0b5e46b4216834110774c182d231d7304c542d2519224893be8706c603242ae600b1ea0c24f4860420f59a95ff869f7b34059e51c58dca10dc6bd6f2c28b392e86866285b018b462a9fa1ebad4e0a793e57e9f0f4880c8d6b454589710fdf5bdbae0f75988b47f800722ab54e3e24a3ab35c1eb54fd0ba4b755f48111cba6361b760bf236b5cbb97e97b773b5e660c0f29dcb48120c8ccf92f160f8902c7b96284c732c4260cc095cde9eeff44d3380c635c0bc67cb38c9e9c50180c16205b9a89923c129f8ed9273d15f48023f39690702627a8f6c060aa09f3aac25d51bbdee4e5f332f3e565ac36d622bac7fb5af799850d4fb135cf75cb8c1d1cefb3860287eeb8af6d1f849dc2113b576670d534253d1812e17a98362ea176175b3632ea8487304d8f98475f3367fa8f5d25b9ca2a9689015985becdbfa63e243a06ef0289dbe3522fb58640affbea9304af5d9f0760c62cddd688a75c177ea15e3ce8d1609c7caec12e8cd5ad06640e4f3973ec7c4ca5c0430f97d9b6e1c459eca5589f4d531bde3ebabcf385588533c5b56f2382597f19e9dd2ab54811074a624f514273ae51c0c622819a14f521d0015866301c17479d3aa48cd7d7ff1ef87a45c00c11b327a5c49f944f56d54c58d3473696c04da857f6099f8958d63d9f8e37699230dc70119db1e5debe9b775f4d03096342f13422f2743acb50623c56227820bf9e43396b057a5b51510dbbef760ab244f87e835161b47c02bde7830bd7f58be3286bfc00de318a7b5b628519ff2ece13dafa0df4b493741a339450f903b3efc2675386fbc41fd6549a516b3ec8f9ce3a374d4bb28dfd30bc84a83eff4c16aa6b8904d1ff959519cddae58c10e3000e986c162c887f3d27f3546779a1f2e4dd133ad7b518513005777fa3bae5841cf44e61dd906de6a4a29e65e8e5195900859d0c7f7f56dca98142a13e559433847a19a644ad1bf6361e0011a3613bb31b3e356124245ccd64115e80c640b9d2f1ef74214496ff46bd2353ff71278b541748f112af6c92774b7afb297d28d50a40ebb00b6e5b588b41f535294e0cd40e6258ff3f568352137767d1878ebaafeef9a50e49a630a000d01e966e26cf2b
2afa2ddeb5a39585790d67a453584d16f291aa99b4f2cc85292ebdf356be8c288491fd2631d16bdf0201054869992235ae782fd667ecd6fe03aeb6f7e3fd990723dcd3b2755105e7b2dcf8b46f87d652ed256c2bc986af329e1ea974364cb0c64b17be798eda297d4bc0c97e2658a70603eb532482526c01ce3e04c9944e91ee10b7a32a2c54a2077380145332c281de59197216c0715c0a8d64d57dce5eab71eb716cc8e807f8c790ce6548a3e3cbc463bfaeed1bf859e79431c729915f6950628f93142a1085033149d95461eeb89ae3ebb81d01f611865277cab58837685b32dceddfb0116e1c30e15838eee5ad2f27832b1fb669a20475bf03e440a8ec7bb5ea11b471916524b2888f7fc5123974407c835c5fceb0818d59e4ba41628fd9f432392197f9c6315fd2def7f1f0d9e0eddf2c89c2d4231c899e065732ab773d6ea136088f7e17df2b396667f3d15a4bcc8866db42078eaf6d19f111bb11dfb6d174f71151c1ef9d00c4a7574b0e2d1246de9be537729c97ee4b8e060f5fe5abd36f8732cecaafb8144b4711f5b8bab4afe3068a7f12c14389e3469827d50e8d9816a7c87e2259edaafb6e0d1b9819408c3e96b3f4c8c7f52e5697d4973bfacbfa5ee083b6d4eb00c5be8ffa7ab1c668ac0d8c4b697c7d5b07c8ea00f742d64d1ea34fac5ac3a08d23dbb0a415c0cf492a4e4a47f8638c920fee8650e57fc93c3b8c2fe98c0b987ace8f5eb0939f661fd4c211181247eecd9f8c1e834b99ed3e4ce208658f24c8180c1adc7223781f72c8ff1da8186ea3be8b2930ebc7648a80e510d7e603bba9968fc38ad865176ba67dd9cabf030d058e77664d92971574ea972af2cb6785bdc181d978ad37e5384799c2c63b655bf30e5802fa26669f9ab4317c3e6a9c469ff352969e8cc8f4ea8e4b9f2f344fff4d4e0576733800f6d9f57a063ee5286339afce9d458eefd2e880d66af3a9eeb1c295d5227a19ec44fa73cbc6bbb0198ca77c00f71f696359fa1d2b8ecc3e6dd7c74929b7aa024b1f2d179fe5e79391d6fd64306bc4f5df62cf6d03e3a6910aea2c45ac1b1f3fe831e814d19ec0500b10b58baa813822c22175be8d13146a5884314328ca8a0970e754e6a3b6df647cd0e8dfe57d9a1cd5fb57735f0a7e2ee6dfea8edb4dd0f35bb34fa5b6d370dffaadfd5c48fba3b37f8a16ec7e6edca76c38050f2c774f91cd65dc7d32ea71520b8a3c7746707e89401e1e3bf8f019a18b2a8938a921c84270a2a72e67fa72d99856d9a7305083e72326e16769f3ced9919622ebbe78d466ab99f784c2e652889ec171baf5920138dc4104b0416817e1694b0157893a8ab0b94a82304ab58233605b6af0ddb1c002f69f8cdb54e7571661e2d2ee2a6a16717a511790a16dfe601db9091e2a278bd2008a7a05248cae1dbba944d490a311e47f47f26a0f142ab2251de407ad5fa07d8e0db3a6e839f702b34542f69694403cd017d30f8b6bb73fb8d309dc2d1580af363ce1c814375bf83b0a5eb9a0445d83d00fcbec0bf18de45402e163a279a673a9339183bcba479c2b78b2e3c6394831441161a5c518573f5703505ce5e714118ae228f895d494fc5d930f68afde1f462a573027013977cb12e7513c4973b951329e3c2b79d4bad15ce2bbfd6653249c670e251f6c660116d54cfa04318d1211263578743e3c7afbc3520735da9dd0861350bd4f4c664e9984873b5948b752adbe554d1dbe62633f54cdb58ca74459c2e76d40e075f5d2d01753659d60e02d05091079d4d21c2bb5ef647b532bc030d966a42e2a93293eb28ec71dcb82178403269e59c03ddc318c48a268da0f801b9c3c23f2339b321fd0c1c741baf322c510467860b50881928e9d74c602254458e2f28ac1ac64321ef3d5fa0a4257bd96f0320b0e336652b6998467b449098c660e648f7651a77e0f25f06cb690c64902058cd75e5eba238f8a300a326618d04ac19d98582a02404f2952eea71d28f058fe16cf62f15fc116414ff176929d929491042ec66a2f5c001cf769296fa9a9766ad7ab161908769a2a0a680da61a8117016a03e79048bd77eb33741aae4215987bccf3d10b537f0a1e0fb6c9a62b3fd175f803a8a81a2935541b041c1332f8ac4bbd57c324ec1f1a0b69ff102f16e5b1d5a6940a874213a8514d03b15069850d955d7f5c5148089d32ee11f07b13921c61af5137426d7d6922dbfd19f25ca1d685bd2699860faef29bf8a97981107103ebe1fc6e201c2ceb20ed027273be4075f51eb7f1ef24a010585896d05b2b52dc40ab775883cac4964fab30cfa55ade20a8fbbfcfde0c8ec38a0b473debc7c877bc571a5d087060be840e2587271424a12abb22086b501c60e274b29f6a3561123538e00089b152dde6b77266815f4f3bacdbd1056a81e3036a6560a6a0134ee47c64b402b1ec840457ee45fde5a6814b0cf097eaf810e3cb912ba6867bc12ac1847c51a584a9a404513842fa7ebcabb1ab19a2326251c
88592954ac2f8b1d6bc2abef88eef7a0a01bec6161277d040e188337122dc7d4ee673f7b7efe38442e1125d00dca917ad11ba52d14536bc27ba9dc251ac1c09dc098a932305e5d642394b7bd0c1d6998465ebd9628cc567ab41d5072f71bc40538fa371996921a4961614bb90ff0b4b95598c228408907d9bf43a01d91a1a36e3d44e63f619526621047157d040e1f8ce8923cd218faadf044bd7c427b2f9e316cee17965c53ce29934d8c46e0f2f3c8cf05d02b06ef5fcfaa970101cb051f6ab0e3547c2135606d321b5bbea726e822991107e1f1b074ee13e95148895fdee5f09e6918f0e94783f9ae3f67eb502517df49a014e9dee6fe3bc053c22ed1977d176ecd2adb2813192c0f1d2cdda72510ca9adc4f27249baf26927e1b6e65856e08f5a6d22782f2305c4d10153e84a7ab4bc012b0dd486a1d3ad799b22f9dc8d3ed1e0a9a0e5fe71e050a54858fd18f7c40d56d44d24731c8cd73c4ca545bff318dffc79936cf1f7fc60e8fae672b04ed6f3813416b27f7744980a272d9a5254a52b1e8aea043507e393c7e9007b79608440bbcfe966463f405b17f6be8cc62ba8cb4bd066eb95163c22f24214c34fdf08848ef8818bbb7c1d150f82760e87942c6f2d02dd90289d30f80de6d68d704d241a0fc40bd89c62b0a56e1e00655e88515940e5f253fb6737ec66688653b302435a6a377da45882888df230f7d24b55715970653ade324f2acacb58afd187c04e1702851ca4282024b124e75d9bb4ab0a290854776a4961d09b4bfadaad24840bb519ae41b2c7e1642f086cdf53016c2e975805b54d4c054c363e3c224a0c5cf890be802804004f4d15c3c017d0881c3429852e85818b5d6824702d9bbee7660d2956d21caab23e6d67139e202a195414e6621e8c67ae97b494334750a12180cfdea5e0e6896cb83693a119d31d3a09c20faee6a70ed50ba4f59b61746ce265f279734e800089f33d333ad91911805484d393d46dc10646a8a7a96b9451982a1ee704f1bf6fe9dea508d1e2651a788adc01e12961c74a089a1425ca4af62986a2c5aeef9883092981252e908280dc5c66fc74ba99ac6b9ab3254916e02bb6a43069572cf9d04f3570a77070ce473e0163664219b024821b697dcdf7bfc498dfba14c58c5e9100ac62d6242a7ca69d5abc03a51b25be57da0e471ae17d229bcfc4c3edb4508bd556a4d3374d637fe4d21f3aa38dc88ae8dbc0ccc0eadf510dce675f750f8cbf7fde195965be8138cbc59ef151b7cfbe23cdc0d84deaefc0ff1dab9c26604f7ba68a69750cc0871518177725d7b1bad9b2ecb464c74164c1801672e94f3dc35c257f26b33f6bfdedda8c6800ae460330ff8acb4f629e8eb19aa315e57d1dac4b5debe9e521d5a65e358a04a6261876d381e4d1b005dcec00fbe9c69646521f38c1cdea64e6497c42f2c8fc1145f2c8dfcee3228773ac142b25c80cae13517374cb432abc1d91619c24a62442870a2274f8f987468c3a6921b529b075c6719008a3b01963e2199c1b54cfdf455e6ab5041f5ef7b8573f82f51c7ec8960f833d2b08cf7fc32fc07e4d122711fe203816493868d80291190ee994a3a81342ff4df0a68465a78fd5068273f0123d064514875e05ecff059b95039d1af95bfc50bbd501f2004a172c6c432a3f0236558685b0b4c0ee24699d2b3040845e1bc27daa75418702b4981b343409c1db06627584604a992b6831c5c5b04034850860f57324295de94602863a080ef547af137455eeeffa0ff6fb44d871a54007dd065b7ac01a50efaf0e57fb931b49989cf18b2a21bbce2c406fee5bc063826074b02d82bc3412b259dbf31d73dc50642d015238f8218cdb5a13fe621a550fe0fc57cb25bbeb3126aa65cad69960fee60837bff0f38af4312d9b630aef2e4a0fa763a757bd31ce591e92fd83ff474eec8c688971956d413f9bda4c59fc9f9da11f5dab3e50653771447380a331611592ac5fc3e2e1784bf4b4cb4b70ceb36380f870e68e85f3121a56d91f08f5a84b6fc7d06163ae16949e8a0f2f0c19716676472d40d1fe3eb6179afce70d4a381c60c086bfb2742f9209ed069064d4ae52cd3005abe930cce9dc94109bdb12e775eba475b967ef3bfd627daa89da30d2b46783270b9a2f769da5a32de8f66e21268e0e7aac1190c6e36b3eba0732bf7aefeedb4fec2f39d5a0bce32c5e7cd13b5d1094ef0465d0b03259cffddee269735ba04b0130cc02a3c76a2bf05954741746825716506abcf80aa2734077578aef485ff571cec042691bfaf3b40ffaca586f57af7992e7ede45b5c60d69efbe621d75e731e3768ec5f0cfa573f65c80c8199afd3996d63fadf8a57f0d47f1ec416960fccae9833cd48f0017c8d93dd5283da8112d9468a61f52f760daf6cfd228e75eefd9aec173729d5dcbea4b15dfd7fdfae0a7fe62bf9b3fb30807aacbdd591baaac1742c23210acfa876b8088b4b4a62807db121354293751fd556cdc6c946ad2c21
201a11393a0b7fe217b861a5a97029440c8f9bce28d80800eb117196f71479cc446675901d609786bbd21267b01c509901f3e37035528e907ff04496a04d33719f972bc6cfbd798e833fa0bd48de981709e865d57a8b9a0e1ebe20041a8150e9adc00303117ce753e68ab1ac3fb539de43a0f42b60072c272d0441a0f6ceb7a3b0871496694d5380feca5b2078495e6fc05c4d5087b865888bff6a20a484b7f204005823c8042def4692358836aa3900b060d19968f803e3e747f683bf09cc241370634100637c29a1e6beab64ef4c0145af8cdc93eb13a30438829024bed2a900fc86bf3336a8cd2c150cb94116a798dd02e2458f382d99044ab1852bd50bca67b80a6377135226cd15216e53cbaabc261948474a6b7aae1dc96d5ca01e8dc1efb67caa57bb2e61230e6ce8fd5374bfe8281ca232ea4b208fe979efe16be77018bbe577a39ff6b3cf95c39dd4d9ac45a2c75c7fd8f20dd952b1a95a4f15ef6c18625aa0181d6f46b2aae045368400d80f0f74cd7b2229c5f207dbf39482a973c31338d06f4767da301c0b1d811a1453d677a81312d770dbc89dc928d05efbe9fb8a18805a9627ddd3d847f78951695e9fde50d70e0a6128c37fba5f7f4fc2f6699cc6e4837fb6d001d095b9295e4b8321fdda2871a74529c9637ee2d27c120b0b708178d6b3d8267e54ae5408ed6e6065ff492728090649433136b36718ef34898defc5fdd0ada6694899e30e593b097c28837336841570b6e059216b097d60492af05605c8931618047290df266de3d02ddbe58b0d17aa0d8a02432b90e2fccbabd79aa62d8e5eb52ef0e86a87e8a577853840223c8ed53c4be323b8b54fffa734f0dbbc4a63179ea20fc7290d599efa687fa8f4210c668746da1bfcb2077f24ac23a9eecd10d6545d7f7ba7136c82e130503a4909e6b0944a2746dc921db362de8f1d6e18a64f9bc5824b4d089e64f86f8125d8225cb2db4752b311750413823b61603810d9c9c82e94a3b6b34bc53d43ff7f332d4d2b02b1d4bb0154070eea832b2b23c0c9a5ea0b582c1a82442c7b6c075851c6d0f1d55e7c1f7a8b84916648fe180ea2f6bae0e0f90c5530c0365fe83bba0b2906be4d40eb76a53920751273ac088f5edb17678611fb2fe4ca27e2ee787efed70182ba281c3df61abefae3a46938cf332e90c15bfdfa55f1976d50914636b292a70d1a01b12ebeafb7e82efc4cc87877cfd2e9f7975f444cae3b6bc5bcda7fc172038cdfa2e3ff2bfe1c3f55c0a47aa7dfb378095bdbbc2b7a1bc2748ab2cd65459946e3bda3bfcc140f4b34a046d4b34a56d1cc3ec24ea5f444e67ae34d5b13372acb4544a09d7818ebd943c1cccd700286fbea2f7769a4d3035a2e7123da8e6ef2a1adddab209eccda6376c03c56023dd2fb24c23b674c1aaee7a9e0173094a185afe5f75935e0410acff01e3dc88ad6394511a2c57c071888e59054ec50f4e134097db6f3c15c5015bdf0e3c1b5786002e733edef7c8fb9250d7e31ae4ba16101e1deb6e58b87d4f7c29d031509396869ce83b2de4063502d52bf60c4405a9c87383870732d2752cec9c9c84e1f45229bfc6d9eefe25ccf22c5dff49c6d2874237390a63d61a3d796f38ecd9fec34e1b591e5c4e491d16e4d24bf333b09f6af658bf227a2cf13d33a2efa1e296738dab17ae617f151f6908701b9f8a0574bdefaa171122420119cfc5a234af4434f327b4e3afa33675860d90da6dc007c829cf320386ee14cceaf0538fd80ad8556efaa1e6919ee3d53b2f85fae4542fc1c3f9721351cdc08ead805c02bfc4483a0d1d452c92812030dcf5a80936139bca171b1d7be5ad8ea34c727441fdfacf065bf9c4927e96aec1bbc50220b8ba122717d6b029caa2d2f780e48b89b78ca8d842e07e6324b244e953524765897a26a5d73deb94680e0848aafa9b4cf402ec898941e49554eaed0602d1d16289a4b72f0a77cfd844f19164e8ea404da4830c61181c5f4a238a0fa3a427dd497e5d636008937b9826c5c85df6879ab55af5001d7ea4c4ac7e83775c80f3d08316c83b3fcb7e6d1dcdc27c8a350fbcf7aa866b86e515193023ab1e9f98859317acfe138a69fd71f7bc19412468dc73b3299ac926ea815b739402992f42d7ef34e0dc648ac85c937ae9ac82925ca68b12b37acabc75442c34f78d53daf08ed45037aaf5e78a1be5b0ec3608617b44c0675866c99875f707264e5aaa87175587167d731dba146b9390f85ac3d035b46f63c28a38b977d7c4219bd731282a0e25faf0ece95a243e3438e28eb6c22ea18095a2929828d35496b0c503bd5b9419cdf3e1dc81b77b9413e612cfa486c34a5b1f15e072f66e829c36df1ad3cf0eba80c415a9b1884965501e782e8fc2c5d3cd94a74df911fbfa53e9ae2f9d9eae8c4b1122abeedc16c8d6fb5019332f2cd20c367352df3420556f1f363bcb83ab001eaa9be830d4487074b3995a008a9884e73763346be0300b8354a45b363b843c1c8d
ef716d750f476d83595a1ee8c16659cc74723488282aa31ed96cde6e0ac8662e232b7a28b864a4bb144caf05d143bbeca80ed475f0b63dce349cd41c73c96d09735bcdd1c395047c9cda5cb671cec213c0868d531e6f34cbace768657a93c06d12140f2dc500e29228b9194db0c4025005a25903415f5ac62a8d369c3a7014321686303950a0c260461ffc3916bafaee3efc915ed3ccd9c2888b1d717a314a4370402c61ec19a4b99239399250c9cb7359031cbc1ca2248594b0de8cb81ac3974fec23e4047460c05ebe2ec24830208a2eb6cf1c9e58fd772da812a68d37a57909e81383b6134efa026ce06f89402b702cdc6502339e9184a96d52b1ddc5941a6e539fc8dc5d159a2bddda5677a1b1615c6f52b517d55bc8c4be257d829fef47ed5e06953d688bb067e54f0e79c3a2c337cbea045cd6ab554a52f3ff6f9e3798d03c458202e3ac281a2eb36ee1e8b6883542f28b0eacacddf889ef3cb7ea909f106a09074b435e32d8dc3155bb75c280212e24d18a129f1a5b6d3a9f00bdb6f2cbc9012ce7e17ff1872598cef0b43058c6001a497220339e4411692cbf198fb3958b5d4d4a7d4de040cf317a3f02af297bacbff74040fa54177492145aa1b85a6c33536ea91b90e795c7290cd052ca344ab18e6f4215ba2557ebb2fcff0889870c0a8515c0a218e7216b8485f760daee6326b3ae82397382fd19f72c824fefe7ca2b026a12b8644556bef212c32f59f67ae2cbf03b05cd8295e800d065c49c8822fae459e0abdb2004ae39ba702fbf37c28caa3851a72d09a4902c7b695b1af311c71bba73189b8062b9ffb2beaa2331b2c157fac3df3836e60ef6f02cea4882af16d76115ce9a6ef8d3abe59914921a1e287c1191ce99cd115340af621c3fd0aec3a322a384bdf2206d5cd2952bba42baaa04bb19486399dd29443e19408e9f46d717558b00cb3ef64847742634d3fce58f4a683b04e323916dec4c1ee3222b623b1333dc04058a2bfb7f1a07510caa26a1cb66926314282e8d0dadbfc66d484a9352baf2730019755351687cf8d35df38d92dbff6674a32accc68506ea2ebd96d1d9fbcae11d19b613a4fb0cf3c676e45d4e72e720fc3fa20b2bd1ac56905e08d865f3f38f73575156f67f68f9cb779a02b6b98e3e470d64aea4523019867d9bd0c73d38b188db8048558ff05925df0c29d34b3497c19ec8894748a46aa064f38d4a5e19c3edf050b61962240dd6b9724ead64fee7fdf8e3481a6660c832324a98e21372d36476e641e027a15cac334612463679a8848acfdde6c895a05c915c0c3854b15a0eeecd946fbf4d7843c58f4590f59ae7d9ab0cf25ea854ce2017eba9aebdecc35a67cea0bc689d225e3ac785322c90708533e960bf7404cf5c992ba3c00da93b4d197e58343357d6eb1295dd657478e6206e51b514efb45c2c88aa82dd8ab5f772aff0d6651a365102186eb950d28fb98dbd0b78eb1150eed8af18f518b8455485d97701452d1a7423060f05b770e2be03cb18832f9b180790e21da92c48cbd63944ba0aa18c7d417cc37b729da9484e5183b06523f91140da174602b9bf26509b99c818c13450258d54db899a2a32e8014823d655324ac4d4ecb72612de2022d1f62a41c1d2f084c698f54c568e5156212f00de66338b9ca74ec0621981e6870b1e003166be7deb4d7806c35e53362b63247fe53938bcbe8e30db82fb43325b133d1dff9d367ec0f52d51f91a8f7ffdb8dc5d49be023b0cc1409c1e23b83f4b04dc4d6b6eb3c6d44c44e83fe5ccd730f2e55ccfd78d959621809c37165e18045a7640e10b0e620b78896b8ca94c19fd3b0e922dc44fe014ef9f8d6a8635e13ed1d9208819b14493f8a05f7f4a958300dcdb05737930fa8b32798bcab709363b1db5eff7fc040857ff6b60605cfa11e00f0f0be2a06b10fe5dc12c9a8e76a652771b9407b3015783491d37df490eaff2923b3b049c4873f3300f638d501a31e0b284ad42b8fc724a3ece63aa63a504401d10c334e2a7019ab643a14da5e49f5974274d99a4ca2234681279d43335e3454699b934dc94838aaa0447bb8276837510feeeede2c60bea96218c9bed03053972979b79d623026179dfe07b2d3c38fa371ae02995c474c96f6acc60aad7f46c8a4dd3aa7d29df5c5bbe9193ab95283d992eccbe89af29381c2f19a51a8b2ec2b6f96e995e7c84cf5b5ad72ae8caac7f2b30b617bb7724b2a337578be88880f99b4d1c652578750adfaae5ea2c9351e0e1cc2b7a2c91a680a13b3aef4326647b23ac8603d2b3b6bdc17ecae2a9943ca7e6cfe7aca902bff1f749bb3b3bff6cd482357f31f2ffad0b8f18c77291345b0ee126ed10d5950eb478544f3bf1608788eff52aad4dc3d74c473e70c309d3d4d39d6a5c6e9e1ade2caa88966a29b3d95457c25504967e2910bda9aa6913fd92afe3838556acb9aec334cc40a1f280c6eb2e0db6451ab76329f2b1864b32fc60a9c8dda3a3a3e2555ab957614
688a99d74493f6013a8c5b5c4c5f5d8bacea846d151408cd09cbacc940020f74c809ddf9d1f9c4a7516de19c543d266a7f67a1449151d84127d0da7ba07ac2192aaec54d8c9bcf8c00d4edd61386865c0929534cdcba0b5bbcd3363aa085d752cd19c11f2b161524e9f8929d0afa3d25c7c2528a4a2fb1e748258351df836ccea976b9d0846443a46e7fd39b9e9cfaeb6c7e7283f8875198508e2e0ad73e0235792e0262aa6da44cb7eb3a2822d40cb35dd593582763b95d6c496da771681e9de32bfe0cd3727d02750574a8be44ea33d29cda5abe7dfe14424299521db2dbdaf831532ab211b33dba24055eac8dfbe53ba3b83c1b8259489ed7202fd2e0dbb4e2cb7c08c4e1caed5e03bc5613b959b2ca56511f4cb04def88fd76c45247cc24a68195e579a4ee765c1e90d55124691b3e0fd0c60c24f66a9dc5384e49d5d46ed3962e174436b3bb7f4fb48abfec5a95e69fbd67e8e8e673fca8c93b445b23c058449684f6f4c0b646335ccefa387591ff7190b1d74e95c67cac9046ed3d7026ca1017a011d355f50cc419b0e29cc6c9777026aca732158c525612e73fb59c837430a68d4942eedfd55d6bcdc6fdd7e9191dd7f122883188490033030fa1100d8a57a027f2b46b66396d38a60ac165f5d28a64c7df63c33113618472b62936d6d6f2965925206470339032d0324549964d2240455d69473e729e7bc5556c6c536b1b22cb6098effb03229541bb2b25b45a328a5b6a775fae5d4d3ea89f5cc16685ef445af5a551fab4daa0c4b4d529b702e025a65ad2aa1a65489ada294ce4927fda4f0a439c67c7d6d234f730f79da419ef68fa71df4b4733ccdf3ca2a8c91d032a5d96ddcaaaf176c5afd6c1e4260d9412d9b49885a6899ad1aea6a7c7c67afbf2e98ce3b7f5a49f4d5364c738c76b6eb37821941e82a0d69b53624a14525d09ad5f8e82b9eac600f95a029315069822dd7399252494aa23f241815231d1f949c68f39146a586b71f695482d0a58f56a9adb452f93f2e497be0108507214328e0e821c9914f091db7261d70fc10853e8eddcc3e61b02979c3c71c02d37f133aa48ca060091eac52c0c107c484950768500207271c48e1d8c91338cb710b98a1880df4d02245871713902822c48b0d48483539c9e27232c56208524c8c0f38ce3a59e2ea9aeb236d8a45c289cc73328425a5490a521529452e182bc0ed732716ad783873277bf77a6109dc7b9ecc317a5e6449198a62f536df7bb30c2edffcf3ddacdbf25a61832c304677534ef7578339e1cc0036773a47804620ce97921773bcef06aedd94167603df29f4d24c59610bec30ce9ebd12b4f966064d9ec934f9e763cc2854efe2230d4ad47b9c748130c39a13b926ff3c37bbae418e9527d860ec0237b7402f773534e00970e67a3060cd31200c0fce39730773196711640a07a86182121c8bb67f9a1fe0cab1ee1b1003c61fa1dbebf08606976d96c1654b310803c629614c982f33bbf1014488c93034bb6c1dc528ae30fe1821cc9d0923bbe8e9608633db2cf3cfd350863733b8987f3e1bb8cb358603d2588e046a61a974ee74512671650fcaec6776658f5e1620c033cbec73a7a300cddecd2dbff0b0e8bb40223199159367dfdf853119bb32f2bba82ba46de48a5dae3953b0675398a6aafa8ac70f2123454735282afa65eeccef232eeaa261330d63e17bb903e172aeaea3808c483788797966579e2e1a1978cf959b8695d9ae040d003d973dce4583665618cbefe5cecbb773d1e8a2f9a2092f2e32637ecd46d64583865d14c62c60250d4aeda7748110f3cfb55cadb004d946dd24b6e8ec511929cd16eca30c7c6cd13ae02e8a15a8d32166100158c6f98a1acc0e5e781bacefb1f0726e83fdde05deeb36075e00745e9200b4be66f79c301606cf5d18a12306a255c728678cb2e90db751204f721b95dd31c61823b63f6e62545272257f72a5efe65152fef4e4522ee54f6e732957f2a7fb24337382a6c6b1ebf0d4353aaabfeb4f5da3a3daafd7c31058e6a60ef0b6a9d1519de2a64647b50d4e8d8eea9beecc624b43c0855c08cfe11bc26f9e8394f9b37cdcda0e2b93dd14fbcd73f0264c1b46e5391ccdbedaecc76c369bcd2617b27c586bed919dcdbc4a27adf3aa7378f9588ef967cc36554dd6e22ef3b85096524a29a59581f302e7a3e3cc9d2e83b9d3b8aba08a19f48978b9f3b2e7811fb4cecc56040514c0e66b18021292b5a11ce802b4210cf63c31a68e071dc2c0c975ca48e94d0b7ce52b39aa16f9144c650f1ff22b4ee4a3721cfc061fe236780dee011772214e8377c0933c88cfe032780c0ec461f0179c036eb3d96aad954e1e3cc993b0cea4346049af590c4ee44cdfcd9ff8133ff2250e84dbdce64a2efb6ece8313b9922bf91367fa6e7ee434a7f913a7f9137fe236b7b9cd59f027fec46d4e84a5bb129615cd1bae2f3f8065a62f7dc0f463f7f377c3fdcec377c3f37a0dcbfcddb0fcef86e3bbd2
6d0d5acc0d348061a0b31f6de0b60883d10167e24adca8b3a4d3153b9457b06f560a960c2be310d63a33cfbce263ac155fc349611dd67476982bf81567e222f0203c0877c2498a71fc846a3f2a9e646b4ec19944fdfc88c0b138e5e7ec965d61daeb50cf5a34ab50bcb558b8b57cc41633bb938e9569ab53f1318c1d8bfa15d7cc313af8c000d3be807d9cb1d628aaf98a4a421ba66af02dace0ceb2c30c5e193c0b8ccd026b549daa5e6a55eb58cdd7550174bec34ab5039601f80bd6a9a9336b14d1e4a935aa46d17c87392e4fb2303aef813dad4304d63a57b0ccada80e055f15f7e12bf806bc8707b90a9e82a3e03cfc04dfe13a3c87e3f01b6ec381baaf7c2bf854f035f15d7d29f850f031f1597d4b7c4a7c497c5f3e24ba61ef49df09be23beaacf045f093e127c5e3e2306009b9c95f49ada0adad035a35259e57d357c3d09c7f8ba7c52dfd427f54d0d7d5338d67d521f97fc49c924a1855209e47c50df53ce77e59562ca1044af2c59acac69262172e1656eb2f25d09ddddeee44eeee44ecea5bbbbbbdbb9d068811c404429a773d73c547230825bc191866546c322cb5e05d3aed41e0b101a16a138c33e81b39576e74e02125d1ecc3b773a0149b5a4d9a425cd6c91524a2b6db7514a2975efba5ae5132c73535a6bad61ad93522a69a434b67a50a7c1b4522a6fd71aa294d2171aaa35c1327214a6d6ae5b5568e85bada1966c07cf818873ce49651f73cc9cadb4a70311638c31724b7ca9d65ab160e1380e8803e2806cb7cd3eb8118a8f4617a06001d5a07065f1858aefeea6b2f86245e5e2c59969fa93826996b9b34f90d15bb1fa48ab8283209a3bb19f3a2376795bc39b19a14f90914f90518d47942e9fa0d9dbd027c888e881a64f10d17b3e4147d6fa04cd7c82e8f5093a9a46ef1354f43e41443f5d3e414451c62748365ff1a9cb27a8c8278868bee2cf17478315aa182b48f3721c27a3f7ce9d1afa7cfd5c0f019e3966e94b98cb31fc5e65a53e3ed2aa305509e28192843ed2ac305541422d942574da6c95cad99536cd97c4f22557f225bec46d3145b8124f722caec4935cc9af70b1b1b32a357696a5086741b2b3215c930126cb0e992c30c47ca45511f22d22bc58ac666b2d655190722c5ac149e7e4388ee3720de6b2046138d76b40fed8d93ae6b7e5aae5b9041e131d97bab34b5a5c4b23a0d10a03809494b0b1a3c811a1125197147eb0b1846cc9495daab92a9125da92d173dcc75405051610532e19515af245e5e438a18093e30b99c3711cc7e57c4df413a2e7d2e0a4f848a3524585cbc7729af01c0ab679a982e74fe69c53c553596c11e5bb610c00f0f2862f78c994f3f2865984ffdd30f8df0de7ef8657bc54f1f2862db8df0de3dcef866fee77c336df0da7b8df0dfb77c31f0a1b1f70f7b12ed6fc77c334f7bbe113df0dcfdcef8665ee77c3f8e50dc7bcfc6e18e6e50dbb5edef0fd6ed86bbdbc61d6cb1bee5ede30f7f286ed657173ee0517032adbf2d0b197e963f6e6613588c103e0bd793d4bc7e2c3073fbfd7dd0657bc8ab700e76fde73dd066d52f87bf836f8bd277302c704c2d8b9c635a37b79e23648f31ecd6df0c47b35b7c19997c131efa5b80dc2bc67731b74bd77731bbcefe1dc06bdf72cb80db6de53b1e236d820f71e781bb41f6e82eeb21381e9471a140d5032d07a042a062817a82a92ba03e5a2eda71e2c2815bac6d1c10a609070afd569eaebcdd7a7aaafb5733c0da14585167680d1c20d12ac40c3c22385242a2c3ee099c9891e5c4629ae4e49bed6271bbece7cad49bed6eab0eee905d60da71ab83abb56a71c6274e8b483de499d803ca594d223ba9bddf791e664e35118517362aacad244ace749e8f9da45ce52105025341509ac67be6a76116376d391efce5dcf6fafeaba6b2af233d3ce3fd29a5cf8131f694d403c3c6b1b5b6badb57e04b6f6f5f34214243721281c5137d81f1b4a38cce0831039a28ce0c05a5b8fbcb5b60ac71752a3a33a769132deeed6ac4b74263896c10b4000f1458cb17a70730473429c1aa6f315c349c23f34998548e7741de6cc6e931d4e06e94d87b26fcda4fdc18f78115f722547e248dce6465c8b63b11f785a5c93ca48616ce7a2304d28a560f3a84f94a2f0a81a831773625762cf2f584fd761ec1ec31d0241da82630dc43563be9552562fb352cd9d9a876445338b646b36880552c19ab74ccdc31a620d5926d69050f378575367e61c37c8caee0f5b7483d8d91d6291ae115bf3aca0861ea69bb350538789c56219993c1da4814c69203ff3957d64d1d97cc99f24d3c73c6d1f9b69daa6a25cd06ab98e765d4fd945cb75ac183b56ab2574e9bd3de56d79d7156f74c518d88d20cc1018231f73ccd0908f1983b149f431cbc866e8cc4c4f393385e62b66ab2f57565f668e095485743405e9c8569b4920f2870c2281c81f55fa98535e3ee6a69afa52d58098a78b87e1221ec1a494524a39bb29add
5d26a2dc7751dab455bad9eb2c5752c56abe579b7e5ddeb72c1c0c44819137325c657cac85c3973234d4d4481e273eade53fa0ca39cb9f284a491608c4a14351d4cac022f736dbe62d64088b705f716a8ee9e41cd9dde1277bab913381c91c74ed9aa0f39153cb0535f11533407dd41000e064ec8cfdcdd1c1007c40175777777dceca0c9d3b3a2f8a233d28f5a901965a4a988623d60cd7755f3704055cf0101c166cdc3b24d9d99a190a66a5e585f585754ac1a073469b7bc224136d06c48345b905a35d2dd353f6bf3e54ded533ee5533e456357468c5629a4bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0390afa84470d2d756229a01500409d316000018100a8ac301398a8469a688ee0114000a5d8e406640329387e270501818c55010c3500cc0500c82000804410cc551188c69cf460020f507b8ca253bf59991f2e03b1df20effaaa2d60d730a1f70502bc2aa3d7b2f23d09c158c9f356e963da1ad3a1008d37b6aef9fb419f32917109e4dcdea9200b04948b857114521575d4261e93163155d13b9d7c5d2db5245bf3cfcaeb7d8e49881b1ded6893b5d42a1f3fe4f40aef347f2999511ca08e080089b086d50fb79f9ce35d4e45c386e4463cc0908ced846326e1c8eaa11653132376bfa9c71a0295401696fa807b156a4907c38396e0d7921b9b153b77609ab908bb3a04156342f3576151f635380c2ba8b469d27dd0a2011aedc365b0a86d21a091539f07069e566c08e9bbd129c7e322505279de7a7cc30568a79c1578b7aff8981d2ef33595e86c2ff025fe6eb9ad09277b93e016f88130984d00846d26e06a89b0599c9e9e2f632a9a3b356592d58256eb3f886c73518e203b64acd1068b2cc2a52f59fae05d0e4316aa83bb373fa2756a84148a9bd806d7191575927c54214b4d7e3e9f2a4410c05b41a6a007c6b16857ec646ec517a101ae219851ed94fb938bb66159103eacd92a146017d8e8b5ecaa270036d68d7bbe9f14448b6e7641bf965a444e1f8a3c29ba9dbcb13eb5bfa8ef0019c86c15bdeb33140094e06a62bc88b5fbe39cda26d6dae799e20e80697561fb43a511eef5418cbd5e021fcd229e91efa6bb7e6be905d68b6d4e1a5d80b240fac37e549e1fc5cc862aaa9a5eb8b1f6be7e09c6ff3d25be3b42b514a3393857f98c5d3d731b17ebd25bf5fd32d0f06d9cdbf3a57413786f4eea5060388a993faaa6a75ddd0a603c7f52e6de1599e72d59a8fc8322de34be2fc5faf0d36fbd1a2dd559b1bfa72ae39d1777a2b00cb0dd3204b7c2f18b501453b4ade92e6ddc2616a74991407efa201f974b42d5370045a645bf91518baa8e95f66e435d690e2a1a9dd4961bf09f6caa2c2bd08c11f0360e0401c998aa44e3d08922ceed09f9e3e2f881f8cf33b5ae36b0f2dc0d92bdc8bef705aff6c7b0491bd75ab1bdabe5b69ccccafaaea8aec1603335b1b38d717c8800f1b41aa94be4940a4980da4c6e11be1381655122232ed3c271eb580b2fd002da468fdf20c821aa67ec8b5a8e343d4cd54bd6406e28d77c9950872349b8ce39dbce167e544d68addf4d10c9cec2032551cfc2e26ba9e811651ccc09729fe9a745cf0640b3d91a205c719a316528c92ff9b834230575c4383116e2c7ecd74f2c365c0f67bff74d1c197836e7f802ea0e1f9c3e987ccc25a2d2515745aaafc00137f0105ac04231333f63d3ab502066d8a6ac4e8c9628e4aa4452829d66b064ac66468fb2d7b8a097700d55b691a729082372a17ee48346c6f32cada04dca5b2525530ec2ee41198cd23b592017c79072d01c48521bc31e26f2b589e2e1b0f0a4c69a528e7c3507881d1ea39337e8a6a442c4fc20feba9e5dabe942bc1bd8c167c6cf8bcc11045abf768bc045369f0845dbcc8e81f92277cb968ee1270c9216824fef9c6dd0472c25ffff6b2c064f811dcdedd2218fb1294377535bffee310171604522c0d984c0a5022a749cb43efa9565610c6474c0a16a49bd9834b8749149375a29a770e2c95012bc1b08f0b41ac2ad958d272ab31314853eb01b99842586092a7a2e8dba383159cbc8b8904140ab8f56b08e81e2011fea8c4f6048e5122bad8baa545e185af42492d6661180b830543c387754ecaa3ef62719ff5fc4181ed0b73b4ef9faa4442a2ef8c8a9415793f5c036527e88297fadce25776b3ce240ba50c0c0a3e60730815ac0a03d85ccb857b848545b4e51370b49fd28c36ef2e0b57a22a707b0ebb1b9433d29342bc6749d9d0e5cca870591c8c5bbfcec5d4f63a35546083cbefe2d9b58b619a4deefdb43daed7ffa0a667d7d8496aff950f2d8346730bc22371aa6036b2c525ffda4a404c0824ed8499c483a9397720e8d357b668d9ac684235407bf67e58f3002bf60f6cdac0c39db1f32604ed6f260a58327f7e853b1a8786a38fdd015fe026167a1c0ddf74d8fa27c504e5f0fc68d014614d956909c798ebaff9989ec9295544949a21f1f2702e0a315f7628ad839f916c6b0ee05c
87aa04cfd5e11b9806a4b072ffcc4f209e9d81ed3818bb93dde2f79002e06967e58d87233042e763a833b6b89de42311fc13b8a7a92e084a2a9b7082a58953eb164983d7eccfd337761b8bad4fc42bec48d250c24e18568cf87d9b0a173c80a236e800f4811385c8e45a00907b1ad5ad5af59ee17ce6351eb95cd62cda1fae887270cef7bbb11ae0f05c2572a0d7fabddb8e903b68a2565f7535640ac78c09d0d9e588cd97603f649512b59b493311af11eda38678450fd871a146b53a13096cba6ad4c84a1fcf8e5201e05804a684f0d102c6d1ed73ab24982e4f09cb03df46907214252414d833b5dcc77b4894d38a41212130c15d0f49c43f6335e4df2ea41a4767b3d9d669a84920737c45be4b312cdd9015e1a1917a57b2b8c0a669a31f593fe2673f4518979d09550e1f6ceb9c9c20e2fb3fe63bbc26ad2716184f676b7f03342934d2ab45ddf28abe1d86e2af5db92db19b0a2c477865424bcb9a11ea486d2afcfdc6f00f93e0fcf499f3e2044c33620855c46442c7021f7d33b21a8949fabba978f78b44a61f334cbdf0a9e79bb868f593110edd33ffaa4981ac13b5508b396590574e12f90bc25d48ccd9dd53ee092045967016c406e618ddccd7d8a4fe8c382c834ce727b2d52a862b4fcb3d98f880c6f205104a7e029c2e40c65d0767b6899202acf725a4797102375795ac5cf5cd86362b386b1ce9b897ff9002abc9d5707a611a56f351e6798617a06bd6418f1e0e88ca17f7c0f4a98b55ab0ddf1c281aea92afe8aefefdfbc216c5ba8eaec16928017cde199e76773bfa251e922393107d2295fda355a202c628095826ca8e81c406df0bc4f2ce5c05b3d43cfe336403d527204e1b534449371df27bca9f2cb3d8e466c01f2360e0fe809fd8081f8411da87bbc01d12ea509dcede385352638c26c8619728f535a6c49a4d8efd79c2ac9a210ab27d159c9fac66a228b70f4fe99deec993e2ec6bb10adc6c437aca53b25da23a4dc2df79c0c176ddc2fa1a54bd5f54ad5b2dc32faa6ebdb94ba8b308d2660dee1491763f30692c9e5dfb47cf0720e78827745568f8d63b6caf8f14be1ac5b23589f7e846c72ae8d5b379ec16535d228f8d1c0c324c93ee2d63d3d1016561342eeb73ea0eb0116a8ce6f430e6fd346afc4ef2517f18df6053d674b746556d80b35f245ef3adf1d96cc23e331a49b5499266f34c9a4d499e4c9b38316902d99d4dc0f53be79b4ff3e96e92edfcfd456a2e5f228649dd469a8fe720f9a678225f315a084064b4d283085195df4413010efe49cc3bf589340a9e90a43c451af96537db15d645954e45502a49182e158ffc1c1e99249b6213f99f4ca7ef6d0d329374fc0d31752a23d21c3c87e42bc513f94a36c294ac62ae666c16b629b87c8918367513693e198ee97b06f9c89bc743d24ce1477ea02d66e79889b00333f0d29302f6823fb886f4ac41ce319120a8f9240aa1c7afbe7dce34c3effbece272e43c53e154bccbc2fb8b257875c992386c5cb9aa4d07b6a0afbb66d6517d1e59b1999f81801d09876391d06eea13892ee956ce277bdfdcf055ab4874d6cc37ecf5e716508afd7f3142ad4e6c320eb529129d589d97a10d875bce005925985d62e409da2fdd3e48fa501163328174dd81000c60ba3aae4fa9580bed46e525a19ca3d98d9a9b7a4143fe818a5bc67da654f7f57c2725fd5caa821385131204c543765292c170ad447ab17fc9fe5e5300ab31647c2307c174decba5a9cedddfe7abef2a74a7e384868eb63a7e5bef256a9fef33e329484a6366bd6031610e9c4ae5f9bee2ff5515ca89880931c5234fb7c6f48727d973f723c2d2bbb2c9d93a308eedd15a1ecff1b561c257ccd90dfe5310ba7ec013113516f8e7a406c12cb07f65f7a2fcdb91abb22c851261aef89bff5e7b2f5b104b2590622a5a325f7a7063208906b79f8243a1fccd2ede61eb4973b60d7d0ba7508b2e50905540013dfaed0b7ec5619feeb3e152278532f3192ce2e807e2abf1ee807896601dc40fbae6bedfa99531acd57cf0e0b420aa61ff72347d2239b84e267117eab906cf2e4b25d71ce3a7c778946ed7497c286346143f5ce6fd190d36255e962d6d31dab5d7f160d828b498208ab18706385ddfd4404b1488aa3a229f2cbaefe36a8a69463112a19a0c38a3513ffbd87bc78244f065587db7ce699fc980f3e1a7c5360929b7a9e20200f1c46922bef31f72acffb2d28ac85f6fd8f0bf98b80742b6eb9c187bf483a1614a2e98c0ce87f50108e3122b890a3c88be1a9c077e90a4d47dd30c9f0fb13e44ca71de08911da9e65e7205fbe82a6c77554c0ad986742269b30a038d45d052bd96c36256f82a584a15e3952bd654d329f952430d30ea61a626e7c72bc3add3eef554e29da1444278478086bff28ae2c9eec153f2a6fc529672ac945947582813f4be2614dab077bc8b5fd2743c799d54e41fb629f327aa81db32707d3837f1df4597d3c2f158ce5ab9c2fb426
115d4250aee50fedcf2cd143cce93851769940f7cf1281fca244e10e1de71f51fbf254c987143295ff387783c5df67061f3c017bd06e18efd8dfd4dfb6163ecda20260a993fe7f3b8e5fdd16a106fd8773b63d7c62791c3fc60cfe296f647ab4318ed8c3d1d78459bd62418a843bcb11bed8c3d1d52c58856e88c6a10481b6347474bf1a25ad5647a67ecdb102056734228238b4212da7a13c731e514ff50bd76c0fd59203ac51ffb864ba576358edd1b37bcafae8d38bab963b6441a852f0d2c5b554de9ecc3eb715877850e2b4af5d855c7749e39b247867a04290ca5844a2ce0012a958b1bfbea3d481b78ab8131545cf6b3286ef54e38e97e731a6bff8c25196d3860cb84bba330b2772a4ee5f923681f32728351405a05ad58afeb76ef2ac0f88eb59b9e6584bacebc369c717a5516bea3ae05645e4617a90b1a7da2c60103c2aa444d8737db04d2b627785ef56713527ab69a0e6e03308e6ee60ce98a6940ea9fbd081c2b31028f56680bb9840e1aa4853f2d823771ef709b67e88c70ff68fee9c24d334ba114e4da2490ddf2e6c872b05c4d10e4e2b5b9c1af0e8c74876a28f4092ca1e2173113da254ce39b772c4e2151adb9dac4152c149118857261b49f29f229bce7af09d52393a5d112ea15cb8b917b76785b0cbcc101d4ee99ec11b4a43d06551703f0dd5a07af0fb318578559cf971a84ed9b5044241792125aa3e8d701f6ca1bb5291cf1ce94b9001258cf6911a0c146770618c277001c9a1a57bf39003ee90295456cb529a12669a0955423517cf80ef365257c5e31d565ac32b49a4c304fa6ab7e7da5f0f097d1fae29502b28c52db12f70d2f09c70a93c28e1c984dd510bc4ecb8c91ecdd4a293450fd6f879c4264aff498ab1137c9404d997b3d1206d9e0b329ea73b1d4908a424747e670141a4c5188c93b88a34a2bd1871fdae5ec29c418852fe45a2d49473f3c3e81c7f4a3b9e985a8a351ae49f5dabeec88107877dc3f0f0ad31035c31349606f54de529e4c8c7e8d62b18cf20adeab30b9b59436dec298a0ad816e079fafd29b4d508f0ff3783832d1c8855262236215ceb444a16e3c272caec7d2ca81c54374df7315f936d137582e2729e1e087969caea4c13d8d39666987cf34cfb12f1debc63eb6d496f6d6d5c7bec6093bdd11982c817d07483a48dfaa8fcd7b1573440be25c57debc94b994e1216e8b6e6d0d2af4e3cd6a231e70e8610a917bae51869cac7dcecb5f00a7a17cc7208ef2575a711387ea98240d66d70fd9af7b02041c6924a075446ca8c0b22342d410d5213cc85a26099b801ddc1d93c29cc0b973552d8a9c4f58f18e6468dda8fdcaffaa4fc756aed46297c2426e6fe3b58034abc2a1370df0568daf5618d531400e5ea86d1c1c25d386c8885d7e336b858f1043326a4055e1b00acf81b845e6693ff10e27da6b2afe8a52badd4e515ffcaf8f1e04c970ee6faafa834e1f39c72fdaaf17957db258125bc24f1627d2210b7bfe128e35c2790fb0bdcfd48ac7b505cbe6fd5dc9460fbec13168966b2738b62e77013d28d32f2a96c074e45ca11658d2ecfbb7bd5985c360432de828d820c19ff747b8595ee91fbb903e186805054def485ac4289adac9de65cddd04d2ee87ab455150312244be8725fb4015bc1bfbbd892d547dfc3df68d58661d22c51c4651bebbc5b782dc07096208510a20ea8a70bd26ad2f58446742567ea768a746c62d5c40631d9e5b55220738700fd92ca17ed29b80382a9041a12652e3cb9ed02114185655bc65b99f46bdd4dacefd00354f37a1651c48e4ddf4becb071374758af4d428a5fb1d7ae5a69f4809b962a1705bc6624d4eee8dd82933dbe986dc028dc165a0348b225fa371a6adb53a6e1ee61675058db207434eb92d591c934c54ce8d96d54f0d44eb6179ac4098751666b42c79fe2f4acd7fb3f0007f6cabed12bd491e49a9d0b9833febd64e408391c658ee2207dba8f804b8fbead2e989108e7f7b3e2468c7aff5d778729a2df5df409b23cca6afcdc5165960f1b24a494142bcf07fd6f920347d94224a2bb45b8c76724be3be61f67e6c5cc9503fc5d7facfb433d40ac7891c28a1ee986a13af644fad9942bb699a336b9abff1549fa8dc6720837820bd9273649cd79ac164ebe44cf008b33d26f9e47c41303d2612679a6b8d15f10a7ab57293133af76112d3a2cf0f2759679f9d3330073035b3bb6d52746d8a38fbf0e7b27c52b87c3c8d00f92914e59b9324f862b07557e8c15fc6c456ee8a684e1875bdd21b6cc58370683554bd4e85a5c59a1e910a004f0aa9f4b0366831e12050f688f3dcfd59335aeba21bde0c90d09afa3f4f04acd7042409698ddc7a374f97930605d5cd00dfb382777e400d05a3165c11a6e5bfd0305be9a49053b07e66a9fdc4ec681855bc278f9b579f000fd8e6d49cde8abf5491682f8ff23b5fe69abdda1633caa21c5774d61f8375fa43cd6c763fc22c1cd7d7c60cf449e0eb996157
ca15deb57154e57aacbc8adcdf2ccf10050924d9aac73dc613817641fdaca064fa0433d9eddc0a3c796d61bab8c025960be193be8dd2b03aa74f4d4ab7a1f9bf18f4bf60a0229fc13c36f8c79f3405953da2a87508053ac5047448d0ceb627bcb604d1d2ee52fcf74ff00f3e0349ca515c3425c033f9301edca34ea71d91581076f337b1c29f22b1c0470b116ef639c8c502b3ec42d15dc0893dbda633286fa09ea80fa0ffc9b007e00b4400a0699db57786118444a6e8e64243bc5ccc43ad63b0a28dbf990f3f73cf489a19aa81772a0fcd3a0dc38366073a9a46f13c128f9b9e9134065805e76981263405e37e9af34fcbc292a6bb6d44485bbcbd0e1d06980837664e2f1c05cc31273c0defec73fec5a1a4043bf4e6a7843d4d7b9da7a3edac38918fe0e357bc8793abbdad3c038a3bb599e9bb95738092db3d5bac05dbc50f0540d6038331e471f156a407aa0c08a528d7dc09f1b7cacc0c0c1ae463c38d748dc5f5e131b1d470254faffaa319b216cea4a9826951fcf5e1053571684dd16bceafca24336b404ef18e33abe65e425650a99098df89a8c28a108937240cc403f2f51f5170a761a91501b22bd99ea8501ff6ef55f741325587182490981f5d806c04d75116123c44d728f8ab1f1c02cf85991df84745691f67943f2e96bd725d99b8478114cded3cc9ea0ca6510338903ada8cf9d00a0176a63c39aedcc0d15d395714f296824b0b8b49621b522e30959750f312d57eed0a3298e090cb9196b60a4f45cf85349205403c7ea2e08626e69ec5f606c8a215b71f7e672e9a09901139c5b5c9dbe2d36d5a76919a84f8a35e88b4d4ba98c10b36580013208bae1d4b34ce2e037ce790dd108371e03c843fa9cc8ef890dd485e741cc6d61746bad8e8229344c185e7a2309a55c3501e445e00ad02b46be717c27515cc93c5e434c115de8a70f1621014af9df1b9ff13052424579a8f24f84c77a51c13b20d4b4fe1029f4173ee7b6477534c9d02c43dd3580603407e509e53f2cc31a127779f76b4cca110200b4342dc01aafa558046ecd034eec23c3b473cabd6ff00b5a069d1fcf4206439450969a2c256dbc3bc03fb6e7cfcfaf589889bc32d6c4eef4ad30eb6cfa68ba517adcf9c029df059e9cca8a07d64443f927995f2f60fe891ad2bf40c03fb3683c830fd2be9b2bb4e62a41e70cd11ac4c796df83a4b34649e4e4d8b8042cd498f16f71a7664a9d5a9eba76a1132237f1ef33395ed5be2407f0cef05e9c1bf2142ec60d7dff648c0bd39b5098061ebde9e03b8f79d8e47c435275919ac49c28934977e0c61f8a35152ffe94b14c51c1b3e4612fd9e385ec79bba7d86442ae52c1ba91f8d8ea247620c94acd163d31a7a63639f225e53868dba393c55ece92ea4a0f0480210513f0bec9245548ab5a9697a080b4faff34ecf59aee6c8fc635814e93fc01f0dbfb2a310adfccb5dd26cc24cb4312bbd18f907d4454c6ddaea8ada707e44137245bc1b3f562718c5f4c9884bebeeed9ee8418aa295023ceba943c147a62937b5d0cc71647813e804d6c3c65f1f23b4b1205ca29785839314da8b09676194b4625c2f45dbe3180693a42189a9e3440e4ef561f6703f15a592eff21b9a0ec4261f80e6a07141d03ff21b2b34ca0f41baf813b3fffe260df64bd946b2ff2bf34ee292e0c5487ecbcb2f2fc6188007545ea849517c879f5a19e21dca90e124122e1704c484425447310b5dfa708c99bbd8cb9f3913909bf9e97109c20ede2a7e184f2414c8c996e4ed08113315737e10748764a393278b6ddf749aff1793463ac9b407e5fcc71623658138cb0c76b1e7518a8a9e010cfde8e3980de8af3928b580cec6f276eea84042b0e32e8080c9bc613c2d44ffefc2d55b51a2080678eb5b80b30648cafc8bc9f8e58ae508ff5c5b177caa43842bb1a4ae0fd8eac53e25d63ac254043341b2a756de9d3ea8897f35e794fa7234a0806b9202db8ab21eb08241996185a9fce674a8d7a089cb0286789cc3d2c98f5a729362642e982c5d7cb460c31fc9b860aab254b82f79960ad5af6e333ed7f22de088b68ff347612ce17d3cb4b4f7d1120ff10bdb0445431d76ed658be322d62ca7df61502cb438dc888b6bc579c165f85f80332b478dc26e1d8f2b5fc2e0cac4299579d798833ad979152a463bf866e59a10e7a8829beadb6aaabe8e80ea0326ea948b1b832aa8a590b66ef0fa830589cab8533e571e8444208e2f84ce7ec8107a24b740ffa06d65ba984ecd5917ad2dcc30f716f908d77d592c3e5f8dfe5ba1df55612d9dd2ec962c885e9da5673f4eab648b16aec63e0e232e7b9cbd0adcf4f84455651f077ea8dfb786841cbfdbedd89905be04960926712a33655a4f5d4c5f05ce0870e0f5e37ea9f0f1f47b3731bb52e2ca086ef41c79721b7c1b8ac3728bec521c42ce74226905cfc431671bf9077792d7822e70fabe2aeb991f803777721b16a445039a9e1374dbb167790db4b094a8f810b
5c08bb0f1a45949835cd738939c4fdd8c970a02469c4c152a2e25e6d8115c9dcbae1b3d8364641c56fe5bd08d661fef538d4b520108a5ad5fc33018430800fd23bc8f51788816f57e11f070e51b765e063af36f0b175da22a9b3254444dffae59a6c77aca07ab8c6b94fd5ce3e2f9b675d1763702910ce0e45d137cd95e0245da6696fb63b44c295e6507682b25f63051fa4ff573de2711254b0d70b3911ed964cb6506c00701b2b606bc00c16e02113d710bfdf5a57722d5b4b7d7ffa08d6f02f6bac4b105ecde412295213264f5f9c5d38660acdd2dea8d3b0229a03f0313a812ac5205f73191c8937c75be610fb82e19ae3ac24125d320913422a59f8656a1115200577dbfb888eb152184e9161e892d9f2b297ed09b92216acc3daee5aa6c1d3ed6df49d7b42b8b7c4caeaa544110be1b505ed94f3b3c3e025f4763a29d0890a3471432ac2fed4a0a48034a3cdf1b410ae84b68d3f27332583e2c6f9be1f0ee019e1df4ff6544e427278e740dfaaea2cc5de67c5677e3aca52aae2185d7737db83d9b27b83116f6d121cd2c1214df89eee4af62bb04471881d50b089214006829d5ede30af3089bc19ba608fe532933de5d0bb5051c09003096ac4ead50d83fdc8c0f2a288786b985f6200f63ba990e8012a1b98ed36b4206c89e02c06db381f164d5d97933889037004cb737b2435a4a25dcd28fa7ca960031ac75f9ea43b715a77dd79581d760ecb70ccb71bb50386048477426e02cb8cb7fe50c10177e6297556016ca5bee5a6f28d7e07df4d8a1387f70e9739eb55edf3d7b9b78dc5d8e730901a3991805ccd4c233767518cbe903a9e04937fedbebb90e662b53d8e1bc52106927cb579105d546593932080a86bcff30d0c9ff54e3e3945b025788e2ed033d1afb6ffbabf1919ab2f926bb0164fcfbce4170e826cd7b5ae3e0b8bfa4b9729b9494b6e4c4cdaf3a2380ff6bbc2eb0665eb1b9af1d3136127ac6e3356c9f50291d23b2e3c483048034b14e054fd366711b32eb41b08144722994e3538f5f0e4b80d379c480c953540a6e026763621057609adb936dbdced470a32c86389867291d2cb3458cec2c10369464a5ab37a302d83125a2b20ec6a69114bc6534aa9b0faee7b1265e978e24db5b087149d09904cc87232fb647251eee591ac3763d51813b32f2ffd66fe96150a85a7de1153b35e8d8bef5db9a755fd9c1e5fbc70131941bab9947aebd566b928d4fb76547d85a6edd6abcce9471bef5d372a0dfa15e675ec501c11a7cc324704303048532e0918341ec9f0ea468288950ddf5f72f6bfd61906094fe16dc26bca160a71f3cf280bc64cc1e6b73cec7b9dc45f35274e0d6c9e07b052555ebadb64ce72377a8cdc76c7bc68d9a16dcac2543daa5b2cd778257b5be96c1c53aae017aefbb5cf885f23e28f5ffe50657df05ae92ef8d23fc6a60390480aaced0364f33dbc5c490de25c9806727864430603085cd23ab284d3f2950f74f22db28677270b76891a86d5900ef2a508cc7f9da42071c309ca06b81b5360f388720358a8fdde13c01993842fa077e73ec359afc279d4e884c6d98744d12980e7e1e3a5d32f9f7130a3b92e85481b59af00bacf4396201518b75b3f9b5a6539b7ebe0e134a751a0af61fb414514095c834e2a28e9673df047f24d15e4ddfcce4979534c660949e2911f3dee8ba35e5e8d5ee65b156c6793dbae8be0715c713061623bb3e30721badb33f41d0b222061a9ad7c2e84f6383ffacd03abddbe4d0a4b3c6062178cfed4d9155f711077090b7db6d64d01b564bdffb121e2095e00d0122f8e417c841b70c1a2b2de68d1d67f15961790f7e87b58c3f9e3030230c3acfb4a5b16530b37ccf518fc447f3fbc8b0f77652fc29db370d257046aae3f92085024408cc4489677d4a63cf60fa49c72e72c6005349cd00cd776c76cf408811725755ef4c4a8c9906d0d551e01104da3e386479b956af54bba73058771dce0be514db7985b769701bf1cb618785fe3cb503ed35061747280019ca7d0efe6e18ffd29bd8d77730f49dbaee65fe348d940f0884abb5e6a44f0f891a7950d8a8a68e17174538e6bf34715e0676acd7ad96891456485096a2f6cde4a61199b8f859bde5f59b8fa346353cdd922a4978b7857d5277642d9db03c18ab1e1367f51597efc0e543699ca33ce4dca26945a3dd441e5300f9b9a240d8e70267f0dc698517a470d22e102b36db976ba8cb6cbbc352a4b6d0206254b1dfb862c82808c71a0112f0c12308cb47d9e026b07e9185b8fe9a6b5d8a844ce73960f0c49a69d0bc6869ed78e020bc76d61ba20768252a674ec496c5f2708505356e6bc09eaa57102c4fd91bfb2589319d33c3e466f4c26100ab8798b90c3515285ee73d7977f4652c803846384eccfe42259b80dc7b2015bdfda76eab07f5cbbfa40710712c97cec518859f716f813716e3a014a3ace698d158b961a98f1e096a66e8576f025edc7367f4f555d309de61a12849
9f3e40494ca2ef05032f7d4a56365a2ee8c52dec15377ce929035449c3e9d41e2e140304dd41fcd22362727654f8ba5558e68365a923e06ecb1d2e3220e58eb133dcde8d03cf8a177dbca9c9f14d2ff91ce46573ce7f9bb8f09b9a5204ceab3a7986e93f1d0698c82156adb5f33c701eb09def8582d871276deae1148b1a3d5ab30e3832cf1fe3b40e2d1b2ae697088d05a75b796b28c17f731ead6a9a3ba535b8918de834f9d67893717374e45f2ed4fac3ae9c814f8d71b30799ae9cce0e9f27c34a155ca04719577127946c07639c9cf478ee27e4cd8612f54f3182e3858582c65a064379f4e4b8ef66063502e0b8fd57d9ebadac39818298289513741fd9a81b634b11d88a1e3de080376d6dbe97dc970e0bed5ecab0768ad1eca430ec4eb27c04dc383470a3e06b6aa612f9837c33cd2b146b1c72d1355d30a978e6408d8657fa70166336be973b4d4c5c25deb83257b467db5f9950796003566218130165704b920656273f0ddec0e6f67c1409d1a46a2ccc42d3262caddf3f99da2cde3f1d2e453d4129092c99663f23ebc0086185cf157c34835eea643904c917cc97b9926a2b7330971c36a4e26e98c800a94f6e7e743c20363d8bf886b2d76c3c801cc2781f2cebb598f808dacae4f194ecab02b894ba5b9ace7769a1f2ef4c70f3b6affccb9e0ab52372819bb26a20a958c716f4ce3f2277d3c4324924681cd41ad2c8259af393a11ef89318bd623231f5be90a45381032683c96edc2f6affa008c63fb304443a9cfefc35f7deeda87434e1fb8a4f18721a5be0a2f380f9e64ca5fa6b7411cdf64961f192291972f7bdcb4d57af2da5aef0cc9f926b9ddac64b64d0b4dde5c17f69a22c8721603a4180bac0ae88825e7b60c9f106560f0e565707d2f9884e3d488598715a1453bfa605dc2ca126dc2d5ef9166c9d96e791e03d9ad7b1070c6accc201fca1cadf408928a59f261c99bee0967769aec8e3c01f794293922f3438f155f79d112b7645e9e3603dd9364f2966eb9dc340cc8f8ffdd98608d9e11657b33a177507b41770693bcf4e27408d4a846e6e3204d88639340bd5271cb8fc5e97d396e18936f09bb80801a8feb071f5d3ad42f435de137a8b727ecac2d4fcc838581921000c4b4ab9ef11c98042448a6caac4b0bdb29c74a8196e3b1942f4cd1f6a10b5e620553f31edad520166154c9c9357c1042d04bc4e88f96b435d5d40c968fe1728f24653bb6f5a22f833278d5708c97e06d7a03451fb419c878570e8aa127efab03cfc0f8262d13a49e400107c2361372c06d19228208053c28b35b46631520aad6efc2e8c6a6482e8380b689ddd0f11208c089875876c5f1d30703a8df4e7e159b76d621a5d761a877aaee3f8bd20de6cc154df344f593cc3b96601897af7eaf802b9e2b009bceb8a638ee20a3aa14f5936da6863dfa4dd0c5f574f1185c5e4d7c0787af248a9dc442ec90b03b9b6a3021503f89cb62fdff9c3ea284ddfb6cdc7037b47896e772e51660db8a8f0936a8fe55d8b6858518c75759f965e1c24355b06a90bf6dc1297f005a4794b7e9b58814afd7f3a52f7794410e4e3e59c3165b9ba9bc134ff5fd5aa911e0154a028b8becad0e0fa6cfe49c58588ebcacf174eaf7fd11b6b01cc85b97830ccdca7f563ade93ccb06e30434fcd65cc8f6bd6d5ffe4d06248daa230c879b1ec96b07dc64dffbbdf2f5a105f089b9b76191c1686deb47d8003b5ecdbf068c61f0a5ef4b396b4c31bc10f7ce611394aaa4d5579b983411a0b1f2532a121bd1a782d93a3aa7aec8c053d6ef23b9850b6c0010c229e203249693e531ca9aab348c444c7b03fa639a7c5c304cc29a367776bc8e4e7f926353bf8a58b4f18a24c8bab689d62eb1e12e688f56a577953227fa722102f52b40d4178c8ce0a60a70430380717a0e6a0b0b6a24d9da52ad1daecea8f5651b4958c7ee9692dd7ead25228944767777efa8072308bd076f03b2c04e3ff71bf4e66c255b2a2ca5cbe33b6a0fb3ac66d624589c309e1c88501d087fc66018a717a154df33297a9f9b50a7d7561b6472f3353620acccee707e7e8df2bcf8f9dbe6e70cd79c39c333fc38e06cd2f0bf0071ecddac46795a3fdf36a33c379dab67cef0af4019b3831be00738220cfc37609e341cf1cfa6cec69a568b7e7960cf9744ddc6a61b64edf6736cc29709376771ca2b30d83df72ab0e2f7f40e5d5df31d35cc3973ef67116bad95c1841d9d398b80cd968708d9f29b6652a5b18ad24e8f4a4d4c9a6a65d23c86f943fcb74b1ea321abe6c2f9435df3180deff56b1ed6aedff37ea8ebfc7d9f3fce796006afd67f755cca60e63f735665d8697bec5bfcbc8cd87a8c31f64418b165addd1fc866c821eb76c6f7f24bc9966f755c3a27ec77515a6754629524b14acbfb79196df974a7d39aa322102d108a3064b8a3236d66a2a4a64080a22363c0caec57931dcab8af39f9edd3ff8e775f7bfcb88ee7f07ae632af2da95d763079e41
2a5713c32fecc18f76b5ec68ca139eea334d2b90f797afc182f9047b102ee4305c8fa4c67ef709ce538ee6156c4ef9b1c2725c77196cbfc0549853b4a9ac2658ee33809c65eab01616576288395d9f85f403637ade67ea2e81afddcc7832ab7208e399b3bde563557ef3c304784315f1271dcbf9c70dcdfe01e07f71cc7bd3c1284fbf7f6b9efb9e738e735f5af760dd66ec1c8715c95ce3dc7711f651c273b2e5e4fb420840843fe5cedf43d63d4f06c950b947d665be61d87b26d5dd3d9fdb43167f2b75ccd9ed23a5eebaaef4f7958ffac0fdb5e4ff4539cbfa63ded793c87ad774fbf0d584a8e68a1775f3fa0ab8957f82a8964339e1f4abb31a2ed7d3067697618bdf4fb302b76288fe4119ef7a5bcb1436152ff06e591ddeb042bb37f288f0c3269f75f51eedff800874cba47484aa6bc2f7950ed534e2e8b304058993dff25eb3cd9e7be8e6df2449ffb59d6afebfea63cf5e592fc9844a437c6f824524a29bd31a755758c3fdffeac56cb7f27c28844d65a6b638cb1f542daf2279698a39cf32da5128d8eb2fa05ca171f5ea6bc58f9f2854ad7d4b34aea118a34fbe5fc85cea7d19ce18751308d6ed0ec92e3a75f06b277adc75ff3f76dc07bbf5533e5b1b1618171d2f69db4200cd68e3f0d80e4d78e3990c7956ce738d07c3c39907cdbed10e5fc564a3d0b42af397a8d5f41c0b25fce0157af1c487ee55ea84871e4cabf6110febe1a4b464b3c36b054ea524a9755cea21964c834aadf0c5c3487c80b62c71f8de612beb328f64cd0c6d3cbc36c7bc14029a5197f05b91d222ba85fad0eda2d55cd0da5681acda239c4da691483eebe8fd26772e0e432982037b9f4b40ae472e70f6c4d5a47411271cee80c4ee6eaf5e60ed3baaf61f6642ee95d7046e63187f4ac63a651d32b060c7a64fcb8e6901e76f87d7e955b82af1568ce7bbb5c5b4841f4548b54a36a44eb5111972fa3fd0823e278b1b0e5db488990f7de1b6d8d4f5f525e4a2f29d18adaeca584658441eb4b69cb396ba5dec41fc85aa87f2b953a7b389e05fccda8ff52aa31845e5f44826aaf14df0f31c63888569dc34be9a5e445d99c2a74916292c288290a2941d482f9de3ca7118f34482d9a438ca691085e435e47af21b358639dbe945a29c849abbdab5711b9d4abcd5cca6fa9b0e5ab68478e552109fdb5c3963688d5ecc9d143e89487f6e563c7e80f752a270673362f97443abd82a1cb35ebccf9f3e4717dcc39d3092bb349a03cab707b5fb919cac35f71d553fea0b6c77534c329954424bc5cd8929b9c53bdc5255b1395e1630c72f5bf331d3b09b8b5d37572c42a796651adbd597bda9bb3cbbb8eb33382d53ed6c1a3c75867ad32da9c39fb1aed0e7a04db81eee37f8fd39af74cadb5d661ade739e3ecf70f1cc1f6ef3a6330fc9cef370a3bd4e9f3b9d79931b22a87adaf3e777bcef44cbf0af4e64cff065d93a6b5ce7f79e45e8da2a5b409f51a42af41bdd63a076ca18dda7a9575a93fced791edfdfcfcadba5781e1acd7c29f4e7be0e639641b084db9428aa42ef6ea34ef2b873d5f6733b2e855c3ce1feeeee714879f0377548d65acb2010f22404260a65bfd9303d14cb71ca86932f1eae12a80099c6471ed1ebd97d2bdf7de9792112fa29afd9963c594298a27fddecb5b957b74f2288c8c6096c50a2550a0c2855b92272e364f57d0ac10110c910f04118208094e90624b511726845942082641723e90040c55ba10624b0c4f3bd43c15110b8202524592b8e0812c53483c05a979a2e166462931820918a433533d6080620a284e8003c4007a648282887ed323131477c995c58ecf138148a3ef8a5576ce79ceb6dd58ec189563c6903fad955e5482c20885828615143eb84471919af2053ba1b938a1b984a0b37a64e2a2c4c3cb2d3fcacff25d468c60a588252728f3c448e9070a4840022f508c6c416483069a7696d1f3738317856e21f30a6285eb29b55c84744ab91cf9a194d28f4f9d7edd12a6531a6b59e6c4137d7e96e94029420b1457a641f68a6b9660e6beead09d6d65b573dc63eec3ca3dc727e696d75a02a5245b8c000b2b887431840f0058220922906802298d100c34b00296252845741142a4871e4660610b294a9e54a842852d4d5ab4501ceff65a1968818c0f4b763862294d1155dcd004a587231ad258292f13300f08685202229894f041cc11506c69b2a5080a51b694408b132d4fb420913d66013f3f8828ede6e3e6e3886e7bbcf9a8d24317131810181026e22d8c567abc667860312609245ab238b1b2c3154f1c397d408ba43238e4255e5ae4b87abcf548eab9c75b8f233fe6650204932452383123e6871f646c53768215f528cae1f6e8615743802ce5b853b3757163162a7aa082840a6374306adaf245892b4bfcf0e4c2094ff8042a5ed82b37a213bca5ba42d3ce39ae27818a3b210817104d4ef66eaa27a450738291dd94a633228b57066311776a362eaaada886c0082ecb
9ec25baa45385eebf2b1b9f47007d663e31e99d230a133db4c3d9c0dc971fd951264a71a826a4491945e81e09a8a7888b294afa475da6a3b6972f8214b214d92156429d434407a28db37d71e193fb01f697af4d095860b1fb014d038d1553d32a1b142aba8396eef881268904033823e1f8d93de9581a960b3c156c8d9b694e34ecd7633c1e84644c313dd17550d81ca8918245782c080e5cb8d299cb49c89810a24a05ca937134a90a313ace8a2399286481a2196806ca0c3028b1c2ba6ae8e46861a834cd723131a224c56561befcae9c2e98c1734417ac8a6d014f970a1f9d1433655bb18d3718f4c5d40f5b056a5e8289bf563d3365baf3369f1c60fe38debb88ccf60e9d1043d327181149ba07aac3ace7ba7b85a20f5dbe4444ef1a5062f5e645e98707ddce207a930475c61a489316776c8618912910e5870013255c141d1795ca0e8f851be9e7618f9eb69c71fea435d724ae9915aeb9194d65acb67d111fa462ba3e767ce2413b9e7a75625b85a3b10e60448442cc9b204123ed50c0fbd56334bbdaa7a35a2d75a6bad666c5099897297bedc2bae9b4587f1433a5fda7b973a4b6db1466931a4334a2975515a04c36aedb69da53662a75c9d7acef94369b136953982edbd36024188e0c7101d8088c2851a463b7051d285d399277cea4733407afd6a46855eab087aadf50b6ef5c8544688ae7b642aa3349f7869d961ee730ae5b97249deebf43485d2e66b1c9407ff1349a5f8cd883e38286dfa54d006a595706716cc5f7da28f961efefad85650288a07524c0f4810f994c0237f7d211ef7f167cee33eae7f84f224519efa5301d4c986046712912c8a94e681fdfbf66fcc83fa77ce239436bf2625295d317a9534bd67fdeae3774368ffa781ecf739883e35afe306ebeaaf86f549a59a5b736f707e069317acefb5d62ad9f4b12611c7b393d413d40efa9fc6cf7a162fa1e6f1479fef6fc43c60bdcddbf012ae6ebd4dcdb73e7e37eedbfc8d1618abdffa211f4dc11df33d10f25effea3d10daff7df73b74dc587dfe1bddab7e0613aee6c67dd6dbf0c17aec3d7e168fd9c7b9f8058f9476c36dc058e53622953f8f288dd2aa0e1164d7a18383e8839f83e8c37a1d37621e74bffa152f417ff7d107ff8d9807ab12be5f6910bbbdaf6b3e7e37bebfcf0233b863b382e13cd2a38fd5774f41f979a0bffbf9ddf87ef5f4867763f359c463f455ff4369b3036394dbe0b88bcf3038e50ff9f0600dd599a7e83865322a6a95bda06c620da7894a6bad5a0cd1ab164caf1865abb5d6aa850dbd562d9ef46ad36b955e6dde4c367533815eda9934f9d3c6cf894df9760531646ed76eed4c3995439019fd70c7860198adea9169ea480ff7cc532f74db23d314911ece96eaac4d62c5af26a8a5d49ae1be6411a5bf9831e1669185b4457cb1452420890931529a0401536646114d9c8afc80658b1f643a64c898e9bb47263250686264ca420b531930383bda449e9e18ebf2e5440e067cc756051929301928f6143b769eba75c099e4614e5c22c3254746c6a937e970a9be3c943a847acc2fc250a79203d1cf21adf26d8c1684b4dfcbf1507efddb43a50e30b48fb198646e3d745d71ebf35d4b7dce39e58b3044fbe5401958b0e5bd84d8dcc71a9ca561edcb07a29f1f7f33ea9cf8de7b6900d107a27f416b41a0fa9647d2bf56bc3ef0a2f21a22e7116ce8711809dc71083ac6987b2d571cf5dc23d3154bbca52b8c8aae08e243aa0a292d5a7a7a7a3126677b6a1a03f52425a487b231507d003ddce91884718952f9a4699cc977bd6df2d025222222137c6e60404b9fb3a84ae4c3c619318951d2c3d8c464c5540fa39608652b9dc53496883e259599ecf435901a8a5c823973962f8f538523bfe5cb344a4a949229bd44d3c8044826a25ea892beae08019325e765aecf8912f2a3ca8078fdb0bb1e99a27a74332b5c01042a851fd7cf04d023539823c2f8d0c31a8e2136d96c1528428c0c65ca8c4183043fc1028a09099890c0c18721b211d552713dc61743b13a7c3f6c418039d231be4baf20bc1e99c238398194c96e48618629b2822c8c110ab2fae3fa59981e3dac5934608caa407a2d0106cb096096e821abaf2b60a27aed91090c946c8a6dca0e5b9dbaa21c41005d84216ac5f69a425f406c3038f41b975e41d4a42a824069d93a6710fade3974793cdcc7c3cc71dd3d10f7f93b4e41123b3f08367a183f87eb06a9970f543c411941115121e506e7c5bfdee5b5a850d259efbd40858f297c7c39d3e97bad1104d07a16aba646b35ebc085bad16087beb30870b84d60bd68b6771a017af3990f7ac0d862d9ec3d5bfaff15c1ececbe5c2c1c1c169e1b46e6e6e6e5e3e4059813a32c5d40a6c812108bd65b363fb637bdbd4f0f0e65b6f63730386395c9df5deab72166ab95aaddfdf12aa392e57d73cefb80382eb574dc0b84309be966c499ff27007fd201ea31177d49e2356e955c6b7bdc65ac
4b74f30f442d7a4d5de31f485c3ea8564879e6a7ed4abda7697abb5356350deda35f7d0db31c6c080b7f98b872d7fa8775b5e3f76e899b1807d3223857bfd3902ee40d3081e9bba945286b56e434a296596d87ecfbc36c7a4927b58521c768e41758e69c218db6c36d96aefe76961306713dc99b419ad8a73adb2956b97dae5e8f33c30f6effbbc15975d9e5636a07e16a8fa09be780ae248a539dbfb6b3cb9a5f43810f7b872b24b1894bbeef3a53cb4a676d9ab2e2b5e2b0e18e9bf0023a55595942e5da45217a82e7bdae4d0e5e9e9a304a40d1f6152ecd0eb5eeb8624e9768394f8258d181594e0c20811262a92294ff45a2b2c8a99ec8620370ce9b5c7db0d4a5168fb51e649beebaccfc9f3ef93fbfc08440b689f3fcbb9e14877e2029bf2ca6d41fafc5a529fbf8341134ccb8c616db416679d75d75c531c39908b4e6e620ed33a73a7adb6da6a1ed6a7cf7d679f03c35cabe540f4bd3b2466b1d962d8a265bb1283171bdb956c2b9d04e8f156c3103540c1b16d5fc2b0cc3c6786dd6e3530c94b49fa9c73ce9d3b57a4fb560392eddab71a9090604eec7067c7864195cdb407b63c12ecfcb1e7b0d581bef7be861ade671ade67eebdf7c0ef3348c3fb8f871e10feeef5043d0f0492aff17f207e0fec3efa1f48df03b98c5dd76a2b6f342005b921cdb8d6d2c06ff3dd3580a86b7ef72d4803dfc740f73108641f5b0b449f2e9d60095b13ac044924914412b6246458c000c15f7e5c5be99c810909c32293dd6668022253a2dabc56ce0c43f450b644b730922573f19a3062a5335204c7c312233278363dde6678e2e51603965b912eb02c141ce5c005982c42a6a43ad08311535861620726323290691901c2b24e183fe1d232f41093a149cbe0021f2373c229e226ca8b195836ccf1ed68c8778b218b261bec4a8699e0c7f5339c1e6f46a2faabc7db0c2db89a56533025ac805d8162f6782bb20509d5951b0c666e30f8b815b961cb046c8822459dd6e3ad880d4562a83e6aa71ce84d05ec9d56ab06821d7af4296f7973d67a6b2f4835f55a2dd9a5ecfbb6a494198cb9e6cfb8f2188d7ab9ac87b65b3ef441240fe5db6b2d6c29ca975256f1019b46f7787b616afe0ed9efbd3b66b7608c31e6d3af6faf7d4a29d20a3717a2fa0c75c29c797381a8cfcf33cbcc60db975f819e5a6e2b8d6a3f1a9588a88e5079cefd6040e4e4707dd659ade84c697c400f33868c524e49e57c39e6484cdaa7efc1e5915a1ee9c376d8944669b01df61f4949494d22892479603f7ce64ba3a884914ff4895110ed608317a91807d1c7872c8a79409f09e5f288d2e6ff8ebc80c8e7005042f065c927fa5c3003912c3f5e50223396affae8a3c1d5ca7b152f41f55e4c96e0fd8a6bddfde625acbe5b7df725d4bcee400dae5ef51dac84a248ae7acffb158f491ea380fed577a0bbffb8d0eabb17aa79fddd4418f963f2df036392efe8c0fb9d5d9b0b09c53de453030a45e9a0e6575c284ac864f5abd5af388f127a288dd6f01aa5d1a7de67be37df530139ac0da1e21418b1a3cff7de7fe0908f070ef9d497e00872030a4a22a110288d3e0e4aa324501afd1d1c441f10288dbe0e0ea28fcafb5012494579441a1d393a223f1a53f51eb87aad2b28f908b2d7e7401d532e511afdf9ede860f52a2e24a47a15974a91d2e873107d7cc829aba5d5471fd5f7d0d5f4f1f1246f4518f9521cab2f2d8f55be41ed320fe1999bc072c8218729838419d961f56aad5ecd73c29ce49a256c86186badb552b05a795b2d1b1ba530275b7eacb5d69a614e60489ae072922df1bc36c61923d094714e3ca64a8a048664562432c66a9136d725d2ce481bc390f6ed12695b18d2ae5d226dda250c09d29e34c09060483b76f9194729e9852dd9f26b8cf16b84516badad964dabee5c2bad159664cba731461bf39cb95c52533969b5b2c29070429df29b284d604ad1e9491426902289302e341d2d5144c91354d0c0c3152d58f9b678000a0b4750a0b05054a394524a29a594524a290e3b74b9384a29e5baefa11a73292bc77d02e8c79efb133aca203bde8ca6babcb540a58739cf18372253ba03eebdf7c6f8e1bdf7de183fbcf7de1b614a36d1cda8c8da18afb5d6569a420b482c2425dd887880c9a63dde884061f57823f242bf3dde88e020ab0e92ff9c4d578e916d65ab15251535251575c60cadd5da7b71f5ecbd18e7cc518a73e6b8aed394725da7b5e77ddaf3be4fa55afd782a2a4b54fafcbdadf439a7d10c325f309a4126d23c9a455a9eb47879d21205e524952414294549429154a6c8a43392e88cf42189cec820b28804da14462b5ace4969ad564e4a6bb5f6567b2fc6397338735cd769ed75daf3be4fa55a7daad56aef9a1a16cba6d5bab979f10247b33c0adae4d8783beacd873a7dcee66cd2979d56ee9ab3495f843ea1e66cfe2dc280b24357bdcd88801a604818007bf264c29ee06b2b7df2c42ea1614d72137c6d
a513d664cf9f435362238ac88ad872ee6ccd76aaf0c3b3355b6dc7d67485385d215c549604bbc6455765fc0851a2348b412f67bee74c8a1142f3cede7eaa34cb91ec6849c64f10234a8b36d37c1be272a2b45b0a536a8d1caf1dd55430b302918c1e573782a1d7f7eaecc7e6158a5610426bcd355b0a4a2f2bd2a2682aafa2e8ef94d708236ae9f1364449a760a5f3aa62346c287bdd116f42a286aaecc0ad08cb0d481497b3b4f6d830109329b5cd598f0d0324d6467964cca2a84e3b9571b463a79286cb7362c83e3266ac52d37930b7a2307dfeec56e4a58581442b178812147480e2842cff06244b0aad39a33016766c4d9edaa2c17abc01a102444a0f5b6f6464830991f990d2636963c9b6823ab239fb51d4addd33867d97fd8f16c46eb96cc88e4593167b46c168d2620f3306eb51b7168525ddbe37637891c88e59c6135f9ff29ad4ef03ddbf791618de673599d2eb73d8e62ba83f46189e0782a1ad51e9dba11e3fcacdc0d1206b90a39cfe9e993df0be667dfefc69c03df7d9c899f3e320fae4e4803d628f82f4e1e2e77a9bb9d73aa4ef7a2d3dfde21edb8ffb6adcd7d8d360ec3abf06acef5e7f352cf71c8f6da03fbffedc43611f7b0dc6ce7d1efed8f7cc6cdf0502dd7cec1eb8c3f52f3e7e190c85dce37f812eb0467d8e87b7c67ecbbfefa138a00ed969dcb7f91731f00604fa6c7838d4c3cf06ae402015bf5fccbeb59eb6deeb1ddab3afad08b2d3d799d39c27bf0cf2d7b71be4a7e0f5228cfc99573931584bb2678fb72634148a6510bff74cf9e920c1c7092610f518639f40ec31ead0b7129094eb10ea21fdd7f4b2d41943d6dfa18bd66c97f4625d6db6f72bd88d407b48420fa2f69bd1e50eac9dcb5ea5bdce06784f7ff6fa20dcc0e18199d25c55c860501b037fe3031c20cc18354ae113a49cdf9a9ea7b52a56d54c4bcdc31cb2eefdf7e99775dfe34ecaef99f325df687618a7ba2b8b1d5fbed0f1941cab3add75ddcbefa5d274cd99aedfe5a0da6fedb8031ff3f79cc9ccc5a6e53294fb9d57b5fe65330a40b0bbaf79ee7507528e523ae7ac75b5d78033ad5720befdb37d8332c25081f661dd7fdc73df8c8ffbc0da3b8e07cd6ebffb369076d6de812b90aac0d93fb07a2b5082594555981dd670f7d9ad658179d2b8ffbc9f41b57b18e7f0ea51b07b0eacbd7675d67ec1a89a378c6d2288065127605275c2a46a50750aa58ebdf2e8f4bb9720c5174847a73a6494d9beb419d4a7bb891cd6a8a432f75ba7a8d6ce266a94793adb25e587b51b3218163664decbb7f5b056abd56a326f97cbf27036abd56e4cda7c6badedc609764e3ca3a57dca3953ecfcf365b4623087cc8e49a298f43167393fe74cc7cb3c639e1debd769e907c1a6d861ae36dcbbf7c7793aafea0aac6fbf19aa5abf197b55fbe7e98e0bb274ae246d217fad7dc6eea57cd58e16b85e6e9904bdffcb83e806314dad39f2c1d606c3efbb661c9bb0d56f58af6fbdeb2b0ff77ffa05ba3e07d10d627ed58ef302bc015d609cb3bd7ff53db40654fdfe667cdfea57aabfaa0facddc60b5a97b46380eadfaefb9ee97d1294f2a74ba98347d72fc1303e750feb905d771dd73107641dc83ef6915dfe16dbf3244d526b558b1dca64b39973be4a5b429a34696e95b627116c0ae3a7df83c19e39abef8900a381a3d38f3c6a6a63c698130c2910fdfa1361581ad10255b440f6a38e3fb6817cdcf2643527a9a8e31b13339dfbaa7a4d4377cf7a9b690e6d3e4618ddb740d5c78f864a83fb3bb0f53566a86a64301ceafa75f6269743bdaa6cf4903e43bdfb68705f8154cf7d0b0c55dfbd0d18aafe4a40f5dc87435d55b17ef9f543fa35fb29187e7ff35783d6e0bef2f0ab515fab5ecbf96910238ceebdaf06fed6df168f6da07aca51ce7a998d1ed287becddfe0af06fed0fb907bfa322645312220cc7fffe6576f03862c1e0ef520590302a95ef60d027d190cf515bf01552068fb6a60bee3450ba494b2e32d29a9d3af5eb440840f024320f834a8bfbf19311ab576dae34763f75c858828833a649a9e77e8885244443d88d5f5d320afeb1a21f4fc3a1cd06784c17dd0ed3bba0fba3570745087007438a04b080ce9703372fde9e931ea84f529c7f2bbafcb5a0c86cc60761e72c8667e4c52da23bd7e524f2cfdf8d6a794524ab96cfdbd52de37e37e33e80bdc7fc1ef35d8b2519e38b9777da4b19c1801f8f965a079e8923c7ccac359bf3caca0dbd79ad35acf08d39afb971ac681385815232fd02fb99ff7c2261756a92e03c0c30a9e00c0c35913393cfc25623c741de1bafcde1d1de6d8c1df9010fdea991e7e8f41a0d6f3e897d6fe3da68feb540f3fd97a1cfc1f0fab54ff9efbda13cd096c7a599b7cfdb2d6358d543c3d35e1b444d311442cc08680fefd0db9ebfa2b9f721d833dfeef65525e7da45e0004f037ff2f68d0bf354a5e705ff29e796f98f3df8755aae36f69f99703dd6fd197f8c53f0f2be
8619412000f0007008ff1c9ef7f727e2c8753fad1c9f77b928ab9bff10bfa372a8fde8ff1b0d6bfcff848e7eced303076eede7b2facf5dd1ad5efe34f83d6eb1e5af33820fe1618fb0bf0060c673d9449369af7b4655b5f7d58410f6ddda663e990697a0894669f55c1ecc377d94d6dc09a175615b3975cb7e739763ad7bdfe30c74e58eb9ae340dde3d79cb6012beb25583b2ba9820a663399f449aea4304a89c9b1d3bf0f73ecf4d5ab3eacf5d5cbafd960fe34f81e68e53dfe6a78fd7b99442d13b426f0f65a9253a8e4618e9dcfe3558cace03eae62e405518aebbaaee3aa1899233fcd92a3fe3dde929c59e2d46b3dde9624f52451dd003dde902875afe73773c361e98683d16d09d40d8723503dd7ecd752e9494af9daa5cbea959ea45217a8557e1a127a924a4f5340de57e5536e7d1ce87b2f77b1613de6e1ecdeb37838fbf7505db25adcab58625c397ef93424d449a5a7a955567a8acaabd56309d5257ff841759907f150f30f3f1e343bee3e64498c3d2ebb8061508d794a4f3987377f7f9557f565971b1ea098a814ae5645b564880a220000007315000020100a07040281502c9246babc7614800b7b9a4866529889434992e33008a2188841c61062002000194494293a15079b64e12d77244ccfc143307634306ce65aff626492a34192449a9c1d2df8c30b51e1f7ef19cd9abc177ef6d43ba5c80f6720f2cfd011006254c82822a8127f024776fc7e08b6f72eb687b4ddecd8c97fbc04eb06eb6048e8d87238011009aac9807cc8aed1ea55ef9ce81afc5718814c6d42e872f67bc78ac985e43fb33a2e0a88671deab36d4684778dd13991d16c72003b2dcaef0618d42d69f90252027a29b0a9f5ec9a47fecedcbc929413d9b7a4de4d62eee357f44a3a03971fa2dc6339e722abf9ee0b5d37c7829013e68e6f7829b49134ec5f9adc2d10c3a7c40dbee80bd06d7ee25f77e0d7bb1edd83824057b8cd109a6afdcbfd3b31e3ad26098f6480c63467622742d06c08c87cade0d2af99d3feb831ad509856643cee9aa1298b03d0163ae199ee02171317a508d36dc91a96fe410c3ea77de9641acfca481fb6f419c0fa3e8714290285bf8c8c4287ec5f74e5e125d5ffe0c6103a3c92c0a45e77a99ac19ee36e62b7fcf7750ed7d1064ab1e0de5723c0a38710514a0335caf56c2f432c515041776fe5954d4057525d75de80217411082e6c910171798a46a92770b7296d2044d12232412330321853439114f478fdca2b7eb824a6ce35750048c8f98aaa771d48e7178e7bee8775868080476a8d50fe0497a7d7b6a6962098449e98aeab7aa3b9c8f01188bb2c62692192f27f7e9461b28fd9c0426291bd008106db8e4dbb103d149dd81d1b6e20708e3c9d2b378caf77234ab60f9f89b41caf56c69a78ceb030bae1027935a5278d8492c6f066e01b8f35246774887210d80a531b0d3b3868a7cf965a8984331e1d05c56f0d284faa488413c2f83b2d6537b6c7f9e4162a6c983191c8f7422b24971dc3214017f630487555f67ca137b7602fb31fe820d3f71f1db4c69f6f9f70b170c18ebbd03c421511c7386a38ee660d1d17a1e97327c9e8fade19a22aab54448966f7dab22f284a48417bcb40e3c9cb4721645cec2a95cce4f4a19c876af56f49c0703fcddd67cb46bf0ad434278970f99dda64042aea2eb9cfa7af8906f15a050d116c69627825448521563f7622441bfa3cb447981c0d3b5577721359e89bf3c1516504f83166490cff87d53cdb288e4328a8e99396e55692bd2ea90f9104f73896a2d445020a083965c7032d4b0f72cbdcacdd52ce2fd7e54b2a262050ac776a869a3e37cb3e819c53b4d3df4cde9896b41ac826fc7122deb99ce5da3a7ed4e8f2e23f9d0d765e221f991b39a31fb9109e1fed1b4096ead7e01adbfa48938bef0d662bffa74ea8133ae422e74bf64840d22867059ebd752b26764461fa9a016b3fa879a0d3fddee5dc08f3d037d5b20eaab368af49371667b7afad62c3621bd81249b2782214e48e610f0ce21e73616513cdafdfac699bdff61877f124e5ac1c0b63c21dee9b44e1fc66682bcc9fe0691d5c18b09c9c1e51daf55d4f931e6872ea44beaae448f09ab168765db574c629ab060c8a09376a8c18748e04bda16261e6ffb81d82bca89545fb4b7a344214d45f2682a33c457c2b2d0122172b4fb3506a62d310fd2331bf2e0dac26373c4124fbb66cf8cc136ef03afbf86e40bfa71d8b25d09995ba31808bf8a966a6495f6bd887960a4c88e3e28d0ef668d591b739e521b190d6b36ec129fa7a5ec7ba780376825b468a2269559fd069080e6e35e998c2c49a082caf9d01d3e0fd0b0f11d56bbe5158681f2beb74b8a468e022d0250525a45fc8a78ccb802ead5296fbe62f41bf66c25bcc423db9e83629e9ac17b1652e85d0ccbae26a38b0824d202ce1bc091e9bb2efec9c6af2d318704f3fdd3124964029fde8f0e690413eb174b336feb28ae6
bbea1ec543753753bcef69003e0789a59b6f90289e6473e75d09e823ab89ec52c12e32976e109bd9ed7baecb34ef40e038456e94d2025c1a1454faca82188fd5ca48575e4958b261d0afb3eedf50d8fc3cde5680f5132484f2d608baef4de55c89f051182f44044385c4f6823315fe1ba9100142b95e5dc0bc5e52856bd6f09b0660145785216005f533cc9a395fdb01df4106201529044bfd03dbe6c3738d736001c723c7596c9cf9fcc3d45d42e6aa062038845e01820e5e37fe8704c48e7018dec2e086272f0b2a14b1694bc44c3030725f29066806f407258a2663601bd2f507e145b304ae9526d0dc4d98bf1298ad792ab59b2c397e1f3ca0b5452ec1b93a4ee9405213eed20778d09514970a29de404c957481966009c16763865d5d051ed70c9c5931127dd7b3fc6ee75351293adae966ca1e1fb38ae912f3da92a4a24538a47e1a7474bc8f0f5de818c943ec436037d64d591f76575c84d960ed51d0d87b1126320f4f6bbf51164d4ab918aa48135887b473f2105aaef034ff871d13350065a098696eb56adb5495a6a6954989e687c3b89039233395de206bd21a4a32327a3a4c21639af6bf9e1edda836297a27a9f4797f5eeda29362dd1a90f55e0177963deb87a4d41007222aa34190c831a4a3ab1025e0d547b4a2fe549f6b3801be9da52b9d278d86efdc8ed0e327d51c75deac97602d55911cd93a2cece35b788890b82754328af66de2e2225b8f6d6c0276dde0926bc876fb9cac8e41bab8fe6fb38b6e9e3fcbdc0bccd7d26d3b298ab3cf3e7feff68acfcb367cf7cdb13165b154ddc34f6298dc593390bad176c677ea5c20b89952951cd36fb0aac599071e66fdeb6c29625ad413bc51d18f5b89e28bb40a5c3b960e37ed46a12efff9774e099eb6249d73a612bd71b78d6fc5100e906fcba73a66e00dad0d68732cbd20f744e17602a928c6d8dae4c7ab878b67e526ca84dcb2dc15e31f538cc353af803334afddf6a924f235bcf95af414c6fe9a7935258eae1418d25a0d47ee74ed9f86828dc4607e63f0b02448c3e5bf270ca220d99d5dec4b34b7d375a956e416d0bcfa4889faa4fde75cfbc47247206ca85e295ba42d8bdde9749c6e826bd89a46d84a02e574eb611ed8ef65923ef68492196c3dea13b508b7fdcaebbaed9a938fe751ab4be988b55da5d4bd37afec2fcda042f573f8d3b61bab52199481a4826cb8eb1bf2e8a8d88cf29e8edae13bdb0e7888b709150ac7feecd070f7746d9622d8bfd88fb12f68411fe34b2ccda9fd305cf81cf84028c05658871e2c55500f6a1f4c7667bb535971da6d39a26dcb8078f93d282d4287d782d05fea517a6685d5bf4de4cf259e1b8984e289a1db5669a958c329b9cd97f2d89e4dba14e1d833b2008960a144cd9d344f05470849fab0c3b9f712d30294901a52093907a34359305b978a1482ccca3994c67254867c61c63bccdf3141e82eca84847eaaba09607ec4717b69e73cefd5c88c02cabcfc90a7f4f2b5a3c367794f41e8f2efe9b8c407c07be0074b2cbcba90ee2c07e65784da33bfaceb169b528627230509a53708c51b8b793e89a57987d5bff606970ab0bbed269531fd9797e6682e87d70eb16208143f3c25a790084c6040a55f8852bfeca34e96ead7f1fc4588473574c3dc4b94db044f3cbe7fb2d886d98597a8c8c37e04d795306d5e9c009f8b2c5d47f309bf2f530def97a8b00910575376c80aaee6139c79a8166ca1b37e4bd9eda087e9cb31c2bef79e86180642e0bf44458c24aba2699baf76917f7db9bee2dccaaefafbd42fab4c50158bda24084dcb43c914b246e2e2ae3975d56c2775f00dbb433449bb3f2d91c42111302e8a8c5db3f17181c8a63aa733bc6f2e538e5273bf196f7167e3ed41fef328f6e9ae1cbb28dd402c94a4772c70d6fee06e9b83c65055090229d80335d4c900f302e07545d95b2a93bd92a40b6c896f8506595869956e4eaed454c9cad3a91130a5a412ba41c7b6ebd97ad55551eb810343dfe234093f2e8034cbc85c61a1fcfe25c1c2bcf3b9094568716381ae02e9bce00cc724daa9e2bb1774f3c669d4a4c1f8377120b596bb8a31629a28a3031d111746c4a6b8c3afd2d58c263056b305283dfd77b444a5b4eb3ac100b31d172bd53502e915d8cfe3e54db6aca1eec50ad1d3f726731ef817b0d3491e096ec2aa97a419e9c533ab27e92aaba1e54425610c6937595912d1e1489e2d2502124cc87dee51ed7b2cc1cd62a634154f03028bd3f61d1cbd090db394726ace3e0e082450f1f5c23d2f9cb450e3b62666af2393d8bef111582e4720d35c15aa618d65cc56a75b7cb30d2b689a6911810f792c76ab5bf2d8af02df3aebfc14ad0b31a5aa8dfccfecc5627b4ba5a9d87d9da6bc1f3c117489b64b8a197d8b6f1ab46795e0311a404701852a93935de7f4898512df12b3914c1bca55c5b0980fc592a1d6469bb0a7c51b04b316210c7369c4670cbbfac82a897a26876b5f393
1a67d8330d6a953820981cb5b02f538800aa145ccac40b79faf9c03b9dc0e4ec44d2cd534ae9a998eb0bc57556fd17bc4457d65aefc9c70a85fe1e49df303621c1458f1f4a007eff2e7c48b538e06e261854035b8752aea63ce9f648622cdb33812280ac4b7cd3b8dd89aed83ceeb289a0fc8e6c0667e3c0e774d26853eeb8ce8f757891aa263f7c0581b7a81110a75bb9431e0e4cc940c1923ea40ea869b1c54d4383f4cfecb54740844908b80799b800eaf3c2ea10bb3549a831166f1b35e2c3014132e2f9ad2b98b699b9f340efbcc0ba3977e3403427b9978903adaccc292881e6fbabe0d4ac51c7f01a10946904fbc0bd84314665730e52f4b8927367d96620551414af9df324b0906f703ff51fc30297d836a2d3af5626cb286e35784897c049fd9cb0c0813b3e16444f8e1a017d7df61e9b2bf073c4baaa94a9146fce5ab9a0829606537eff577505a95f899a3dd205e99dd0ae64e69d04f0ed157c2e8ab55ad5305ab423d6cb89d513711491e208396bf976583ecde9ea849027067e7f39fb0ec2b45f2370a8f73242c623c196402d6bca2d2abb0710cd937ab38820cb8ff40cda2192307966757073e16fb05ea71a3d98091f180ab53b85249c6921f4d88793c0e8240f28bc274e2c074349a0deb4087656c6171ea35954dece5e57681abcc040504c51226c59dd441bf9e5bdb3a9a1fccd4d92ebbe814a01300ba950aca048ac71923706cf034fd63c99570f7fc6e80999f083199071d6410a24beeb18a198bd74b72113c281ce921baa0a3fecf14672b476c385fc086d0089c33c499f5465d80b6fbf64921adaef35140e041bbdcdde39af89d01d52398cbf580361dbc4ba3eb91564cf0755a51f958a5ded6f3ea65682794b94d99d4df9adbd75f25507c24d39bd141a470b98b6f28d941b93a0cdd539cdc0fb1bce19480fdd14a02b1fcc402712566f5dbd8e56e0a32b19e6e905cb577c89ef0ce44cc0a2bd1020e0db69f2b626598b00f6d1a73c790c6c3652dd883847dd60533da6e4973d68070e84a599dafde5d0531634c2243b434487b366caa8565f59c30aa16dcf754185328bf3243265fdc0110c32617fd75eff45e8ca7ef41032d19ccc3569f8186d9d9df196c0f946c9e6c30a51ee063d2b757860f09f6783ad2e1ae57dbb0b49d346f91a266c4e53b247293fad1a71465854ed5a87a68eed060b46618d19aa0f3b46cf153d6056324001151f8d7c9fc48f9e496d6a8f23ea0ed2032cd9941a332a4f843cf508c30107dbdf84e306ceae9bd0e57765387824202014e1de2143f78a1c15ce99436e7f76addbcb91cdd38c707c6733d1cfc8ef19de3fc85f6dfac44f37acea41a44e74ef15a5938a606098d6d81a39481cd521dfcd1b94616216166c0160441a6894404336160c7f4d42f1d503b1eb38392c182a9b466f7db3ec230ea350819278f4d18fe7192bf46f714e1d7cf3f11108929a940ccad20ddc4cd0f3ed9c12038c2435fe4522f7d2f1fe47e862d6cd80c02317b9175796100cb16c7a054fc092faaf4c19f8679d2a00a6d5898e8d9010713c8c3df007ad9fffd8aff30bfa94ec34dcf80ff37834b1b850ff7a7dc4908ff0d820961c00f7c4dd069c80ea6dc11a352ee6c5e2dd3b3511ac241eec0854f1930fd932cbdbc8514238024e50e1f72ba4a30065b28ab2877106883f703742142b913c62f70fcf8c99d157375c9c91d1f265e97435d4ceec48d7bd147ad242b5f728722e6c095efde55504aeee0affa88ec1de73e954fc92a46933b28ef4e97f7e88f3e98e4b28e7f9806ca9ddc60f9d909bbf076c282e4c208b7e51b2cc13f2e3ae44e1f476c25e40e2a3ea9e81255c9de94bae5de2b5f7fdc9988d98d1f91b31867e7e30e1ab18cd8f9cf888f3b172d6451d76ed34287e6eeba1c3d2aa67af1647e689b89b069a88def662a1373ca22a2e560f2da03f175c03b83c8f6a4e707bec80809c98ac51a779645252246f4835aeb7187e367144f733ef2b8731e471710c786c696c79d91c70a8488fa8475d305e58e03ef6f557f9a221231c609995509c08e3bc3bc345ae4b678f1c6542de3a7801bb1ffa5f609e8b8337d8e4d698d66cf43c8fd2a2bb6db7ddca9ac3e6238dccad16ee337c93eb3cf2870406b2a6b675a29744a07a59e8163953b26b2615a8b8ff81260b79e8a1477c46c77d760c5c9233c9080001177a228eebc0fcd264c9a0e87b84311df0752ca2976249eb893183bee3c929c9bfc504cc21dce947d2b43c670c43e257c6ed1c49d51750aab2e716792c09b2c9abdf1aa8773fa26d27d8e220e426a284c80d29a80b59133d3491f2c85b6e54b6e863606296b18de428640c18978d50c4d04b5d5a36628a2bf5f683b6238e2210380e9966b2632450ca91b5d956c95f824b54889114792b6d1e067af25fba8d7ce0169e76dcd8d6bc0993b24e21b30ffaec55b5e966d4a03cd10a4dac98e7c28e6b618204d5fbbe0d5c8e798b98a9dd36d983dd34aa735
e9610b9f6333f4ae11b1b799e95b0c44904ae471780031de2d2161e6eda6f1e48498ad329c40c0b6ab1a16ea39d7f2d45fa078d3476b7648e964504cbd7c1c742a586092a04a60bd5bb99d839166cdef8a1fd171a35da5ad7f8d2a5154cd9ef0227dce26afbd72170058011f71770c22481f967a7bbf30f91ae78caa7d09893821d88f4af1be9dc4aecd335f886d0d04b2896e1e87733d4e85d45b57e616b65735cbd40738472478d91a2a01ce754f56198ef81a58648a2acfe70b82ddacf0a84769ccb83f053787cc632e7cb98196e93fa36729cf0a4f8733c29b4d670482907e3d70c784edd8b1b599131013055aa1d9a369e3f534571a4cd180362fb66ad19936c95a2d33cd0c101c890ec43c2244fd360a4cf86d30e53b7b3f0a51374758428d08976ae67f2201803dd0e7561afae6eb09afa7d8ddd145f621a8157af9644511c123a405a8840267640508c20595f47a257b41c9435997606fb3dcc3ba033837966fe23cae049555f10f31532f8ba2ad47b4ca6fd6b63b31ec59b7bb85d584cab3e59b7e2a91b7093a0d4b167d30dbe47537e81e0aa82f2104ae859c07fce09661f7e06e2a3edb26b3797175f8d70639ec54ce1aa61fa07fad9961778d91b2ec722e6c00a8b6ed9378057c8e57190f3168d0d2002ef2bbb415e0db770f9306cd5d461c9a4586c62ed05c51919c1b0a8c7997afbefef2f1b65f0133bfc2301247d76a4928f542201ce8923d0fb8beef5f83043a1d5db85daff182d28e6215668ee604bf4ca2ee3eda4103603e12d72448f2a1ae01489250be2b2da5f9f736151db694f7d95866d69e151ef4d3cd334cdbfd44b86f388024a61a61c7bc27769f136842ca1f6625131b49e30b4d560ad3464df48c96350628ccd967bdb0f7203389d7c943478e022476fb5b4c6f46144207f860bc1f80bebad0547d9f550833a2bbbf3434e1f5c5e131e09515898d27f0dda53038d65abc1d5000242bfd619658bff15dc7b419cc163b868d22cab5544e778f7bcc1a8fa2ef61dfb0556e6844f306db97f9366bae7346e9d2b80db9c32b68c2514ea2de32b7e2413ebf4493a5362eb2ef6cfd31bcdcf6b8f6af5fa0693f58368344ec973688d8deca64f9a4b2b5387c6e3cfd1168863810125193b15fc58e80eccb06aab3e29fe09bc8430f29b97d86b22c65baea2d7b4b3017072495f27deb679f7f3399e99af3655463bebe7a42645faf7d50ae5d444f813b9fe83e71eff6bfd3ae41a6bdd05cf2f246cf1f5b1bca18f565f205bf0009367ca5627b497de73089e1ffbfdf66476ab8a9c10624fa0bdf7ba58fea324b67db08b889208409d282040ad074fd32fb5de8e70cc2b6a46b010a41cff34031cdd52bd7e5b898d7d28f9b1786168987cc95835fb1d222f60c2527ca9f15d01a9f296ecb25a461e9600f83c6327f942369446dac3c3309bf949cbfe8abed003fda4d0c54607b111847b638de51bbf5c70ad13e3e7add51bcd234c3e1cd7f7fe839b9c6af274d5f85e0fd4152fe44723f0a8b710eed3b85e7521a32dbd070af8cc76c04ecd29043a823994f9dce523deda6e387da0cf76dd73b220e2021690d0a34a7b1e695845a0b1a295303ca1ace9c188762df78edfae17d66e4cc54c7274bd932207772924b2cbcdbdd49e5e1f60e4c2b131bcba94e1645ee0dcfca6c0ae05c9c45c6bc5c8dfe4bb25d05900e99cb05b294dbb6580aa09e6c76b5e5e760a7067ba55efe42a9d643c1314b6a8ca9b534d13d6d29c6602da589128f17b8b2bae0aa1386edd1437637cba9c2b5a379e13c83a56ad356c7547df0ad680ecd16a6a81e867323f7dad7ffc4b9e3fa899310121ade55886451b6dc49941e0a513c4ee4f1f427395d1ad73d42c2a9489d5ad48de580f9155c6fe25eb044c2219b859a10b55b475c4a77e5b9d6fd25be3d04339de8a343870cb99c868cf2c02d7cbf08718187e147b24ae349af6348e6c8e2eecac5f1c66bfbe3bb6bfd1498ef54cf66d458af73aa7e44db961ee058830e3673121dab3cd7ebfcc723ae5187598e0a109654de9b1baf376745f865322e28d79726af327887b39ec61e24a7b9f824cc997285ff1f0e6ca82f9418601f4590272a7e4adf19c840396145b142fd87004a155d567cd7bdb03e1e31b2cc124d075aeffe7a439d9ac5f981226da64b5a95e881cf9290a6db4d0c7ccb495dfbda942f77361de554470e9613fac4bf910857ed3c8dbd714c7b6aafd060b5d9438d383bf3099d6ca1f2a0e9175c983eb5e5cc4a37a795c8edd02fe92278768e80a3049f750ed01e01bff6aa1c8420fce8083bc2ac98138eab1f0174dd13a4b9b60562c5a6e9f28b423563791806aef7cdbf413139195a0bc52d75643feeeddc01b800bdc161d3035782d16d3d96f5e828a4e70852f0ac3290525dbd024f88cdb60539280611b22960a8f82ea2c85e043deb6d13c06dcd31fb1814092a12e98310161a309ffc7dd98d5bbb35c4ed283b821f928edb32860826e358d24
f5ba476bb30c185a663605b44a778365305c6de3d4f5bfd21b2102f85fa247f6516cf948801ebd74598efc08c95413c420ee10ead2b6ac01a7a8639ff0258f7631b809d17325769272fdf026e51b4dc039f1a2a316dcbd686553c3b30a6aea448c596454f9ad8c982f130b71ff9c997808dbc427bd01f26d819d854b0216e3226606b7985a3c9cd572b82d273dbadfe2949129ef90dc53b7aee6de6b6f1a8d13836364b8d1f863ef93655d5d4d52876384ff54a65a24c02e00637ca1d84aee595b5cbdc08b1245e8645d86a03ac4fa5d9f9be9c0b410f11808b5aec9c36c14278479b21aa46a786127948b56b1506712df8c1e478918d8cd3f55b19a15b027bd6de6eba94a8cb8ff9d0dfb5223685d249562eac088551701d140dad88dc0b61738deea8ce017f14d518c55e4d80e314097d512c1d10ef79846df27f24057213a51023bde8bc54634008b26d99a52dee4b7005873ba838f1cb55ded4d47f75f2fe26434b71736d656bfa35f4a46af0549e998d70e2bef0161d98ace46c0411ddc0ee2a007c00a839eae22bc966b500ad8857b68327e33e0730e5014335085ddd9bd85e1ddb1254b2717a8c98a1642573d25e1c444f5135e3c4de825279428e5dda8e0c40aefa23d883274144404c244c495cc0d422f015beaa343343f038bf2d7b62bf120ad46d2e84fa3ff54863ea75634b68446bee72ab04b607de9250ceec298ab8dfe08895b0585df2823476ec0c2639895ad63fb84160a206c2720d9480ea92238adf8c4cb59d5f283b90ddb42e4ef80f82e1635878bea5744cacbc92445941974ad243990db7c85a91f4bc3611fbed3419bf35e979fd7d70e385d50432b822ac94ff22c5bfd8b21db05a6ff57384299eabc5bc7192d8243243b971a76603a4f27aeff961a878738f2903991182095841c7063b698ac4e4dbb830d40b97ee684f21c0688365c0b24b25ea1650889ee735810ed61f00a9956fb6764b2654db0d932d097bb967ac4d496b4a0513cb1769b534f12c968e7d0606c415a090a1bac45fbc47d541683ebc84da9b741aefabf14269a74b7760f38c544f3cb3dd213f197153e30550de49a07b9e9b624b16260d6458a88323c512b603bb5661f78eacf8fe9404a37eec37b91271a7a26207364e45009b3abca46614bcdf10cc183f6045cc78df8a95b9b8a66bf8f5a8cbb3554a47d4e5d6b9321c3e13d80eb67d13a2eb32684a992a2f0cee976e3c8f9d517911b30e17a5def7bd8f3892c1d61d48a0a14cc8eb1e02685ffaf86ba366caf4a3f29a6e953339a61e8e34846f1173edd473a5b6417518c262b3bdb466fcbc124d43a2f0ec0da028639d059cde60783b6dd8654cf69bde13c634a74b3481501ae387abc6bfdaa784b78286bbc9757ad9d2a3d7797c345d779d012635193d23957e8161397c893b6268eeffe604b8415d49100b7ef021c6b92e0cc5b388e8410341212849c043504cce1177b0d1a72f0528e2990b2deaa629e18efec6d98019e4247ead9c4d3803061e327f465dada1b3bd955acb9c18de853d75d6c0b491733ea2da031babaf9cfdc3788ea77df3915957955b7cfba71863e10da7bdaae603825805862418b94ae83c9260ec92b37f94b8fd1ab8a5e0a005a6d90fec6e4664d09eaf0a074addd264b38e2c15f3dd647a198380f0c21e269c47b698a0f8504d7a0e377dce5667c4967b10da526cdd6f2dad95fcf67fe2acc82930d140ac400838a66666bf2443d1ee30df4b52a2a7a28498c5e8695ea7ee8d4fd9b1ec22f01d426f0d2a8c6b2eb8eddfe354fe5f634ab33b73f47967b64ca2d3b237e140f964a19310d937073c6c6dfc8c537d75875a50dc0c82697e92bfe6dc4cf4fc243f606572eb87d936b642eadece303465a52fc6df26e97677638aa2e30f3e561047c5edb3bf5945237aec90c61fed283238475c78042caf7351e367e0440694a51989a034bad33c43b75d07d281fd50781f149a2ea24dff889a7267bce651fecfe542638b80eadea4683b13aac07ae5065ace842e88b43c467c4a7c8955f764b838b981389b7158597cee0d2822d7f69793e64d246ed716d62b47bf521beb03bd1a5484eaba28ab43efad003bae8c244cf7b1952ba226f579babc215adf5362d905a3de23eff02145ee91f445afab5da7fb3340fe283d6d2aad415916aa6759c3ceb1571a9c8f35ab16a35fa683de3bb635690d8d089a84b8a5ef2127a223b7ae76452cb3762de98662ca7a8e059edffb9bce0c433ae48db242fb6f7f3b80b3d85772b9091ead76987c81c9d4b55ebcd6c6c91a57c6fa3432e87c0b69e588aaba0735b01338e0796e13e278528c6ed0aded8af1ccfb6de09b7144e8ad68c99cc820480d522100a9290ecc795844512e14ca7a406aed1cd9b900555e6593db78c1cadf7821c457bb36bca20ebfc9e4b14fe2320839556ab9dce6e008bcc2ba728cc1918c085b912d0770046d638ff6d0ad129f721dbaf5d6582906a7de8b4c34e227c946191c
379e633583cff08b469664c01b9441c1fa4aa1cf93e8bc768fb80ede58487b83d250bd2787e094ca838756f0594bdfb74eb9357b792f426df18d5808d2d691477be8966a025cd297f10c166661969c68a206e0ff1757be33aec103faf615d8abb8c7489be2190110dad3edb064956b0b05dd450dfa9132febd5eb6ee1bb28c101b4b6f1801b5e35a402abd83cf262783791541f1c8af50e1f78c2c4f983f980fd05d3a05f9f70e1a1b98885e785ecf55512c82e19f5f2f100db82d46f035fc8ccd42b09461059f1d032095006c38ef750fedc4ce25c64922f25239792b38ac75a7147a5a89945459c8160bd00e7ddbb599a76c205b71dbd5dd77c018934b8c1503dc2df2c8ecb92833cd89d69586ce41ebacfe7a8752db1e654a8560d559255fd851174b87642912a0acb3642bbba4cae63823a9007dd894ec729b5c593da6081910b1ae925c6e136baba3a9440053892289827579a463e79caa48c94e28b65f912dbd6917d5531c099d82f6ff36509dc8481ca598c2ca31e4ab13aa44278269b302017bdffaf7e1a5603afba98f69616cdd9aba9fc4e826a1a495e4e5e0f84960f52511511f1a7c8cebd1a050c5246097f536a648c1ffb4b3ca166f3d0665c17c255f5d3d9e76ebc304229ab7abc6dc4a4340fb33c5376cfb0c74a129de13dee02fb332ff581748a23155a5b310a74417df9a71c8e773c382007d9ce8264fac2c1f352485600204bed1c8086670e827706f8dd5a2bace0425d436750c59105e40ed1e07b1cf70e984901a9095622f5dccba99063c13a6133af52f158563ba9836e3512e5ec9616ac300f538972bbcf6f105e71d9d356ffcfced9bd3ff7883add9327a2531f6b9844ef7439966276f573e3ddfb7ed1303cd16c5c993b52b7d5524f17048c75d0921261a4228077a1674fb31771fe59026ddd8e4aee78f9493264ab9e1cdb2239016f3547457b0f8c7534f9c96992efe40116c6df19639469cf01790a4ddbf11e784fa11b128cfe7c920f3bc05c567859bd56e492e910ac920565413b21ed299a3471d00c3f08231067d802abf65715a7c9412dc8be86f2e1bc1cbfd5b7f7ea30461e2aceb275b2c43801a67d39035d63fe0c95f7203dc998cb95c959bc46a25bdb799807398aa50390903f2049ca833d81fd55d7a483ee899260aa65ccc085e91b35c92dea6aa8e7a6c55e39647955c0e41f137cca0d86b203b3e0aa142a910986847d1d0d87eb1f75cdc933cdafcb437818d313ccdb8e3a0b2f799b3e8dd66a3789a3f19831122d646ce1f53598c5e6337c95b66bd540089fafdb41070573e7e49b67e04e63ea13da26f8e64317b45c0ab72387c112d199eb86b28eb364a665c55e365e8af351890f77c6c46c97060740351fa216bf83f24366b898dd3119ff9d6b14985d930170e7c3d831f754dd8ceab13a7cde2ba24ff3b8b64de7b7799ca750353024cc37c1bd4b84c429dbb1850d3745cdc64f7f460e07822b3700a18428803235491c2a5e1cc294b2e315683628c1e7265c61a6704ab8fd73c3a62ecdb2d2b897e3c47be6d3625b76a0889ebf13c82d4cc26148b9899b8c4df23a66139fe6bb62678eb313d54d713b5271d1d9577fd63519552fa0aeee8e329c027a3b18a56c1c5c458dee44178e127d2e3651a35b2724d939e9646f036c577b792c772623f448899ccffe08f05635cb664af09b4fd63409dd08aa708cd9640fd4d489bc3de43b509d1e3d2060fea7b940840638923c43301bad05d7d8b38a024f833441e79d87bf7313643544c1e0fe6dcf83d8ab0bc76e2435996569f9c46c95be9b06409ac573a496e1ca7ac05fd181ca00e1b9892cdd2b19944081dc705f92a65ba8c258c10854ca75792a650010e474cc83db217ec2997cc28b2a7447a435f83900d2066146844a59e90d151ab5cca9a152d7f1a51f6c6316b6b232eca26c3c63a7d44f1a3260e6d16848a5644f9dfaa91d5fb40be85105bb8b477fa0f96ae95864d62eea954af2653bff4a4754219379ae37855dbfdac45c0718094d5356904fcb128a391b9d8ccc987916be6842d72732315e8021686720618eab3193777ed90d5e2a601a9b29b7d4e3c6f60760d10da7e94337608f11678261c54deb36b7e65ac5a6536422ad6a2aa35fe7a169beaf579211d94d07c2ab74eb4a9dc99a5f25b065fb8824a0c2abc418f867a484c226888b96fd25ab52eade28f25befe6beedf7cab829674619007474ee161aa58c1f1c46ecc1012cbc0ee4c0e31d7b6683356230acf49bf6f4a4e9b5f92cdaedc2813733f2634dd7d1256b07082c9f22ebaacbc855000328c3664250c6e9a0b3000d59f36a661084e612fd5e1fd580aadebea724f91d500c4efb41291fd62702f5706a29ec67dfe5174031083f2e09f406bb633d78d4589e998386c16c259298cb289b570968383f87f249a05fd006405484cede4a29b6623d342b0d3ccfd6683c0e2014690b48a1de95aa21bf2b40bad562c343020772613b108279c1
90a7c23b11ead9bb5cf9c1c6add4dead401a37e74b2394e2c27fdf19d1c1d6fe4ea6564e6ca17164a4a1e09d2acf05818866bd18bb36788ba346b28b3230eea11b74f9a3558c4584ed9c903e1479208872c1e72f903e60211cc6c6aa9b62d8fdcf5cc33184171006a6c08e179d2905e86a8a8528c121eb9884c99c6dbd0b3e1299504cbc3f5d507ce33fe6c845d5125c0fb8e997d40c744826cdc3227fe0b1526f9cec602876e39bffec4950ee40e30d3488ce380e5193c2b222302c403d63d2eed3b38fcae5919ea949dc6a3cb27a07d5f10cba8e6c7dd89814b7d29cbec346d65181143f31c25befa9725a38ac73b7d884f368096e513ca9b80b8fd55ccec86afb68e6719c093d5e7898a443d7e043a44c50de83ca2df99a2b7a5e0ebefa60fe23fa3e0e83abd562d1d47be198b1410726f292d8164f18dc379a4954742873187d34b04463768d806c9c512ca1dbc3cc58478ec318882bd6a4dada5afd7670e5c44a32f7ff1137f3a107d81a286b0aaafe346bdd606467082879525ecba4114eb052e65e07874dc874ee36e2fb7b9ef40e8bbbe3e578cf730e417c594b4d92c4847c16d0a7731cf80cfb1c0a8f2110a8cd77b89b4a9065ff8e74d15c03461629489fe0209c429393eb6f8d4fc1762a7ee8e46177b01c6a196c40e1bb0e0259aca1d3a2eb4e34cdcab3bd6a3cd2e653d0ebde8e9441aec58ef27949cff570096edaf81a29032ca71811eddc22118a6553bc75b8aaaa791858bbace8eacd030958cd59b3728d800565bc81d461e7c29a3ce1b8e8ae745afcdabf862664f4f9debf3085ae354852081cf38e63f90912d764223485f67bd09ba28b846d2c2200aceec5d267b8423df87d59950784d701a40c7647615edf4e55b8910b1140e91b408ca0623762106f23e82910932220dfa2da06ea179ab4fe4537c9cc48540a1d185ab1eb0bfd3d4aca0b56db4dce8d8b8998a63605bcd4f9d1532b4fdfdd1ea2df5760a50d80b9dca01ff0af6eab58ab18d5fc10184fa3e6b2098dc95f3d5e0c22bbe33e223d19d85b8b7e3c39f297467cafc99bc95b0463c7a2e3d86c60f5b23e9a5472b1bc0292c6ee5e41fae43e61e2a29c09165f409ffb6e8c93591fd0da5b914c9fd39b6d7b4e83688af1e37644989295a0a90fe10a889654a89d4aac48fb4668102f64a8ba5a91bd842b23e8a10f2dcc6c2614d9aa9c676fdbd8a71c0e0b9b6bbe7c2bedac3bf0bb491a03c88e7750d7d5a73e9b5acf66b0faaddc6d5bb87249be62524224c2f3f2ec4159fe6a683051a09d79adc2e05b7412b1475bc5252fa4c58c79450cc3cb4f0850999eb8acb20f061be2999433d1ddbff9755b3eb47064c900f16e81285099a80e24e2430d3400d6e0d1ba51fbfe7748f7b30e214c5e437740584424265f7dd36e53796aaeefd75e0fcfa9bd63003c96e8453dd7fba9487cedc3cc201c3e1833200dca743b6de0c6895e10998ea696fff21602aa3e9925e38e35a0c43393082fb728184d1854e14934b7d117b44bb3b755e15d1e5985fd433b0470e2dee3ea32f1316600a4683cd1a9b339e447341a763a97f68f4726e7fe95256cdf0d5175aa86c900d068cb1e078087c0f6eee06f8c7c522040f5d13d8ea4f9a3d584ab5c67aeddc4059031ff33387d48fcd17d9272f96c66227c8d28b23203f9c304092796c716e82f911b14b9cccb9fbd0212fa869cea310ced9f84f4ab7acc72e263d0721c105eab8887058fd1257c6807d3eb4b372312299d0726a6d29c1d8e3f5ede34cab96add36a17c4edde2e0a5818c53668470a8ee4e999620f22241d39c9bd658e89199e01a00c6f8ed5e6db821a76a1ca7095e31d2cffb1163b74b0dea072e92e8ea1178b979e9acffa91edcb3e6bf4b3b752b2ad171a1c1a1b47d565681dc6aa5cf74e34f412196d2fcfeab370c7a0304bb6219a3ee3e74b5829bce4d764ac1564cdc563f9d6e60702d82d74fcad7ebdf0e01797d08806d14fb02a77a50d67a5839671d1a9df0f127373eb5148fdbd6275759055272bb8f2f8bd65cd41f8ae4b1caf78e374e97b9ee75738ad0c426dfd6dfec78a4fd2989337f49cc2c92ea9a87098d1daa659497e066b4780e11aa715a98317495175238ec51d78a7bc9849ac95c96f3a80ebd6d35cb64a4477820359b02c16041bc03db1fcd7a692d78ec79ce806f18325615311893ce252cfbf4e4b61df79f6052d3a45ac2e7ec0efd59ac5949bd31f51e188a68a5cdc2820d58d333ef13d5f95ca1f47200405a32cb7556d8ba31448d51c443d969da1413b784fec1ffa05c768b2504b7d51b69de9a28c93898a1eb18916b531e92e0f4a9a7fc8cd1bf2e82e839cd7dffd1f2a67e09f26ac1125e7c006aa9135d5b61daf02c6dd59c67f2c2a378e8532b7b20577fa140f5ef16e7fcfb5346500541cbed24cfa378bc288514b151a5c12eb0e57c4f0b498ea3bffb32adee64d46007369f2d0e3a572eb5ff2a864a7646bb3ea5fe403ebbea9d45e83ec915e
4fe9a6577c0627871d76bb0dd613f5aa295c92d23795ace3c94d081b6c5bc3a0cedd138f521d0eadf4fb7d9e18e998e5e4ed11e6b98b6efa07714df87e8984098bb71a2da50d4b734dd59492e798e60e22fd18bd74b6e2d4c8564e66e97c6c988ee969667efba1200d4402463f9e3978c4b42b73b193d09bfae1d4a0b96aa171232d84a8d605027e1af6934d9a499f48ca1d50ec473b8a512587970806e92509b801969afe61579425902a02d8759b8a5cbfcd24edc6fef0144ff503dd7f620a551d87582f23ddc806c1a95214d9f8553047887f7f3014a37e3b5a492c720be0b6fb3c78f9d9b49d9d8d23d63a8f9b03df531d343b0d69c607750ba8f9f3ada48845c61c6dc6b15ab551c124fd143f117d33c215c76073f6c5549955fb7a35104766a600fffcaf241ced4269978ec709ba0630bbb979c76080ecdb38e35ab672fdb462a65ab5578668e09083118d4bcfa62844f2943ee18854d1a9f0da2705a7e802c8f8257eb8a2df1fa2576bd897b214313de3b61a45cbbff6caf93353eb4dd1b3535327f86d915a78cebe996dcba88f0555f29a2b36c1e5908b359958d1e3ee8e7d6491dcf5e9641c05e699e119f1b5e6b36a016a771426e9a17c0aa3be45aac02011e99b75b466799fc2ccba9e104fb2f951eeb2743853dffdd5b646811c8c9e7bb4edc0a9108609bc7f88a5519e7670748a7a28719ee1167939452b54fc7966b0f2e5f20d15ac6189710449d234388d5d93c7fe7edeccf225168ef594a689797842a0ccbfa728a96a23cf73be886d09dc4187fa7fdeac867e3202636110f7d8ec41e5e289bc184116e42693c224a4e58e63a2425ffab9310ab33464160d452ed66f55de6d4f99e7908a17437573bf7c39e28e43d316b17b3520a461c8636061e9bd6ad6d34879485088b805ad38f85e58877d8e7c03eab7c135a03a6457f2d63ef36873dc343a6ae76b1bb6effbfa6988b952029f36007557412b8d853bb373043e16ee86a731fd85fc3044d301511a169b8ef20837159824182d01a1f0e1be0eeeb62521ad8f70849678f290feb1840030f43d52bf15447d1dc7c90d7622774ca2bc344d91d5fc47de1f1712ae3d21be171ced15c8173a3bf72ec2bac349bf127b0ae15be68c1cd3d3d880e01b7c943e9d96032d38505e06623dace7114a2a6ba074bdf0b32c8eb290babeaef1a1511e2c3c1d2ceef32aec632d266f6f3ae46be5b973743509f5344ab200d631006eca462989c7a1142ed66d6a5abbb08a6cf3f06e15a510ea8273b669b9c143aefa6a1dace5f5f4919aace2a2b88628f97c2f3e63e36d52c3e2a85194290e344c10085eacef1cedf50423c0cd2a1619120d1c9284e17a3a7d0f61fb7dfdfa7bafc2832db7fd470c7286ae050b61a0d3998cfbfdd3de8fe101cfeafd7028b4fac627bf9f073c6bb12f39f52b56e058d36f2cba8cb41a9709d5d2392edc94e9f17e284d78752886d5b3e3a10bbe9caef9f304eca3f173c8a9cb352093e0bee535f07a2f6db765314b1b7588da16d64f3044e46748be0de84cd30593b6e78a79a7d22a73a6014b5d3cd1955283ced7c4d2310f83f4c2bf954b53999faa1ee10a3969947b604fa0decd5adb5094fe69e839df0af51d15525cff0ba27b27b83f244c1a9b99c6a158363151ec194cd282365c49a60914ce69440a8da952225494fe14198d489804f275c9e0e14d5c1746d214a6f412f9134e25e58c1c6afa17f1c9b26beb6b1f0707d5ea770a577d67c03021b42779e1165f7834899ec89e3ce54cca7c80a78c5a7175dd56186ed07f90de870431bb715a88efa6bf886e91e131f24244d7c4796a7645c63875a21310bb360f2ecfb995e7a397a09a30e7ee5fb3757386cbfd7f98617071937db730375eac91d6ca4dff53d1e59f45d51648baf0998e9111e7d5dd4df503a659291a3014db939eff1688da613eba351743e29f62072770e571f1b592a1703cdf01ce1694573c293f1bb501270dedf8b48d4a84a3df4b7bdebc620c461e86bd87c426998b9c0fce0d3524f0e2528595220f79295833979927b0ce89869c91186f40207a6dce9925d24f9714ab422050cc5f0d9087fd62d9e62f6597f893e9cdff893dcec346612a655d25b98cf1dd91e3ca8c2348a227eb66b2b072e5b2b5f6475b2d5468ac04f61872a3407cbf17b9145151838929eb74363731bd08b492b5949f76f9758eca109a050293948a55fe64ec787e8ceb5317cc780608d69b62a4df84afd1dfa64485b74e17cc94f8115816bec7a825ac05864b8c534af0912680768c7183a4fddc3a262045d4029f80226eb3a2dd8abf89f866fa79f577fb02abb18ac150c2b04a967e1753e5e1031a4f45d9ca957b8f2d08043190c106d18654ec099a4f09b8b226b46cc4eed9c6e6ecde7c6c01d2c27c63bf06ac94400b3b6fc3f5e6be0571a1144f7671b128bdc8c0fe259c7f1a59a98d400bde030805236ee42df887538b0021fdaac0a7d5027eb3c93d0bd8fd03b0a39
497d43d0b8a7040ef2c4255dfa1bac162fb39e55eef2c86042aa0de59dcbadcf9e041646d82001fbdcdad32d4e46fe9a04ed227d97652ec3fd49a73fafedb5fd7f34fa32c795e11c192dd3b8ba345613988b6164463feb5041201003eff1f53adaa56788e0bd6b8eb7a611409199d9e40ee56689a714f532dfc65dec036a8da609df4eb08bfc4a21e7e4de672dcf33392b1f1325588dbaf99caa8a79f45d852fbb26c1af84eb7e0c420ba50cc856df1412829711bfad72328c069b7eb1292a8c4660e843e31787396f260b24d5c5e5fd45ed6709239c2ece02d136faee3e945fa2d81b93414075ceea0767731b535768e925c3778f0360f4e9c43d92af28958285ed2a74bc7720a9a3db155a701fcfac7df6163feb8762ca6a207b84cfb8ac5b11d26c413c725872045068f2f165f16ef654ba1c466c7b16aa54e46bb6a69ba554bd3afdaa741d835386e9265996d99aac9d9b1a35d8283719fbc9c928ea34830f115abc30354c604eaa999bd5768d58241caf7c2ca0694da2cd0cc846f9566393f624571790966fbe78082201c592fbc04c02c5066d04ef7994eb28d52fb1b1514502259a21825d1d65345554de97beda8a48b8906e771f0564f9e95643f5d1b35fe8b3957072dd20c41045a6fcb9b957925d9f15571429efc0119a352a59667fac7c8037a491cf3bb745afd00a0ed95cb5461424c8b8cde42cdae64de386138a3fa854c86f02c135808f3619921ac25b375431573fa42f68042ecb1021b9a5eefef21093f1e6f20b16fe454d9f23936839f849873c6af3ebc31b108ffc8176e5a739f4bd5daf13d736489a5599605493e8fb9b0679e150b9db1adfbf60850cdd742f41c24b140c4898d84ff3f2781847f04e119e8273eaf51b49d32752c38f0ad8cda2bd52c7a49259aed8ed2c64b99f9290acc9395f40bd9d7470a19980dcd0dd032b2cba8874b5833e3f088d6f96f9d8044d2e44d6f34e9df5bdb8b179ae8cfcce89e79155a9fea2cbb4fffe316d89d3776fd4cbccaa5c268ead0cb46014e3a8a8bca70e61497acc6b8310ce35692bf638be374bb1b365e4e86d28e28f1c32b42f3fc80eda63f5cc9e85f21c7cbb37e268a6bba91c6dca029757d87fed079d08984a0bbcc6a4b489e147df75b136bb502321799ecfd1da335ce33c97e262eba4a06572f9782136069fa006d4c5a33103871968906c3348ed78d3f025b59e13b70440d218fbaa96e8c8bd6d8b1441200b10648ac77305b5a0645cd2036ae8eebf760089de8d480a3c3106baea753d1c6f76039db4ea53b77e3c2ecf85e8766013eb8985d4404006533c0ea8ba0213004ef3518ba5b3861c8240110f46935f67082f9371a1dee67a73002d85f88326a91e5625192f55016c411d4e99882119e600a27c0051b427f27cc1bce45e6a601459def4e4972f252818b1a85574badadf8fed808ce1663ae37f8ee26725fe90c5391314bf3280983d5eb31205906d52ecdc8eeff9a09fc6008e10a254a4a400f3736214606ff2a0aa7ce5a1f0d79834d166fad3c368726db03a8ff1d8ff26b1277957c3e9de5189eecf4914bdbf54595dfa9b512b7aeb15f286d9a46a3a561188bcb05637f16b7eed74cffabe0b2f4c46bed7c6e8cd7dabc15142d71778860424b74a0e9c75376513216c9bac59aac20741f51f63ed2cff097eb5dfd698306287d7ce25dd02d8b8837e2cca3c1db0cc3355bf696fcf301866e812c84631e545d596b8ae35872ed978f9684c13a30d88a497c9775fe1a5d8130fb38e722ecc3809f5418047a103d7e0be4df4d9a00f9af2d0c794c4ea95b5b4ec026402bfcf6cc9906033be7169b2f1d96c3ccde69ccb7a5f34c5fbd7e9a6fbc5a67c19af8f3b63e9081318d1cc148f3c46c0644861bec34df2a7538d2c14b48c7e472f434610799fa546d6567ab09064bd51a9951d0020b7f5c8966463cae044f7911b8661a0716da320ac52a44f5d25e358bd4b15b3c99c0cda78c1544b8e8b0a5e443bab21dc79752ffd8a75205c98c249c040a8378cde75c345db310fcd4262e09403af4c66c9e03068fadb2bb4b20f623803251d11d1925569fab6a8042410be4096484cc1a2248f5ec33142ba426309bd81b5f6775be507e0dd3faf7156139d5088a358110e6bb181abf8ce3bf5ed479814ef07258d669d3384d9bafd2dc29dbdb351830d4bfde1a766297eb8f0e62f195468ad576d6444b5e7cfbaf7fb114d6b99e35cc77b144c7a50cb092230f9052d273094fd9a422beb7c935d00caee75ee0e7b04d32d5b8aa86d32f1ff778d433ee9f650bbed0a8086798f8d6c01c0751bfd252c6921c52d30e6cf1f9f84adac4e474c2b202063eb41b58578c046452381b4f101813945243ba22a333fa1f83fae8ef057b0a55644f88a34edcd400af8be20c12906ceaecbe79e57dd0287997c999a20058843a5d53e624108a393a303de50ad2ff84e24567aa8bb75d4873b55222f0a6582747d440c463cb17c909c4b1ed925b240f
926833fea8f45c3f6892cbce53362043e7bec62f6caac42f04ed830feee2367a104f1598e65dc0dd5699fc1d2420aeefbbb1693c031bc2567e2783fae561ad6e0b2eb97b3c6274075979377b0b86e061250b0e1c85238b51e328f738d9d44310268442ef85a073f9fe37977f2440d41b8040b07c8d43e45298a8cd7b5f529bbc6f4a12e12194bc09d8d6d393fd9c74487584750e5042d795c149966ee6097ee29467edb4c59c4033c9c0c15ecae43c48048213b1116ef3880b73c2850801c1338e1901ac09ed5d4e9d19e2e123cd5fbd9b8f4a2cca38748da5d8f3b9f60b5bf4c749453008922086cb4abeb841a53de18217927d03c4b629bce52c09367defadfe9f5e321d933f6092974474dda4a660f6d76de389a379422ffa3e023da8b6a2c629095d1c8968533257f152980f68034f297b96a111838cf39cd53c47f942c292ab626596ce5b68c8a82228dd2ab8fb8ef2a204750f20d1a273bac497b421c9707846c80769fc0035c82acd289ae895895ccef879da8bee92e14b8da03f258222106badc2404fa10a67376ec7b5220f9c3cb362b1c9d4a0f9c9b85101e590f9d7f1924185256785e86105f903e74818a2acdd6dfeac7a71aa06f4e4172a752cd3dbb41e6b0457c22a358dbc347cc580d58230d3cfae6dbab3bfeb47e466fb0a08cc1eaa8f56d4247b285a60d1e83015abbbc6d516bc1add5893e38a2bf696bf1551f08253ed08bfef12fd5a057eeb0d9891daf13047e2f752c3a954a27c6713c366140eee92cb7db29f0b551070ed68466eda856232807aeda6c9576ba0520992ada2bd8ea403694f1bb82e7854f40f8e519412b1aa607c2aed00a1a415445a428da8a87fdc50f8556b45f491326b3eaf7acea7747ac514a7b505ce5c762cbdf8ef44dc0506503b26d42f4fa0eee5703326135ae2d998e8a953da72f75ab91e3ec3bf89de990e98e4af8c59de84a1fc93db35fa9c82c9a2df6d534c06549e5f1a69f6b43502f08d65c44d7485c37f33a6edd7bc0742d82b280a01015a39152a43a55589693bb5864e23b08b4cd58d5b02aac6cf2f1e60b897dc363234504de74144b276279ae7a837aa11dd5314afc76ac781139828da94e474f7200885dc0cb928d57656f83321f6257922b341990ba8ce938184f48dfd625f9be5044061e8b1f68be419fb8d10dab8381472ff840c97bc3b747e7e218e9480a8aa77a0402638ee64b50d1c029b85347d3fc6d8ef76f0cd94872a0cb9ad6b253ea86fcd519199905c6c5f352015bc38a99521e327c8240f8cede8358fce404138d7670f5ec7ccc67d1460021156f919187399585ce01fed32db4fd6c4a199a8de09cce993883fc8be3db193edae7be7deef4c2d85e61acb1f61ee58b2cc34ee4288139c1914b6962651bc22378ec68985e1d55173fc70b1cb61c6d54ee7352aebf93dc3ec69258a93e2f15639557a358b83092b63b3d0c9075d472620c3cd9de98ac538638cfac250d2a832c36e46f79c7ab0a42277029d8407304524bdb145d970d182fbf9bf8df9acf0ffe74f86e9d7667562bddd56dc77685716c709ab7141fff177861cb18ad52e5b8f329749b117247c26fecd9d014ce4004993b419422ac915692c820dcd4012ca69e147c0a1909e9ab0f156892345fe8314439ff85f02e4dacbd6f395806d54d0594484618297f556f5be14c5c6d12cd10c9c87d584bba7394b536e0ac2cfe342a265db7a18f945a708b75a244f9453b7afca969cd7536d852627d63116577a63e5f8f53ad02de048b2bf7070c53ca97ecbed2ca9ad4c4daa505877d2ec0147f77e374c96b3eca1d5663769ad9e71293bae7c2541b6176b557c15372bf9e8e795addb3c3b3bfea44ffb937d3813f2211fac3caf763f614011b7231a3244ef472016b7d6723e26683a7448dd03b96b580d44b03a29743ebff76282873274c5102613081120498dfc44470dff5047154aad08edf077c8fa26250b8513ad00450ab0d84e0ae8f771554e00819bc4fd153684281716f1bfc93cf51fbd27ef8244a43fb45966e9b766611c229eaaec219823c2090d8710f1eb2090a23fc427f7acdc61f415ab86687364a602feb6af6fc8dc47c7989946a02055cb186934f497f04e06360f4fa5294a99412ec86b566cb53149009eb67953fcfa9999edc124d2cc287465da746c43b9effe464cd6f4b51c98f8395c00c92bd3dc9ed99d1416ad57a5a4a47c8d8dc3179105fe23af4429a3e6075c633181c18c07ff4fbe0264df6263e95e518f923fc93e86f2377111fcd4bd80a0dd09a3d985a9fe65cfd12c777cca039c2c3ffba120f9aafac6576c6921ae763795da2fe9d51d4a219285a12bd20bc6652eb8c25c0048b48139bc11b6e2fb65ac0e7d360ebe4609000d3395e61e3e0d8897af8462e596b75a28345a00371c2111312fbe34adaad125e75a49cc620588fde8c2a6c03054916f47ba5d3ffa878a127b4f772dc2fe851ed7041627e3a078039542cb89a2f73c9ed5ad484
639b27f887dc235c2561cdcac2c20e99845d7d1ab4761e9c1aed19c6fbeb23d4394ae3bfacc13ed2176a2ba4684106b3153187c773e9dcf2be4a98bf8fdd63b25ecea843f3d9ebcd3e15f961581f4e69850a75551d3883e8993459db64638e3d6444d7986f45fcf18d2b011990f020bf51a78808b812866fff339a5e6ca0d0b3d47f3af89e56df7ec9774f847fa6b8b3c1e15b44690b50d4ca563edae422c350f4889167df019616753fda91bdfc4700f02cdf1249767ea4a3a12c82fcf50681364e69663246be37df4ad1afe6998d24081913434b1742eb3ed4aa34054931d6672cbb1963d16ec929c84f2fcb243d21d4d1241a802db4f5d9df145f9609fe7e7f9588dac1a5419f3d94727727b9c3becbb47989187c0321089acc41134fd183e5dd2cc669e06d036132d086120b3a4d27c2f919da0c044af35e9647a868d9c8b10ee358ca784899419d7e6509a615e921ad28022fd9e75fd278bd68927abfd64aae8f81f0a3a187dedb115edc325d8813040ca9622520933fae4e160541f717462bd34161645c85329e98954766b10fef9e866f4790e031e1eaf90c0b5507b8514dee36d4b68116f095bb7523af69aac48c41e2c7bc1159f1431a6afa984db0f94897f1389ad75469a44a9cbd44104f61f6d7304e77f1dbfce71e35dbb405ae2d9b76582f41485d2d423b5322fdaaf52325109361310fca53aa5108a7c3712cb0e43cbef287bb6691cbf458fbb5a7d6075c13ca79c43ebe1d6b767df198f1627451507e99bb16712f4af79e8a5574d9ed5e97a767d6ac6117626f5660875ac2cbee60d54d8e8f2e37d4102f82dd948629f09443a6fc404e3c6644874d79fbb4e859ee4c06b9caf433d7842ff0a3a2a435b2b4af7b606f3810fd298b9a2d97642770f47980c7e80f5beb59d0b5ca7a07ad21f25674d04c2552072cd034c0635a3df391db53c270908ad59457f718107115c2193572199a777054ef710961d64dadf3f44bfecccc12a8aafe073085098527d93f7032c45203dda1f2f6fa313ac1a0c336900f043ded733c4b54279b68c30809f6537b0c4fa7d9041160967842e7b7a98deaeb8a08e7d28ae3775dec32472f4b82cd1c2ad902a5dc2d6815f7fe024134b7b3c0e4727a214976ad1df75ec1ba749d6c2b4a78d15d423542ea56f1fede80815051b703a29e23bf87f90627f9f320e64aaa5d94adbbce98648303211f1510803e6f95bcd9f17b18e8ac82ab5bcc629ec47e98c0088ab1aa10d28d952f07a045cfb30ce1627e991784fc37bff54c6c95f7922b8ea4eb164f466a6006957684969fa1546662ad8b43d6e70b4f79f55f1689647605e00dba379f37952ca94bee682768962a03ca9a7a552f3cd86d523da4a09f973b3bd98b6dd05e1c6b50616eb1d2d846aaffb7091515c882c3526a7739893907df363cc55c7bc3915b3dd6e0bf8fffd626266052bd63bfb7a1c927aa793ee542ebe60b2eb9104c4d12903953269b539325925e9e516ec5ccef9456d6d0c5ec04d06ea541ebcfdc66a095bb034e934f046bb20c1dd7e1f96b72ea51f9b7e97270ddfce983a519c513f7d13f642083c7f53d346db430b56e05f642a1cac74109d7cd326975dc6f4ea14c41fa8e11fa73b5ad60a631bcf3b71b0403ef68bacb388fad9a50c58225d79909f2505a9ebe372c50d7077730fe93263dd709c8414ed6313514409f6a338e02e85243f0a5a064a913d8458ec35f2ba75a6163540798d4b1fe2b0f851b28ae12fbb022f7095245ebaf70d08e80d6c7f72a12cdf4566375606af479e01ff4402e9c924146a3ccbaf5f19ac2c4c4a58fa753396658c435c8c06239e12d98b4ca370ab78f528d9f1f40965de9d7b0762a774e4e061e3d71188acb5382fc7fbb2585d5a16ee5ad61b2f4ddb221f319492710b44a7641353f0984eae35b79bfb828f29216ad978e605b24cffc86d380a7e9dd3e46351918d3f499b04ae2b43f5b2e47397ff5e0191c954fb02710cef75015a09e348f5bc6dc2f095db172e8a78f737c77551aaefdcb278f8a5e71f8b14f7d57af1597ce26cffbeeece632b838f028fdf7c469dd605ceff52063f28ab4f46026379e5249a270612e5c7b367327eea3ce909bb25f13262736b5f8041064f346372a761dcc6e63af8ae2aa906d7faceec34d139f1b39add12f4d91dc7614e76ffee681b2dd3f9e8b6163a6b450b75d91fc5a3ffa93754541a5feac3fa60bc1b945fac070d24651e4662716845ecdd4cdf0c469f6952cc5e29fbc23543e83049ec307c1bef711fbe9dd52334de40abec1a786d74aa5f6717acc5fd93b6741e915d5ec24a3f64e13498a22cfd0350cb83521fa38fbde3393b544a9f474b5341473bff7288c3440eba4f63b404bbc1d9829fcf75def1de5559892d868db80d5ffcf2dcf05346a9c55524d23b17c30b7cdc4d4021a24186f43d929fa34553c9c5e3957535acb199e9d8cdfdcb6a89c71c9fa4148f90bbb353e68c10ed6f7d293103b056a438bd
5dd1f2c9e6d40ed908ec54d693a7a3da2861ff5e00e9484adbbb03fd755c3484a55f5851c3653f3ecc3cc8836a28bc9b5346cb6a8f28103a7904038e55a473dc7fd3354e43adf2b856314769ebb2cbced51dc1a124252fb7a735e3ebe614a1f6798fa9e05b4c76df64778a5d102a794b850fdf74a00637eaeb6987d4be2be86b6d8bc7409754f9c20048deb29298360267d43662a9d293abf4ed72f83ee321cb0168b44d145d107f126b1f0d09ff4bcb91d38f171b8eabbe367f4805eb98c7e1612004f422e81c061e0a253c07b8c976b45d8b9e0ad5dbf185f482ab51b622866d90482cb285bb6df468dd8d4de8f6137d7993e1d30e4263061cd7ba01e5001cc5bb79703d35166a4d61cf0d3f8d71c4a632cd57f9c34f90660ab8e9027fdc800a4570e56904d10a237371fbb544549603811213ed6d5dccef0fef161164e30097e1d219ddfafe9b52cf80f3306331af9a71666e5badfd8f08ef29a4367f446cd222e7971094130726a64a3f3b6f968ea22036f7dd13231b44bcd9219e29e79e08d210c41aefa60885f505a8a837d4c142b7fa83f291d6b754698845936ce377e7ca4e2b780b6ec1f4518723b0f55ba0e08e604ff5fbf3fcbc0d392bab52173c273d07f8cf7ec0b497c13dcccf368927b4f9a43d5b33a9853bb295a3c60523cd7ad526765d1bda09e822aa579943b4f4dfcd0f9e57343bd8e0a046a450716b3ef380117e6612a197b5193fe7311ad01f5980b3a2d2402cebae285a7b26d8eed208017fd395345b2b98ad7425fd9e5702f47b456ceb45ae3d54ea6d2a3a2d9b5d17df110ac66479d870c3ddfe75d852b43e0289427a616aea845a4e1d7cdae88a39633af13e0042a2afe85d83671e8dcb13bfa1b1311e50b45801e28d6d7c929e3e68be580d687fec14ec6539b749a4346e3eabcaf5acaca5bdaff621cc05e604121a753727710d3008b253b3355927ec0b52f51c896e138bc46012d7f1803b0315be7d405156277b42147e0e6419eb85a73ccad2eb4d6bd19b21cabec393ef6630c5bdd418fbaa922de70804dc0ef19d740354eb9432fe9e7cd763e90f069bc03d79e1f625686fdb52a7eaee21575bdce5e79096f95196faa255e59f1dfe37ee82aba39056eddabef0093e0cfe5467e52c1351cd030529973a1e88e29e43b83a6d6413184edf755dce789faac938aaebf09441e74bdd7b040a3f030b9f6f45eae9ae0eaa8b6e9a35ec2192caf7887c580d1981e19d5b2401910ef4ba6dc7beb6aa15bf5f90c235443122b3bc7aac80840bd72ad7c5a73e408e7d276ae8271d6ea298f8f57673d16783b7bbae9c6b1cd1fa72e38beb91a4835a7feb3380858990e46f161005629641ab237f12946593404d161c9f3bcf3c8ce3570142fa4939d7ac18f325e3bbe07a887484bb3170afbae3d918d494550016cf24a114f885927df2e25586043b9e96a5b7bdbcbbabc78803ee24bd7b4568b097f2c09301076cae1505d38f3bd7f01deddf882f9a6df16223f7c0b7939e932636c2f210b141da44585567de680d16d3133793a4904feb424c2016ed8332e4c364d9e6b1fb48621c38178dbb8e30360d0248060288ddd5adb29f1ec5668db92841c9c859537b52c05bfcadddffd45755dff5a30283938ed626c8f0b0994556a56413b03f50c8be2d86c0e3189a90665d7190458930f93c16750a0f64eb8b452bb9410ffb90230dfa9dea4c8a5aeb8f13630c7f3cf96995e8605df4ab527bc2ea03be834f26b680cf28d23d583b117b0a5209af2bdf2aa7dbc27d4d659462003b1c21005c0a334aebf3ca84e560fbf53993b2500942dabb4a5fc45a5314c14a0d2029241111bff1a99b131a05094d990060a4413e882bee5aef456a24469d88b68b89ebe66fdda67f0794b992847498b92aac130673c1e34c675849fd86102d21ea8b9f01468b70b148dbf13e156e8e9404641402fa810d505b30cc4a4a070ff9b1f4eeba4974c10412dd9b42cef4164c2008296c2608b73dac01090b81757591d97b9f0e0b7a8bedd6e0ed6af85741c1248ab613add39c383527568c3cbb926b4fde41f0517a9e9e2f09217b6fb2b79452ca9402cb04a504fa040fda5f7d67302fe5f93d3ce41e3944e6799f9f2f8d8093e7cf97a1e83ba1e879e7feeb2545863c9fc7c9ac4954f56a8eb9bd5edcc91ccbc6058167dbc95c087c42903bcacac1f33edfb12870bb4c5f64bdb4c1b1e6b368c88d95a3e759dfd9fa1669e7e7bb786ddcaf3c3c7876dfcaaf9d106424fa3b12f82639fc00c51451aa30196d2c1c5cdede7a9beb5da1b8ca62fd9d9e505cd9559e2c1b23c8df8babdc8d207758a44fffb2b0b8caf529d244aa3f822c99f2f6148baeafbf05c9df5fa7fcbd6d0295b7ed59eff584f469f8e25921d28bde60b5d5a07f7fd5855c08e670bdcfbbdee7bdcaa20f7b8a641fc9f52ceebb900bedd3975344a2dfffbdc5200cd750caad0771f4bceb7bdef5ae8ff3097b7c42fb1f7d185f438b91be7f9e5fb
122f07a1ad3939f4ff87db34268a3184ab9f52680f12b78ef4634e68b1086f47d679e10e97b6f071781395cf807237dd81da024a5040e50aaac072849b1b17dfda649b94709f24a29cfce7b1e36afde5b7dcfc3e61d5b52e6def5f5d881415e7d7b25c85c87574a99c3627dcb7d15cb98ed3bdc55eef1e909891d267754189930b94bc12448ee91081988728f4348652119a2d0cba20cc912cd903b821c3aeafc1dcc21685cf15a842b3b93e5fff84495e5d3ef0b02875cff1c89f7f414cb3d3e313d9d004a9d9fdc23143023942532728f4fb4c83d3e41e341eef109922728e099e3a635f53d51a5e5a4079e18c6225680d126c696d18993fc82e51e9d30e518e091eb2fd286f9bce879f1b880d1b23fbcef95696e56bf86bcce20789dfb47bc3f955c09c50293dcf93d69aeca3bb67c820b96cb9bac136c36c1a7f2095f76137c96777c90fd67cec27167d9fff3305c05e20927aaec493373b28499a1b4cd647aa4c3b72f5a0688322c11ae08c3410cc9f2633fad86f21a03cf7f4a8ed389da509d966c9c8d0d27b59ad239793f005932e49b62890cceba608e94b5c84c25809b0370d531d7c340f58b7045d8ebe554963ff09025d72eba2e9e062eeef1f7f6b129ce967b6caa1a9bc43445f58fb1094a6a6c927ec3db1e23cead6fae0883c57e3ee58434cd60b1ed25651b9b0b8b8db06dc003ae00cfdb97c00385ad695452957b4c32271b20f79864895caba5d473b362c4b9b33afe62b8b9e2d57144015780e7fa236cc00372487e4d32de215e4ba417adafe3ac0a18b835069a0bd45a636c71abaadcb0a16e68950f77a23654a772f727e31c743f0dddc9c948e9dce775b5f67725165f302bfe7450b65814726bed0bf947cd5c91662afe644ae57f6f43fef74a990b452a23dbce7e40465e61eac639a09ba5513cd98641e0b89594527253ceed7db33c2ad4ff78c794565e978e8c499c8c48d4c8a8a26c9abaa155b4aa4ae765dc54dcaefb2973c59f4c4519148b304abfabb99b1ecca7ff490fe4533ae459098630dfd552613eeb5590dfc24ad90b45ca0ac5ee2df7cdb2d1fdca0bdd29774e666eb693148dd73f9e277a58fcc9dec7e450ebebe4bec232f75c0caa2b06e5faab0f1272f1d5c5d79ff17d3ce1f7ae56f871a1e8fd7ceec3a2f7f63d4f28f9c6b35fc67f93d967ecf8a04e1d3ad92bcaa252903c579884a22cdec813237d8fb4713befee7af163854561772fd4fdabe726910b4b2c72138bdfe7f3b966493a06891797fd1d20d85fed10e1bb1972982a49c931b2920e79feaac5648bc5fa2250caf5eb98e983396620964b691919b2487395d288cb665d717777979387274186873142a040a5e9470c5bb030e1834c950b0e16413411f383cb911d52005a2133440e40ee918c12649218c12a0e95b7e7269184656f763dba77bd17f6683aea704fa0ed5ddf4386de3dfda3eea93cfa7ee224ffef59dfc277026d2f5eb973dfe73fa9e9e8c5cbdfbc3ff2f7bcf75fe123d8bb7092eb6158c704da1c27094da0ed77de07efe0241a13687b4f4afa09b4fd0bdc739be1f7ad9045bffb23bafaa4fbf457f868fe87933cc804da1c5f9c54c204ea7052d35137c341244e9a394e62bdc4234ca0ed935adf13887b17423cdf463de59bd10cdb88eb09939a74bc70921bdddf799ed045d8a3e9c8c3ddf7c8c0881d7cb13b4da06dfb0caa5cdc23a949026d54df7e1b75b847fd3612ddc9151ab13ef93e672a48f65ab66c190175d89f801bd5ffd95c4a536e4cc638856123276fb947313ce0e0d197028b1819a498ca20207d607e2f3924fec4ee77bf842e6f9804ffe9383a12baec5f14264e9612e903944c9308afafa8866133234b122899a659ee09054a018d00a314814f7ec0275d1a44a2a868dbc6314a868a4028dc0b8c97303ce455ee31cc540d70c2401581f192eb0b4945e5fa32aae7f2b7218136f27e6f9cd4a40327b991bfc781214c3c8119c2fced8f264e02e246132799c08de4aff0d20a1f18cd07e246f2392a698b46874b4254ee7cd97484939ac010fcfb8ffc1b2735d94fea09846de39f3ea728d43866c83885043c028c12187800f2b381c981851ff0898250acdc23982b609ec62f58c62f53be3ce5d7cd3d7e51b2e4b5f4dc4754a65eeba45d296d4a29ed9e5d69b79cf49372c7e49c03fff629e7d396dd32b7cc3ded5b1ad2ffe89cb427a5e00ad7e8400edc28edae3b66ae5d670d2c25c6f229eed92bf798dc3dc7fcd9c6657777777777b774c2a3df945399be94ae00d19f07e5c1e5ed5ffa9ca1fcb67572b6e3dc662a876aadb3f6dcde821bb802b9faa0ad74caea5e946008c8a7723bc22548ae4fbdaeb376dbecb66d9bddead3979fdb6ddb3ecf1e0eec529283c9d1af769625768f443fb95a25b9ab97489ea421d7bdf8a2944a19d2fcf261b5be2d84b9bceb12959e66b32c82d9b359deb009e32cd3e7d14549168e1967961b6a96297ee11ccc77d6cb3998df5664dad3
1bcc6f5610d75f7c79f3eb42cfafeea0fbb9fde6e26b39f4cad1f17c5b0802717df27cdbbdc16be69ed70baf7bf5dc59edfe057a67371a8adc831ce881abeec1da59ebd4521c9b97215daab2b322a0f414649645a024c56956633a9965ea348b33cb1d6e7dcec17cd0e93a930b9ef95ad1def9a8248249a0f9bd72cadfd36bb7cbdd7b57b7bbf77a9775bfdb3a6a8918c8222761b8284769d7d1aeebc2ced4eb3617e276df5236b1b4d6dacacaf1bdc5dcc8052ac36ce879a092dbbdd73d92f73db8fdb6d9ef40cb7db70a416f559da3f4abff61ffae06ce9e54f65eec9e7aef350b478729d73d7def914b55f6f02ad3708837e79c9c4b205c66f5674863360b049a44c5d58b76c56d7652c73205a00aa6832ab82ea59c4b8420912b5fd699003a9f86dc1c407f922bf95482a2922295dbc5cb014265bc96225cff1ea54cc9df065ebfec499ecf1931cbf3bf294e1de6df59a9f27c4e2afb2be7259c04c935a22bf71a1772b370d8bc0300b769781adc1d5e278b4ab9bf43ef270359bf2bc3afd5c886ef86610fceb9b5c7847f979a687d4ca5a418833b9f011ec7d91767b9afdc77398e7b6a69bde06ebfd454ed7f100b6aadb5d65aeb07552b67ec0abde0d5f00c82dbc6d1cafd0bd6db3637133c73cf6123ab495738c65d6aaabf5ad5caadaafd5ab9ca6df38707c3d3409b173ec852ca8ebd27839c5caf0f56bcc3f37c133ccfc746a6dc2265f61cf2e75bfac3c440c6384259e9cb2904f6e32365f78b23dc7309f0ea9fe6b87b1df3dca5a68aa9947d1201f363b32bffc9eb1fee76e267f173d7405d6f3083a46c7e8ce98a41797e90908ca2192ea09878353cefdcf93ddebd4d3dd75d6a92f33d9092c8256e5d4f80c7ba4d3d6be9675f84853113dc172c46ff2595f3bbf5979aa64bb982172372bb5bc3f36ac7e36ed3faac1a32dbffac8cb970c52fd7afa1099eeddb7723dbd390bedda5a6f9db5fcf51dd414bdfb15ff36fe1d9bbd4e41201f2873cdf3616042cdd36ce5bb6bc6511afabd62c65cf3116ae7f901cb2380cc544e04e80ebdf6ae66477281e9db7bfed37b299937b64c38600b947364ff947ca62295c31f6230bf22c3f2a43bd2095253662bfd2b047364e4680bc91c5f2a6e0f95d6a6aff6ed56e9965f6daa56c49e9366456b2ee64bcf61fe612a5eca016f703b7b8fe548a9e2bbe5ec39d544ad8cb6d95568074403a2f7ef793e14bcaa663f1cbb2d65a6b1d07ebd86af19469ac745d91e6986f7fb270d0b75894b5561c775a91530452c1cdaddeefe5c31269844449d3e44996628b3837ccc0020d30d815b268355476a626cda866cae9c6d929d57cb1639a2a3e8a04068b4d2d9e1659ab955cd55cc9550bb7bd728f6a8480d1d2bc4075430a56a298612452031237462d64ab06060d6c0a5adc90821caa36725ab8026631844e9a2ab67873fa547e851d10699aa8dc69a0b24f77e7bcd338a519d2d2db8b37e7194c375be994a31a36947280b33ef2fb732098024e13388c71820a22b5ce1b15eabca92255431146fddd01304a45700063c509ca943674684cd49a4061068a0c3c406d5a500519625763be4851f9c85eee11cd152ecac8fbc4f494a04f239a25f9858629bfa4cc5acbb21cf7f91cd1043963457e716ca6a0347853ba5bafee421e353d37cba32611652a72fd0a8b425894b12c16619ec451b8855b069b4f9926912c2a1292c99c26d0c34419848929cfa7715b944190fc7d963b8bfe729689929a1c474d208ff2b13ba03febcc679ee433a3c963cbd6caf759965bb61809048977749d2cff84ed47a671dba88d2a167bcc93c86772b23f15b72c5b4ebe21876e9c91ea1eba71464ccb3afb98c1d36d39b9b3d85af254b324074b9d19b08861349acf4d22252842ac89f2e2830ab240319a4d9edc2c81b3a52a89d17cd82452eac1c816467a8803a70d1546b3891f3268c9e20d0e2a3461a494430a5f842011851c2f3a18cd179a44181c71051152beac90c51aa309a64a881d84e41007093946d388931c105ecb3a35e5540a4669eef10c972cca2410a89e76c62ea594864cffcb22534a613748067526cb19a83b8bfcfc90a1f4a70c38459c334cbe806036334ee48ce1c02f677c38c3243671850a2a539c3863e60c0c5c9c0accc942c48920b6ecf0e5d2e1e836cd94214269862e638a1861a5c9941846336230a85814c952059322a844c1a40e38e6c7cc68060a2cdefe29e104470927374a388591827e53fc2818e6a353ea08129e7e48aeed394515993a63191f8af017351a6a3f34221a14b52c478cb54d480a121574e07282279e3ac045072955fc58d3c3134e744f461a4cad4d0d001f2d035a4b01a569c98a2751c6025a3731c50435c7814eed021a550c35315b6a3e6a31684fbed4d60480c60343ed8605d0c0d43ada0bd65acb516badb549e6d474a865510bd393009acf40eb363ee56f7c684fd41a0b3aad45cd862466d0b85f6129062159ac00124351193
5a30b678a6081c18d940b8a11d7d26ba4d0bc4b8fbe42cd4ccd4bad0791227a0833829912d56d96a65ad072c3111e82a8cae13e39e22ed8a8e2429228a46e58820509e81c5106cd922e41c4e0664d112a80aa751720da14354ba686009a2c008d4badad90c19146d4d48a6854d45c10608746c651d0522dd4064093b252eb2cbc507102275968b8220552d638611145992d3ebabcc1c289923a53b0e92da238124e55a7875a9176a2a72851bba276e5041fd09c0920352738352b6a4234166a1d270ca255188d6983ca467ed482a8722826876aad4f896c4d9e93326a297d5bab9b20b956cb24d75afbf5fa598588158b96424d06063423446a9ea59170c2a9b80a401880fa164578063e68a993840f383410c900902b3dcc8132c50526168830f282e64b5a8c16430e4053525353e3683e0880e658dccc973e62cc12b45a2f11a5444d01b40e68676a2e1a9b21b2a8c9a9c5a8a86d408341635283ba52bb34233aaa8adad398f0aa20b52ab534496a95e601adaf7029563f2cd1a6891b14cca9d2c6a7c9155a9288728592a7aa2c2e8638d2502815b1c589599337545af0c144448c152a43ea6059811b2bd95a21908034341ad0fa8d3739d268d2ec50dbe2a43644c3c2ab9873c6a3351617c2854a47e1296837338f424e5785f131bd825a990bb4ccb88089eeeeee6eefeeeeee6eef972127650405fdc84f02494ab76dce6dce39e79cdb86c54a6d8541bb2d6533beeeedd3974f8149e4bd2c61f55eb3386edb7cb6615b69d7e35155f69f3ae4d0c44e556be887a1c31567e4f9a247b9780b5e83177b9cd1809d9f8ffdcce027fe296b28f08b62415c31a8d3318942d8cff721ba039e9f7f631201c97e3e0a9328003f01b09235d454f22f3e062f1665d16d70ef57deea63514ed89eae7861fc7c0f4628faccabb32c7a54ac88cbbde85156c7f4eded46c3fb9243df5b1858ac98c796376c3f984f48830225a0902465331e174e3e4a99bf2bf4a9aec5020a6113480800e10813c85f83d0814c20ff9fd099dc090ec30f31b8c08218e8a52a88eaa7bdcc1b720372c8bad0c4126d72e77ecf5a77c7306877f5313ef6a247813fdfa71c8e053fedcbf94ec526cfbf61f1078aecc786b8625090908c22af92988d94cd50f892a46ca937f05f856e8394f9c7f139b1d08170de524a29250f51286f140529738f31b9d24dc8525e251fe9ba5226c9021ee0e2719368a9698b3a923726d10d96d0e73d8d16bdadef3923adb72dcc79486384b00453c993120a0b4819517fcffaf64ae80e6e6e59910574c8a12f06729bb548dbb7bc64eec51bb925bfd68e0b9e1ece0766dff517849c0f16b9cc4919c78385662c733f99dbfe7385226585620b7f58879471ab304608239c443246c4d654390e96d6033ed276999486eceeeea6ddddcdd16d16f1ec6f76d65ab1e4aeb4a9bc2dac75fb9893dbbdd871f2f7f9e37e0bc5fa9a9a42b8cdc6a25c91f319adb05893bb9ac3ad5e7150985244c82cd7e7f224eade06d0099b0c229b28ca48d5f3b8f945d9ca20da796bedc3fe47d63dbd6f77c296401cf7ac2f84853f212781b81f217bf4bf9abdefb04ccab80eff8ac35bdd3057a714df775dfdee0492ddb6fa9810dd0bc9289a715fbe2490f7d2fbaf4a4ac5f367ca3dae81ca0191f3a8f4ac9765352a19b211040000031500002010080544429148349c2b82b47d14800a87983e5a4895074371388ce21808820084611804411004011800421000318e889e07087096b0dd99a91011fb19661021d233b82963716cc38af17c28008e81cec1c8ff4c45eebab3585d7742a609951a49893e877738406461864488be2172356b181122d8cc13c2c9c1c5919d83cb41f18d781aa5b0cc8610c5992f880b07fbe71871cd6c462cc721d6d20cca44447d26583ec23196981967210787205cb3d114f0f3672e4144ac662ac5e638588d38fb2affba11967f4695908e436cc74cb2b8d81854137234e6581e193c7326df690ed1314c0ad1213cc5a8c8d65a73cbeffd617db8774576525b4634cc705b42cbcd1ca900db3a435d641b44bc01da7983301327d173f0d5ba669190971b298ea76da46543eca21979cbc8eb2caf249383c50de63435d23929e58823cd1b8bb49a8113a03b1277976602494433ed7056481cb171a61d9444729c30c5669f261c37beb891ac365271a0aae4a6a13d5c18197994264067ee3456c381b11c6e505c8f2bf3442b6323a10648e3ab1572869cc44ae0626de5f1f8c126c3cb08d14c5844be8d40409f0daf7cdf38b3258de70084f2acc82448edcc9028f44620e3337994c3e69e5d49f66832240ac76ec373106b5e5074142180cfd84900e99ec9aa006b3f9345d01ba3bf212237435de66d246c83e51c503b66060f22a263a62af71ce1822c77129343faa8015ff72ad083731c53217d2e7c829711475a0ef237443840bdd1bf0d3707b3728c9633832c966c406343cd1c02693d2bac080278f6a974
38fc6d432cf60cf684d01c6377674a45e91c825cce2456840c32c3070924bbcc880ed55389cd520d92f07585248fb4484cc9896f24b03ef322ffda35f77fa21d23a4439d15456f30f95e70474f3bd35886aba83e949592f6c39969a1712d49c153beb6b34e43857f63eea3dfb36035373af9eee4e672a1381b40521841f92cc5c4269b68a5e1e1f50058592a1cf364c1175780f4e222267589758b1d1172f1ba43c19f17b03398bccee0486b0863b30f649c719244c4231d4012b4b651d7c960d8248d4d0cad917802e34e298440b0b3dcb74347d457d39c11b51ff1ae65d9b2178b3a424ce114e91bb9db089b3b336a09e47c064f3eb82122ea0cbf3c33c788f60c5332e5461d8efc1bd2df68e2b8d850d828fbc69843d4467c98e9a78d55ebbff0a73b4f3c11b1c0334f92dc365c69b4259e839636c4e266aeaa8cc867125f02113c330711c93d5315d5dd10ab339328b1b6018da3b90d4b0e048e96e1185b6ce641c2e518570ef0b7669a2522a9b3b611210e662e297f3d36833011f16fe62cbb1ce1c2318235338c22646a6643c8c0119679c6424424e40c9e8910ceccd54b88e86731e5d139c4ea66ae8c88d066868a681b1ee7106df4cc7988c8e6cca6e4b5e10b8eb170674c29a1cdccc48a057104123d737d4404cc36ad324f9c916bd1b3371147daf4e46823e969dbe4dda532279044b493c3512151238d69912a34793eea05a2a5a6e43b6cd7b408036a21f3e03ba3e697662844846166ba8878436ccdcc17e2651b3f8eaf6f64710c27c790e54c79513a87d89719c11391a059c792d3862b8da6c4e7a0c58658fc4cd5a48c1b440e328e626e407183bf8d2e1cac37707354b281c301fd46f337e8021911370515290373e4d46e5818e42cdab120b11ae0eb1d6eba077cc5685c8366ef336e795b036438e518fb2569fd13d88123d058b1114d10ba40a4c3f26487667293a051d5f193d65f2b8b0ce393b3ee82a4cc68c239f76abfe5a03b786406995dca0cf2a946f38c1acd7c206530b13b53097d479e8c88f3a84c4bdb30657dfbe691b015fb84a150f26aded060aa0e193d923e15c1ebb8b0e7629ae43821cf45b951172e53cc30074cdd8a9e85fe47ac2cbda7e954acd8262a2abee0b20c94cdd9f173442a322d203d1fa0cdc79c3dc83207bd0044219a8d194faf7c58098bd09ad568754b3c505cf3d4c30314ee074eed91fd4bb4c2d192bfdc96dcd6d58bebed043c556bd0ad5e25a096e9a667fdb0f2f2b8fd2c5a80a3cfa1d7266fadfef3c22b70ee1997ffb81730f061d0c19a6c3e943605045f79cdbb2e58303c8a6ae18fb8f4d47d798697c83362a0ea40f0dba7211e1de1cc113afb110969ade12f8b4ef9273858ba011a70594dda0c18129419326c2e7f610831fdbb26aef25f88df87ff11bafc145d705e9a9e46b92f207759fb7f3dc6d98002a0d96c081fd7c748d24a4be0ed7eaeb72a0a4fecd03b82c2bc9cba05fe023347a809916f09e6f5b34255d24423cbaec85f9e9aef30a3462e2b56d133aa3bc75f51aaa6f4a7d7a0172801f663fc25690470ea9efd1ee40d328c4cf616908ca1d37cec7f84d6b699ac7bf701d6091b94351374e1cef6ffb97231f48461f6c0cf901a41ee7765e92161700d84246bc2b105dbc44439b8a99787e1e63ee66339dd0bc47c9ba2d488fbdd5aa7d6460c70cbe12228eac35043c7d5aaacc30f7b0ff1548b75ea90c9df97ef46d92d0d9941c5930493ee94e4e21554d3b21e3acd4e4b57a86b5d29835a79b3fbed14d885859a815d4eeca86bf2f451cda44f21052710ab643a2e4b7590d53e50721d3e7004a0da3589ade8d8c44070da03c4cc3a4141dbe464a7d1b2b1e29e47e4e50cdaafea7e993a259bd35e7946a3f4a2b650879894baa08b61389a5d6a4a3d65c1c45ead7eb52ba5935c522875607292c50a81e1efe042541695810f550d2e578fc1bf5d825a3db4e850dd01d3725b4a6786cd257e84c21cbcea38381d29c8f1474521d579ea21aea764628abc365896860d9a4b8776c0809be1e133777d22ded197d94bd3328c2a21f345179ae1f81b3b0ff6414667faa0f4958b16bbee026b2ba1749b0d2d6a56c8dac85e7661a545ad1a09a3c9a327c4a23d21edc5b4a6bca3e0a92d6407562a2d0ff464e9bd5ce94311702513bdc6ad6d6aefa80c639a206596bf0c2ae25407ca6ab3f1abcc12f98597b7337d79e4cf9a42bd23cd4b995f0122d7218c8cb00f611ad3280d3b32e4ae01182bf285162f48d8b5f6d5e63e4786a334bc19ef46f539491568d014b733570b46cec57eb8c642c5cff8bcaf64bfa30cefcd2f4f496f82c0750d3ec0c9d13a87a77409722e79e0f789f3c066f46961eee7b837b3e51cb44f1847330b43a92f4926eb431bd32e077989b198aa705cb754b5342cfd798fa9ae52332ad8b858cfc25bc7aa44d66669fb80287d7a142cac268312cd46a731599a02788680afbbd74faab939c7ccf4713372ef3f274a797
84a56c448b133f9ed92dc4f82c197f1d32ad2feccb94b6e34d407a8af18672452e8b5eab6442205f0dbeed9210a2501137b71550f635868753fcf8331039f561f7d2cf2bcaba9af1342146a4f377be8ebdf67aecc1e6097ad72cf6ad680ffc7185663559e6dde72eb885669a4cb694c96054efe35dc254b653f9187fa25dd729c2c25c7d1851a47b9f39e5fcd335f92be8c913b1aa6378a0b8953534961765834ddad8248dc5ea641b313bf566f3b29fd2998cf1c87c48fed56c43dab953d0978223e19bf505c5d046fb718709bc082f9e15aace00595945356931c187b9675ba296a50f981959a3cb9c11283647e2e1981e0055312ef1c0fac7ae0ac187e883950fb708f38a022e46fe3867d29fc75ce585f8ed16fcf49af3a0753477455474f0061b23269babab615905836ce1125003249e85ac04bd604105b0d7bb84f10080c14e4023cd296fa96d87ec4c224ed8b483916c4edc3d58c5aff682da2676f164dcc70adac854544dadeacc1f60ca0ec64446b565f4ac758533dc59d8db3c0c0851e72464ec6be59d9c758c440ead98d7e087cb34eb37d70d626f746ab9008e5f4e39a4652417f27b769960947c23c6ef9965980b36f7b9ad2c02bf53e96f7f02406706dd3f6576960140cf6e65183df3e44bc7ece2a933aece8390cc59e8072e16e2ce75f0bbab640d7ea59f77e70fb2604cfd8109e9835b149db65e50c0079eae1e2721fa5290eb866a9fea4b7e6cb87c2c0609888e5c10a4f03f49b1a8420e481f9fdf8a789d65e9b4dd3beea03f17371ba8a39905effff78cd9cca332084668c284fa8e847bcf385d6f1c9c0704bcc3e1c6c6ecc61e1cb1d42cfad49c2aba0245ab17c5b7b068557c169e632a54810d24c85fd0ff663070c0f64808357ed3cfdaaa12ea90ee85dc9d940cdc82d07e10320a560a7ae4a9d38ce220b7726c176d5ad42ebe0b3a408b9aad2d65593d44ad4554708d90ff141113551578960269527a40e5e182a1d88df58a69e3254b54aa7f0548ef4c4fdeec3ee88e56fb9490084f83f68ff0da1ae44f2394e917bfef2014c4f4577f4a4a28ae2cdcc9eb6352e1af656eee3353ac9787aeac67ec59cc5f612c48fb148e254256644f581729cfb718c2dab8915b54a1d483f7fd74638d77b340d7117a5ebbd17b88eea113f49cc3d61255a4433d091514a027586ee10ed161a207bc01adfdd89183c4922f16e576dacb410a6615ffc67ee1072ba28e4841ae41404d3a841f5232b32b6eacb13acde303507acc786baa1f55b41445c8c1fc7ba10068815e1d933687544cf47a5d5fc7edfeb0a6d48f20dde6cda453d1a9b53ac593e01962fb27cca61eb1925520018a5668497db2e8423441711069045eb3dc9b9aa7a9525afec0248138a6aca329d848745f534cdd716dd6c2a137a8f2390fcda328f1235597ed31ffd7dbec0278b9cfb7f9d5a2fb24ea061dbe08082894de1c36188889cb85078cb5df5bd1a7c5c89c65caaa72315321f6a80bcef86522ca9f4488e0e5d0991c5bb5b3320dfa2f8c9f81f5a7fa13814ed455ff4153cea38a78a31479de365be88326a263aa67fe3bc0156fa19552c00da1ea1216e976ac33318fc7ad05b45c5ba4aab22d2de3112f0b9eb45056e279e0da1848b247daae6a0887bf15a553aa2c9461117589d14525570e08224d2561af9a1c0561cd8d8cd4614fabad61625f7e5eba89631b3ab9ac2758dfb89312d8e867852388dd878bd366a8eab32b0f2ce4be7a400285ffee0c69739db235dd8b8888d5919690e1774d1a79fae993d959d280a4e2553b490a83e8e86b1a26612c87006112c4fa67fda2bd183eb5a962acae807ae66059cf5d52c8eb5c302880a29b719a5ada1da3689a18b23794b6c438425cb894a0fa2b417157915d576c1c08418c41b82e622eaa4823e2d95bde890ed537b6d0d8f44f2a313cb89b30653a14c0b676f89ae343dbd67a4446fc9e0683379f40ccbccc0926f4b6c78d854aa75a5698d4ae0f31a8fd48d9a3eb82496995c6274dfedbb0e8e899ae04a823106cb315b329401f544a089c26ca2248c2046ba01c47a350cd1e1f5f801a73ff6632f91e7be66d3375e35d971c31d83191cb8539278e1cbfc4978395764c9696dfd0f7468b6bf9df06adbbdca7795472bd119d2be1a667a47fb91d7a08896e33d420139c5063cba28f9c5f57e37eab5541e2fb9a952a284169a3e49ce54c7e06215c20637a7d5b94ea8a95106995096ea83f4c8180445b3fe9e426d8920a23bbb7443abcf0518c57c746189965941b8741492e009a9bbba5e85f75a9c1da497bedd0779cea1f94f59661245872116b0c3240c7f0c28dce80db161330b14038afec436ac2b81545f20d55e9fa1b404269372abd4180c7ca1685adfbf83bcb20295972f1d1352212c393e0ea120d223700fd86873010b647adb2e904067f87e72770531560a8ed977a8e5d06906eccbac9300b232ad7a057802a8cac703dd777a248dfb2b8ed9bca2cc5ea3
e82ae4d0dc0d87b4eed6cd545a602563a072a348e5a66b47881cecba5708aceb46413156f2262a7eabd4a4a68f99d5e465160430296486f755ee5971a65fb362d48300b9d0a48333591b58259e6e38d871597f272ba7d63b858292687703914698cbc486ae1e38330185d616722125ef51a7a81eeb27045225b009d9b2eb511c0d94124ef18701185c91c399c449123655cf6ee3b6f1dcb5e2aa3c203846cf7cee1e30baa23dcfc56134ea50e89e567cf2c4e07f4bcfb674940757e26956bfb7a811abf803522b225054c44aaeeec98b5f5de82c377a975d480232f15c0991c6ffd30c3b77f3faa6935fde2488adb106c743f2a133f5c45746519c7502373153c1e18bb428a638d6946e7b544f983320565b2710f57f7b349d8aa7b10c29339df2771e07a81857252ed5198e1e56822d84df5dc6212d02e76a87f0b9b6a5f2f2705c32905e4c8dbb9c3562b1483f55eb3352d1a9af3f1b9b9ebbeb205b809ba8e1712901325d486ab1088a797e595d063a16b598e900cf7220c5aebc48daa667c30bb654900ae8626afd641951b07c6dd4f6411042d39086eae014dcae8952c1cf03c7b5cbd8630da58c2fc0dcc137ef28d8bd943a794c243b36effbd9bae696c1929b6d838da2650738a6897052ff0734e98c2e2211c424eb44ae82354b306aa75e783330c004933e55e79c66f9ff9b4d8eb6adb8bf957f89c8e664890dbae31e6ef7886508ebf3b33fa1c78298db9a5cc59d18547a5c005cba4ebf5b9a31929260988341da39b3f6035d6346e8cafeeb30ebb55a836927f54057095c32739bf1e6fa332eb257977bdd95f42e810b680a2c1ae445ecd5d24591c3086191c207d7c29bb9d57e6fe7f9a5797634c1eb338e9010fd2fceba5c52650316a1d0a995aea3ce871172e1f59557dccba85aed00a70842919b8d9406353625c30d41e8875eab2f1053c9edd107ecb901bc8f849e5439e0c6c0b7fa9e87a5425caa55618dd4f01152a8993cf6da0f669f53db979eca801242d2f874f30454ca3f41c58cfc1eb3ea265df52a3603add21629b6f420567c14bb792a969a4efff0d37d12b2290628d9bf0ef97f5c70bd55c13b2ad472db14f7ae3e893ebe9998d82285d8246fa59958529351bc58ac4f9b55dd95af97ac21b015feef5ef1e3a22c3386bb7e2ea4132d56b2693134572284d3635be66339657fb127d96c90158c22af1dce2e696a02b3a4f812d9ba574e656eeca85ae630e6f15e69a7cbe9e010ab7864965d15534d9a53bdc76cfd38b2e8c816f668806cd773faa13dcd7ea3731cee2e03b484012cc7e941cda2a538ab0c585a926d2f0bab71cf25530e70a631d7860991953888286fb9cb597e587e70d793954c7c13010ab3c9726cf3e2b087e9c619b956d334130c2aef71bd13ee9a806bff0364a8071ab4832455bec3a1027705a15aa4a919b406fc9da0ebfc67a89b6f063f5b282c5e235c8b7403d3a3f3b047b6ac0ace3c9cc8cec42af3e61ef06703de861bff39c56c8d987204977da346fe065bbe0304c1ae6033bbd41a651183fec6ed55f56aa6938694dce86b28f5bb86ecb769e15a5c1fcf9c571d828d8a5b4937fc0a33da9cfb045cbf3306f7ea5d951c1805e152f5ec358bd69b0bf082ae98988781e6d9b1d3596751a56a5440919a7906b1b81e72024660cabb97ccfe670d95b40a28c5f7d9f388a41a8def465d651abc236a18c4cb4949ee703f7cdb15d87ae238bda52fbabcceefa36c006d136ae0af0a48e2fc806006cbc3eb795f342c8d7ab3cbca83735924c44e0b7018b9a05e91712b5662ab2f2b5ccd5635c35c58e6f0307f5536c1677283fa912f845ccf61782b4416ad584211f108a043cde0f8c7b60635261b594d1272cda6e46453f36df86283e6f649806f31e365e193f8374d431721ddc3498fba8187c3b98f52e0444de4e6728a5f269a9b299c99c1ba8e83654268cee2e8ff130f5b066c83667c735d72ec4d15d87b42468a8dbd8ca7f2b90a1f33e917a806ac4a10037ac7f68c52cd05651f791c49941ce3b4b3bfbc9588e3f1e9430968a51614ba8208084b5388ed8e43d5542a923a33736b5bd56ab66ae2dd74975f038a7f06a2103cfe4951d10861668a2e504fa8458c0ec5ee3365188f4369bd7e2f3a7eed3609f8fc1d18bd477228fcf0bf49579444b73522919e55c3935e9bd193344d782d237da1a82c11f708f6160dc9a65c28325899814997f8106e0019c28e3c4009492a8f7c296a2fb14bf3acce3f135f84c29a0d8002fe78543dfc4813932ee0d70bbd271c8f853d98c63d31b34a4da1d08420264d536ec2508ba4bb5723903d49d87ae0f5e75c160a76a66c1d5a6a6a6bda4ca50cf3eefef9d21816e5451da157c323847b4e2cdf2a454e3c65e30301bb115efea5f1e1eb997b230b7d7822b83f19fd03c601373759dfe42e76118d3204f1e2b62f0eab25baa5b4e04049c574cd4a1ac217e24d98ab86c61b0e2725fe06af2f1e6c68482eb99492e2d
57c3a7f47c0151480db14aa56ceffcff693b9b585c0ad4251bed21644d4ad98dd335b250d71378298c842453bc92a11c3cbc579e71ab42ff5caae14614e768fb59434b08e1a748dd9695f6850583e0e695c0b720d2c90c4a26b3c60d17e9868f4e28e1e9dda5de672982854331a1cd2e7cd354239381ac75a06bf56bfc0bb0513f3f1ff7f24cb8bd5cb65f9a1d1c3a2eb8f8a3900d02d50d56ea19cb07dede18c7db21b5ee6eb39592edee7f98441b008268dd75d7d1a657e848690f0984d964109e2d0ab246692fe69ad119d566cf0e911f85d4da6b8e1090e07e3408535619b651b6391b41cc262b1fc74446d9c1ddf08379300f7e338c7a6a906084182aa3a1d5047061fc7b93a20457f9907d374c67338d810c1269d13bdabf82e632897c76a986d4e2f5eb72e31309830e10157f332b9e9373c772e6ea533606735c41932d90caaa044fa9e667812ba67caeef801465f1061b41e6873e460702c1f293b0f11e615be6cb56f309c040247ad9720967efe39fe59a3c098c9866f926e5ee28b7dd2d3779b9ce553d859dbeb96611157252ee2a8e5e38af5ead72c913605fbe0e205102c9d1c4b1c34f319c571eb57c91bc5340200ab412ab16144fe7a0c618e940895732fdf587841d20e906a585d96041d75654e809ca63152308eab23f892987b1680b835d6eb67811aba3f23f9dabf7e75572a014282ac7124372e86a07e5355447e9174d7d18da5f62417f66ed9f388cc69d6db6c2c7f941c45e84b8f1f02443b92f6731a1b7284b66a5994fb33087ea167d247ac4e71690531135f6b5632c409349a7cc5f26d8922c895e11a4d431af643acf44b73c13d545ebb585425e48888c1a824abc3d54a023eaee87066c9b911ea873131c29376118a22e11279fa29d39f8c7b292f22b877fdcc41761ee2c3f5f9fbd9dc1f14b8554c9229bfd08532cabf267e3e0d3185c2883ba7bdd22e6c722ef24850f4ee89eac3be9fd6daac7e7f7e39e96aca0a45216af7548af071fed672f1e650444e502aeb18d800fdd6afcd342eeb0c264f752a1c31e793e70991abb851571ce2c7c5a5808897e11f8ff99aa31e5c5c31e62c4f0e442a5e9c7d05209a3aaa562bc7c7ce71e31f06e78a111d5a583b2bd5411980831bbee6cf55a806bb91ef7fb3d7c74358f4f8a39670a1076ab7a92b9644861fa041f66fe1ce261650a611f47711abeba4faab15dfc640c4d1347c81bbf81ba0726fd7f31eb5ea84c8b0e0f06c915ea1a1692e7a6d0d4df8223016c02c71791d8dc452c4d40782059cf7e2b545de311949193079e22347aa1efd3f060f40cbf0ee89e842e831be539d7686a01a8190c9ae32c7421e8102335b465702b97c5b7a950cadb2ac1b8f32fba2161990f66edefa2d8a3e9884a1ec0d8eda1e40739c6666202a681ff45b8926820ffe86a41009645d07530681c88307a2d39a557fe46c77ccde6690b0398c36c851ebc559e84d53d5e33ce93153c2b433e2869c97465a4296afa1b1763c8a3c190461c69fea0ef2b3d8ac2885adb330501058d86c30b8257a7c5f85d8ac25628eca1bf1e0505f2d24e4b4e24ff08757e98764298ded2b536bcf0542622c039bca5d20551b7f63ad7f6307887603d6b8d670c669195842809674be192de2c283b8dee93dea003e7bf2afa499d8655a7c6c472549d7e0a34ab819f39c4afd905dd2679f38667f9dae903bbdffcfeca7b8b9648d50e3fe72f572c13eff4d554e3889d8f415bd055251e1a94525880469287a5b45ddd4d4bed87d3888ffcce940320d17e460c2eae81d1769edd1768825577f7c78e1ce725749d3f9058570d064e7bb6db3853c8b4ce55e9b65aac2a5958086eb9478b89c8dc219f1937cf844de5899b831eb178df72b849bebcfae8d3eea446e200ddb37687ac9afa34483a4a893ee301b59c10a534a56d2563249332a01f95a1a7d34f7c03043c280c74908f365d2178747d49f733b908487909f312f8adf759ca71ef0dd4384fea12dd5860d169735c66d54875333bf466af3684360cc1178184b31f6320195a8610c77296f674c03f26d3365317204499fc5158b81fe24b770368648be9fc5bad27d68f888f68d30b1d9d9ee124ea506ca2de01e328c05f6bf61e5510bf9c50a9d37ad84111830b94e6e0816b7c8604475eaed37c0dc4bae8d6642d6f421b3fdb9fc88732b1fbe44e72523582ea86804b07ec0261fc0439ef1c9e68ca93db3efd3fa9586be0061a8094c3f46c0c2089a00726ab976eeaf1224e230e88b27d1ed50bd1d013394cde03315c6326a36a8a451f2663f3c31a15859cf2793e05f084c1290d97a5e9e0bfbcbe01ebcdd5a31dd4a049d052a285e497c751cab2c0958b89a51a36a050ec76f0eb66a0df18d4a400ed6cc3114b8eada7a178595f0a77e105d8fb3d69da482dc402869ed6dffddf98f0b3af8effe557377409cccacafee28e96c2ca9ce7507d4e2926d96c015d061a25770f049b0145bc1bc02dd6e60131df550d7
d8862461526b18e0567707fcc30fe509ef7a2e3329cb48abcc067d4c4360fd325f7ee0f6a8aa72d60d233b70bb315ec9c99b6c1009838d2b7063afee9ebcd6d50ab71388d6637869575de32ac5e60463b44fc408ae279dd6d49be0e7513ba05e5a7f424767be69e5745fe85d73af877733cabd9d502b550d5c4c65223ee397d00fc7ff18c68f418256f193bb655de6661d064079591522db77d0d178d4bd157305e9776440240a5e6c13cc884b96fb97f80531a5dd442f9459da27f65d746f63cf8812dbbdc18231b5622b4ca54a8fd4237808374d7f9aa8623686fd2eb99639dd65f3fab52e2afe10b664c4a7b079b0c92197396d8f2d1f9f5818d8b2db75a2cb4599b9aa5659b0301266b1fd5ad8bc01212803fcebf687f09a56160f40016936c25865646786fc619a95b35cca58531da0044ebd1d2ea0446d1aaa7f624e7086546d6f8d84487bda1fca1785883dc86d7f359dab00a6accd7cb009b240409dd36b120d299676f9b37d91101bc7de1fd0539c059dff5ff0dabe7e880052dea9101e65185bdd616ade81572831531b901aa2363252d9c9f409ff952b2ca23a82c29bf7fac4a3f81681b86c8211f9319aa32e67fdf5cad0e24b1d6908c1ae7aa8d77fcea45c9e211ea0dd6c315aaf5b93adff20b65132df3ad500f367a34fb359062421d9619ea4a254cbcb72f1ccc79f47f6aea2295d43e4353b47b5eb7527205aa93df6696c28c63d8edea6413ba60a4b4952e794b0654a64697c2ebd43609d80cee287b7539e33290c1dca03665c6d3bde36f1dbe3106c154a92cd7726215d1611f3ac6cdafafae7fe0a555800bdce402404efe89a718211ae7a3f3c08386021e6e2b773d90ee48cdb98b338c38d60c517a8b7777da0a2067a75347d82c01782728ab6364c9f02f5f023694794ba0e04189f28bf03a958efe29d30b07bcab030930fbb7d2d69c9ee7a81eab860c086786a8931f308cbd49e84ef6e265c1447b62a62a805aeb3bc354492adac09b898bc87c7dbc0c8f550a4c33b5e24e90a50ef2b0e563a083bfc1ff6e784f67e408b232223958e3dd05cd1a5f2c60f5a442618a504043cc6484b9889f0307ddb6b36ec30a119303ec76a1c1d66deb3e1e7bad923b27c15c8310eb892bb9cf277466bdf81a4b872c1de33d2960698b2a487e064c152b1db5041e8109b02026a336471cdfb7fee15908041c4b289a233f8dc178276d60122ee094abb27169e8f85aaa7c2d1bcb4d12587340733bc181eaaca79cf703d30c212e1c817d2e67cd619c4df763c10c1b04b5e4fbce37671da8f606344fba87ab551e35d1a1692d799a51b61aa8035a9d1c99e7961dcaa0e10cc98531dc469df4a000b84ad912496b71118a7e7dcae00a01bc4c4c3620201cbd80cec4152a81f0bfd864cb8b844dc9c07c6d3d56769693052ed39e8cf165c389e6246ac8ed8cecdd0a34dc600cae71018d010e4048ea05ca7d6ea420a40d0368fc416e27c1a0061bda029aa44e14e6b02039d3e39c0632823caa3bfa116c4183741b2f94754a8d505c6986d7ed28aef7301bed05e751aa80e28d41c31a3a5e89e2be3281c5190775da42133fac3c726fad16b7058ae3fb500e36a43d39632f960cc9bf9856d5d06f9efe23fdb640676b1d1ed07df4315910e10a7ce7ae9ceb12fda92c9c61aa51d118d811f78a22ab062d303ebd1a6b75b13db822916270bcf507394ed0d49111f233603cf9086ed434a3b4122079819d99a1934fa55ccc03cc021daba3fd13a3c9035d9ad064fcf33e45a6d8d68a9f762491bcf9bf9a2caa470269a3821bec432f6d9b23b9c354fa9a6ee46c8a180fdbe6aa5f1c68120681dfe5125eba1e7d356a516b6ed0e354831e7bd98e87656ce4cffc31e603e7292215d1ffec6c7ca5d0a1d98cc03ac9aef4fb8c8647e95faed890259571e938cd3ecfb6fb13b9ae2254e852b3967d07bfeb0dcc7579e5dd62dd05ac000a821401db46959cea0b58f7b1dcaa9a24792a23212141c8b11d4c7932c8be7a395d25a435029d8851a830327b19c4eb62f9d6a1747aba2847b7a6a341c153312d1800d8cdabdb4d0f18ffd1cbd5eb562cfbb28eb479c9b9fd7324b40843dc6b700c671ccc24a597f88939973c750ce32af8961c87aa231bac4a3232ddb7f680fbb7397bf850de52edb66cc652e91636ea8dc498cb2b4741d771beccacb6fd04a77c4278bb8a0919737a72e84cc4409a5337e04c18029d7c2858743f75bd6370b69e3612696c825690b079609ea40e0352e8d60d7c72892fa11341053051164f8395f87cd593e2ffbc753cb4bc7b62918d80a3256486fc58619b4cc7425be6b6cf26c66cdac0afe0c02262d53ef5c133c3595bfb0ea5dd9c5230b13963da25b14dd5d0a8c5048e586117ae9066e370065954cb1397612243279e6752c121da3262a7d869c386f9d34d3b7784654b87c3e760122ff308606376c72d26eaac760029ba561c2658b84a86a4b32df3d4baae870b01930650f65de606
52badd08153feb1950c5469e840678e058992bf2a8811769d1304a84a5140a719cc4a61f344492f9499de48888f698b678521e9cd9b43f2e0ef6f416a85cde42bbed5339b1a9aa273efb0045a00c6d053672f312a508a23388f337fdef20b31983038e27d0662099ad177bc2f144620911b74505215008ac66646175912a3bf0ae4a788eacc0c2ea80a85b18117ccd8e4bbde51f5a34170a0989ecb352ff4cba3b54ab825823e4b794f4672b6eabed28ce3edb533ccccda382d03ae26c1d6b2591a374fed14b84334b709820a1ef9ba6ddf65d4fc180f8257f56d486343eb5ccb640e2a1836cdcc75d83ff7fcf6ad5ff8c6706777d132ffeea82154b603808372a3645e367f583982744a225cd159a48f1e34b9260ea66a43efff0ac2cf0eca6c3a0c87890205485455268d095083f228e8d3a9eb46a51abc8910da507f92597aad9adcf7bc411cb2d99c1225357f646b34cbe0291a139d22c8e6d3f6ad88233a53dc4b551019f714dd14ad8921e5194b662679e5cc24ab5c83b43b7843267035193236d2f7cefd081be328cd26fa28df5644c94cc9ff7dc1438f3d5317e6708dd8e6b1cfc0a0d2231728a1b809c74cf2c0cd1b92b076a25ebb86dbda1058cf25a9ddb2a7baf7d7c1ac1f2e47c143a7460d36f9002064c6165c20e2f2854d0631a9c9bc2e216855837adc7a20a2e2767947711dccb28f21e593625cf7e6af63a4a1606235fe62fce5a7c53a531e1de0910e62c9d9c3a92921662934010b54496d6831bd0c08322238a51fbb3313d5f4690152d9c5567feed56dfa27909559262c6a2948fb72a75240e1008a5e9a8763d65e83aa46e259fa4028b196122807e7591007e47b692c9f875c2903a20dc55e9a8a3f311f618239afb1c58fc3afb48b9e2ca1bd22036aaf9f284604bc5040629f50698f69aee27aa9f832bb032628d84fdfec5837b19d624def97008f12d43074f8cdf3cad2a142f9a5aaab36b08ba64cc1a312cad4ac9f343415721522232e23288b81f2c812991d68f1b2d284103281044e2002139880d64f422788900926ac04093941040c0982422df87d860f04635daa1091cd15c1211e1a1a5b45dae5500763a1fe853cdf15b8c2064e1d79fcfb589687a842016baea7aafbb2e19a46918bd2e5603e120a2c5b7f46ef07a214166f9895d457fbe8d643c8cddda06f9ea4613ef482b7b0bd991114f7033150b2398f55ab9e3466099272c4c36aa55d2b2513d7bb3792942f12cfa68b0aa96364f676655cafa54b3bf8b215181a01657a440defb6e824e420cac0c23c98baecc5cfcde4560004cd0989cde3c0536c5e57b84a91f1827239f3e9fa3ce0b8229ff6efed457c36869e9b68415ae6e56c304b6c2d3f70765e1663aaf4757ca649ad54ca46c8b8ce81a60f757c24d5f1ec76cc13ef6c3b1036e0bf8bdb2ed3d4f55097685070991e4f6744fd2234353433fc6b1b3bcf4063391416470e81cb8a8e1d98cce309c805b9960c8e61e0a1883d41d2ca4cd1baec0e53d02b486f47d264aa66f5280de4e78ba132a0574bd1abb88f89f6222bc4063e0a9a6f4b77631f7406bf1ba44e22a6786f948eabb7bf855a8c7acfeb2d6eea729c9d792edbea96a5c451ffa52665538d6c923dbd3f48e041f964d2a5e48a6470bb26e463db66ecbc177dc70d639dfc225771fe7f1f56c0e47f289fa89d3f69c86eff669f6ddf8eecfb8a9fbd8711c59120856944d78c09dbfc9dfc12ff302317885c600d55b0482c5de75f3a544f39dfce22dc9ee702e69af2691c855698841b7d5160ca45339905ae43239a1a11d13c47896885e249dfd82cd0fcb3c9ccfffbe0f08dc4b82f21a12fbbe2e0e9750d6670e4a5e6ae8071e25af0d12680e662270a00aed20d1ffe08405ac6298d62cdaa85571186e964ad591d3bd05cf43bfd4beecfa13d0aa3f274bfbced28fa7a86fe03f28df14721d3c7ba6c72a5216317310f5e7e8790eacb39e0aa62e60df5aaaf4a3f10842d0f4e4d11d228fa2cdd0f55dcc69741607840680ea3b5a551dd6b62b872e9aa31462290dee01ca4b138a0b155bf5d31d6acf52358da40c8b04671e693579d3c862967e46da84398215bbd7e5b2582003cf6ce2683f03c2b77104346816fc44c9c49829179f207051d619b1770456e4b525b25dc867c9e8219486a2c40a58a8602f77f3a1c4a0f8efbd3fb80660fc29ca9e80ee2fed8020d81115c9d034196320654f4b81523c0a2151018638203831dac49c1351173a36c29f39a84ad5a5052309861d58fb406c0d43e5d4c33aadd86aada4bec1bd083a4df754cc259e25ae957da2c9762efe30c8e564c33da3e99b4e9aa9b2417c3fdd9118d888c7620c534a632e5b54fa3ff10e93e8d84cc16fec72124df714e0018a928e9f5c8f7ec3f9f8a04a1a86033d5bc47bdfbb42abc65a9db220598b96d31ba79b52c6307d82b5f07e8c6fb64db1dd3bdf60bd06a5e9f406480b2ccb238ca9eb90c66beaf119eba242becaf45881af0
eb5632c2d62b1628032c9dbce3bbd158e05d1d8ddfb205b55d144f631f6a709de24a7c38ab795d2bf7375c676e0cbf0fcb6795f9e9610c3baebc2a11d967fcbd53bbe2be68e453b7bed20be91e154e6743d7eb323d6d37d68a12e80728d490c4cef85ba9bd8ac813227da0c72e5c299350ae4009ead31409081459d0b31bc2c642c9b918c3b4af9245c69d305ef7f9084796ef0b48008efeecbb7639f729a322e8a2f6bbd6e42ce4be95be50ae149e6a5c306a9a7236d3578cff3fa6b1c0d55ebaf90699ce35f488ccc8d95c28baf28d1a28e57dbc3eba823acda217e117158ac15cc286b815ba3704eca21a24e7865d05a2744ecd474075426e335ceaff57f2501da27beed22b35b0b80a690a97fd142f582d735e553ff4187dc1f6a80991498ee572977f8a3b201e57070f39e2e020aea1531620cb37368861d28f1e12c4a3390d5bac8ec29487cca9f7873a05f39cf9c51a393076bab0baf2112ee3e90db4c93adf77517942867b6ec487e6a84f4500af3e0f1362c491850ea65bcd2682883212c766c734a31ac951702f8f70c0635baa33be439db3ad0e305c27f2d5aa8a232e480922fc1107cd32b567cfc2137ee34e480b4dd3239d4f129f07091ce27cf990be96e889e721974f10842319ef118612f73e67f8151c4326c31bc6fb780987a069e14dc13b248706e798a9145ed3170d7c58ca45ec26d70a7c281954339c2396574c2ef2bd21215e434c64e3735480e1da715f766cc0630cb6acd27d6ab9b3092510df30d95645cd266abc034849103dd65b6b59a53307c467df5a0bd896f27f4104a8f0165b74ed81be94bc1d4a78ea1c5238101a1fac7c3eb79fa902dbb33876711c7103c035251341e7953f63d921d594414d6114bf297c12a4495aa88aed8b143d1ce664a3aca4021afcf56d5b300c2d9d0ed5e4301a0e93387650f815380897a21c5f388c072d7c9e3438581485e5b004edec2f6538b22bd085f932d7c1f1a920387ef885c559e8cce65b950217c5bb48f94270bc119b64021b6d94d56d6defbdf7965b4a29a50c51076606010709b8f73c3bcf3aff14bd49ff76c36972f0265f9cc33de8f0832081b3e39c1ccc1973f271d4a0ba7ad0dde460cfe78932dd72399fedb7db2d97aae769d6f1a3e7e31adfaa4baf885b7136ea572d1f77ef3daddcbc7ab066cedb635d3f5510c8d583aedf2e8aa2b7bb7ad0e1fe968f5375c6475f645a7155dea7159ed451d50213520fba7e9fabebdd649295d3ca1d80f827e61e74d8e8b4729e9d56b2cdd5e5d0f83590aebb8ed70c7a06f7c5b08e7f877f279870c5d20e0d6b71fd444f0b84efb5b2653f3f5a4830bad7ed909028e47e9010f941c2bde018bd58d988477e90a85e4c786161f3f383647c5ddaad90490c398487a78b5196222b2368ba15322919f938068d60a082906988b87556d354f7ba6e854c537d8a6975d25d9ad371ce794e3664a6162952a624cef9d619a7a9ee5390e9eb6f537d21515249412e491a793852b15b618a30ae13384ed4308a51c442c4d208618aa410c21266fa772b4491d66bba15a28052648e4ffea88e21e007dafeb318eedbdd07240ac6275f4803fb3c3c7cc7c3b3dbf554fc7df6fda94e492afeb6264cb615219c3a16aa08ebf8de17ba77044642154c6676ac030a580d0dc817f4ac6d94b10e2a90c14f071f540003bb8e81ae5b61092b3d772b2c51a5afbace0ba2907381640558af888bb764b563935a73ea6db1d82929a1c455a2887765865706a63744cb580abb3065bb2c623c32457a46bca4b07664a67a266a24b940216989272469c44e4a0b49c28c48db01b9c23b010aaf8a92dd57b047c4849e0a1fbb1472ec82401d69ea9140210289c40e0c698887b696d6a54a055e14293d2c263b32233082f47e640ded18da9011e30793d111126feb08048f68ccae6d9863851e0d2a419af09ac27a3086386df186f0e8e988fa32a3cb8a5d171225786024420829d38362a177220b4f85191327bb27495820c2b3d1b51b73d5f381238a868b16bc1e462678288c44f44c38d37b52e395d8b253c14aafc8939ed7d8aeec042720bca31ebc2faa1ed8d3aec90b13469ce0cd1861c49b6aeb8510d3c3c18507e28c67228a7049ef8510e98ba7f6a3d743c75509bc361840529c20e2a8b742c487dd11b61d9a17312c4953b4c87a238e601182448b47958f1b5232785a54f05e1425f56a14113591e13d7de9a540db11a91204059e182444c076625cbc30563d22a9dd10275e158bb5104a3b2b218a7850ccf0b0c0ec4cd09296028fac4c88d80e4a919dd719ac22533c2f357828b8c042b4d463413462e7a4859d9930595232ad372325afa410658c498823bc32437827b87a255c8198e22579ea4d352da5d8054120f54e0041c4bb52665705cccec816b62b4c51784098c438b2e332a497c5d5db71a5656a17f6b41b367d48d1f50189b7e50311cf4899dd0a6076c22d5e57c04489c1a4e808ef6c082f8b6b77e
5aa7734c563e169d742d3ee4c8a21245e13e110518f4799dd123054b6d4b8a214a505139e942356437a47b8965cedd4a6743c85359148c1b3d286c47bd246d4532a13056617658bd5959e8d283d2e263b8eecb60ce1d1e0eab5b8eac998da89799ad2d443916207a60524aa16887863caf05280f9b0a5d775a557220a8f09139e92233b2f437665b8b4ae785053379e7648d2b1a967948e294a38938474a688c7a505b3b5ddd990459616ac4c89d2573bf666cc90f59bf38f66c27a7e9d73ce6f8621886504f9a4c4285ce264808864b244ec4c862a4993714aca649892c8188d615b22c3a5af3e6797cbc3a34509e5e98da982748e697264f2f4c69028cd52de97b447956e36b425519c82b4376c1076423ffe514949b039255d9e1e1b89221e7d814ef73ff4495f7dd013c668195325fb9ae431615a6b4d965d2855798aa22896268ce098a5711c47d33cc70cb19d676643639bd273f897d9c24e1445515cda25f6c86c543d977d6c35270dca8186a6265fb62536122659efc9da7004e77f721e31bdb6266438d5b9059d7371be75c63f676eb7918c89a4fdd24977694ef784b7429ad4d71403a6b5a2ce13a6bcdba09816fa053f5d2eddf1d8af0d1ba4ba34316a62b0f4f537d6c5b4388969ffecbceb5835b876d66d706dd809105c23eb3cacabaf2356b7c135a8be9ec135186b278264d882659c7abdb2153e9c6066e284185436ca0a604c5fdaa814097689eabc0b154e0cd243f2054b8d9c2ea7dbe574bbdcbdbf01db91c601484155e39fcd3797d3d975977b4eb438ef8d72c86f62723f1005665d6e53344d9483054852939f5f96748b76b8f677ffda9f05deae6ff9af7a57527d8e17e30c1a7c0c6035f074a013d0eff7a0a4abcffd2e435c94744f7856ccf345cbc5d813916eac1ca9aeea7d7db9d17937d2b2dbe097189d47e728d5e936f805442fbb0d0ed9faaacb60463be29df5bdf7ee5f73b9bbc7d3722ae18a288b4d0da438f8875a6bef1da29d05876634740f4a891e131e9e31ad797e7e7a4bc6f147fc81d464565484298c08c4022a1c99e062ebcc12592f8a94273ff9de9b73ce39e76b9be068327542e985f1e40ce0226c855c2f6c0031ba34097d38e2a262478e165caeb68890b802333b5262e48c97942dc3610605a6ab880cad4306930ca95c0cf5b2dd62583125572cc86ef7de8bf1c51817e0b60a459160364cacdbbf68075b64d82ddac1b290c562e9eaf6abcca052060b979542659f50b149b42c1231fb640a153237c8416e8b0a02aa20e0686f7e5b8388d0b048920c2049ffb1d0edab56766cfcbe5c284a42c362a7040e21d820431fba08f86fc72807b98ed5350464dae7b68ac0049a1519a89869dda290cbdb5a82317e45549151e07ed16db76f5107884c63136086d23a6badb3ce59eb9cb3d63aeb9be4b6f6fca3dbc77d750bd4ad2a842f87615a7cafbd6065c7eaf6c76e855533721e7ee12885882971e11a860d41ea900305991c2ab40c4568306314abf49cf38eb07ed3adb0eaa88708e1c38377a4756cd7b35b1e2c947bc6ecded1a54358b67d34991656dd48b3ba7af8f9ed00397f3ca3504755c7752bd4b165c62bd5751aa88ebeb70dea50b2e9809a1512173128b6333bb41a2340f4889122051d445ac7082e52593bb79c1b36d05a6f2734687def9d044dd359a430471034adec56984347bf752bcce164764db164546c8132134c7265484a15232e35a6b20cb9924503fbd6033d722c51c978adf58e0f31ca0d1f2e5000d3840693504815838592985891184b4c94c9dd0aa95e6421790787306bcac945981a4d3a102552ceb0daaad81896422932345076a670ba155285e867b7c229b61c3b399eece4f8d9e121c4614638b56426941adbb1faf9d9a91ac79f32be395741c027e69162ac4b48ad56f5ef50a5ab46c51f812e530b54bee61f02099ac60299aff17f1f7d0ce8bcd7ed571f7db6adf3056414513756f4918b4f7ace39ababb87774f058b14c18e514d5822bdd7ae7f4fd986d6cb33db6afb81e6463687b6c175571e729cde9f603fde63527631f3b359c7664ec34e59c73ce17ef34959956272c014b0845026b67c4b53e8b7bf60e89f4fed833d55fa6c5a6f0894cf83446f884763590a2752c7c32ebf8cfdcbd1300339aa1d66f27ef9397bc97bc22caa124cd3f35493eb73b26d2dbc9df3af6c9b7a5fdddafbd6449962ae941d7ed5b55a754c9f24bdf8e8b74f5e9bb7cfbe7dedb72d2d429cb2fcbbf522e9988d4a78bacefbf5d60c11b76c76e154f3fcf7c5a52455fa328fa399ca17c144539a0e8dfa893bcfd7e1e1f0c7ef06f3ff765b0a10127feddfd3b46a926ffe62b1ff5d90c561f0b4ce065e983c14f49be8849723433b1eca448b97d9edf0558fc7d318947df386a4d9224be3c1c6fddb16db3f8f66af8fd36b09f5ffc15e78c5fd439e76cd5dc0f63b053225dcf1583ce753dfacc3984d049e8249c71d6ad70867006940902fed7bf
357973a45628a3abe3bf62de9991beb88362b72bc9db71556a8534829993cf6964a5370123b83db540fa7613a738c30870cfbba716286740c9df28508754c731084b8077cded57e79affe5e63def08b8f3d76810d76890bef627fe4639c8413befa048efa7bcca2ceffb88a4fd6ac0061b3229dabe21f7683246570c2cd32cf3fcf46334a8544ffcd3a2fe1da434aba7cfe84b778ed2956743c68083cbf3c319c8bfa0eb3731ea800dee8933fa00dbf78f1bdff89bfb6a7c13c218f61c5ad7c226b4ae3fd53a0b90a4e3a3407cf1adc31d7de71e3371ce7a6f2ed4af4f247d7add206cf58a39bec665922445c85a48f49d5dab2615df11bc84954c7b8e2fd4cb37c7377de579f3016dbfbd99cf31025abffedbedc5db8b7f5347fc7ba16e3ef9a8affc0df94da14efa3ce8fa28fa84cc8ba5e4f0175eb2fced788aa208c2cd14c5fde7b9c51bc629bdbae2a5be5f04dcaf164d510cbafd3d4bf14b8ccb1b0abcfd798a28872bfa6e3fd698e6799e8f934e6c71e769be8999685e3c71aa89956e02ce172fce5f1587062fdda839754daffeab9e349fd2bcf81ca4a2aa93feb901ffd4d89ca269e2a5f36fb73f73e7e3fc8dcf344d75bd25ced9a86b5aa3ae2389722869d2d4673b0d0d4d1a64f61b4ebabdf8db771fe72b1ffb36f9fadcdb3ccf53bd7f9ab6df3da22608e38b2afa22aaeaf0bfbf7f03feace127e753fcfdd757fe78aaa63e4ff1b192898db09288722835e73edbc7711c552edab11c978baf165043dda75ba18bb1de82a907a05be18ab27ef69bc72893b0c949f8024df8224bf86249f822789e65339c79927ffee59cabab071263f54cff62a3f33cc31948cd81d4f9cdbf80f1c5cf2f7e8e629b277d6668d7e6dae0dbe7f0cde3dcdfa82beeb73febb7e63e5733cef872f15e4cae29388720a44f42bfbe153f3fd148344d1f9f671c9f675d7cf2c95fc94c661225710d49921c4320779f54d7abeae49e678cf15ff24fb31b389b1feb7478e49824b1b5e3887bfcb70bf58b72c0f657a18e3b643555538bc10273183f0ed2aa2fab3e8c026bc6e75f84ea4c607cfe1f8cbc5447521d330aa401dea0ab5e15a7b98ef360fc60547fd49fdc23026ffcc43e201a74556d390a3c6de768879b8e31f6f12151f5a3d232dfb1e4e2d6d9e2f216c26d13b71ac26d2b7b08b74d846e6f5b27410b6dcc55303fbfb5695fef05ea9edfdad15a9db546813b07e5f4715f747b347ddc7f90aab80f6e40a008b41bd82e925fde7befbd570238dac8aa18638c813ce7ac9c73ce1a058a5a6bad817be3167b6f11056a51148199af96ad5f8e8630a240ac7d4f45a2c0dbcd374b511cdf547582baa8ae5d3471b79b416754fffd48d0a1d7c7efd0eb5a05f6c41e06aeaebaac5fdd018b246821139f2186aefeab5a6042aaabdedcdf41d6b0c7314d75f8e6cfbfee21ab8e248fbb87c6c8aafbeb1ebaf7c998a6b53ffcabbe79fb268e4f836580edc1d847c7663080fde1ef2076fde6d53eb107893d3f032c10faf81760816e8f5f011958a0f4f127c002d13c7e200b54f3f81160816c1eff8f05ca3d7e0d190e6081ccc78f81053a1fbf010a6081c4c74f000b943574907d2cd0781560f4601918f96870f3ab2db2809bc738df0d231f0d707e34c04969703e47be8d6f84b4935fe31b41d749f5068e3ae61e1a7b80fbe6ddbbdfac37fde61ca0038b8126456fb87e1f4d733f458be15a0cf7b94f8b9bdff2e3f86eeedbe0518ba9dfd43e0fc4df6f799a5ed4830f447554b5efb6759cce06b66375bce14c02a0d9f303cd8e4ddc6deb16059ad9470216ca18c3c063378559615a647dd53fadab1bc6bcd75eadb3bb66de7b516b6fce32cb22bbf7669cb3c27ace62e68bb3de7aaba5dd7bb7288593e59cb3c8c1baf6de9b8f543f38e77c24853f987e28097f14654c0631df890363dd0ab7befa7abbddc2eaf75e9cad700b4ab8d5229fd95a3484db465a0c78dc32ea586bac6b7df575146a79f55577e11651cfe5dcad706ba8e7f0af89f932f68b5cf6861626a25c312147960f29f012b1a24b982a4131aadb65ba91adce701e62fc32868f15705219bf3e827379c895610a91e211bfa2ee981675f7de7bfb4081468a8a0b314455485e82a809995aa1cac51512434688cc14a4aeb28fb6bbf7de7befbd6fd40d227522cac6571791d291b5175a535ad09ab103c40c1af86944a3299339e7376a10717a114685940d4dcea860448d58d14203ce398e7fc042251cd9f862822badc6194e012e289404c9b80ce9d08073cef9d0eb2d3ec4e0a444511447ed448a13133db2ce0c19c19ab16f54c629698267fce48a8c9f97b92746221968c599a9ab252928271080eae2e408c5092fca5c651f639b7895c45bb8dc0bef0162acfab2f7dee7a8420d1a5986b4c8c810b322782486890c242c1fe3990bccb01ad232f262232221094c8c5a1932445bf099680667a5dce5d2c04f231a394c814e218facf868d2630d6c4b06591
515264cb010630bef71445366a7220be2ee41433f8d683d9afaeebb2c53eddc452ae72e28ae64d10846e55832c60576a54badc84c9b9929844a6789ca3c877f3ad549ee881685a56c112f25524e54304382c58829154b5694119224a4c7150bc848e1b4bdd37299c815f317510a3b60cd47991145229248ec88c182151330c686c3dc9828118bc86dec86b64cd47c00b1d1f5348589dda2c2142a20d64ef82ae3430400d82ece6b482c2b84b0c7980e1b4d5f419a52889db8c2ca574a706b586014cd60a18595132a40e263868824606384f82a07177395029d1212311ce211b5a4e2890792158c2b636386ac2d41c3488919fff881143a012285d3122b5f3ebe64806d065ada10d49119f22e5d2e524da8b4653921d302aaf1a4b444a82482c80955a3d2a3042b6233a058005281938e2f2a5e8468cd08318a60a2bc307de9620b23e6553465e3445a093eb8c2a899d91434169552172ca9ac94bb88a2fd9a0ac1c8990828352429361624b63c9d184a333bbab170b1a0baa894bb586d008a97940a5c7424b58d5a828851597104f6e1f7136c35b5abf2179c203fc88890d12487520b2c2d265acabac8149de02dc8a1288ae2a847b4a89baa9f1b51535662b817fd7b0ab652ca6297cbc191213fb6886122c5c282dd104ba1c9922c5a4d3e88d058b028ccc42aca4950ac85184e4eb0985142c38a4a18168f1c17297e35e79c739e53dd9af69ccd9b733beaf4830ca21e552bbc78592143d210233ada8a7864c13a91d639e7699a7e59828c3535301a2e80ac095141491991362b5946565660bf456a970444cd0a1031685d405738b240a5c8d252e16acb4d4b8ee8e2c60632ce14d14ae147530821afaf232e50184bbac2f12566b9586ce5131a56da5995cb46a330162e4e0885601c00860b89303c60b030a34577195710da115cc32ae305052d33a89cf94184224b04170a2b1ad8ea4cc4f2f9890f8715902e8abea8fbe6f505e5b616846e80c04a09cc04e1be8ec08531c6012f965a1faf27205940acb25107b2038863558b6b88f9eedb05d5e574efed5aea32ca39672e36aee1de7b8fa459197972ce395790eb8b2449b234b9a44c2e2733ca7c235e595e535c555e4fdc4bcd6be865e635c58b8948ee2449b519b5344d7529d792906b48b865662b6debcad91ba54cd24fda22da1a22da6a02452459d8a4303462a38c13321f3cf440d125b6c6f62387065c081771ed1d9a676a7b64f1f353801364b0203a132535e4d2e01ab335f6044aecea87c98b7ea27ea2c69f33b71313499d2fe99894772bd4a2ba580d2a6b31691d49b93c3c64d7e767f5c1409815c639d73f676e975546005652332cd5a4d9ad306bd8d71f142c0a79b0097984f0a9d87992f2340c524839d9bf48c5ae9d26e99aee8049c9711cc7714cb96f452acc62ea3add0ab37808b3a0fafadb23fec7fff81f2288534cfdd6de8bc3e38831f0bed6aac5503c2b8e78f9d8cda71e83067b7df4d1a0c1ac81a6c6a863344833d43443fddf571a515ff1f27b6f1e35b94bd1dca568f27344f939a2e4adc491b71267a6278dbe0ff6e563a6270d5a73b3b1f7ea6bedadb9f76deefd9cbe7fa3f5fd7bef102c08161604231bfb72ba4bd7de6befb557df6befb5375f7bf5c559ef9df1dd38ebbdf516f9389e7c24c9d234cdb334cf13bde16e385cc617773b51b4c30d979e38b4434a5343535393f1adc9418136ba0a50e0cd051d1c1d9d8caf4e875c735a9fcd698376b87ddfe0dce0e0647c71b65a817e1274fda278f36d05c18786e11e8c06f10f0de21f0e0bd20c6b86708675d30cd5686874706fad73ce17f89ad2fcb3399d1ce21f560bf275ad926c38270d1aa419e61e1c277238ec438be1eb04c31a3019eeb91f22873063464d2d2dedec4c8a94be92c3a1f661a02d01ad8e5ac56379096778802436432df1edfe4046959163f68732689a65306bb8b1e51efcd8e836b4a1d1f1afe4985b19bc0d6d9c2c06fcb76136b2187619867bae5a06f1af0ce21fc60de10c43b51bdbcd8c05b2a3cd5006fe1de9aa7bceab693ffba0d21537438ba2e9634bef93f7f6b58dfa09c6ba7d32cb02919fc7e74bdb88546f5fbe11e91dcba59e79f4751bbdb8378e5fbc64026dfefdc0fcfb85fabd18e7f304eadf417aa31db45d85fadeba9c4972bfb971c7781c83807b849dbe3f63aceddd2870d42850ab3e8b61a340516702fcc52f22d55144815ba87314a8ad1dc9b22c4d922c3de0eacf4fee21cb6dea74e6077c9f24b9f7ef20fcdbdc5aeb8df556c711055a6b9628feecf380df334d4fd403fee2075ccc68d66f6a6cc79b7e20098066d79f81666feb3a41f7ed03afaa556d8e6807b29ba81dd1ad96b76f7515a16b9c2299260eed20766b31f0b69f4ab9ba9acf47e06debe5afda5a6bad352f9975efbd17a3758c1f638c31f78d2dd072ce9a07b5d69b6d8bab1815c25ba5e393274739e0fd7c73e43f76cedf77fed8cffd18059e6fbea9e61e
2fd5b534ea789775acaeb69775ad9a60ac7bd8551d9b972c62e63733076b6676efbdf74280f76c669639608cb1ce28879c33feea7b38d45a8bd7e93aed2d72185f8b448140aceea1218b415775e875ac6a591c987b71c37b2f6e08e6aa2b6e8bfd611c8e4ff739be0ace2bfbabd25a6733ec289f54aacbb11c85715bf03a16e91ee78b2af89c3f1f879e8f433db8a0657b7a8017b4ec0fff0d09fbc35f53f3b683d8b3ce7edd0b6980e3d3d9aace04703ee73fc0f99cdf9fa3a220c7563fc8c97914e81ee78534b8c0e1abd2f26ffed6bcf917b22c507efc17beb2860a6a7ecdfdbc31ba59ca1a721e8fb7b7f9b6a7706f6d069c1b231b75d575fc15f846db735fe7b3765fd5e00c78bd90f61537ecf87137ab0ebd31baa0d52f8c75fca56fbd603626ee299bc15afc3b4a7cfc39bed1f6888fe3834ad71d2f7d5f94c3de776768fa60e42a2ed3f2ac38a7accbd614b7846bc219e15ae01f2e4dd81efc39be7284edc18f6bb2194a22fbc32d70867208ff4c944a658932a9442a8f4a12a59105ba79fce5080b649435ec2111655119c2021159a0f1f19720ca0fe59005da1967c4b4962b3afe72092f00e7f19b69cee3729ea646bdf18d30f61072ce67e31b41d745f5468eba9dd81e26f6879dd81fc6d966f800f8f790aedfa11bdf2ecbf9f6131bdf8652e3db51687c5b4aeadb5370be4de5e6db5534eadb641603fed3b79b580cf84ddf46c1b79d580c7b6c2bd94b2c06261603cef16d277657b13d98ef2a362dcb7b6c0f8d695fafefb1aca12c77b9cb5d728ef1e62a8fa1aab51cea3847e5b0e965e237039952a8842c582731548870000000050317000018100a0944e24812c4301c4dba0314000b68ae3e5c4a28918b44911848410c833108c34008c300060064003148398ad40ca42093a9db75f300474dc53e72c18c0e6e3ad66788aae16a7d26c0cb8fb8c46904711c1f07f012eba95d9dbc1fa1997613f73090513b74dc2fb7a862c8ec0663305b5ca80bdfb9c9a42adad1b7b2b4214131af3df910bccefc5982bc5a231b408a447590d0d89969634481700d2dfd6e427b4c40c3229a6c83a22ac01afa669028ad111a525520d2cc30c3c77423721a3b9077f8c22f9e167891ae8e084fe99856a2cd4b69929ed0333d3a619a54bde833c40e56f622b6a80e5cff052e2cb1953242111695c892c437fc971f5e8a60606369141f8a1848479b10dca9e41b73204c1236fa41d54bf754848dc5ba1c3a5fac8bccece93e067d7e3817f658f18717bd27a55d04655dc17cc0ed1d77161ae78b8cb77bd1a1b1554c7d82bc8dfbd6f701765ea0083d45f458696812ce275ac0876ae06f6b5898d76284141cf3b3f8626dd2629b8eb9e103995e6ea6480378f800f1a53437adf105ca31d90f6270b432950f510843481e77eb1c6768b87a596e7e26f8d772cfdb93831f047cc2097f0164931abf026efe219d12d5e0adafead8fb2eb5a157f194068f91d65ab7b6fa307e6df5543890c6fb8badd992f01e4463ce602121f6c31397251e85fcad3a0e7669bad68dd0fe13f2081d09f40f901644318690d4b0261c6803774d6e3c69ddd2a0674d9b36b0e2c6877c27f27190d004bc04248b5b5606476902c2cb5524722eee349c9bd2def709156def718c2b8aeb2739f357ebf6103fdd60886cd434876b36536af5b0232396f6bac86077a53f74b7f6a1106234f689a9f21a0e01350b07a664af4094b473e68ab9838c1d15c21b9e8238a87d713deb2807d569beae5a32103057bb224577b4813bc45160fd6cf45a8101fbc5d0d489c0969a8601f570460924549dd8a88211ad9438ab29a9a17370bf672a55868ad9b987c4e7a2490936f438c4f9eb0c514f49b3de6fb4fbb11759245dbb192eb4f6e66c60c4664098c79f736693ea5aa34f276d690f3967653d8f63b6603dd01908db722219b0970dc9bab3e783322897b5b4fe0cf927d3dd95df2efc19d34ec7016004ecca5d69d427ba67857c185eeccfcc0bd1fb2d69a8b3a1123835b49d83be4a0ef507ee877a3b175ec9f2103b91c70d919ff02f81529bfdc905cc4ac920b971fb44c556e74d0638c1959a35d4c35b1dd35e55068e9eeea1ec00e8b423d3e88b23987aa34fa62db3a9e7e856d6e92c73aac6119521e6e8870bd423c265f17ce5fbeed492658b984d0c74d182c139742bf34f5e076690d17b98a15eeee9d588680b23873041f30b0e819887bbadf19547dfbc1f5d9e4754c68ec846e2927c05b4a2e02ba045a96621d531493228ac68c78e8558e41863652f3972c35503662de3feae8121e4a2fc2efa2a74f8a83e62ffd535763aec0be2f143a123179604dc7def083375548b6a72e9e3a20fab616c60148e0c1a60be0ee92106692848c0580063fc218043fdc9e8ae576ce3663c01e2b51052100472b4db5a91c6b617ae54ccc6cd3924d0ba38c4243d601598065f01b562add7fac23f734370cda378879a190de5bc83b5844681954be3c4873
36551013940a3a199032e3cc8515c20061c37fcfeb763a82fcf78e0ae18fb0a181e4662fa44e578e7358bbcb2af8009bf31d44a679aecc3278fd257c001f01f2a1ce1f6b94d1093fee3643f3aca34131642cae8c56f91966920a769301ab21757a2b4c5fcb46d834c24de3b550722fdc5bff031b583b720e9dc57c0cd2ed371034e7d74aa686404e0557b2ae6f6479b7b7075f45cf54e405f89e2ce3afd9e2f6e02571c15b4249c68699b06db9a91008c5d9e844f27edc681da40ee752ab64049120672d5c9d10495f31dae045be7375c1888753792b06370d32b08f7a0478c1edae63f1a28ff36744114af0e69c53a12fd0970402440508adf2f308386d442fb55fea5856ae55b2c6f56fc34cf9e0097811d43de2b6aa8d13115b042852c4c037627a4ff39dd084235fe80e4894fe91d4ea4ba9129111f6808d8fe9ef91f54b2002c88a72f3b8b8d76875a2b810d439ab2c0952828741c445e91066a778770b6939f75beb123c602224c848b553f208fe2599e666626e72b7df84f6e6566c5700f8518d82c500dc6a9ce7799fd87bd9470991f3c477c3a4f1613a0fd85d77cf811dd12b8a3cad334565491590a6abebf4cdf3a280d991aa942d13e8f06fcaed714bc69dc4cea4fea07f9c312dd73074187d312c50a74bb07310d5fbac6b73be14ef597a47846cbf004263fd262569c338da1bbac7334831b2437f018a7249b0710ac554ff6794b728cdbb6f1f92af98b6149f6e5ed1aa47010faeacdb03fcfbad7e2f293f9919d55729587fe949a76496b1aaa408ed5c6d7474873ab48045eca81a22d9271fdc8436f2491c328136ada86848b57a879af8dc0f5cd95af1006670bda69839e2226163eb273e225187c0a867bc3c34a7cf4b702c44b97d2e9866397618474271e3e2d57087df238923b26ec87226654ddaa804586343ca7c0298e89de33dd99cafa5299291fed2df849e490867b7960dfa1c31578bc9ee8dd0d9be806b252ec1f815c570bec20d013336ed30c9867521c34f760c86a012fdb92c23d2be9976198dccd418d86b334236c8af1db0d41f91cfa87b14f5529ac8a02595d6322ebb9f85df215e59986a6f899b59391e88a8ba4fd22f36e12cff222e568f367a7aa0f8b39be4da4f6f1ffdb2e62d35cf0493f2c5cb96c59773d38ceeaf2d5e6d498a2b57bdeb8f7b505919f0f53260392a80d837333033f17295e1b4289230076df365a066b48bed67f25b5c5dd1b23f54bea9329ae774c10b013a13a52360744a6c565b1fb9a8bb3c1f5011d1c0e4ba74227d2e4329d90953ffd89017786b4eaf9d23b4dea48238bcb8a6bbdc234a5687ade056143658b5521b2dfa6c4b76235162bd8775a3b5990ed2bff3004abe081a3e465cc17948370704922d55eb803a23573bd1f9fc17eac911c5c96b0caeb30e0f49c0a9f69d55fc0753cc16a7bc8236a8f04db3d1a41cb8e752f6b1bfdda9997b304d5bca7eb226658927144d9ebb17b781b29ae01712dcf27671b6496abb0ba776d88198c585c8ad6f259aa72f95fe899fe918fbf8b62e5aa1f0b73c95912beaeb193253f88b003dc57a30d2cb5d68b032ca4443d28fdf8206aa8ac74dfe437f204d708b104d09b6091510f422f51aae698ae5a7ea84b54f6c050289900a3e95789d6c21c224367556f040de815029ec0970617bf8aae7b7907a120f61e433932c405b6a70b4bb69b76836a79a1816e1fe45177d673d807ccfa3b0154899b4fcb16f9d2eef4b44d1a9f26a3723d95c8ab70091a85f7b1fa00b74e4b7fa21adb2122eb82b91ec2dbb3a3bbebdf55c998d6773efed5b7ec25263711da0f8128a04bff9893b8828448d5886ff01fc0bd7b071bd84e770715bb30e402b91a91252cee307ae3b37c28d22223b468ed7a56459f11924ac4535584d1c7b919905fa276999eb4c4298f761881a5aea2886775cb6750bd129807de405c8717401db818ef5a39b4e2bf95099d97c9fe248047bebe2c40cb42ecb7c58a0d90e4d6ce6bd8f889747dc92e5ed19c4b1fa98a366c279677302d37299bf8b50e4b084230b5bf157e8caa6d987860d64618deb95e55c9d8ae34037dd746265904eaa17361dffacf2e59ceeee708f10667bfebf677508233dc1d57894dcac5b083cf37817e61cdd0802f97f7253beb2a54c8d5a69692184e82149f9802aa10348a2318aef83bc68f37ebc9dd6f6630412f08f39a18c0a817a3ab8c2a8da615209630bc81cb68d340fb1e746d5def4549f9666a0af879eb1c2d11b8c4a9d3d66d72f55ce958cee7429d4ea6b623374489294202b8f546d00e007ae3121d0b7e89c39a733ca9409d1f078a3e4c41fb9fd8243204fa485ec171d59035c5c399ee38ccc7642f089b3feace63e752ab3e0e4550d3deb8866491998b623f579a9de2eda239424d2ff1eb21da9ba2f53a363a2fe906c388cdaed2c52fc20728f319666d69ed4892041a93ae02ba9d76ec51ad38ae90af0f6f402b7cda0f0dd66fddaf1dfd3a538da2
038787be43df2f5ec883f6ce8ce07c8d9d5f83df2291b3774c545c52ffcda49792ec2500989915568581fa04dd4c92b609122395612658990d659ee0c30a5a65aaa690e41d79f6a68d844228f500a4ad8cdfcb182f2ff273e64d8a1722cf50bec38defceaee76b7a77b4f4ca198ba1a81962c1d50f6c33bb93816fa1f3d602f82853a75376b16cf324a43b400de4bf6b2105b11051cfc93b8f57fb312c77feefcfccdc8c516d429d4529bad67dc8c89c816e944b8a9a4a94b9ff6bb4b217bbaf674b2c0c1de62dc9cf96a375356d4500101104bd392722cfa540e13910560c6c41296e497d54286037f6170d2a9eefedb7a995d5f8035c01c7e3072cffab65cd54f547772642d7b71d7ac79fdeacfa7a94681f2371498797ddf5a42c321ac70856d3edb6211b7dda7b12fe05210ca8f6143b8329e09c6a35f9ba418bb4905d1f4f0beb42b5a80adcec3143e151149627ff7ddb29b8f611b9bb418d71d71c4719f3177c09fd638a723b74ac5dbc90f89abf30635921a789dd12d88f398996ef4ea4f24780193557cbfe1e75d32abd6f08a5a7e773e34cd67876a83d9b9317ddc9f7c6cffdff8bab617cf4b8c3a9dc1eb73aba822ce7cbc76b4efbabcaf6145ae6c798165708d4be6d52952acea379f637ab3b8347c5da231e4af180bb76bf925ffea0dd049893ded05bc81017f1759e0b34e3e516c01cd33158300af4fb6b4eeab825984bf183a286ece037fafeb054aeac6118c20f0f4099fbe17dbfed120756747f153d64159d5545d7a8a25753d18528157dfe7da238d2325e3e0f05ab231403f5ecd2382b02c4a188876ddad9be2c02ea2758fa65c5d8130fd85c27eed76a5314b80d9decd09896405c6305309caee58e451ef0ea5839b547ec0e3579cf9c9d22af8bf7a1dde9440d01abeb98aa5006157914358e3e25ed40fad7596b8311057a5fdb38182cfad4464b206214e3f5af10ed68d23156d1e492126d3cd34c73d51016c9ddfbfd9f6259008134047ea484acf38579afb376bd365a0a1cc0c6eebba1ed74b9bce34b86a84e4df3af327b64a0d578acce9b8635271a761752cacfade845807bdc093f5c5c342fdb26a3a09171fc84048e77aa0bdc0552e41d1f2a4aec7db7dc0a0a332d7d81f87fb75c7800f1dbd6376392fb6d7a4b261587b706a9053aecc77b8a5127ee131d09758a843f7dc1df6082e912dae676540e675de057f3a940fa4cb541d5681ec067cd9080b0d2795ee745f7e976a87a9efe0d76316ed6f6cc52cbb1a839155055bb135db6c2e37b6eb9c42536ee7fbbcf3c336bfeb26f88adbd42cf7088154a44ebacc0a2efddf2d2efbbd90cd02f4894b9537f92fa6c9bcb8cc368b5830ec7cfe25303146c51360aa4121a44b0a63dfbdf5aa96a0eca9431d424585db1c385e6a276c9aa5345618738ca8de8781a4f74d7b16651690cbcd89e88ea89d2b0cd9cac71d8d4b860ce49e01180541b76d955f273d241f07f40018a387be0e9196a1aeb537282c77b5c6acf8765cd40a2e7f536e75b18d543894998d12cf2ad79056c15c68c5a113770f206cda4145096c59d1164ff981d97e9e7a74975cee077ae31c29ba4967ef17c8ebd4cb39641fdc2cc7d0cbd4f66ea4a5b60b23495f2b70c0c58edc8f2270a6f8b779408e18291b9c006f069f23440032f5c2b8089dd6cc8119e27b55249c072dbf76469de695654ea1233d068b3a775ee8630bd08626ce36daaa672518bbf0e2510e6c0c9a8d8c898a209e3a4f6eb3b11da9b34968f3acefa0604cd093e2588f0963e9290aa597e94b1083e587e79d67d43e6409197ab711effeac96499b09fec6aa25b22de4ad35c8665db8897c062b62c2d2866a268fa3fea58c212714ba90b90d156d3405ea64103b6db325af6966d3a162232b1fc54b64a362f88481a04206bee1c3924fb3877790ca52b7f239ab69d7c990b27060c016e10b70117c9b3d90dee9ab79b44fc8efef85e338c4247f7246d38d60f7effbdf78a8a395b0ad43b2d2a38df056a9131340ab22f64b1e9f68ed6a66fc2b7d901f4ab8b116f3b43372cde6c261326eb43974ea83de2a35624b6b7e077195a3a23056b05aa67a42d1af3b8751937b656b066b7ee78540c4f53e4ef7d01a8850cc96af02c06a253ffbc5b3dfa5c5cdb1b19356e4d13474605705295b24af56ab09026f282c040447744e08e7e3f93db9d2ff7ba205794d31fbfdc7c4cd4cf2f0952be41a51dda466c7181c631e74bddffcc33dda05398d21f9d8244b7c9ab3aede717cd827e3f45bfe5e06cb13d5a7aba09b86ed5ba3762910ff9f51ee4f3b313d0ac7c7afee32762da4cc327b67d6af705091ed13999d9a9eeaef7cd8d7779c840afff1ab74627615007e4901f8302eb86969542859df50b2c7c5d21b389d8933bac8b7be3aa40bd2ac9983c3866963a2a7f7fbf10c774c01976c5bac03f70a36736b268daae4c3c3f5ee66cfba755ab5ddf383b5227dac557f123aeef6502d47107ae6f6d8e1
76c917b7ddfb6c84a841872efe155e2fce278636f02b61e17242d8361f853db23d22d2cd862a84de2c6d391174145fd4b7851f99a8f87d34bfda8d995484ad96ec935b00fe8f43277292f88471f565cbb12aeb463a1fcc57a294847db63bdaeb6a6d1af291830e62e83508cd3caa7f054069c71d2c6984c0676a8677237d9dd9d0bba26d0cefd7e4ecd9e651234dd19629aab6799b3c24c95dea818560903946db9364072db8b9af49ec41663401feb52de97b9affcc281f95ff4292651cef14299448db14820c2c31dea2827d1102b74f0586cf8c15b91a189ad8f6963e21cbcb144b9e06d8c21c51772a1e65426ddd58cd70a6f5eec2b730de10eabdec7c588a82b6d0b1c6243e2c5d83710ad591caaa5b80a3e5b6c78a85d5d9171cfdcf4bc1801a104d061334b5fd278e2d0231b47a29335639ab6d16ccacd43eebd37f03a6a0fd0a497b78728c66cb5d8c05015b002dd679d4b6ecc1b1be57484d63a2bbe3330a21c5fe0b753e9ea80eb341f87aed22c8a8cdcc5d8ef1aa2b1a9ccb3533f5a055870dbc1c5a7c5a1ced8a780548b4b04d576009f9dd846c8711ec99c893cff5e0aff94ce19945a633879854b9de3978465664b806f40438f654d66d4906a642d003386ab92c98e400c474e0b9ac84d9b13c102d3c3c66b2b6ec5accb635d2afa3fcfe618f24fc8c953f8f1b0f8a246fd85ad3a00d4aeeb6ac915a89539c8f02c9d8823834fb303604cee3b9a9265cf346243fd9da2039bfd3a58a33225c19152fbc2da08972809bc1958d2c5fd816e995c75df242fa26678b2b05e5bbab589ee9426b874af714f7eb1c16f86d57606ed504dd4b523b2e101aa525c1c516ebe5002f25e96adf9cbbd0ec429684c5cbeb3079ebc893b6a5fe887bcf27509ba63f2949af6632fae6993bccc9f5f3baa976b7862bde340989f7c4616480a05be8122c8d31881a8cac52a5106a7673e34e89fb763e555b09c4144987c8b654669c5c24ed8fae69ee806ef8e140a9c630031b2806cf33f6b88cf722a8ad78963cc022e51f9716333f516d52b6152efd926b8ad1d8ba7a1c28def59e9bab70ae2d4b25d330de77de0c34308d9641f0d9857fbb92b2466960621078154144e6c83a4493015c8cc94996b4e8f397bd6655ee3f496aa3023744c911b861a019fd04b8f95ed04e605abae63c7c16d3e20820f049e8eb6a1b1236c710eb094aa21ebb3c570c86eee385b5490f230719e781ddcd8df6e73088202b96519ee2be7dd5ca73df1ae4b45cee3e059da5e1f9ce7ba325dba2dee3ba68c86721253fac6c594d4f15cc8950d5dd581fa9b00aa8adbc14a7e2878260e21abe8321f4a32dd57c9abb75cf2bc31fb1f32953321bf6dd96bc9ebc6c579024226a4e1e80852909057cd5dc493ea61d70a8430020b900d736b009c3261003c67def0d9843b86a2e4adfae24ca1134264d730ddbc314456067fc2f7c6c86e400d54e9474abeab69c763462d91254fa1f7e9e2cefafa5a933a29277d59d0c302e8b1f4dac23b70a0ea68ec29d8c8b2e03c6cba65c09ef36f734e03e259743fb223b7a9cc7f9f178542b27432b8708229a25c2cfdd3097decb30b9104dfdef068a0f64bb6f40e0f5e2e845aa0f131efe0e8026bcc936762667b121b07e894628b7088d87b90d00958181774c2dd1225d324c69db625801f846d5037ed7f1dd6cba6464a03883e0ed863fd746fa11a91b987dcfa7319ef66b91f0a30cb6a48fb3c61be83876e063dc2fe0a88604077ad5d72722d4362e902ea1d6ca5c446b7337b52bf04d4f619e85a56e45aa24d7c5410becd53437cbc034e61881aab8ff180529920bedfbf9f9bc5ec52f14b63f07e3a401f625908af98e435cc724311e72e8ab8594b840fcb712234de8a763285eed2208ad237f26b90f725a4517442262bd196ece29b3846925c7a3f6f51bec5034fd4842b232e8f8160dbfe7adffee2f20506728f57f1d6fba31bf34074877199512cad23276a992b987088370c779f1f202c6a88fa738c9b995289e1063ad82de5acd674cdfa0de7e168ad991c03d04e242263fd7c6462f1a127366171b1ea65047d839cc6fa4e5c7c577070d2426c82f5a90de92eae3750e5b89f564cca48742cbe822b29f3fe76b7f3f97180cbe96a12777c18f94dc073cc081a3fc34471dc7459b8105a1e766d94115ce21876142225803e2edc10afdf0696e7f7d645d6b07a6e52cb379a836703d291d065d63a735f662746ffaa0b378d61be3ab450cb93373086f222071766f018a03931c24d933e34a5f95d510bc8cbef1aa05473673a750205981c0b4e0bc80d09ca41d4a22ec90ce785c7ec252c53a0afdab88389e06429784a8ff2f1833c10d8f705634c736405a91e0c7541a4db37730425992efe4aa2180df555550950451f7c42ede3a8e8ad1d5c33fcac63b619c0ca080535e0663e1d7274eaefaac624e526621219723a45d1d54258c4fc23dd8178113fa72e29f72edae28d663f8279d36a4ef1
fda77bba4fb794e1f4c088f5228cbe13262e0913f0501e88214be809175ae16a23571f9b575158cef2525730c8bd49d12903428711350bc986eb9eb6c2e80f73469d221596f18b9801fa8a75bf1de4afc01d56ded5d9524b330dcc2ad0a22ad90097cb03b8b1131f33471dedd4e7115065b5c2e62d0248b81b3df919432d226d9edb78c43f8539391a9dc39aa04e9de016200f8e7c7162f62b1dc2d019a3226420a5f36dea026949249e814d41f4c75a5e7201ba153c0a68fd36f780220ca700da159e1b7b6061a91f9e87342398d3709a6af3fedf757d1acea52ca378b1af105d78e171e6b1ce28d2f904581ea82ac325dae6521c9acd7589259717d32cdb4f2c7688c635b20cb0fe7f828faa28454308323405aa0060b4b255496665332ca0ffa0e240ea8a0321a0701ace322b1be2a1ef065d852b670ccd7d5703220ffa4b9d69219f704bc29e51a736dc7912d9c355099f3024952627f8f41e67d0278bae2c0cf63f51b6551e11cc512f803472df9306b708a7a4a813ac0463ab7b4ea0581ac8563d39fbfcc7c092fe682202780e60f9a802c329938bce7f1b40fb8b3f21727f6bcd547e9574facb3e6820f73dee7d6b69ff807d1e0d504453b6a9b3c38d8f29a87dd4f48869584c38f30cd5db575d4451aa307f8d83b2a7e5ba5ea35d9fb1193f5e128c0d0442f0a69a3806b33ffed120a95b232da26b2b7537c844094dff1ffd370f6f3452d2860f957a903e9eacb9e9ea29d7a3b026a9f98dab5b07acf2b8564f233a31c5607d0e0e0340c75b263959af551da5fce2de8e0108526828842f8a35bb3b0c37cc016f6e21c4635eb78265dd7dfe09add3cb051316c120384a2cef4c641970bc621e824fd8a4798ce3ef3edb01a1443a202ce49a4c8330fef41f3e375c9e43a434f06eef07adcc0188ab4080a91194f754c2db4e882d6fc629788193ade617a2ddc0697a661ac0b0485fe9ad4a5f899c30b7cc7d9eea4bd53e02a32dc246cb2abb90888d83ceee53cb40a3678556d718d9a285d0eb5f641180e3f9de5b23d9c39a920a5d96fd78bbb1bb32fb65c1ec4c2a6574d819c56c3b4c2765cf3832868b7394612097029190101f9458b32a36df26f5168984eee2732a6a9436f9869a056843f2aa806a9a8b574ffe1184828380e726b953dc95732239a4e40ae2c88374bf9a6fbb019be53cb9aaef78a221d329963cdfa7ca4f4052aff42b820fd4eb0444a82e9910e821af4bbd273f40b79546cdc085fe982c5064c078c38aaf8ded8b54f86dbdaf27d294040493e9d8652910e718827bbc69b5b25dd5f8858b6635bbe7900d488925ec6f1c39f1d38d469f5f990e1370994f701bc23d128fcf96da4d0ec9e6f83c1abc07a5ef0a57479d41d945bd015a4f1443403f7fc9bc0272a343bb5832be24c9188879fafca4a2857b1e7a4e4b92c7cb97e5610c5547d2abeac07b49bed4d8fe95aa83218341b2743bbff33ee8882b3ad3f62940a8405a4b824310b96b77c999096a7646776a541d7523e430686c7b84a995fb45327e04f2b231befd5dea2e95249a9ad3abf46daeebd724ede623adcc488ef0867f29c2c2ab1f8d60461bcc3b96aa0c81dd54cc2689914d98bb1c79008e82899c150664bafe7c85552fac92d50d230d48f611a59de72056029ce4b8371fd6551ddcaf088fee25c2aa3e204f8d13088f4732aeddbced3e220ec49b25f928057924d5da67a61967933528598a779439e0b21a55f7947a06dafa442d4fc5e43fc245dbde919aae61ce3a96108d6b2c70003768f31beb71b0ddf8a6d28f392e5ca0dd1621a8674d57089a0b9cd909030c435256fa0b24e85433787d0ba6a981d1c2814aee5762aa0d811d14b5be549ea241015495ab83894755a97fa3f8b5d9a8d16652558d3b45f4cca96da643f588198fd535683a395e139cc6d9b2bb6557468f2880c23e7ac7c6cc0c893612b55f71eefbc2c837d0c1599bcdbc51238878a5a23bc09e54a20015feb02f35930135b0289837986132759d661478063c69c9c4eb474c3d394c97843415636072865a639c0c5d880f945a6349038988209bda1f1702b3e1b30e02091a35afd34a7977747e8d345d1d360322692e543ff33a32f38b4a9b3a1eaad3b1f08be813d8408491d9e08919e292a078dba6fad369585b43f5165af15ed02d8e86605a03ef5f99b0c7d92394f703fb29bccaf2ca0496b6a35acede4711e00509f863fa9f8c216be7c6643808346eaae81273bc89d10f1cd31692f6e9ab974c1063dfac945a04a626811677044857b3ce1513845163f1a21c57b1b3a1edbfed2dadcf26eb3ba89282e9f56c5b207a34fd35aa0156748289d3768c5f06fd39a53963c84b54880ffada98f2c57e2ede12693d35c7da3569a35869402b000ad955aaef2064e6a21ab8ddaad79193deb1e29b3e11e1deb13e3a44851524f8812433f67bc44fafd16fb1c585e9cbbcd80b75ce0ae31a44e839fd93bf7fdee1065cb63236cf189478018c3ab0f
10142c174e0971986357d1c080737708e3bf96eb791758e53cca5ecae497264753ad17c8bf37824cc8b1cbf6cd6f1c89ec570d95228c0eff877ca5cf193d4df5bcfbe4854925d6c9b080b7f954faeee45ff568ef3a7f9c3bc8aee81f96368dc0a15c1d8d75463c1750b0a2b1b5b6a8b68510ffdccc95563e10018d9adde0981fa7e85e5caa0cf2e7257dbf163c74a0a6d0899c96a26d0f4329546e552b33599e7e91988a395d6b488d6a94c58542c0d882b4c4475676c6b09510222fd5ab1b15006ae85a655fb8521f4cd42070aab9dc2862f525bead8190830bb9fbbbaa7e879bf2fe44cd68db110a9c28a11f6e8cd979b51c19f0a66213238855c50a2022d5a72c36ce21c5aace10361f5c4b333890da0d2040df2ea7d934805aa1dbcc453a136f353bf1a909bcdf83eb1eef18b7a6b8f345317b0d86bb99acc2c57a93a8a575b6c515b88aa454f93ff40adff2639a009bc930389ef631d215f304fb901c07d3df3a13714cb4afbde160bbcdf9b9a446b686c67cee4e5cd08e59ece417bfd986331b2c0bc441b1845bb8260a3b91b2b1cbd922eadbdc5982c3a76a3a09946f1dd8624afbe24aaf156a7a5a488a75f03f7892d8df09887402ac70b3198964698360013f4e17adf9e70fada40b0f5b3258177665cb2d0baabedcb06079b5380b5f4228cad250d49669d95b53f66d05621bab7901cbf5f30ea46abfd4d263274d87316ab60f454ab3791cb28636754817b1e54aa328a24c0f537ded1396f01931875410fb62443f8c375f87ca7c9d2cd85b53f56aec63be022ddd24688bd70e4b7bb9883ec1104d8962fe3c87ee314b4bdf8f575e32b5488cc1623d311fe737a3305b28c99005b6b64d870d56c94b6d7d44627516b4cbbf6f1f01f04f4be877dbbd46e99a29e79a01680eeb46de61b059ad22d54d35695fcdee16db50cec2e4e03c3f6ce07c9b4c40651774e41b08bee9932d05e537d4f65646b3a39c57453a6f9df3cf1ededd11dfca0bee136c020887db6089213d07a5f29e79f700042651b7d204cdcdb289b94330bfe9e1daaf37ce820dc45a7f41f01812c950ebe55469e169725b707f0b4f62debd2ec837c206fa5a4d0832013c922d8fd635faff68f38639b9fcedf10b9e7f918bce0b6891fe5e5637fd486198977dc1ebc5baad18e9d56ff80af917f7f90e0b9ed8d28a2bceba9a6b87f9844538d9dcd78386f26fb8cbcfc7a4b09ecab59b0945272c0405ccf71f90364543440a8003fbdc4b93b08ba6b69abac8c7beaeef9f2264e6b9650d82af858c43d94d21367d18b7166d594a0777c4aeb8462e7122284d40c030398d1e1c4ecf7d205aba39ad945eca7a2237c3672afb9ce03d6ca05ce39a15943b3620f84c14ebcf4a0331748265a37a724fb736541b1fdb4ac99df17fe4d3e972afcefd630547cbe411f387803581c7b1553f9d78c58076fa480f726243a54ec12a03a09729ec40aa351fec651968e5a8a8d08cfd01a1a71cf325bc4d6df72e53b75c5964202c9ed168dc0b0a2477ed367015b110ec6e93946fb115a97c0b216a462d703b62a9186b5b1c908d719566f78357e32e2aea3a22eeea9946fa0196ed813af1325f6484808f662a6b4d8e78b52d2ce8f14309bc7dda6e08a8df0ed20d218f845f568ced1036fa6e7055bf48d718d65b9713729dc3f0021b31041bc0622d8f83708d6e5657d62b3a1bb4e187661f91d68baf89f5afec6327bbaf864f904754b7f8700d6b9dd00769813db221594fc1ae81e46eaf586d1b55711025cf26ed0022c5a36a39dbdd448ed297c7a6da98c7965755db6392fb2aaa7cc481e602afd092015eb5a351375cf0b1a71c36c29c4991ede3ba4478d5b226ed567dd0652b68c6eb0735e2e8f9dc8f238c4d366878863d16e21a978d5fb5353f6989924d29f9cdcaed65819cf2025e2db251119e90240fd8579eeb80b6ce3a878c48e2279c9b055c5f50fde2ccbfa2b93345912bb48a55a380e7a591e24b8fb384ad9434489969f0cdbd63c61a6a65ad5ec4faeb52806574865795f273e08874ea1154fbacd62894fbbae7f3450d13e0718da5444cd975528f9bd9b09c24517907fcc52984afad424798f507ea75e849e38328cb7cce6a0642ef47f10688df17ef20cbb84020831913b60aa541f783a0541eac5cc16f6ed75239d6bf6d87b847167d34702b3dab31957b00c0660474c77c62502950192a0a63f201eb03c8e574b4d91a8572e412c95c9618c13a9990ea03a4a59190c4a3ab6a9f301b6a93374010deee37c59993952708b22308a41719716fbdd84ba01c4b98ee568fe0defd72a678b71f1ab4f2a982399d5418fecbedade0c3066064a99bb9d24bcf1b138472fc97d4b8e3170c471daf3e621896f67d49faab0115088948826a5c71bdb9abc815ad840112037795be927803e9e126bf0bef21bc7dde4fd7afa16937cedbdde811b990bfe29727ce22d41dc6b1bedbd81d8d61add25097f1863d9c0d72562f47061f9f72f4e4211
00e181496f80f9e00eef3c2bb9e3d4de30523d76ca885076cf29c96f91243d061fbdaa148fe7b81be2228ab0ee10eb7536e4dd4f5002e4157e940bd0eda01d5c3b2c75756071ff7b62f6449422b0da3efbd7446975f38696a2599707aae57aeceacaa08a40b976e4605ca2274798c826ff8e20b9755cea7bcb4e887e8b3c0e48a410fd49747d6acea6844f153ba8c04f01e9201dcd875f431e61cf7d15dd9d216c58109ec390ef98d8fd9c95743ac3e182bf34d306f9bdf0d45ad754e61f8ae477478eaa7be04773f94a502197f8f7529169719cd0f98652afbc58e3173f05a38fc46473e472a27ff5867885d723c9fdd0db0b6e5114dc6efb4147414beef73b018b4fd4e91465061207cb3c3a2d21846af5e0f8106b2f93d6065a77d64dcde827927c702420811b48e6cbad737017765d8556b87ec2af55b1e02d90f64fee4e542690c606f1da299c1f737841181e2048d9002c9d35cc39388ec5ea31e903b782bebbda364343ac9331697f444ac8d3e11a0b21f8f0a10c682e2f220a3d70fc063abb9a1cf416c6d4f245fb6dfbab2158e0f02063d71eceafe0b469bef0588caac27e0e5a15eb1938db27bdee18bef5644579890e5c6a0f19724a3045bb971a3ac2bdbe6164c9c41d86586c806b61172348028da363c66ed529c4cd328601706599c2b2b45b54817f4bb160a3805b4245b47c5a4ec82a9a3ec54561181509e9de12e50506869ab98256d74419540832a1a161e9196108e4226425f806b936ca5a7b8add9e091239bb6ff447852223889aa325190e0b65e80efaa7aa8d332e16c8b4844754904a6e18bd688ecad025424496ef0f333aaa98da8610c1f009ff4b74df46cfc2785d8968055ce53ac7bd38228b8f7930cb68fbb8971d6369626a24b455c8d24ae561721cc33159b15d99527ec559290d713146e6c60e1d14371db8a0f245343f5969f92d528417fab78ea0d622c288ad4048be20867ecf2c328bf7d45f02f1bb047a0e7a1a38a9094acf512d9ecaf2ceb1448223d236545a841496bd8b2f5289e5ace8207637e12ccd8ec7bc09e8ed12cfe3691630a706223cf5923ce6369f1177c85a6d37bdc1846a94617c3138011cf002060cf48dfdb3894cb97bfd6b022e4a69c801c9c3198a6727e8e4bdec592daf84391f113901aa20b2350c685bf4a621048b5e030ad2a5e3deb2cdcc55a5586836e76bffbb0f32f369444329be46b50c989de12f346b2e5624fcbbc95b4cc98de217551361914f9b913a841835897d6a23d50af08989e71d39b520d131115738501116d557292d9e767a73425accf917f59716ffdc996391ed97f738680a699478dfd395162f13406036834ad8617d9369ba6db9775a298a44ca292d5ee1feeeaeb33b8bebe430e3358bfcc19a3d40987fafd5a979e9c3fe256f007495c0ae04e8cf567fc1eea64fad210339d51bf118bd79f485e31dbac5379bcd15d31a7f9d1b6c303858bfbac52b90cb7cc2dd8c68c7b273249d90bb23dd0565805e3f077ea8c58fb54e1ec95accac65b1f8cb93b6d65ebac59f519091dec22d91422e3e89d016675ce3c957ec4bf12926c1311073908471abe5a40ba649ce749dc9a7e8ce04bec9cc50d77bf8516f297bcd29da35d14c00fee0e2874d504d3626f1f163d1d5fbc5728f6070a9829a836d0c0d47fa9ef81333ca19c710ff7331fe7e14dae315339106e2afc498b34f3dc4ca9989591c0b50a3c7dd634f0f79c17444b7076b3d3a37c057881ca74b5f554a1eca82abfc468689ac0828196fe5431244b620effe5a2ad11b3609ac9b1a3a160ec5af1beebad47be2bbae2956928ad3b70535d58bc11d303c7a3a4697a201b29d44f8849817c827f49f9b64035fd657aa22d7c097ddbe325e606d54c49c5d78e18167f00aecdca3a16ca5bf52572f76662588ec869d526e233f6637344550c544d0ea0eef46734a44795cbd8d076840be6e3673e319775ca95e4f351d2115c99e73ef451ac3e913f6ddab6a9e638b8addbce145bfa595795d0d2a221dc8c2d403e63e0be85e7b095b26f100d85a9ab5e1d3abafa5532f3e71e0074c74e7a100787eab61a5917a8bd859b49743ce12fd925bcd28e230e95527bc646b69ea4775c6939f66e6f5a2078081bd34a92e15b08bf08ec73b5f31c5c33bbe184d076669d73f31b6ab54cad0c69abddcf60b3ca9aef79509480266576218c4bd4b50185f1d7feb093ff3f8a1481fe312822142ea7b06c190be48bb6020f2016759666bb5981664756fa0d1dd3265d966874571e4915e9d805589b3b6512de8e344b58772685be5119ea43c50b1878e700e86c24c59a081b845f86b4e67c6b9ec02c5b99967b395e9a7025b4876daed71b0cb30d90916208e2ab924786d2e8be5e6db5ca6d34dc2c84e97f159390645dfba4c6f0c931f7df0785bc051e4c0e1456d62604bc515ba2840a8bc5b5e2213be376523f0ab7f7c42ca60977de3955b2ac38de9fcaa4882de54893ad903d04a4b6a424fb
86a767147c9f596319a9a667a562b6bf5fdae1cdab680406d9e7390473a074bcdf809119608864a922af4ae4183545dfbb190716922a3a2747709f0c1d009cc7ad4cc6deef1435b0e90232e5f8f5b548bfcc3b496cd36e8e9aa6d740729dd5a47f597df37bd8765092b76ccbe7796d07a3809d3a84a329fe41765b32ae667ef1a561d2edfe4b8ee21d3e6f2f878c5e24185f0ae91a1f185f70b22ee69e4adf6fa35235d0a8eade09bdfb52899e524b30d39879d20b0b2228d15792ddce9b6e03a354ecd8560f3ff1c62091cd76809bd3c2f6221dd181b8094a37a74c66af73c001187d6bea4f729665be251eb49a70f64759ee2045c25e7dc39c8ad576a67cfe76deafe0717423b92832b4ab75601efbfdc35af0b5911ae8328e9c5208c08f9f7cca05fd20a862b4bfa1a84114598391aa64c21161a19d71b09f4f3a5efd5d887975dacd07ee25d5d7462058e2dbc7f90ccee1e967bdd057a44ad1c0d89c7b8d22013f1e9ccffd4cce086d11fe3230a41dbc5d0cfdce68ac6338a283bd758f905417f68be9ea1fc8d089c4ecc6ed8bd48925bf790526e4b69b01746b9ef640e01aeeafbc8e5c9827523e5a485bed843e6ea035af8ce226f75c8736aeb3d9c8f03ea0c4554deabd49d10a73a9a4c38d8fdaa00ea9dc3b7ed9e0146bc858f1f3c3673e3f0e1413b1560006e722b156164f00313b5c45aaae0c9a9b7ec3d92b20961fbfef1b9f5462b85c2db9db6ebf3f1dd4af5b378a62f7a23f8daa8d907fdce4bf1cd0c7871e121afba90781205039f34e0203411d40eb873382a6c2995c777789621c01a15cf798916bc4c81fca241dc82fbb719845ebb732acf938dd450d72ada5ce9b838e0eb37089fe4f814abc9ad2f09e44c2298048eaf6e6f8b954d1416b82f4e0d4db50ba96dce267db46902be740079b9dec5fcd940921edecf3ca5fb3f689cc022cebcbdec8cd940991eeea1551f6d9284778c14dd8967e2244d6ee3c82c34e7c3de02d1b6220ae17f2d9a08ee1ed8786098f930ec7dd88792a804bd17bb99fc2e27ee46298b381309dca7028be9a6bfe663816a687d67e673014df7f1a704a6e0706592cb16de211692d486ec71abba9a06dc6a9a75f75580394bd701b1ed205e0d9a00c06a974dd29b03b133e01ef63a1bf2b9496b2abc85a877e3371b51c0346978a04e6eef50a20cb235c0b413ff2e0a478b2293a921b5b6f967deab062eef48f333cec36e51ca7a67476bcce1454251c1bd0bfcaea4997834f5debe8960b558720a428edbe9c14ccb9135e19ed6335c3f41f8d4f0c4d545c1d59a836f11f95c4a6bbaab9c2e82a422785f930fd8c936dbb5b9c09667c497b2c2d2c218083d439327f8fdddb60a859e9e93e17b462fe04646621f4dfe2ef27e4833e2d98a7dcd07b4493239f86791b88b417f90ba802996d596c9ef7623d1ca3a83f72e305d58bb2075545ddef981acb4a0a2c0fcc77c7bf048ed58231c0119754ad1842397a5291fd189d3ea72b80c6351476cddcd0352cf767ec1119a32e9514a404349bc566127a5e520377245ce1d0aaeb29f9387e6401eb59d2f9202e5be36d46899900fa4926e1f96f30cdf13d47466834079adc3295bc6c7172448f30f1b5580269cc91f4c3840fffa5166a7acc605fab865a0a1f3e5df93bd6185148257560f2ecda3d83182685b9ce0c8c28841fda05f3c8252de5f5255fc5aa2e6d3a140c18ea6a4e67a99ffac94b481e76ef1862466d1e5bd9a678627be9101bf4d7f7bd2f64f575f324547bb173767fd690573c27b511e1c8e330e3250e5ac5b771f421cd063509877a9209b4964b7d69b1eaef2e545da2b64ea8fcc6b973a790e9a8bb87f9667b65bc855d3e8d6275265d10b6595d9910b77555af2d1a8db7cbb4a0095d24f8d61e60e21286734e5826cc9e95cb42d075397bfe8b6e479ebbce4f6edad02f9e1615440a66260d8851d26a3220ceda0258b55877778cd59b9b3c727d777b8072edc59a931664816710931bab7817807ac85838422517640a151eaaff4bb3b3f2db0b63b513d5bddb6cc249de95430a8a6839589755092bb673d8329c14394d2ac20132fe3b95b17b0ee3c2d22de70b7dc661d2a02dc1ec80c2acd1e98f5d3fb00e2f277883520bfac986752a9501785e854ce06ae3b7d2f146054eedd2148071e084526377849ef7980ce24912580cb9b81e4b0bd2909dd0577a69fba12d3cc332f0f8f112064a977ce43d2812f6d11ae918f1d92ae9a145b5fa06f25b6829114f505dede4fd7ac0e29b2e7b1aef6731f3311e06cd0b175ca4b31143cee1f7bb93aa4cfe4c1f0aa625c082482929b01599f0f4c4aadd0d8afbc3d00b963cf74dbf0c3b45788d32c366dbbff7ecc37fa9377aeb18b3e72793299e57d71281fc16c5cd9d71410504dcd3f0e42d87b3ecbf3b7f0cdaf005cb6cf5796c411d637e2f16a411ca06dfcbb15bf4be091b57d17b12b0bdb2c0a4510d199a4f7645058b3c3c9ec494ad064f3a05cc2ea
9fc396339255d25ab309a5c8e03b580efe757f70f81b74f6fa00d289c509eb9e5a9fe5388434d38ea608b09485abae4c32b8fd68feca8ecf4565b47a4057ad1cf7cbef6527ec3e6da01f05384dc8b66928119e8a30419747c85038bc10121fe57eb030cccfb05b71e072fa6bbcb597c4f6d80e14f4f00a8e8a2cdbe636f486a7bb5240e4f708e7983d29e4f6bd9b2728bd4809800ca38716d134cd76f26b2295d37a32ed89a7c410ab42e9c9321aa5c96861fe48058e82192a95c880058a04edcfeb7cefbde7e441b5e56d63a9a5a4ee2569ab182292b8346e226d0cd1c076fa4fba6cc02a5f1cafe6d770cdcfe293a76278095a89ac2d069c7935db1fa42a378bbefb1cbf8d8896f6d198d20ba9e478515efca2af86a2b3d853da5e1618080d1d01450482be157ee25f1a9e3264b29d2026e629936d813250a7ac2596b304b6c62d2eb27a87b388f57fbb1bb2416f4c1005670e4fffa3ffd3db6ef4ed8a497a2969d7a64a5a2d2923de6245788bdd20bc5f510d64872c281d867e0b095a9142d3f5bc1377af709923de74c115fa798cabce9d5e35190a57fe6441cb9e1bba9882217f028605e86b6681a97a5dd870fda028a912ea94eee689e42e4b06637a0d50a4b2d4fe4590a49c1f7661114aa863557c16322aa15a81a2b5b919666bedb47073748e3c3a4596a529ffa6bbd46258d6b37d7619eb7ca7a0970bbb8f9e260f0106559b499db55d4c4c36f790395568bcd730b837b189580bf8b9c46d37531e1546c6f16d128c7744a8dd7e15137e40dd86351532149d2a2ec3680e8c349d7f3304bf762042acc2c4e6619ed98826399a6375f9fcda30630f2a8cfa4904bc46800fadf0ae18903f192d205f01f88233977874af31ba8c8039126a89bc2faa86c595234d9d58f465595bd9f4fdade0f748c1968b25bb41bd7a58b291e5a90665bd6feaecb6d320f18eec23071707b78389887f4f4639e6f0cd8fa33f88688572d9e8896d3114bf55a8fad315c4f847f1464d1ffe417de2afd7a9151cbe56d2790a7aae5d109e036598436a33230ce7e91479b9dc3e4c99ea2d2245c38b241d22df43236198a005b11735e962ea25422b3b4bb6796a203a17d3ad16ab1ec37f743c36fdfcc2fc9e6ee206e0acd6c2c0c7412f4ea14102fcc0794acf870bdfee803a53d2695813b245370f87fa180fdad3aa46ec062ac687b8ee8d008016d27d90a8bb3e882bd81b3fa282c0e51c08ac4282bfd39d2229d59988be679e3480d26dd46d2741a74c72ea0a8eefaa1f51376eec2bb4fd562f97700bc5fab008eaac1473aa44dffcca093651d3a20ed2180be3643eb204a8514f9053616dc5318bb17e0acc3fb36965ca642e98d1beda89468b75a4f0f0e52853c4f5ae65b3c4f196c302e80112220d1ebf9b8a496417cb2db6aa3f5a991c266cc800f6abac9eecd791e373dc9b231018adf1078948def581b4055261d88d51b6fadff24a57c9c650253f7a0c90487cc4d9304b6c95960e266f8150fcef4e67807194c13f755c87e7d59b3f854f27ec0be19551d4dce250da0d580e51167e13f7b885bcee2bb92f0afd07e56273c79ae3651eb8e1249e42c8b1bd4e08c913552f9f1028649a3f14e85889973d41755e57b70f4f0938c6cbc6e971a49a8845b2f6f678bd7649459cade59247b5d7d63433e1b5a7265ca682c678ebf0cc89d70187c274a330cf420c7351bf57c8ba0116a1abf69ac6476cb2fbf1cb52246f2fd0c8259eba816f6b41c8cf79a9c03891d874390a93bb9883807beed2c3a468f136e39b4a38584e2a8f6eda251a5ed7553a52126ac737d0ce2f11eba2f146f6deeff8aceb22cca5cbf4d9c616eb66831dc08acc7984b5debfed6d988ef16e901fa8d24d243e4f9ccce6514ce144f27c383bbae8dcf8b89208f4eba3a16e9abc41c059f32b26c009351e7c7b324ab6b693465aa401d3655f5269da5cac62b1820c46695410081785b6e863329398429a19d60ce58597db7d1465a24945cebff3d064a6c701d09d4ff5fee3b3c04c031a4950f0eeb4bd90826de01afc03e9861c96f2a1c92a17c4cadd4767eb441f2ccf7644126ef22f83e2fc6635c5d72841553ebad9cab7e9e9d75ae62318d7604c9a998606345b67c680f0e7815a1e8d7e544010dde76c21ea5c123cfa1e6329f1eed10584fc4a808f6e2d0ab4f1db331387756bf03f071af1175514b7e3612ed9036f28f1affc032fc44c73b79f12c1364febe4269208264858407775e34aa3dae9731928fb7fbfccaa99113856a9d3051c4bf26e066e353254b972afe71513c06903307b481647653e90323a9cc2d702160216ea3c3b97aa93d245bbcfe4ba9cf8c980ead36c9b78b62803ec33fedc0e2619c240ec400b9898ba15794239a1b8bb85549eac0cfe48c2bfd230e0c5fd02f22d2aa3b6244e0ad3e63964826ea8e00590303deefd87457c16d3d42593923811ead551ea7a723200263c1760929d0b9457ae5ac6ec0332b4aa052d2c030
27ca5919b3d1ea32582ffcbdcea01a390e9407709704a0f9bf60ca1c1d70d0212007d100a7307ca1ff594cffaa8059157f5a02b542493966f935b30e0ba09f459b3a964efae7fb3e6ef1bae1c8e7fd54eb7caf187abe8840b2023c289152c841da8048e2f58ae0a94ce32811aa9379c17fde878350dfdd98e305e39289f25b05f043d05eba47fa1570df29a0e438a18d9c6e8d47bda475a8ebdcb350b81908ba298523f23c79df768dcb7fe0b566a7f863279226ec44255825f847607995108f7499d817526d4f072845f0e59fd16e7c00bb6348d1668bfac84263ff0156831c507fec00fcd9ac4cb7b9419240dcd243859ef9fb1bf6a58af448bd0ef16aeeecb98970d9bbc47cf16689273f5162d6e557c309bc21adb4f51d5311eabf11030fca7ada6723ade7eecb3e134d8b87747a581441c319d7fbf9c2ddd94902f8f6b5b1eb0cc9cc492036e529d987b82e17ca0a519ba37a01206126d0016b256a2ac4cdb31b40bece7851e096205667f971a40c034e21a6c1e762a8f9577eb0df10217cc1042624cadb45661aa926f5e7021f0fbe97a78c304441cc47780a2534c8d87e3dc7a83aef49b7a3611a2714481488899fdab1b80861af50ed9a00f296e30fbd54807d77082600b907f157267eea39513edf4aea06ce27820d178381e0f6e0d7d5aae29ebadb5a2ad068b444dcb698d586f0aa72af99c942c0096b8d2b736879f1af88a03f69c4e23af5867ed8a41b254428201ffa1a1634f9c1c74cede91445ca9a09bfda7d840352e6f5d0bbd249a7cae22efa8d5f18e5f4e5c8c5ca8460868f12815ca6693cabb380b4334bc5e3ba9649ff2c1d318b00f6c8506b468babe9f2d9a573ae1a279668b082c2ba6d7741181d58f4b515a368fac8efe614fbf0ac8dc2bdc87d5e10cacb2f13f7ec730c82b2eadc5d59b29368711a50b205dc266d65fe30f962bbe9e3d660f5b584861571485ad33451de5f16fcb00e043c21257dfa48ac5cab175bf58d834fc8c53ba86d0a6c8778459ee4f735dabdc3d511236c5ff0473cacf117ff2d9615bcf2b8e3c877aabf67b53db9b56d9c869b2f5515e3bd91a0729486cb533459fdb125a553a69480b87fbdccf79866b27a72ddbb63e969f4a31035165edf8b5df80d16e4427cb0f61c980bdc13aef494888cf61e124994612c4427395111d6739608a79564648bfeed5f54df817d215323ec910ea2306f8094574421636051159938a405d7967a7dc2dda60cba9590574b882f0dd9176d0bd550ee1398626eb4129a4e444f9d9bb45e4443e0e314b52e1f47e51362fef4a3cc02e679bdcd1c8892b09df0a9a904d503f8855b9fa113d59999646aedf1cf915941447bef55e60e4d5bd8f9b7a0ab9449bc229851c73836ec00089ce086661de82a9b11a9690e8a1a1630504872e1d8836ee36c199d45011bb73540d15d6f688a6a25c7472a84f13647befbe0460e4fec3436fd0c71305016d7f89ac52c33b98df5e9e550262176be40a453f06939ffac9e547c0690e8ce25a4dd9e1b42925cb09d38a9332c297ae016fbe9dac10150647f4f9e19e007310868be3b914550b53ffe75e945910e52b37eadd9448478e5e8067f2320d1977a1fd6954d5fc7177c8e804ff3cc5db9164572dc65baa51d88736d7cd8ef5e44c84b3d96a1dff1ea75ee8806d7dcbbacf87d6f441ed6f8df7e410a509d51881f366e161badb27041d477ed1080a2977e41009db9faab5e3d02c03c1f5d7a75bed9b35182f6e68f158a291335cf3efc2b34e44fdabc6d780d3575b010e9c3bbd2c4d3ef0662a11b8cd9d42aa316fcde21b206c5d3522bbdfbf002fab3e9ad59be94999efaf77ea2b9a313500a0bb32ba77c7448ee7ed36d622ba5146c30e447f11f85b29e8c52006e289bb3625b089209b6f5eab53d5b6c32c2d3d6dc05daa934e8cd756931913913e64a455d6f6cb773d157c8135c6bfab290d7a1c3719162774a52a1cddffdbe898257d37b535d054dd9162c85e76255fd5d680a77687bb877dc957d55ae0a9dde0f6645ff645bd36786a77f83dd9957d51af0197da1d6e0fbb655f556bc1a77687dfcb6ec957d55ab0d49cfde39bb210dc3c4915eb85230ed47e1b0b78bc8a3934c026a3492846e0f0824fc2f854ccbcbf066e36308febb194ef33390ee3d581231c1bd0b6ae4a1d0769897303b0eb7f1eb94c82735bb464f82fbea6615b45c2bfedd941761d1fccdea5a261ec26c75959374db340acc162a9c0e8640d8c942851163bcb3e92a84073815818582ddaacdc02cf16b3b653a8b01f8097561a77521c5918d4b49fde3805fc89474c1692471fed9fc60e12e2a05ec22ad891120c34858dab08e0902541680bdd53960e004fb001b69f1ebf30f5d9cbcd449ddfa0cba2b31c09f4084b7f0c81c135b44823922fb395efe8f5c8d0e343caeb848052177bc7f115271054632d9ed40714797fe7366f9d070aab8c7d33fa676f935ba6ebe67381bd0cd8f032f4ae829e28e94b7d
bee938976135191c4ecd675e3520699aa0e32f1c870b38a77e8c35c000ff6b0e287b58228706af610dc6444029763855cfa31678d91f623c720cdde83666b9c0a3310b5ccc96c5c7d5e0f3d52554a7124e84a3f6745038bc1a288a00b113162d1f162372401818696f8a9731026b52cc0b6334cb1737a832b9ab01fd8032655de11053f2aad3d8e5aef512ac14f1952140a14a7c6c78eee8be8845afa02d0591609d10387e51febf9b15c0c2f11e0f74541cbe465ed9275d81fd0b63b396615d88c40b7aa5c64f5671756a9aba5bdc49a4ede61ef598ad59f1629785387ed366f52d47225fc84e6b2876373e544acf4ff3938f6832b44ba02df8d5e94ceb91088560adb78ee5ac48f5bc21ba1953e9e4311da20414965df06ca4b81953542d5573d853d75b4a9073ce723ce4bd8d31e8b68df684bac819fcb56896b4a03f9254ffc7b9e4e582c1d80049ad3aee868f373e1c4458dfeb485fe1ceeaad4c89954636a81206f6d7ce22f68643b5aa3dbee171717d348247e5d47506ad3833099718ba79c2d6d3fe7edb07e49dcac46385200eaa54e8e2f61b646068a28b5fe82735e13d75030e9abaab07f6905f20c1b27dd138b9b29967f37ee6b0051dba09c5ce50b37d95ad009faaeef1cc319f9021c303bdc28bea4d6824bba5291c1d2075c3ba699378616996cbf01a96c558e5e592b1a6e50815d1920db9866c6a8b74db7572c620afafc72ee161318a25424a3305d2c78068a9ba90becf01c6d9d69e60c192170e0b6ae516ad1131b88f127d85b28297e347cb9c0adaee5d0bd2f46d98d33265c9f13dfa62a7b5e1849438998ec319e211004e5a0b94ff6e88d8c791418ed5b957b63ca9e8141f446fe75d953a7ce35ddcb614bcba87abf4eb3515e4488d7f31b99b0c84d32bd87742c8a2bc7991ee85ea0082698e64040219426e1bf093ae35bf24d12bd223768fe5905d510fcc3361ab122359a11608ec40adc67e1239b1ec624b6e129a3f60232a6989a120928f418d546ddf4d866775d85f9ac1b4454227a6223465bf4a70dfc0448393cd806ef017eac20abdc4b4667c8f195362d5cdc837296eb7513214be6d9acc1153c19b58965074f23977c576f7272d07ac8e08f763d3899d77d09c23cd8d0af7cdca2d7d943fbedcf984523ad8d1043c2abaf41f22f9c1e70ad6d706cf8ef25a6673cb54265e15fc9c9ece5e9ec75550bd2ef9654a83f2847af9e2cf3bc5d41a1d3cf44489fe0895ca1fe531912e1486395c4a99229ee5d5511ae39382388c6aa118580b783a316da830763d99b839bf8fe065d6e9e6bf2304698240bfa3190d399a11a53724114b47f77f99ed5606e124fbd8440908e75667baff8e6e4924790d047fa2ad0088ff963033f10b488dfde095f2bc4feebdfd65c855fa1010c9ce79197eff8ad4f7fa6c8013e348750a88207ffc6c590b80beb9d9447c192a1df023793092f420864530a835920be1a035d72ce43d2c55cbac17aafb83fe6c15486cb129491b9482688df4311d948d0d0e67b58baa1a47fa0e9e67ddf1a4fc13c87dd4eb2b2dab33491fcffc717c84e7a94bb600ed62c6d5cba72b2ba3a64944f8046e627a64f458e4c2282764d83ca602437f0612197a06b245b56384c92d296476b901abeedb12792829294b208e5c5452c4f80266677a10ab32c829669a7fe13f65db3974f7911fa31efe03719a32727f75707ea9a003ea374241f126466b47438a9f7ad7a4f17286705d603091c956a4d1f44e8ce90e21d214a6a48176874321624f7398b3c3bf4d91da83c1350bc1dd4741613839b0ee401c2419eb76f27a3003cb4dde525d6672d8162effa6c19790d4a7dfe18d7c9562f642a294ae576cf5cee279200ab362be856f68fa7c54747eb7906b3d400cbac44dd11ad9f5c185596bf71ff9a42ab5362ffeacbb4b080a1b6e0f0f154084e57c0e58b08adbc72aef8f6dae74b4873e2f10396eaab963939217fb435545f68aeb5dc6a44011659ad2abaa69bfca0e0c674dc9317e64f558cac29802a0b4d538538e3d34ffd7413debd9effda2d3b8ab0e35fffd2207bcb939869ba2f75ca71e824d5fe2ad40c7966361e7597fdc0160724e941a561a59ebab9e207a82fffc18f06ced6acae456caa8706044936cb4666ab531778d6cf0df57b8929599086b9163234391b4d851ff5dcb0e8ec55cdf81a2f06ea99e10e31ac77c69c271e6e43f1329c5a25fd82430e52a6492f4dbcd76f054b18d4fa9c81b21bc2b36e2561afa1974a44d8e86c712160314513284d8916e73495624eda06b0e1708d82f1e28df2779737c862e0844ef498d4ec6e2cf648934d821df884e318d3ca4b000b0614c8a31c37af78aefcb01fe0bf0b70a1c5874db12165d50a061d80252be30489b4e0a399bdcb4f427b28848070c086357a76fffbf13c37a61a720094415946e955fbba63488eb171a74002a63e4e88b040440a3f2db81d3ade7b5c868f1fd6708c17174e622e8f669581d00454
8dd470b4344f7af6fe7ace3cc9eaa0be0b687767096dcccdef8aba9e1ac31564970f854ab1ea60e411bc00662748222969f728510d01f56fb96b8864e87ef2930b37d6660e07fde3263a2721b4bb46bef729af0ae2d771ef74aae3745583dd18fcd4f4673a5ebc3fd82a5832f268473fea6643badb3556024158893b73ee80d2bcfa20eab5c8cca90230e0fe9dac4cad9d7fc3e10309a6590809584659375367a06bdb102a9a44311c7138cefaa9d7e31c1e5c234fd60b14d3b2bc9d813ae2ad33440d43379a9c894e10b9485ae365ffb9083945875f2a7175f40a23c753be76180da6a055587cd0bb6db57ac41e433c0da885bf9b881739a477ec0d2648a46195a2634d952db5dc40d963c414023b4c260d9593c64540c9c3882dc6af513068a4446458218901492ca5886d2736bf012737da846216cccf05caa8ce624e0dc3c884744a4a0e47d9312cd3b493f90737e0d3fb08579b65e9ac46afdf71f5ca3c624b6a15e1b38f2909e3739c8620f372b06a5e94285de1b0ae0385de3891a5748ea91189cf35b462514c4856295bcb516f2d1178f3d188ccd2b1bf4468f00b2c84e9d7a55788e50ad995e40629a8cef6342c2c9036839b5061e6c1cc53442834d651fc9e7d1c26f72a16487a044be4ca3085eaf63eb29489be6a3380a671e6d274f3f4a9569c9a717c69c6e1e11352f2e5d06c7e8a712faca2cda693c488f68f10af1711843ef966077dbd9eca9daaad860d34ffdc23d01b7232c8303950533257c2ea119704d3862d90cac4860a7ac3588347db13c1b083ce904b656d96ccd53e5d14ee89c93fc8e3e2f84dad94961445acef135aacf5669de2e4ad5bb5f491d1babab3e0c863904e1bf8c6e0cad5ec867b66ed0ca7eefe090602ac1d2c6657eb111bf079e7e5c39b1672bfd3b6b365d925577a6c1a4008630f0d04c82ed33c547c4e973de699615aab6b4d768e8b750bf13f0a5ded828eb0facf39deee19ce1cd59a787c2336f060c5d4b026420042d392625c472af5e7b97050a35c30d97f9df7b1587011724a4b42e68e9359cad49c700d3202214043265be8e819a28f22a4925be0542e706907d3068b370cc20611e9ba51b31c37c79e0f9c176fb3bb150f7fc947cccc278b517b713484ca49cf9b4b3d3d62df2d19579e98188cd76d46407b24dd6e4de9a64701d861ff6773917f6a5f6420c20553d5e9eb15ec77aafc9e05444ab8964d9be06a903a9ba29bfd9c2b1d5557159fbf9c90d2f60c0f4cfa825cf648f8f9dfcae79818403dce4250f8edaeb189059967103c3b1135e7a164e987f543b3e580d5253e8955e6b2d292f111eabd1edfd2daa8af512631a29d6eb050180b3e090d7cbf291f1b0dd26735cc28fb6dc1e9b2324ff818be6e9702864cb0b687a1686988fb803d13f52ce9fc9604cb0b47f57e6f0a95f1b08ced6905f8b5e150329ddee0d640b2f4e122c0e6ccae4569e1e730c0f109b4c1021c41a1462362673814c868fe9ee171cde6a165e1eed4e775e21251c809d67fbf84b983b7162cb9a31dff9c721723a1729c03d36366aa981392fd1af50e262ee06f9de9c88d5fbb3d2f6a6cb4626fb218ca739bd5a34ed26c7f80411fcb8438b7d3c7247a70c3e31e59f137b52ab98cfc144b8282ed228a5ca15c3b24c8f99e662db14e53cb3d7799aa0d8c86a20c4d38ab5bb502fd668536090586b2c62cd90470c7ca484fb5f52d0f9ec49cfc42dd1d9536a43d43246e7fbc78eb31d863a2f32fb2b4241e2f5a06764a744eb73520d14a872470fda3532401d8e6c14488df9f4efc1f54f200a454a4ee815b3f91d88bcabb85b7a3881ad5b86b4b1a4d22c030375154696451705d2b714a2fea5814242410a0dbf80fe53570f51680a62c1a49e7d9b8fa3c7965f10ae35838003621913e954d337d794a5b62aa11c7ffd2c33fd7caab51dacb8846a9add4aa969899bd376ae0ac9c59646a2636e68f733f929cba23e40dd989d2fbc6300576f290d6efbbe41133de3e93ca81909935c9856259cf62897c6186f743ce72dde3b6a812f1472ae22a002de43dea2ac7a61bd35892a48a336221d1500cf1e87fc0a95c3918c5050c9ef78e119af8b95f12da22aba616765db800e99ad2a22c6e85b08e2250efcb3f56097ecaa1438bc4fe432073423f68179e92856b950225af8f161a6cc457b0908aca8d5474261f791983830794aec7d9ed68bca7ba55ff66a199db492c57fbbd9f64cbb715456497f851ddd29013aff2c933482f459e2ebd45deb9a90d2b86807e9c9889c1d573565f0ead17b67468919c4192454507553306b0cd53244f112d4c4480da590ae80e87625eb166b0548383e6b591c041fd5f722110bb07c6ea80922ea392e400b37a53ce646596149905690d860bb771df76e3d53c06a8ba51accf468807503f34d012588a27c2b1c651e39427e18962677228450e6cacc5d61bd9d0085761f9eac08a95e33bce4c384848bcc7ddbaa4fec82568232b90cf0862bfb
ac55818e24c14e22ceb2c7b2d535e13cb302b12c92297749878ee625ac824a1e92db37c005ea75e1bab812b0bc95c6e75808ca24cab992e72d433009ed1d05942075e45b6838f29c78a76665f37b0c4f2f19e97dec1420b3c23f0c12ce42a8fa66ea73415c1f1cf60e345424cdd98e8526d9a0406ae3e291942845436322509c1df29e62020598e8f30dd1642f28297c5b98a8fa1a429786992b8b93adf13369b54e9b15aa86c2738f0b9020ae6632be9ef953f248fa5a0bd02293cfbd2fc7e2baffa8ddaf8e1e4053c7568f9d4aafbfe92b08a02ab2cd0938768da11e4acbf2091592dfbbf51b291c1c45fe0c3904d311d11e4f95efcf1ee1ecf5333dcce3ce99b75de9e8f071e88e74c4089630d97be03a4787dec3da684fe113ae21185f4566e688bcb0e465bddf1a82744e0bcadc225c6ad0055cedbc0afcc6155d042464eb65ed08fc2d46b8da2ed83b89a904fa3c983a8334df850b0cf8320af959965bac06d9d7c86e409aa331debaa705fa90399942e8980a5fe18e1ec7d34280e7592555d62920e60e19140235934f7c9e7f0414b2ddda8ef3a3cb279e3942bf0400804375404124744150328fc4f1e82a745b99b43bf7a07ada6b921bd3e1d6a5e0baa2168cef7b013e2c584ca70b8e5a7c05064590c12f507c8e24e42253504b1384c17fa871656218511a75d80e8dbad1c7ac5b0f2fd56f514f85d918897273af029976c5d678f0f7299d993b0e00a7c8d871b51833c893f17cf2bdac1cb8974f95c48b75ff083602f62738021d03c9e5a1962d871bcdc2804d09c2d5a2c0c40e288245122fdb2b5ae67e35832dd91329b5d4e5efaa47bca9b115b2ab16f64275993bf6b5f95d782519a2857347ab3c26f48e8bdd21fe9271ae6fe1b641cc6c90806b5ccd15dd3c30ce143e1d536bc92c15bf7a10d3948a5868ab9914f98af638c42a26914b41f84c687bbb819edd14b44cf420e5951f1aa77b62316a4a609fd42dbe55e6c4a8a1dc60b0c986f09aa552581613e6be6440b455a88f6d115502edfe7239a60e711def024403e6795471c6efe36a06ca2ead6d822bb5423b48273a580f85f62f3582281519d9cbd7df22393d4a09e9092610770b9a45ea188e0e54234589a5d44e78f803013503d054cd8e69c1c57987e97194856ca787f9bd0761387794e4618737910ccc9da06b5ab235a7165c188f62957e0e14bed3107577661ae063525763a7fe7686bf75739a2c2366318340e67986a057b5e3cb44e2c284da229fdf6c38f1db53795e1774908499d5c285d6d366a52b8b8198202a5c2b38f977064e48cc4c40aa2779c04c7242fdc0e0bd8d061527c7ae7d1970d80a0e77d0791fd0275f263dee441ba0b2814e9af3495c0c6ee86800df2625795357cfa65671955ae1e1bbcbe2a6415a3586cc93714a6e0b246edc7b4ade37442bb0f0565ea9e1940cdce0c18ff78330493d2738491c693b618fabfd26796a3f482d91ebfe27603db9de01dd36db1868ffb02a94f69a3c7e0ce468338f5105d91f20a7e0898432282dfa2ee38510af94cb38b007fe81cdcc50823f2889dcb054fd84600a2df408fbdcf255389546677632888c6e9b83b87ed055065c6ea2a5d1cfb115fe129d93468e954eb35c816c08b16bba21bc68bfb36e569c993145fff62feb5aeef809654b99cfe2b07f92d56bb1ab3d480f5dfd7169f3e6097fa7765e242b9a9eda40c191e275bfcfe2249261c23a348116eeaf4aa9f0234756eb0106df31703d2d6f9d6179ca07d8582c17ed5ee25c7f2b4810e8cdf535d6b98aab79906cf960834d2dd162c1e81df796a5e292f64c878cbe754d3d0aa333976151e7e15735b59c256f9ac546becdf8bd95b1b4053f5f3b3a1b9857ae9d100a9d43f9d1ac3e6ede2b26402b997a41ff28a52efd3b4b5ae76f29b6b33314873b8c507524e2ee84a99f952121e20f432e749686717e797afab11a38619bf6950c5759f3e895da3a6b3e99c8fb638b7e64c12f804e2308889e6842a72fddb011766c6e32f1d4cc7ca121d0cba2326dde3778447a895e0a95e932af67bd9c867b31b8e44d0727c287253b1d24b8ca186e7b427f1b8d330d16c6993fea1995c676f1b9f53463c7172fc9b53e0c2aec9b8ae2aea5a9b4142ed3407a82e746deec733e96b6f8396325bb0f462600a7b4efc19746a8f112d04b3ef2dd83b539c3377e4c2fa311c076a2c3d3435d1062d09d3e36f55dc43ff1a71a57dbf73b8fe9395e7f13edc46370d9093b9edc7f10fc63309f23b466c05277bf5ae50c5b4957e90c2cc5159942420b206d2fd330fcc610191e038ea941c31bb0b6c2a3b7bc5ec1717f9334a18d27ab264cfc26c238912c67c1538dde2a93a64c1882a586320f1fca8795c433b0b2fe39830aeac6029edbb0b1fa2598c8041065466241c84c76f8454279004e7981750bd2b96144e3237ea61f46e441cce51beb88a88a44fe94795fa9c932d9ec148b1062e06c1e6485d5fd37f8f72bc83815cbe57
6cc87867a21fff951e004786108866eb7471fe28c6c110b61ac219ee03e6ee406c7d605b1627f743646097c27d2f97ae82cfdb994c5427efe1903e53d74ac31e39bd822eec6681583c1155503b2705427d6a1084f9f3ff445045bcf2468700538ba7685e18c24bae286e529301dbe69f14975a6f10b818e1e0f5c55aa1507522c0c675f98d3809004a9d8f0863f2184aa1acaeacae025da4011962a75db59a2d329f39352dbfa52fdb895242cb43f5df3c83709f8b193516527010c2cb222955f1e9a878911ccdb9d13f21a168ea45f8f851ad6023815c76ac2922be1f1c905a7d16a50432991e0733de549a09c004044aac35763b19f28c6e3cd8ca7087da5f74403655778f0f6c6fbad0b2529122e58d2a03c735b8cea371aadf128c60e0f7646dd26edaa5c24a6545b0cab02b7abc2b1107676b853562550455fe224b5fe298ad30a50d8d8c17f3f07e7204b422738715f86671f1f1c7f202ade093d42f491119a19d43f8d186f202552372dc6c08a3478815f49d85721defcddcf5dfcda8edbc50d6e339cf757a8977fd773176fc02f3b8f839ddd15a5206971d1bed9676dd45364994366d3bb25411a1458a50151b9b762da0c377a2860fe24be87e58c6f082b1f5716997e8e50c94490f6da952a3dbc44953c2cf53bbfc9ef8ffdd49bc30c175913216aa8481757bb9ca07b418e7103987697790c5c99b473c5ad6b4101215333872716050b13244d11315b0e0f3d99810ef00fd902faacb4e2c8951d549e8a9fda8fc1cd0f6026db0fd42f7f31aa53bf322a51bae70824ece27ef4cdc7a2082fef340734c7dab2b32419b9fbcf8c475acea1492045ce7d961a01e3a3506a0879fceff495adee22b3730d934f290cbeeb10e12c53205cfb7851c439485d4d7f0bb88bfe0f1395a7ca079806d7e7bd81462296c01bf0ffdf33a0e0555dc7b74afe6f788d18daf7838a9e9702c0521ee2baefc73b0d556e8b1e4a7a29a503abf7bae6f2f34bbe4d9d5996ced90097d3b26d90b2a4dd45c5b14f5bf6ca4129e2a259a5223f3891c9c4e8811989f84dd32c401b8cf87bba99235259d6d9a97069cbea99b2e5294112816cd18c6dd9df2745371a6e566422a303a7607c6a1d86b792ccc266b96cf4bc367b32fbedca964d5e2efb39d01c28f103580c5068e112bd5659c16ed884a6b7d7c54ece02320c55169c452af28ea1b76245a18766ac85d4f2f60e79d948d753bc661f5ac0ec688c1366c3b062aa4699b1bd527886cf70049c5484e7a8baaaf7a1010d06652aa7ab68dc72ff7ef64957af2e01e0da48dc8e94940593cf9a71f2b8011b44578b56d3fee99531803f34a2a0dbf535a1b4630294ef72a56629759f08ca1d5ea139254053dae5fa1a30bfaa268a8143b9d304a813a8985c7467ad9c8807cd00f96aa8101e130800c2e677c72af3cfcd4b1008757eb437fa09a49ecee41018e1f498e38a37283cb8cce4ca262999a76a5a195f22b93e1e68464f294285de77350a0355699b1f48269abf9df14e740ec4bcba2d49f568f4d4309b93d28e2489906cabb3ca8e77b437c983bef87f87c86b78dcffc1958a390e9fee473788ba93c10435001f3da5f57207f929c1be644c3b5025bd823b81c985457a0e81d849db89089f6cc06a2daa10d8e7279b286c3d4f97c45f409b534b271fc3298c5bdc3b9cfbabd987d6f011e010a7a39b8bdad3cd6e1ebd01b4f4638f9ff75233d2e56755d2193effb81fb7451ac49841d3ea3e8e1d1a82482b846e74078ed60670e286101a6538e48eb34d9bc1af99c7d3c01e2af2932e94aba33eb51de1c0c41d3b123729848ef534b15151daf575d570df883657d4c4a40db1090229d48685f97534a5b021ff795526c1732dd93a75b942bbe739f159729c27e1466a4626e6bce9f1edd6d8f60469009b37e543b4896dc8ce674e76767be8700de6f81f771e91a1e5ac60f6a54f78a55167d8fdfa178f5b510d3e411f55ba3685835b210edde40dc7a10eb6cf4d724065762c8f326212da7a339e0dc1c9ca43bc14006e4e9b5d979e23cf1ca9d78d31b49dca11eaf9ee7ec44bd465a2edb3360f0b1a2c995fc0444fe8ff196a6288fd2a8aa7807bf78a3953701ab3603b75073cac1224fd8d24a132c96504da294443ecafa8c8a72e3c37eaa5573aa83e971a1f481c7583473af3d381470edcb3f3ffe5aa4e27ffb131825031c61bb1cc5c9a696aed0bedc461625b6c41d786d02eb589fb66579923b0dc48af7b8fc8f7bacd6666959eccd36aa11dfdcb51add02fb0f47ebfd82360d02075f217dac7c0b02667b187cadfab69c2666f316a3c54049a49498480ea76afe0ca6719520b2e84fbdfbcdff6558b59040474220418879793694030a193037002d835d26d9513e012ceab3203d48b1349a8f9ab2071f9434f2c7bc242499566d6141fe9da7964f8172f921d3f1f05501c578572d6c394e2f5838e3b88964b2940fd3b49d13983f97b153205021bd0f6b3c3244a51e7e9
e2aae98f2284004c5100cd3fc66948a96e8a2074e757b33004b603a02938f11417cd0a4225066b86f53f233230c4618450781044ee8baf2ac86814a0b5322ac4dff5cffe06529391a05818e1f1c9192f809821460dbd620a62214cc357ecba108d938235e2020a7bc40e6394d99490f271ab7496ae049eaf86cf0fbf1a5f7c6ac8c83ac38496c563ffcd5aa7afc9b7a935fb4290f315fdb2080d99fd157566ffc56ad85243598cc93e5bc0cd441ee56421c91b1c13ca2d39cd4ee2e9892e0f2204e98ae380b1c94f1c560b2b18e736ec29989db86f46a45c021e046341e9d5fbfeb6a86624ffce396fed159c79c10a4c38f881d9930b99919520158260cecc77b9369e38a7100162042dce3ace8484d2f493ed508c4c3a93e6da02e03dc6f0f35bcfe0ca3a4b5c76c5ee93f1bc4134c430037f2d3c267dd9261f96dd6db1ef67d1a7b106aa5268069dc34789b78eed724c22224e8b9ea32423ecd0ad581f18602dab299d85c9493eea60d3820dafe0ef25b4d7aee54891280ab8c29a58b32b7d1ac5808ad6cdec57400ef68cf8b83093710a0e6bc686edc4cc7e61edcfe2584f0f8b4c716f66bf49db0ea718216072f10edf7ab66808cb8e99ce5a02de1ef19f90973f08fb447f6e31b8489a82ed89ed9f9c4be803e9d1e93f4e2794e07202213f237acef1323a274e7871f20c6f97dce1c84bb057002b9e182ce1e963f7e22ea88c0dc4bb05d10edadfa024ce15c3cf4b42020a0224b71b5f1c2319fa1e15ce256edc976c05c306581287e89dcd2e81b667004cbb547f9784390386049ef40ccb0fa3e063cc708ca4ad45ea17c3706841904436809c9dda3f174a6ef07d7ba39bfa76ccae9830109b89e852e70e34513720c3164717862f737924356949054ea617b6429f8db54014c0e1b63cfb7693b71f4f6421c84591abeb16ec35d192c2e4c245400e16f9b5687c7e6045081489da1d7f3207a55b7609afcbb024ffb943e02e389c3011a338e377145cdf6e01aee8c1217597f457f5398d0da196728364b256c421bfcd6f406955a12bc06d066aefc095df3fe8c6a659903b9628cffea40d5175327d0f06c7d8fa635e3930183c0bbd3a40ba172104f223d66bcd018656d31eb5051d59a7870fea514a97074f369874b6a28b7a457c0f9bb0aa9f6021ba621ae5e34726d9b77cfc98824e088cefffe53b33537791a69b2a9c1fdde8efea6dbfddb68eaf9b712eb9a31b73dbfedfced21eed293d84e675d814d7dca1bc40fb10e195c3abe25f82f6c335aed1068f3beb58c6943f2d432cf22b4507677e090f6e6be4057bef09ce76bcc2f0a95ad3f7e3ce21d845da5ef986fb4ca4be925100fedaab4aed7ff841ad7e1d828a9f782faeb4223652a05fed61506b95dd347cf1724058cc16024560ebff17dce3c8f9e18ae44b1c181cf2c6d172bc47778c1fdf1272a8281d7e99a8ad69998c2b6715326b93bc9b7bf1e43969463ded7b43aace019228678443f3535a80428c5f26f9606d5f9046565e12be8a602751f042fd52d20281e64e50216c355ef7cdae0d9c14564216009f83e2fa022e8ca918ba30543faf78554410b7d4ef3fe221d7f3cbe2c5d33c241e735440b718d5f646e41a229c1bade073192da9063a77f5a94902b9a238e0f492e48493578e9d3e1a3b378e0970a5f28e48565783f1f13566c7cbecc1ac778f082619111d8158bc8ae0988e576420d520941377b1d3cb21c6f382987d163a8eb859412f3536c806392cf5aab478f74372877966933525de7ba9199f4903d0f0a7090b49bddcc7344d8bbcbce921a49b54211b18efb9e5366ffac4d9fee1da11c526ad52d95afd39f296f29116c465ba8ed465e405336f555b31ac33413ee008a73c9aed84a08ccdb57e35354163aaa2280e28a0bcfe9afbbfd93ad0383443387451592be0a264e505bc17fec3d7309ce2e263cc4fd55534469e1d64c76d0718fe365e17c6624a4f77daf0944d8295673812bc08f0915817c250fc8de87ca4acbdbc10223b815aefa8a02c9262b97baee0558f386b2d0c3b20c806f9a1d8d930cf6c5953a46fab283983e8caae059f0a27080fbcb3f128370422d0139e84c0d1bd24337924b02c17cb1f97c617f3d1deaa202369f37d547fe0af8fa3e38cf4775f1dd193051f2c84b9a532617a5225562dc7b85121f8163637be6f23162e6ac4f11f1607fc67b3c39b6769f969e08675ce85f04a2c9b4f0f0f6fe5711158e8c2b660a6eb67a2b7664f2a185876a9aef0294205ea146ecc09d130ce6dd97350876e5082b9e35f218018cb90ebe96e6f6018899d1eef4907ddbd252f4e3acc3113acf6217964bde81d6b2d2ff36d796c63d30082385b0f8a8e818710c6855b7f208771f11166fd2cfe237fb21f3f2e5f46c5ef30cd700238793c0dc9702de45d8e25de94361879115dd49162c49cad22d75f8721df500c44b5e8cea9750f7feda1079b4d701112d4323d3661467b9444e21a38d35a0d9fe6e41522dd333
8c9d1caaf106998dab225bced60e071b254aa5216352b6f979f132d34be7f05bff9f541cc8448907b680cf1b80e0c06aa5c37844ee5019b8f44ada564a852ea8a4defb26438e99f522c044fa07428e3f5a5175a437e467e4e2a9818f2839393235be5e51e122dae40983da4564be7e93287abd055fd7da7df520740ed3cd3b6659a85a89d8e37898f99a54605cf877c4f4acddaf79e278407ec5ddf3b00cb23a25595f0ce818b1a0323ffde0529badeecbe96ce8ba22c5c93cbbad58cef7afa44833966c39a3abdad41bc875a9fc49d655aa861b1a448f272cb6247a688a9adaeebf6260d5e5eff598ed3513757f3d76526759d0db7329242f3651e58a0cfabeea6119c62301a73c89ee74bcc513871c5bd41ef299e73c068370d39cbed709e2649a4f18b2adb37135fe061c8e62507653c1289db93a19928525dcbdc33ac1fba600ab47a2e64e80aa9e4b14227adab9e18a6d0461b5c61df27b71c150aa537e6f376763b308c1848e68b2f20e1503ed73130968950326957a87fdeb973850d6740d09758fd1bc101711818f03ae5c0b2d2ef41045387882890855c961166a4ccaf0da1479fbe1b717e07f957c68cc280286c8de4b15afabaa7b1513cd2af004d00c5a63e9803f937279f58cf43888b568c5d8142f87fd62dba63de7626ecdebf7150f1ef383291dd5530ef36cbb1d1cbec3d81a2e343e85967fe37230a14f591499628d8f8986b9dfd6c191f869e99db9b8f427f46a17db38e49f73315e928c1a728dfae0c8e15771299fde7337c94f085779074289f6a86ff0573a9296c995ea393f627bf3ccafda1b0efd620fb220984098bd6534d9203d8b7746641fb9900f32e2c687c08022006658e3f88f51ce02f45f30f00b8af1248401242d1c25413bdd8ca1fa87ae195b2af4f7416345f02146b9546bc1834ce41fb26f308a9104fbd91f90bade5a784092edc8dfd0a7261ac21105297bae1e6df4e9616dc3cf199587cee24f84780ced60b6392f729ab27b78016c9841003c0320e313602c8b9e4dd88b47cee08a0ccb891b2cf1b3a7199a34473e35e26f0b6971cce79b327a63860a95c104f1bc499af531f7634f288d5305f25474f835bd35f25b7269b98de6ac842b2d77df0fc4e91073eb9c9a2bef9d0ba0fd097a0e214bcbb1f5e0eb87821441f0cd825ad6d010149510fea5e1a720676eee377582d36a74b7c89ddb0d1596dc55950d92bd62b9000f4fc8235cd102bf92c0e2ad23e11201f729f22fc357be040c0028610280f7b1339081d98db77dd9db8186d5d0dff70fd1a04dd2cffd131ec6454c9638f7047b1b85040d22b973c889c67c83f7b2b1d5c42c4f4d953051baddeb8dc3d545556b576b56dc0fe4602a6b004ed636cbcb056943f787c96d4f62ea3daee3a98a605848b05ead10dce4b92d8c1c841444e12e8d9a27d00a73c31b7eda101220d7ff4dd255332c17f034000f3b156dd958f75f41e1ab0648d706dbaee84e8f554e310f2a1e5dc31bd4bbf74e9ed825190d5977a8488f3cfb537159de1c46d34b8078b8563a465987d1a489b7518b280913770229b106248b346407ea470022ecf8976bcba98897f14678007a9ad4efbb5cdc1680fe8699a0415e4d0ecb5a6b3515ac04ba9ec1b7c88009d66e8daf17e4f6281aa81b9700a15430214bf77cd711677bd8a00355ee58fc221379ebe6ed623a379871dc1663d3fa6f03aa49cb635ebb554702a3f5b4cb65a1374064de165a48ae73e1a244bb9a7ab3e5548aad5acc7dbbd85d2ca4cf133ebb52f912dc7e83564523e838738977b33737966738478bff6c97d666591168ee766929bf7f9f764c3747a5520b1e5527885f13b6d2c74b4f0867ef4684a25ee2bffa99f2112bb2294eb200cb929a2fcd0a67fb4e2888961ec36182e5a5c64b53e19fed99524858c490a22e85884f352805644352c923ce1895a3c2ab18ef01f4b84c5fc875685066db077ad3f5632c28247925d80d51bd303859896b9eed0dfbde9135da0d2a17da01a2ac8c1c43ce73cc2c7b739d48588094eb6e9add1030e2852f3f128cfd75e290165bc6553bb46716e355b6cd5d2b627cf1e2a4b6bd36a2dc263b6f4a2d2f6ec5127049208f445d4a9bddd3e4cc640a25f22849892c7a5537ea845492f994ba797594e55d4a5e4e8bd48a7c775db10c0637b606b200bf8cf79adf02cb84d91c1cc36a8d501be50c21bbea538a999e48482e606f1438a15b7a0d31b7b7c374be7f4eac1f383fad5e694640ba0e7e6400c55f955ebf11a33a2c1a09d2611589d135b1babc8fd92027820642b4b5437a8ac6f34a96171db6da0f789729fe46092d4036beba011745c380fa598f2879be223fb9374cb7d170598c29c354b325ae865b1e9956c0b139d8dbc201c3f9d07b50c68f108a0fe2235bb69b112145abc520d63884daf9a32a0884d8f6f519540cbbdea3b590c68e66ee75435dd5aa3cc178d8b2a32e2e0e0cee3c1ab67c2db6197c214b63e057816fd0798
665de6ae7921613d06eba645bee8f278d6eb41a2e86abb994c935daa23cee92245700762890bc2a51b4b4c1236def33da258e82fc424c49614bafdc1a07ca92b37fe0f198bea2a60f49fda58e29e8d627388d75d009717c5fa497b84f3361c280350273b11d9212a134f37caa2a7e3fb17cb48ded46e71eecbb8da06a68f890701d9981dbcec8760396c951984bbe097b598bdc414223600e59345f70e802e6f20ac80b008dc052be5db9a2699abe0f9240e8b821e18f8ea8c9c8bc013dc05fa9a9c2b548b98d2f946108d2ec8f8aa0f64f6e2d5a85affe94846a8f118c1d6ed78d582022d6ec00b0e4fb5084bb45481d1e1f2b7a14740c823921c1b0d78caa70dbd65c379e5420a3047dd3768e192e222cbbd1093789ab8a4b834d6edbb2c8a27b958261b210c8a1530f12eb0d2670b9008d8792105c4489bc7b6ab1b9c10ab29d392acc55dd533ca02c0a71ddbbf7652926bb7ee23b6e85f76d311c48e15a64a46fea0804884cb04a7a431650f6ac4e150c3852e8677f3a7ac2f62d6b4c24d1d4e9be2d2bb07662fd440d30db4adb5880ab2ba7c27bc07700d9cb279720d0505c6634e6e7f82b5fdf6240507ef72d098936e30a731e011eb8b88865f1d9c3d3ec87e6bb05909d255acf88050b537f6b20ffc42506f3bfc08d01572077d18b7a09e766ff80ff39392567dfeade0b682df0a462f4aa6a6d7ae375e50c0913c7d65906715dd146118bdcae2ca25cb2c6dbeededb268bdfdf20eceb396d48dc2479b56a88591bfbf0cfe79c50f3b91572f83cbf62ef85ee04aca48b3b23a7dedd8ed0c80b59d1b4450db29ac14bd192959a9fe4ba24385040b8c0a894c55e1c77bdba6970a23c2652a7499aff52d812ed886e882aafdb37ea7c94c9f1f6e4043f4aad315146cd19ecff1dffc1a833dd40b96a8de03f3e6119a5951b34d0c54188649f60d1499b4b815496f8f60f882c49594a247d02a0f877405d3e17ae1d2fab900af136ebe4354f789f0cfc0a8a80c2e916045e00cc7097b02ba1ac6a7643c17a09d0bb8558482040063bd14a75ed92604963abf13489d649aaace56bac656fb85d5c2f0114e5f75eff9ccc3f3d10089632eb1e8c9695572f2a778b87d0b5c146ea2b2314a081b85955cb38ecdb583057d0b0ca03fbb4f90fbf80c80a4c62d5e0e32cf9401cc91166a43517de18d8910b26961c6abbe1cc6886e07ed034a3e3b27dcc6bb6662ef135e81b6f468e6f59261e59adf2e7b3dd9e52f7678099f2a02edf35949a88e334bf2347966e22d70bf574e10eb0c6d433249050ed9edcf55b3292bd012384ac0fa20f825401d0cf937e2932fa8f33069bc4228d456769982f974ecf47487ad564878d6eb2ea2549749ecbacb81eb9c6714182c9202ff8b0a3460171f82313040c354be2ecdbb98498e80b53ef04f976e637b9d96e2a017fb3b203a7a640773ccec6c2b7cd232e562a3a0949a61622964915b0a8860e1271d9a637980616d3949d8c8023062801a4b38273e9fd385761d56a08541e8294f45d180eaabdbbd040ff3403c85b08d0584a33c484b2aa2d778d0c18041af0e014cb302208ca721513d55010c2ec74525fe5364e4fdbfb66d0877de21d2cbbdfd9cebdfb3623756224110397c8101d09f94b025054da333aa67ec735a4939e65dc09f0118b0401d64301df5d437cda2b2dbadb1b97fbcbfe68411931b3dc38eed03d087f8c01aca14fe5ce5010700d08428c36cae22db81c0544c1c5b2f4f98cd60e89b16ab37c0020ea788deb040e633ac977f5051bc55684b13c774cdb5147bb66be3303ffc27c88056b44d503a8b49430f224360cfc3f4c4a860965a7505a446634a42c36d1e4d210fe28bf5fd004da664fcde5b681fecfad032bf4efb07c14cf12602fd0e004c2267b751627439c93331647613009927fc608817db4bd2c286a96c226001468b2871c8ffef5c650f25a0484a16b29ebc2a7355b5d65f3bb144f2742acf29d5f79e42d2d708918b32043bd105c2abfc8afb30e056d52b62d797bfed574e62d7219a7b51813d6e6972a615d5e7e08f3bf71f3ea7d9147957006fef083ffe227197f13fe5b9e2cafe2ea442708836c76646c7ef30c04901813d8832916c83c2b96a78e8f5012352146826cb534e47ef756af2865b3855d622656161d4a3fdee0f43a25a1523b28430bb41ef5f3e5c63dc8d644baa888d65ee0bb5544373007f230db4474b3d6c3849ca68c0360b6aa255d0a1227df669d56f051bf07d78aa2541e2ed06fca322913be11b2dfe333b1d99df99c12d6484ccf46ba7528090a5895779400895a89433ff9cd835b95e1b2fa5f95f65b05077aaf99b4b9fdac64564d2650d368c915e086213bdd35c89bbec7e451f2ef0227fef6a0e26949d4ee97475579762b634b19031efa03581de0a4b6a27d8ffc7226c1649c7709c09f08d6424c707d01b3244b0173b18c361cb2128f30b2021facc4bd98d8eafd4789115276e5b290306a671203827d2e11ca
a27183021011dcab7182f37de10bfe8d1df6a473180d9e21c1088e5f17d5f09ecc334c7c380a5e2599b5abc58e687331682741cd86abcfcbefe643918bd3a0d0c1fe86d8e41d1836600e08abd0c6d7763eb0db3801eb53b76392ca704d5f702e99f76e57b510fdc6d78f7f48e5ac47a82e05c77921c44eb174a51918898181fe0e08e1a506596cb79d90d0bb16638aebbc2b9a8329a577758ff1e6debe7fe64717aec5b910a0a3816340d63d0b47ef018ec2a2a4d256c381b9af166393fe83b6c56ab2bde986e7407c5d144df3029c99a691d7075dd6d4204b3c79c0676a99d0576a80dffd4eed32270f9fb74f8c584d8bae557a3d9f3ad6a777d605c47b60f65d0928f5d16095d60e6e9c128475c1aeed34ba024f9f4fa6c096fb0cc473fb69f5806b8c9e34e407a7be78692edb97e645ab42bb7cda57c409fe94a07335a0df863e30343a01feb24ec0d249cbeea5de2ed241ce127db5bb4463fc607f800a01c54743a0b4dfc6bcf24db9306ad3744e35eb475df385bf39b9b3bcae6f06e9f45598a7a10a4c1de5ee52ef020028124d99ea1f3bcffdfe8efb278fd8791ec69d2b49001c3dc920d29d24e95a71b07b14de568dbfa40a3b29077c599664dfa8736db43b100d779488248976e903860bf89cd70d719a21f65bf7d30bbcfddc9a4b0c10942050571c03dae20cbdd0951008ed07fd2db5161262c90593cf992090792eca1d69100e69995dba8229394aa0c083c9451c98ce81b344b206e415b1f378dbf24e0999c9056ab10129ae2b1e6a61c3a34f80c024b5f0f35bee0704edb53da6c02d42798d7ae69d89116a6ee970af0d9ba449c9b5d486625ea274ca027f30c18f13a32579aca3c049a05ca567cb7d353f1ff1fb6d9935743884d64135498ffd48ca8691bc7375ee5544f44cdf551cd28c17f974d74941aa19ac55d2dcdeea41c5028ce36a8a45f563bf369b6728a637adc3f1b5ecc486a4e888dc882e1042ee4482f570435357074515c5fe27118dd744d5d6304ad154890477222a24faca2e15e617c01245d83562f37bb73b465eaaa704f5f022135ab375bcf9e3bc10dce406bb01643d31428d0373f1dd8a2a95e73cd46bd10bb661c087f4064d32f9aba1236668ab4719e97de6d134889341612b67e994840a271bca96c13fde8a61ff427988fa9d114efa08f8f4d65a08c85346dc3733a06be556434c782e590b278147d853558cefb8106382f730ee26d872184155f8e71ca3f052f543f4709f777df7af33befbff3cdbbf9de4ad213f4d8381b58f7964fe4d3233fce45fdc221c47aef663d1475f8d5e2105e3b57fe5e1a9ea2814d484b0fd427241e14172599e0f4eebbdf7be3bdb7beffc67bdebdf5cdefdef3ce9bef7eeb9df7def8fe1bef7837de1e7f6d9c47c6d709fc3a6bb97586911fe1ae21b988a19ca6c9778e9bfda10504c558d4e9f4d98ee04f827af56a53056732c59326d6b49c4ac39f67085c784ee736a267eacb0cb3d279034352acd9194e6e82b3051f2001e4c66a9f1cba6774cb11e77c461f5374c2a98fdc6f23b36559db5cdfe30d0695156ada1fec4adfe9082a98e1d6a0a31b51c3908aa686b7ceaa3e140530e55cf3420a100a838516054b55ccaabb1a72b9eab5704b2ad3073c203318b157166b14bd182a2d84286cdc46ff2bc8b9d06bb75c4518fbe2b3a7117256e5172513fc61265f16ce39a7a0893cc3a88a847bf0dc81d6bcd3cfa26a24eb45e2d33de13bd2a3e7e82381333018f8d00bea99fac0c6ef8296db2185faf09ea405879f54436ec2563abc629979829d77c14624847f8b8f5b86c28373d0c2d06c99030dcbaa8905a577cb935b1ad6f50bd379b33b42e3e9606e705486b3dc156632c2843b0fb2b18518ee03235224e3a4f73ca733a7cadddd416bf31bb093cd0ff2f4c8e3e07348c3ba7b603b6e74c7c0c604400eeb68b9767b6b1c0b41e3e481e35b944b91398192a1007cf17fba728551cc0c743364dfdd69fa0707e2837805d931216eee47d77b50dead461279996857aecc2f2c63911763b522725cb1f70b2d38d72263988de33268a0b075b131a0581e05c444e893f1b6933024d7d2efb905407f90dbfcf34cbe37e7855c4bd1703ad592c9527db26800377efdd7e8e286405af6a02d1c260c8d50dad12bf1ec2c617a0ad7a7e94a08ee0faa441a01212364c47187a8b2595632b28d8541b1893ec8417ba9b5248fb00fab71753561b5bcbd372c5a26b11075a4f6898ddd83ae68760ba3cbedb022e5e2bee3267cb440262fc0be50bf8d674e1be3859182d4e62c84e96edc9a028eb7cfa88158086096979e910a7c313c34cc1d3980f6fb1438a416a4a927c389e63ba4b159ee3efca1f799c5f9bbb43b8c41df4e1268edc49ccba173a0816ddcdcdf16edf9a85e5edfd84a5aeac2b74afd08c1c5c2399c3a96511a12309c9e01834e7e758c7ec8bc9c47e5e6d98167b92498e5b064f5bec9d441e1ac925199ff30f4eb02e66f4aad2b
3362b7f7be19509442d057922b30130b3522752004cac7cfabcc6cf225ea094cd9e7b0cdc1b24c37796383decf148c3ba2487945716850453f962edda41d59c8e19f7466bfd32a6ad14a88928f41a0c7528be32d22a89a29ab0a685c830ca7ac000b0d8b44e9953db2dd51a609fad05b5b50ff7caec2d82b32437f5f7fcc7107c1b44b80d197519f320b1303212e3222c8f9487e825d7549e004d0fa337e81ed9062c8ce941f2461b1e279d51b0db58aa5f075d1e6c9f39efd2a0aa715f268c0a26b343962ecd0f56005f58a506fcdbdcbd9ef7474aefe5e27cd4117a62e3b8a802f622b61945c140101a84d89fafa566fa694cb50a79421707508390b6321b8c7bb9553aa6a4ac5dbcc578ade566b90fde50af3a0fe7d8259c3b43dbae640b2051efccbaefac8225a11e0a56ac05c83dc9e8fc6ec0c98217141ddff90b809bb2c741228b0161398855bf3f69600db9df1c6917125099e6d8c2e51dc29916ded49282da8cae64965bac2f602b9a6934b571d828f8d93b79547e72a3ba557a0f3ccf2cf4ee18179592f374a6eb75b62c653b6fb037e44bf11c47667a0549325ba38e065ce34b2118e71a65b188d42af95d69e53b2965b4c2d3735e21c037a8604bac805d9983c92cdde8545e2c9e4b0557454238bcd7c0de141703559cfafe3b9bede0a6fadd5cd6ccd958e229c6b00e5c74dcaa5364a279d60642a3a2386dd2103825ec5f841db1c390a1b5c9acf54d46c0f3c317455a8f314c4e6893a88867dc327b6c75bf2b9227894b31fae8080eb5a0f0246dea59e111a29dee4ae5cc597a0ae0c7455fd6cbcdbd7d4aca3df443dba0465121ec0c0ffaf80a11e586305a95b109b736e91e264058e2467ce9e1fed9882e83221df8933806f894cb4dcd2e48425d2019ce248b52d1199f2a29289a559d461979274c92bc8b0f58716e3b21dd7d16a75000b94e788b3925fa9015fd2cb5db790545a7eceaddcc6677ab3f8745f8ac265e0b35147ad2b25b0d585b01a77dfa3ed649d4cf82a1557abde7c9670e5407c79543e0095c2b9706a8f4cb3887536674954afad762d5d3721c492eaac756b5aa8e03abb20eb635f0985e83a33f97d3068a5f379cb4fd56b8ff3a4c93b558797b02264f889343c84403c5b82ef859fba6df576b2ca0c2912d31fe927bc6517440f1a4ba92792d12d1e6a8d6dad697b675a737f30decac52af5341334f7ba6419aa676095a1075c30bdcea9682ac152234deb77a2bc338bb6f6632e691429bb3b5ac740dbc43edbc974a46947b2d13d0810a3e59a6473a563597097760744099f568f8b30c5b105bce594468cf82b484ad73b38bd9d9ddbb22aa91dc37b0d5b760b37a670c7f9ce565f9bf73eaceb9220d8af565d8b0c30d3d9c1727d32550805126ba3b16017d4d6ef0a8b9102b31a90bbe4168aac82196514b08dff6c878ded886af4a188e6d6fcb5eada08a232004171bc9d3eb238dc5225875c0d2cae1a1ba0ef7dec14a2e0373918670f0f58f8c337b34e8398d13eb9d539c0c7b740ad9f4e7dd098ad3dcb8db6d3290262a87bc8697afc45f5fd64e83447ec1951e68d433f54f08f44adbd52aa303cbe3c33120aec1f263df33b72c95bf323290caf058b28c5992f8c56f8b7262ffccf83435b2c5e930f9be80f0a5c161848282f2f70b50e3767b5b8dcaed49a193ea6368efbc5bd8066f012298ae702b6d682ef466ff046a0c71b5de6ae5c3b08bf820743fa41f1d23ae0d85848b516ace413da9157c9ded888ebbd8e3f3fcfe574844ebfdfb545817aa4e8e7c257d9208226b528df568bf04c59f91a49a88804f8cc49f4c9cd57007a2adbdb8c9cbd11d2ac0b0766a92eaaeafd46587b5952fe234579c6e4148c6bfa24672a103af40d010ddcce6709fcd82ac730910d39c112f09681f35d628a72250a357af5329c8b344df4859649b8ec6aeaf07eb9b74a658eeb44978006609c089962adefd81c257121ba7830b1cb6c5e3dc5ecb29a4090a8348ee6509a4930b3cc01588444d0ba5e0f286b871c70b54718b17cd42f4f6acd740a3ec23773b424d08e39f4a490f43ff13444f4edc4d981a47c55d31aba1ce24bef477a3be71cceec4f2ab66ffe26bfca061367c6fe3be0421243a6fffddfc936c74bd2f4752d1547e0b38ffdca04042f06f560e1180f061ceb813bceb14ddd8a3dbb6727af0d3f90915a220b8328241632459097d68cbf42c56c89202f85e5bdc0f122b0e84efb036ec6dfddd880dd00bb4230ff46749d2f9141926a2e31106ce7c872315f22a0ff0743c93a486e4788e06038ba6ea4067b197c4b3970003ded19ba2b9fc2cc1c4c982e2e544b27415173a7a6814e1bc85d4a35e574250e6880369ac8c5ec7d4f50bc4b6ae3b3104d05cbf8d55393c3ba2ca64b0b42fb338a14d42e695166031ead534b89d16719b3cea2cfda6f83e155983fd7104781e114343940f9bc2b9f294d051a01416a212dea38f05241eff77ca9b6537a0b4442fdedc408f
a8a3001e9839a676cafe23397cfc7335ff283ad9fcf0bd64932fafede0ed370f9584191346316909301b503d41cb52df0b49b7cfc7352859395a99bae48275b4f8fb537c6c00fd351fc2ef2b40ac0573809c712ec31e032b95c01f1792f31f958bb0feafa8e3cac4dd5c9e716c3d30a71ff415768a53d099e87ad4bc8e8d0fcf50ec07051fb3b2d588299ded937254d714585431b50e751bf7f9905a73b92612ecb5c45cb6527c4bab07c71ad31ae045bafab5e8d352b30ae32b094cc6516550e70a2b0632533facf49a1a87b512b40b0f247313106b135a89ebda544508a94fc8bd12546bd8cc830837a5f631785323e61fa2e894bd79cf075685d02062a1c5117087f45b1f105cc67922dfbf3120a661584649533fdd2d534c4545817c3228644b28befdb7853ec1b5c99b321777579974cff8b9e54b44755990ed2957cbd78fad556cdf0ec2c0730d5086f938ef3a384dcd991639b7657a95094300eb5be83be504719f48ac476677b518c4af391009dd032e5ce0a1a1786881ef492a258650832020401ea218fa4f95a82cf83924cea01ff843bdef34f98bfef47cd646742da4a046daa4b00ff148958686348c1d89af62aa03bce70c347f6dfa9daa779a2872abbc492842e96982ec3e6a0daa4621d26cda91672324c44802f092eb324165280dae3a1b5a16ace28fc2bdbfe69b587d9c0d53f221645345c9ed95226894ecd8ec898b64222de2e6b6cac0285be08639ae8763dbde14a910911434af20e993c074a25da03d9105262784fd2ef2e755522eecb48c43b4aec5c4a0288c590b3c4a21be1737a5ad8dcb6ce0b026afd3cdcf92ee1b047ce5276135e30dd0469530814cf4c49897482e16e2b865428e97f612035899d9a3d806583f159407fc58cc769d82e5f557a95184e6eea40b1c4cc62a09e2c124df844794a29f046c8a05944f3ee9a2b932f014a0f46dc7322b130b618602e515c396dc07e2950f4de1864164904955b41f3c0c4344a22693f8de0ac6adc95e212b5933089c84fb844e68eb5d72072a8dea978d2bd6c722cade7161d15e3fe8695a93cb063afadfc3c62efa5d768853e8c9047cb455022069bbc48de499c8055e0f66a18040b7183ced8af3dd938f9badcea3073c0a4a692117b643eec8e055e81c55f12551030ce6a3a3fd47050d2c6d38107fecdbdc3a75df81f0a9af4d781854aba5547a35103c09926415b4ab22de5ae95742369774b5b13431e01b12d13c9fd2e8fac65a6763c7e6fc2f55ee24b47ed293ef7ed5bdb3b050515921051118f47baa3f3623eb1d6f142e78b8ff777a55a67cb78bfa55345e789d3dbb6dd27690dd3172da969c6b55cb77638aa665e53d638cb594c3cafe9e28d332eafc67d7833197816783640ac24be02820542cd676333c35a799e6bf081feb99702ab89e7354b66dc078de79ee7cdcc18f9723ef73c6ff529795e8df779de8a06cbe77d5ecd974194e7d17835fe793dc0d54743a383433dd6f7b1b6782ccffb70bc137cde8dcf78ac6ff59e67e5f3bccf06cad7c1e77d34accf0bc1e779349f9782a781e7ad3e1b08ccac6a2cf03250f26160b3f290f03e8fe6f3bc6f157a1f0be7c6c8c7c323b2f166445831f16abe8ff5b3c2f93e6fc70712af05d64f4ed20aea73967f05f0582b1dd6cce7ad7c26088f039e7fde6ab5dc738fe57d1e093c9baf04cffb66be6fe579415e8f67e3f1f8beefa331f28a7c1febf37c5660ce063624783d7e70f00de1cd7c1b782c6fe5f907f5511c8fe59fa5713af37458b9100878eec48b6fe5d178ac1aaf87b7e2f27d36b09527b4f2be8f453f1e373ade8ce7d5f8b07e42f0f13e1c1a9bcff33cafc6f360def7696045e3dd7cac159537e43b3d6fc5e3dde47c9e4d8df739f940f840f83e58f9ca3d1f1ecdf7793d58ad15ebdbe0fb3e8fc663d5bc1b11bc9c99cf9bf12c48e2799ee775f02dd5e4dc7c1ecd8647cde7a5e0d978deeaf3589e92f7c1a7e4f3e0cbf96abe196fe57ddee7c1bc0fbe9caf86e5ad66569e97c45b793ade8dc7c3f3bc251e081fceaac6b361799fe7c13c8f26830f88c7fabc1996a7e4cbf1561f08de6af57dac8fe529f93e8f830fc7bbf16c589fe77930ef830fa766e5b16c589ee725a15e90126c865626cc782befc66305f16e6e6039533a00c123c2f2be8fe6ab79af85814f8e92e77ddee7791e8fd7b8e92bc0e0c1a102099caae0c50d312a08614304522a07100065036ab84c48065cca0b055eaa8ee1515ec0201125043fa5e0c50a1c50d58813262f20c4a1543df051e225488f926a81134936d043842442703c241bf0ec40322b9304127a8783333a40e09931f1bdd430007e519f124ce8f27a238419e51102d5e08777b9a08acf667a70ca9ac17de554a05a22c8eaae638893c3a3876781f7c4fb19a4b87b54ee5d39a972ba1d16cf5b8103dcbdca5b429ed019bcd856f338239582d08c37c490e38ec08c2a33be480114303a267815bca1411850f41662199905
43456356f80003242240e145ed0b090bb8722b1004191f63a0704d1c1a505efcc0a0a70632181040ce624c0d53b88059bccfe9032190b03240100c1b8099c0970c70c132c366cc1827100191cd468f0f0c84ec58a541c40d18049117e469f581262a9023034b0c31822b3482510b2b21784e029e3236858627de888108004c801de09e3b0284e784e001eefdf0b883a0a5ca573d9cb83b51f71a31ee1ee61b459a233a3345ee6e7a2b042e74066ffebdabd993b70ee14b085fdc292cff6d63237488ea6d6d76a4e1eb3de1eed85b2098e1364beb29cfdbc3c7aa835516552c1daa584c342550afe60431b40c503533c3a386840cc4c0f2f2c3d28087124ed004a08ac57493018e074166a8ba5181b5c20a849c4dc3c326c946880667b52408ce8c04aa6ca8a8024225869b242b244cc0238daa993255333c38b040158f3d248baa9b20167c354b667a6680b001839c0d8690819543950f213e84ac785c3d36415c65649831c146a86668e5633564a344d5aae9a7870687e689aa550e0d07ac57cecfca63f160bd7092664ca8518186031a1c1a2b3c80f0007213e426831a155676c840c3c1ea8b0c3361aa46b0c22a6938c2072c180f1f342e96b7e25919b1a0786840f3d548a9e9a0c66906030b3c9a9cd50c8bc76ab5fa56352b9bd50d8e07ac550d949a1f3ea0705c353435deaa866584d5c3f26c36587510c34d08334e5459f0440c2e1a5607364732b0d290614848bdcc1d1ec000190a880123cc6993ea610b5251132649f07d83043d455a3e6c6aa6acf0e454468c32c80881071f2e433829228d0b8cf185160d48c0123d2009429c9b079d4f500238cca840195d4801c30b2c505961eac9e984a41040d020033057c0900004bce042f88107f88660411a685c29abcadc9102145099a24106ae0b7ce18503b468c0140ae061871b42165490f2e4b43486185e4c000b2e4a1ac8c0185f74a14516065002071a6628011c17a880165980598096aa1f97905c3de068838d34c0e8e20a2ba268a2071d7e0c59a03245ca12d212d88eabc71862808184113d68f991051496929096781b6c8c214604bcb8c28a2814d0840f3b68a982c2129325b024467e767eb8585ea60d36d08800185e747185150a68c2871e76d0410b07c22a549af48075d0c1f4c487056778b025071ca6a4f80c40071c763a286ecaaceef8ec98a1e35b818d13ac037c4d7c4b7840dfcfcac70373785626ac6806217c1e781dd470b0dac00bc2d2c0cbe0e6821f3f562d960f1e16b47a70c063831c0f705c37353634351f0dcdccccca731a2d382348a0aae6a6e68665810d929a9bd5073c6431380db1d1a1c1b11162f1f0e1b178d084acdcaac7cac5223343115534167c3537ac578f0f841050851385c7c87a5910c367a48319282c8f66e801554c988e5c337c2c1c16d34e0aaa3a2043096262f06660ae109cc4f0b13858f1989e10a219132a504543130294160c7c14d1b03a21861aaa36b891c203080f20589230306d340e0aaa86f6cd09331f5a3537ab1e3338d60d39c460c3b280e603cf47500c1e18c04d0916d06233c710167c39334e5c354a584f7eca17192e98d1c149c22181860aab841a253254e1e504190ec043062d1f2d3d44a60070519573d68c98a107900f2f8a07ab25351b208901a746090d8e8d05ab244b27ac3ec0494241154ecd520f291ba50e9092706e9478488d43555469c07a21c1a1815dd08306270769080b06a8c2b15102818643d5ccaa35e3d1a4408333b384a604a2a29cf3a6888d120d4ecd063d8e6e446039dd6800c4031238c8a0041b139e449110038e0d5813d42328c912540d0f560d0733b015921b1eac22d449cd4d0d0eab67e6094bc88937d3c14ac84c063e3736e24a09d29220363819dc2489c1c829c7888d0e4b0496083407a82ac102d6d18c093320dc88c0f2c0c7cfea0319aac01e6f555bf1b056332d0c61e1872cf04d0002c46cf125010750220030b8a0d3b8da8b3316a8ac3015058a1414130a4a30a1201d12462042595e7451e60d1178a009d1084338d8a0a6cb04c258c200292035817d80018f1c1a2ec0580105134b2841002f5d9080a253a40734a2089954993ae4a0adf00609b408b1cb071e6c8d68094e0d133d6c49728301324d30015382440320393836351e74d08029a8fcf8e484c2521212ccc8cfce0f97053d5849ec70430d3fd43882450210700018a44449822408101605c4c411858809929b1a1a0500c08a050c40881f706841004924c1c39313d192223c3c3a562bf8e660c14149f0b9c1f2c0b7811935bc0c7c17f82c7053011e657c14f8c6f8c07c0a5825e0c3b1ac78227821d87c30e3418f0d3e0d3e0c3c20ae0b667e7c2eafc5b2e0cb99c1f96e6c6cbe9a9a998fe5ad3c8fc6719050792bc817d766b3e7472cda7bfec54e337ab1dde6d3940f065d2dad67c5a776ad53753281070c16ee5e848a4f112a344a44abf84ca99db42cdc7d7b2b831018d13b5
713f4019b7238675c8403a3bbd2f171a2bdd564624244eb267ffc1bcfdecaa05531590b7776ec29cf0b8474f720ffa9320567e785a3a3e3027501e785d302a530faa22d504a69972dee14d6c2e06220a585c1094e613814b66b4f528a33db379bcd94eeda9333fd3771d7928910264cdcdda707039b161037dca9b6fba451aaeddfb6bb833c40a6803c717710cc38672413aecd92f5b158bd4fbcab4cee5ec27d32c43b34cfd0dd77bec4527fb193bb93a0f304ac75010ead0bb45ca065270c14dcbf379cdcbf35cc38c343e3233f245fd0922222e03327d8803b1721aadc479113f700d0c5bd48cb7de586bb8fee8ee3bea3b3db66f9a1b660eda40969e2003e94883ae143899ed019c70ac58ece975887da823bfabcb604471ddaa9c3f171e2df44100152b2a513968ec21396a4c2a958136542447774ee9360eda42560e707091d1aa23f3a7488ea9334c1919a40f7ad25383ae981f363a13b1afdd152d991e24db759feded5c371c1dd61f056cbc675064f6db3ad6767fb5df3d63de28f409bacf594e76562f90881105a5c2bd2c861e06875822436d0800f51acd1411343ab0c9411a4011247e44045ccc86a8b1d6c47f4b070548687d7aa005a0861c3c9ea8933784458e15075f484052c4b8c4183072bf3091d8a01da12397680aca078a065041d5e8ee081048fd512235a4ab4f1811e7e763cd68a8a09e18d9601a46879d5b1a2c1c1891dde98c2c9111cc8f185e00a2a26a62594386a89e0134387115930a0c41247436c7c533081010a48515983014298f119c1812e52983144083eb032be18b864b0421c2d08914a238caf160615c38831547405e0e263b2a18e383430c1901807c07c3f0d30430b29d907b210238a2f0314785144260017015079c2aba300463cc0880694e4308212de06d048120016447409d738c2fb228812a2a0b28221ba300008ef893047a4ba082344152639783e00600a007c986244170fd0e2e5a290b1e1657746905c168f8a096e742d44e1028e86ed150da024001145bc914381f48a64c023c1c881a9030846af870f4df665800e3c30e56e022eaa7ca082a22b1f14217332337c61b303074ebc06981c8b2f68bc500e61a060e2c807300271003f6570414711771b04f05484040d8a188264c4cb0a9021a484131fa8babd5c8a871780c4680104544c77a12d8610a305306a3060803b08313451a29b03881b2dee2b287421e8e2099af328e3861b6514a0e5ac32621ce1a48e320ca040033498e2c006ff92a30c11398009e30643d9108608cabc00a60d9bd10717072160a38c68470d529860068f80056694593a8008221061b50f63ca2803b241a5c54509262ca109a30c10229a70c00b312dc0f0041777a860025f4680f23f3f5380b9238d00d8c03187163e30c045710717551c993ff0c08411633c7147137334118e61c376010825eee0c100696021002adaa0828723eed869a8202782b9f3021640dcb182104e34e4d083152440c9e18e266080293333e2001f292d778c50032d9c21b3a08043963b70a490020a260e50a68a8a6dc71b7170608e1fd08040094820eda800019ca84df508338082d10e2b34f00429820bb40268a16247971f700210e38a11c0104266070d5d9c715484833113b082c90e9c963065a44186174d961cd901b500211b1d251c59c00e43761c79c3080e170d4a78a20d237674d00500634c60082d8690e365875f60d320811164807d21421d1dd8220408b0901297270fea90c0035a402069a2234a122075284086a11fbb2a53882185471d4220208ca51e9ce8a4ca32ac3a5a00c11143724448808304eaa8a32271e5091ea030001852c85147d29814a8e8fc4029620611d4f1d2414c08506c81a18e2136ea706511a5873762380bf030830e1494c929a08e2cc098f154061d663861dea1c1174578110177f73bc2a0c3011de0608c15130e90d270bf0172d381bbff99f76bb2d6dc5d049b10dc67c8dcc8414408f540e0aee48712d19c4fb1040ad35589122125499420517244891125414a8094fc28f151a244484828891012a123424684828480847e847c849424114a92240992244792184912940428c94f129f244a900821498204099223488c2009420284e407890f122547848e243982e4c89123468e041d013af273c4e7881223424692184162e488112346828c0019f931e26344499050509220244147828c0405050105fd04f904290112024a028404e8089011a0202020a01f201f20253f423f497e90fc1cf931f213f403f4f3f3e3f3a3c447c827890f129f233e467c827c807c7e7c7c7c743ed6e6a633589eb76797b519236a9870f7216fd900a1e33d718df2d059152441f405346546df766ba67fdbf45afa96a45b9f2585ed6b439d1d6b0ef3d6618973ad3caf76f7296fd530719a2577c6b45af372772a2d1a3141dc753cdc7d6adc7dcbe19ea9b8e17ea9d098e1ee435a3462dc87d0dcdd0377efc0dd3970f7257796
8bb5dac05d03770cdc2ff08e9879a24503c4dd83788b260885e9f19e98466b98ee1353bc432643d4dd7fb8b78c803f3cfaf7ee88bb93de9af1d965ed33165b331cb86b5bae74b464a5ba96246b2985b12c7fad761679a6a7c5dd57f0166bcb8f58fcd1073c02faf0081dd1b49eda8f8fcf919f4aa301018d497e6a4878b005f5ef9a457097b7b6b6cf3dd896ab6813c482f26d4953d3d9aeb30cbcb56272f71fde6289e04e562a5c7e9ee9b2125b2b9abb6be0ad151bee0ec45b2b31ce44f7e3b5f9f5b53a83232e7bc4a7daa1daead0fb241da24fa32379e29ba63b5f06ee5e7aeb8bc2dd5dde5a0971cf61b2969f0ad3ddb66235aba833283e5598abd90267390ff78c734612aa6d2825ce49ee0ee4ee5d77b7e22daf0ebf3b9d83e37f92958ace72469ad15be9c574b6a363d69cbb270cb6c9537fd48c8e4f9255bc7177eafed1dcdd7acb7bb9bbcee02e6b36eeee798d679cf3d35be9f8379ffbf3be5f3e6996966e5bf3a6b66bf7ae65c6355add9dc6dd67dc9d95837b3bb83bce5b7ee43a833dbb5c33cd923d62d63d344b6e7de61e9a257baecde53af23c699a95c708929b986be5799256b7eed1d7d69b4862bc7bcc73eb33833d64bd78ecd1f935941e77efe12d1fc1dd31f096b7dcc7276dad51fd7b47a3b486e9c57657119364a5bb8ccb2fdd7dc572f70fe78628727c8baaaa98af7074d892f302144ea1f0a8dfe4b43cba840db4c8a9dbe42ce1d467729c95e3d4593936ab2459a0907d9435248b4743c879c1a947bd260700364071a5498c480482b428161437435933437ce53dc9f12bab19d2468b0c317cf4637952e4380c38cf4acd9555ec86c562b1e85773c362ada88d0df5565656de6ac5fab258f1e80df59666282b0bd5b757d486daac6cb2d4d80cd93435389a21b819d6102b30d87c144733e4ca6ae6f3a88d37f325c767acd8d01b4a73727c6648de2c4a3384eaa3349425e44a90188e867a0c67c8952cb11bfa09b91224463364af94bc98cd2a7633c40a8b7a43ae6489d50c1962c573ea23f6d10748800b58065a44c41843440c2d44c61823c6d722a2811691315a569cc48894d1d232468c166e457d8b1891315a2b4a848cd6473d9a440ecd6a87b2680d25424608a5a144d0681111d32242a645adf838d902470cce10a81b6ac5b7c0a1565ccc8c022d2d36d02200d0a29b21e66a6786c819ad1b6ac5e188cd60a065019c0a58a045436f2c60238488192d221568d550221568d1d01a21442ed02272812ddeb76a2287489830887c41e4019f8b30339443bd288787908ff2e0b162d1aad5cd8d1616e571539483c3836648074935940657336489dd4769862cb15bd12601b56702a20b10a6111042eeee4f85cb1fd6d875b4e3ade2c6a4a6ba08c5559bfbfb03133fe87e5002abcc7e0812fac0c64dd79b0f5dc09aab49b39a42354529d11bf301caf5b6176f1f680f26284ffd1906139f0a061bb70e4d1deadde3f2bfad423df5b0a507a61e806c9963dbf2e9d3b66c71eb161b7c8b92fb96d6c9831bee3c6c71eb93a2b699f6230f5c78a0b983630debfef519ee20469f3a749631d46ccc55d476a44d3b74a155ee0e36ed30b3557387241de470777024f1a566fdacc30f3a4cf17ca63bda78ebaeb48adb8eb756f3d4a1055e5b6d399891c3971c56c8c16797f7e773dfa7225f9754247e8dfffbd65c4f1bddf8be6ee229754f6d7198e3ac9d7f5ea1dae210c6dd56a17038dd2d557e4dfffe8eb4b9666c96bfdf56cdc762a56030d9dbaa8943cbf353e1300020c8549f4d5b85226a1a72d2f9c4b49bcdfe0d776325227a2c568abe1e8b9532eb2b0d010541c1fb2ed197d069d6bf6d5dc9c762a5949cc25d253f536133d4d5b2ee7a34d6571aeb13c986f64bfd6759d10d5d6e70e10626f72a54e80bdf107403cbab50a1a3a553a8804d465363aeb99bc347fb7e899b6030590e5f1b30f0da76adedda9bae4a24598d6cd03978df062577b72108580319f72fb1aea14b0d37cf7dad86204bf28f1903408c3bbe31009000301dcc9802d1a7c9001084eec8fb4443981776a2818b8374e7332e75e8eed65de4926435a2c149cde51fcb24dea2c11ddcf8daacd6b20526735a4c771add452ed5b2c4d65d6fbadef4db8c4322a23d0317f719cc199cfc8423896ffaf36d8733fcb09b3ccb50d7f06f44435f5a19514c579be8ebc77be2224d562a9c4db366acc312e71adeffb9264328434b6392ee13d35be97d1d21d11c269f46f7498b818a6b6330f18e012a8616b833fed6a1708054d75c951870fc5cbdedaaaa2a26af5ac1f0c5dcedd3962b05c78bc5d72d18b6c0e024ffaee4d630d8b8839f6b1be34c7734ba5fc75ee002ee646ca9eaed69da9634e3da0b53eee04e7dc1e705bfb674410d7074c186a7d17ea4afd9ecd4daca5c98a2515d5dd0715c4f1638ca7ab37cf1baed68cf2b96a45946122b8d24be6581b2d91b2d4a161d7770bcaf5b50e3474a56bbefe34d73986ce
18b3b384bd2d5a6197d2cffb9d682e96ea3a2b1168e1ce4f96c16117dae85b9bf3098cc3c711833611183a58bbb873fd66c334f2cb470bfc6a2e46095199620bbfe18eee0d83d80aca1f98f0b3f6a5c8ad217ebfa392fe144cc345a6dab6452cd5b87a46d95ac8dff9f6bf7b1e05cdba1e04e651a06930de97091ae3a5c84e4e724c92aeaa8a0b23f86baeab8e84c92aca152d5d1dbb6b9dc1c3771575a8e0977b0b45417d996624d5f4f6549ba5fd3dd3fc9e98e96b0530e6af7b8a89c0f88a9ea707770572b62adabde3d8e6227bdfb273098ac867136a9c2b8efd7545d7cf7b830e62d2aaa1ee07902213bf7a9036e35f613eebb855d822603d2ee972516b50dee0e6a2877d048ebe40c6420dcc19d1d699a15bf25737dfab7cd53235f93a4cd699b93aeccb10392b8927f9fc0dff54a18f088b7aed8d0e37405ea0a35ddf0f156938bae39d9486273ea459cb5a526b5780e2cc6737ff17607cf6bf7c5e2d611e9fe62217730639bab0638d6dced028e76ec212f1497a21b8f15476228fab77d7db8fbe3ea2ec3b83b92b74a9d6b4b62fdb9963a3731d7d7ba7407c9129355ee4f23a7dc4132086e8bf1374e37d692e2a21cccb8867359d9c21ddcbabff85e4bc997a2e08ea42f927c7d6b8e7ececa162b4e7664ee2fd5565bbb01f7f17724ad611263cb84351dd4d62a599b9b026cd76abb733a509489669c7473011c497c1bc1c12d7eadf6f471755b1ab59101c97a6d896d5c6ca6bb833625b0360738bb7fcbcd5a35291c9c7dc6e2ec5adb6cb62bfdd2d6707030ef36bee56ab39a90fb58fb1cfc725bd90e7de9e830f7f7ab7077306a244712bf99df7631495fe3d719ddd035376e317201c72864dd78d3bfc9c620f45651632c8abae674dd2213ee600e5ff1fc8c8d88767c7c902439527bd23b5728fa8a7dc6463a43e247d9f7cb6bb32dd1c61d1c6dddb996eec88ba94eac450bf3bb8eb4aa71ac34a5dc5f5a2b1686e7e25babae2d4b4bb5ad38572877f09e36dbb562b571b75d5bc37426061c9f245fbc558ccd4cf76d73254fad6f627daa6dd79a6625716ee35990500d1c7916d11a0ea988855cc0f13ec5b44ab58db5c299575ac3611016c2b0708413798b852755e6f84b77b53c81fe6ddb6cb9abe6136f55e91255e506e64a9e628d55b1719e1e9cae9b7ec625cd98fec5146f2a6a682a4c78a562666ada9bce7f734f5482b83b4856d126796b4a18bf27c65074fcbfc9a6709942abfa9ea3c5542bcc6149d3ac75052e67ce58a4ba48254d5cc6b9154c7717755fdabcc28eb45dab821a0ebe2e32feb5755f2cd28daf0a5f1cdc6d15741a3ba92074632ab4c0adadeecbdf964ea560aacb93556fdddf2fb1a833a5a34fd62927b6bfc9a6884811e3a0686d98ac54574a56fa46de9252e5ee3edea75fab346329534b526c7ef764d5350a13be7f8b366f4deb675cfbac43b5a5b66b71ba9651aa485a256da5aa94e21b45098bddbfa31b5faaad136f4571d076eda5745f8b4d0a858a16942ad7ba9ad6d43d85f2249be3459c4d7ddef6cde18ba33bfb773a23b6f94daaed93558a7ec646549ff86fddb76e7d976462fe625cc6398a6f4b96837e5d49f21cff264a8d1593b88c73a4ddf7d4ba5a662c7e5498fb3b2ee17495e174dd9fb148241baa3ded7be2b26e4c65cdfbb75cd26e5bb25e5b1b97a2461b4a6d11abd4cda59c805566db8e9fc5f075bd89f934a58278d41c6232dedadea2987077b2eab0eaf32946cd1cac328b128a6a6dbb73355350c3c479a442d727c3fd3a852df74ce17686292891189b2950f714727f5b5062c21f4f129b389b218e3c69246969f9a1ccf2a48564cdfdfdbcabe447bdf8e2ad5463250ac390ac3fdaf0c9aa49acc3709f98243fdc3adb1a4ec4645178ed589ffc8c8d461ceacf581cef87175bdbe7d15673bc5ffed63b4d646e5b84d3b5e93336d217db7dd26cde178be1ef4a65b3f9bfabe69756a6cf5db794d1c5f6c55bc95af4a2cd26597719ef3ade5acac85a667c6fba2ac16032184c866b77e9888bb79c6eeeee94de9893d2689eb31fb1089bb59ca81873326b2ed644c6df34eb8d529e8be97d6ad252d64b8bbcd5546489b79844e0ee60c67496b1d18cda448b24d6bc754ff1a6494c61987460eeef786b3ec5ffdc7e1a7d5d3f8b3f7e0e93a4f8a666a2b67c9ad476edd3688933c5f9b1f0dcd613356e4dbcf52488df5663b3c422e5b9f88effa315ed2945b47bdcee9f10fd49d628bb7f52feb63f6251ef1e473bf5132107abcc9eb4de56cdd0ac7f73c35b286ce12870d1d9da6713855b88cbb5fcfc81b750701292a4c5a1e0c3c12a331dee13ff88c570bf7e5d6f50afebcd44daf7a99e4866e2fde2c557e7baeb51dd75dcf7c446b58fd2fbbe156dad76ea9a915ebcd809069311edd01750d0a993b1953a2e39e132ceedbf64dd6fcb7587195f5b86e1b525ae5d9b3f57f378db36176e6b65bc1687c2be35578a533f62a3a7ed6ad397f57e59
6ff8e439e212d7eed263b13ad4d5f2a9ac6d6b5c86bb86e3b54fdedced10e949da898f48b28af9c7fb3a639b7fdfa552d4edc7897627695d2d8d2039ef5bd38a9fcfbcf55d1a123f4a10d550294ec1604054f78fb67e9b1d37c639fc5db776ca4f2bcfbb4ff3a66bb8efe76a2ae51febdf889086c48fa245abafcd5bf74442e790f851461f20dd8f7729560528882e113aa7b82fc0dd67de4a4ac147d29e3e3f20100fae5dcbf36415f5f8d766b5aeb75da132495651ffd7884a712ae39c3fd4d592ace7162bd18ece50294e213902a474d3f516de74552ac5a9d186b66b77d5143a43f30906dbfa772561b052d47d89c57de2adb138daf0880af75b690d231d117104e5a0d626de47addc5f9a77cd365b1a91f9d2565214f296110d2da3a19d9b314d876eab65e08d197d4f803b923c6b5abf288ebade46f056d12d0a12e3c2c1d9ceeb28fa1a97e84b67f6f9dcf5daaca695daaea5aa37f19eb5735c3223c96a34a395e27d2dbd67cc88bbc7564e9ec0575bfa79163eaeee19b55d8babbbb12af1969329275f932fe439fe7edb7896676975b5acb74df3effad946b56d725b4da83b588a76d712efb3de4a6fb613e7dbbed9ecf837598bc80d77d0ac79df5a6229222edcefc75a443c10c1d870709f988e19d3d7aea225cd73a4755bfaca4ff597ea9a2bebd3e8ae9614dc55260b604285e21ca6bae62a1deb2de6ee27788bc937f484fb498ad76a2c52bd6da92dbdd85e1b03b7a5e3677cf12da74f5d6f434d96b8e1f96dfef3ca922d3bb3529c1aef4be9f3770debe4cf51fd9fe4ee4fde5a22e4a0be89faa475fcadffbcf2f9a52c99f9e22049567b137735b51d2bd5f6d614602db83b146fc1485022c61dbcd83c2b1d6f7d1ade2ffacabf2b49f5e351c916771f6b941228778fe22da1152c212445488abb4b794b88d54af2c5c1f1c7fffb348ab18dee485aa5e16c527d9f2affbef5e6a7c2364ad6f3d6260a8e6f9a3893a4cded489ad51a9bf46f5b9f38bfedd6918eb7d29d7de22fedc69b666cb339d6cfb65be9b5b4eadc936a4bb5bdc9c0f1f3537d6d2dff26a3e0b823f5892dd5679366aaed6dd31d8dbead66bcb5584913fff854f6c5cfafe92eff79858ebfc5377548ad24b3939638d7ea6eac9fc94ab7a52f5eab6fb2243ae0c554e3444c6eaaeb4dc95b48c880e3efcf66fe7ded93b419fddb4ec15b48743b63d33c351e9150f0e29bb639baabb74d7748b2ead09bc3f96fae88b78ec0e160fe4dda9a266dadb58e70d9dd4a33aed9ece7a7c29a3e49abb666f7fdd1d2bbef8945b182e0ad23a63b78adcde6277767e2ad233a75d7119bfa73c622fddba6bf71f999c6c4f89719ef6ab7bdf626da5b8e8eb69ae795cf4dde32a2fbafd15c49f1478a77c637f1569ecf2d23146f8d798c8cd4b482bee05dc3745773145f5aa9b63b92acf9efdbe8eb495abd89f5f33da9aeb920d3835a4eef3f8d3e55bd51bd6b76dcdd5a528c81c2584a5f2da02e0efe7ed22cb1489b8ca6285945cc04a403d201779a8ca6680d533a7eb6fac7f1b5fe6cea7e5bd006be0e1d5f9ff43e95b5d1bfc9801cdcf7357d1ac59bea9bacf54386fefe2c5e2cf25c7c977e8e703abe783f7fed33fd9bacf533d5fab171a789d5d4b486294eac24ad549bf5961f0a1cef6b7a2dddb15dbb4f9ce96ddb1cc523c51b7cd0ead03fb5b65f624d75cdd1cf3cdef211736d997fff489f4637be22a5ba08fd9bccc786f1777eaa6be9d3c61f2d15ce269dcdfe2673f7286ff9f038881371fef2d4178b9424edd6f473a59fffb6e9ebb38a4fa3e3bf289e9a6e7c6f15697e13539c88c97bd2bfc9425d2d898668bb5a1211ede81011916435fad2ee7b46c16044365bbbd888ea4a654343a538f5b6aaf597433a5cb46d6999a484fb66b39f238a9118d3c61f4b710a28e82c7fdbf0bfb6f58f444c769367d3f8518674b86897ab156dedefd340355a20132dd00ce22d100aac32fb7d8aaf7b90e6afe95bf30e9f2abffedce281c3ef9bfad42d9e2dc02a33bd5feb1aee8ccd277f87e36f5b5ab2c563c326cf5afb91e736ded73c42a5a86bbde600abcc7635bbf10d2fb63a3f55f8349b454d3bf53dc31733aec160fb49b242cda6d07f29f756af2c79eb1793bbb6f4c5c12b83d6cbc67d466dd7fe2d4777289321aac364885e6b7b9a0969b83b8db74cc040cb040ab44c70c2dd73bc654297d98cd630dda13a94fe68f5b5545bba2d5db2cb35d79b48aba57b47ab0282e01467ad3ef776e4d85183d6c249c58dec409955d45ffb51a455254249901c311204f4e3b323424b47053a6cb8fbb55bff48e9c697ea2e9e39dcddc5a373f1b04061502968b36659c64975d7a3b16252ebdd3ff9ad2b135ed2381133e9dd3fd9ba77bae9aa24bbe9aaa49b3029759f499364ec49d366d9f8bbda3edb6c3132ea3fe9e8e222a31ac6194a9c9282f2649647e36f5d99969eca6eb1de25bd7565d2f506b5b565dafd1328a33dca0fa53f36eafa4
ae295a62be5d1d695c968bfd4688fde92e67fd2d6bd93ce98a4556d8fa49865563aca6127a0202858368ed8086aacafe449269005c955248cb625264f2852e5ee1b937a7cb740c95d4574740e75cf53f359311dbf76edc664fd6cb33af74e2468b7494041e33d697e53d722102868f7b8d9ce0e7dd1978e4e0d9b408d9075acf456f26ff4e2fb26a6b51c6692a2ab4dd869bc274edad8e92e49e51ff353ed699f4f52b94fc258e6849dc687c542f8b2816bc81c209410d79027dcbd52215c4366aec7245c43a82be112a20638ee6a0097902dee4fe5123282349ca94c9364357209e171510bb8bb8c24ab5118829a242bedd43b931c71e9a2477f1b298d2b849ccf3187c91a140a6372cf05821b2e10c2708120c505c209eeeefac0899fd0f501d535044105eede82bb3c50729d41b396491051b38a61ee2f521417063cdcbdd1fdbbfe559a1ea6687c6c8d4607b45d9c7374dfcfd12fb1140535edd437f1d65c44b3c48f15082c398478a2bb47ab81d2bb5b477c51f0665ee8eeb190c2131b984f73ef2400d70646ae25aff5ac700581c39dc25c419800ffd4f526c260e3dfdcd38fd86916a3e243d89af76dd32564c594569e59a495ea1ff10df1be36f464b042df76aef4335efa113b151dbdcd323ade0a83d1ca331b794a90b6d6b49dedb30cb7aee4d79eaac8e776ae0cc2e80c661c45184bae0c82dc9da4d1ec85c146d2de4498bb57c5dda3e2ee2cf7a698200e3536e0c2e0f6f303fef0884f358022eefe85069f0ad5055c4de7ee4fdc05a4c77de3ebeeade0eea9e0ee4db97b52a2b87b50dc3d590a9e1418d7055b67f0c5fb5a7fe809b92e00a23338dec7899516becddb6d43ee4ec57dc77d8abb7ed8e03a83a117e59e8223c0e56ac23dca13b336d9dcbdbacbe57e96b4f16dde341bd448000355ee5e8a535b8754f35971ed2eb95a3eae95d09acfea6af913771fc1bdb817daccdb2d71cf89b7e3ee55dce5a3f503faf06c8cb3f7e4c4cac26b72f794506042e1090a4b4a2724118d6f9a366f0d2b75cd0df57092f4b223f79860d9e97254e7d6f98a896f89b3626fb61554989212058a4c2a2a0528a726a62728289d908474645414eb01795e654c286147a708092388406488903265ca9471afe6e2411d6f739745578e1cee3e2bc39543c5cc95c34395bb6b7c5fae1c3c9bcd6cd79563e4ca31e2fea230261ed203aa08c3a38027c68533e4c2298263e34e8728059f1c6d7831cdb8e6bab962df9712a7dc3d0477ddc41035d622d7cd1077d7d526f0c92a56994999a538b5349b42b359d43fd66a98e7c79a37dd551cf6f1f1f9d1c1f86644c17de2fdb6aa7724799656eb26a329bb455bd6bb1bb1be310aea6d6beecc9bb6db96a75abae42fa619d78e041d39822487b38deaf29442c1d98cce704fb35b2d59cf2b4b14c4afa7fd48f393642df5eb77e5196fcde18b8b82a16048b227270a8a74a62b2677d5ac9f9b99b5a9fe6c469231ea5ab9bb9364a5bf4489bb3e30ee6ed34fae6f0b09c6bfe9d3a45b9f569654d1f5b55c9e19ee603e6b74c992f15d5e025c1e97252479d6a8b6d4bbb93c1fedf2dcf585e27231e393e3adb77bd23059e4f22dbe8496f5f3d6d4e553eebe1257b0f2326470edda325d9ce75ad8094e4b4e513129654c777f5b356b6564ee5aaa8c92b75553c81d6a34194dd11d4defc78956764714eea5a82341e786a5a81b9742b3cc4ae7d31d44ee3a68bcb86869bb77c5bd277574e16ec711ee66fd1b85d9e1811d357504c1615cf458ac94ad42edd7a1944c7c2a182c1cdff5ee71eede91111d117077f2e9cfd7aa76f78abc98103a865c6730bf49fe2d74d2c4c1f1af50706b2bf9f50ad0707a9242c1572b885a41518f0aa070f7883c26cee2c15944b83b0bde9a6301423e3fa00ff8c373b1b612fe985fbf26eb9f9924cf39384841993398b42d7b7a446d4b184c93e4e9b1bc9c0a2081c5fd8a192850230547ee0e8a4e1e75f74270f74070f73ef8115f9bb707fe800e38d82008cbd3c0fb310615364e10c46fdbe6daa34dccfa6247078769969d90beb40e0959e84b67f7b870470bc35daed7d240adf567dbb5344c98d07c8a2c276686c8e1e3be421c6ab8fb8c4f1c42b86f7cf77d29fd4731a9247056771de9bf140567335ac3bac45b9f228d516d8ffeb66dce4f858da63e271b6fad61f19ef8de1c5413149a71ed6b96fcd1dc62adddd750eeae2088c56bb5cdb98a543f55d574bc75ac98ac4fc354d75ccde4e79b36eba5558a3559a9b6a00d7cfa7ac9285929b85fdbdbb654dffa2559f3977f33df9655663f62a3dc3bcda6ccbec452b3aa2da63746c126b466a550f06ddee3cd6feed747e36f6d757ea8d9147c69cdd2faa4c8b3232f1629aea2bd675962916a2c6afde22649d256413a7a3282c14292fc1c0c66ea2955a6d84c92f6c5836d27d5186baa6314145f555791de250aea7d6d15dfd49f737f35140a62b26af31f47bead5599e13056329a4da1
af2dbd6ddb7d1eda7ddbdd91e2ad7acc3877cd17c54a6f1def8bfa47ca44b18c1b3b196d8d95621b3bd96c11c6b25dd1f850fa898d2dee2ea4c9efbaa59af6ad544f7ed42c0cf7db661e14ee5e06adf16dafe93d8308d9e5a77a7aede7f1ff7a600d2abc071cd668c1dda570ed2e1dfd275d0103050e94e1ee4264516fab50484f50e08e9ce987c1f489777a47feaea5ddd6f6a41eefd39dddadb46675a894ae3681e3efee533d7dddcf55bc4f14bc553c77f4dbead6609559ede93e93ae36cda6d01d6d47ee9bcd929a6a4bd632dbbf6dfad2e68f14631b8dd1d2bead9a58acaea5ed733b3c175f1dba7525a1a0c8a4dca5110a3bbdad4261a7a88d9dd0b8b983466838b117178586bbbb9d8df5955e6a764618bbb1fef9e715f2a63f279d4184bb8fb03380b8834e660061465518a43b859981731f9728a8e9c52335cbaa7b9a711205bfb49ba4597d5fd34a5f9ffafe7d8d6f22cd275681825a9fbbe66a9a958e389bb48412b4cd55fa4514d418dfe8ecb1d8bf7fdb4b14fc7147abb453d38bcd93cefecc7b46f1b6f46f522838fe6eac1fa5c7bfd8da8c465ba1cadf4bf7b47d51692b69248564f4636d1a8da2488ca1c61f97c6871a3f8fff240d8a4ce9eebeef539161e2eee5799fce32fe9bec021fa8b7fc26ddb7da5ca968a5da8824ab11a8b5ae363551908e4be0f81b43510a8e4fc75c739f292569266dd76a5df7db6a188ff4733bf7d4a1a03e6fcd6fe26cbef68fb45393e48bda96b486a9fe9d586d4feee81aae385db70ecde3676bea9062d88924ed965a62d251bb5b9b3096e1232cb35d7bdbd869bc503676fad2ca6c176729fb7c1a492c75a18cf9a99eb09394d1e228d8eec85c917e57268c651b5359a88d9da0b87f642071a730bce9920d06d35bf797025c82a87ac4ac3d0cdcddc60322e5f4b424337282824238156bc297024e5cbf0567e1c5b3d7a08c72408c12429c0def02ef87bbc7b47e72bf2697c450f06d77e36b5baaf9ac2018238cbb78cfbce96b6b4b8ea1e47519e366b631296534ab619ab1b65476a495fe489e2259452d811b56534c4086b7956ba9ded51c5975597335930405fd58bdd3b8762dcf0496b4c2f0228c29408a37adfd4873ef440bb0450070c060a2cd7fdf666a6df5df3f0127fecd4816befe9acf5af3597924e0c4dd69388203457498782e77f75a4460f070a73b728b9fb38f59b72eed262b89b1d953ea9a136b8d270e34c8eaee55b8bb8d09dc6b687571e4ee25707738de7077121ce1ee23707711b87b08dc1d04eefe017777c3dd3de0ee1db069c3ddd970f735dc9d03eebe017757c3ddd340c3a1dc9fbe58c27bfcb8fbf596177638f9a32dc92aa544620c75b135d26fee7654ec2f7692da18478db91af5507f5eb1f784c17626cefb7e58f3be162643c2b5abb31010e8c3431b79dee6fd830707ffc1b3fd070f15770d7ec088f01fb0eb3f6042fe03e6f21f4a52e03f945cc07f28c9c27f28a9c17f28a9f90f252998e0456812770cdcbf8cd63f286580af312952ddfed346c78b456a6229373860d5fbbe25454a774dbac101f57dd32c2badee3ad28c6bb746c9aa429531e35fa14b729856e98ebe27233d0f38708fc3bd1708ee9e917b344cdcfd0cf70854c1e3ee6490b6d6ecb63a24318803c2384eacbb1e45f984a0bed001474e5fd8e9db58be17dc6dd75e3b2505064e29b67d410131ee1218430c77a698cce96985a5a82725b2ee8b674d509c42a413625252484de151544c0a1494702ad634c309bd95451cee3e81307a7ed728b8c8828bbb87611650ee1a976b7913efd3e83df38977748936d2d526184c4689a8ae965988e0fe17dbb098c361e3dfb4bdb646e90e4b65c2028c27764a716afc5defef2a5e6c4437be3a14e7232cb2c06cd76a6d4b982845c5162c8e6c960603d3054c162c78b83b0f9836dc292c85268af7f83a833c6629ea2e0633030346c7dd41aa6dbaa42ff2efad37f1da26302c77bf620d0d5c01066ffaa2b0178501e1ee5764b9e2e614866f39faba02c9f58f749ff88a0caef0ea2d475f14766d78f36fb38e35dc893a6f59e1fa26fede6d9babd9536211a49db76d6d3d3d306dee7da3bb66932e193fbfcd66f3a6f7cfa7b2fa7eed475aab08e3e33d6b9f095005972ab63b856dddbf287d51d8eea4c1607adfbfd82cb191ed5a0d456770eb9ee7f753d96d4b9b4dddc3607fed4dac65f8bbbecd9b0a379cc27edb1a2abae01b0644418590bb3eafbc266a1aa27b57baa335008d0640a00154f094a22ebfed5a5bddba01413885e1da5d92a23378d3f5764f18ac013e8508dc7d36db6f24c49d8807a9796b0ad9e342b28af83480bb13ee3bb8bb047c90020823e00ff8c343569c2dd5fd0880e13efe3e4b1eb3fe0dc6d3a37b9c05250a58140288e20577a7b011effcb97b9c0e0c1632c0044e77a538aafbbbdded60c0e83d3db4533300c8b954e27deb0d2a0c1720820508b1002d0b204b51c7d3538aba1eb3e74
bbba97efc05302de0c8dd297df9f7a3802874067f579ba5aade3be5fece18e1c2a5f27a312162427774a3d5d9b5678245e5a75d7b264a29bd545aa0bad1866f1b3fc489356f7a13772d696c686e689480c164889a10012644f43e19eeda93e148e21915acce6099e1ac236a3cd87aa44c988859d3822060c8dd7fc497c2ee53d83de287f2d4c8d79409130abe6dfc2e3cb87b166f4131bb2ac8968c625031233a4475a38dc15b4f40b1cbfb7e8f59cb9effbc693d9ac4a2b8ef538dfb3e55b8ef53d1ced02c459dd6a11e2d8e028385214ea84ff22c4f312482c1c22f6baee5c7e15cb4db96b41bca51294efdf9670a66294e69fd524fbe949136cbba89e86b28e32492ac46643582c17060b0108ba1fe31bc3f8ef6165ab386a686c16ae46b188c48b424ed8432cd1896c16022505d69cd676ca8e6b312dd7455ba528ed7d69b5179de301c8013707b51a37ae1e2c57430e6c5c74db3cb1c3477d982ee2a49e98d75b9d96c4de35b97200ede335b0184316dd7c60470d3dbd2ec0d67530042eed84900eee0f823167d782b890d24b105bc67a6d8e6b4b5e54a521a266994b466794b220a1c2976fafcb7bb04258970bf0ee363b8c57a1f092ee163b119dff65991b081556663ae39f30cc7fb3ac4234efc9b88cbb5ccbf6b2e34cb7c2b1a5f43b9edd1e6c65b69e1f8fa241aebdf62f80428301859436c0b4722a5b1fe2dacb99a476839027470866fb919ddd15eff353a33ed9ed5307dcdfe6be18cc6b4aea491d69514ff6b2159ff6be17df2bf16beaeb4ff5aa84f2b6335ed26fa4f5afaaf85b9bfba5a1a51151a01b55f1b411dac320bf7eb22e070f7712c22ccbe367caafae4c6a40e8bb8e91a16211416e1c31dd3c270bc98a6ed48e2d186445081ad5022b63811d1c4658e5d2d9fa8de3dce8e5cbedc4a77b418971b18e392fbcb25c8c54543a8e1ae87a062081b6c94217caa4c08322489c59d798a22a64fda5b8ee63069c4449510500e0aa1e360106a5c4ca33bf1da5cee9dbe043123eb79e5c7207c6e0dc2495acd7fc993ea2e011126f70171b04ce046175709b680238d9bae4a44444430d8dbbc4353db7459c37d5f03059d4e24283dca13a0a093e8476c4434247e145cbb4b4661f82316737f433d2e7d7ea5fcb7fd849d48f24caabb8e22b0c03d0c75b5dc55f1c730040a3a91fc9c24c6b45083208c0face0031670438d0f4c39f11da009971b413c70fb9870afbbe29ebc0325f81bb37156848836809865beb17173630d1110600039aca1658d57541c1c78c2cbd7450049b87f481cf1443bb6d1665a4c6748fc28a178a50caf949988bec625dbb5d8b64f1c9a65be8566199ae51902059d365daaa180a7128b1e0a3863272f0d39dcc7fa9f2b0d2adc69d2d07994fba46103eed748ee9f11ee5f11eecef28f087777171a55dccfd8e28c1e1a70a2011132f025032406ba7c42b87f410001c2fdfbc1073239b8c8e0dc45e6dd4566e6ee34b1fa4a35572877e7c21dc92cb3d2925966254c4a9175ac1fe5fe6dd9e57d91c6fa514d1be328a9f08da24650800bcca84093017ca00d3098c0c0d0140c6860b09102c725c5183dd02bb07c378e293ca0610d2a46f04103131142b6f4000a028928a2e03141102182885181268e5e204316002668d0b82c40c5dd25e0aa401811c0c14586072e0a387189c9c16b8a18a38c7f01b8c1fdb3a106291911119111209f9f21a020285b5f6c445f42a71124270d5ac219c6d767889d3e19be18aadc9d4836247e14d704bc78ab0d9db7dab079ab8d29de6a83c85b6d88e0ad3640f0561b38de6223053c685c0ad0c05d0aa871d79738dcf5650d777d8980bbbe60e1ae2f5ddcf58506777dc15f66eefad2e4ae2f4adcf585c75d5f3e70d7171a7725e0042d6ff5e8400320ad2e7c70571732b8ab8bed2d3450e02d34d0f0161a61780b0d2fbc85c614de42030a6fa1b184b7d038c2dd737eb47244e0c05d22e4b88bc81cee220202771149c35d442ee02e2263b88b0818771159c25d44827017112dee22a2dd4564741711990f9b1f3ebc24e12e2f3eb8cbcb0ceef2a2739797ea2e2f327779297297971e7779e1c05d5e78f8d0e007064940b92b0914dc95041377254182bb92e0804683327c78ab0ccf5b63e2f0d6980d786b0c196f8d11c35b63acf0d69825bc3586086f8dc9c15b6376de1a73bd35a6e6ad312a786bcc93b7c624796b4c136f8d31e2ad3125786b0c116f8d01e2ad3135de22e30e6f9151026f91c186b7c820e32d32c4788b0c08fcc0b1e0021b9c1a1c771f373d6c7ea0e2ae1f9adcf503cc5d3f7ce0ae1f56eef2410e77e771817320c50a774971c25d5274709714ed2e2954dc25458abba43c71979498bba480ee9232c45d5282b84bcae7ae282570579435dc15858cbba288e1ae280e70579428dc15c500ee8ae2c55d516e7057949cbba260774519dd1545051f1ccce0f0c8e96141109b1c8f0e30a893f1b9e0eedee42724b92bcc
94eb8b39dcdfe67dc51fb10833820412a80f652284eed7b51a9e218104122813223ade9f49e2ac2b5850b06a6019b18a583196112f663b63edba220c34ba98a3842ebab80f57172238065c8811834507b8d06274db56cae79a71ed7573737383736bd16e673be64a318d791740600ad4b6e6a3241470c51404780013ae33788373f178e98d391666d4e0801cdc1da0e4eefae6f07db2451deedb8a96347548ae2dc02ce998ad59b6c586229fea994c2c23493f620b0f3e215a88d1628b535892f690d0c2c83d8bed54eb6a7e4f59e8780f0e58b8804589850ca6e90b576509c06208163614c6003066b83b13ee2e840060a608b9d5750515da96eb1547781659b058fdb90456c8e112f2e3fe9382bbffb87b9fe7ee59e1c40a20ee5598e1aec4b6631524858df775153e54c841615967900a11dc3da21d1df74feb72ee1fd5a9f397b5fb97b56b0a21787062cd32a530364592298248b1e4c97129012290424cc65af74e528071299830cfbbed06ee4a8015526491624a8a97bbfb2480948287bb1c4e6657baf3b67769072e3be8a2e8e20e4314b9286ecee4c5e4c504f6e48a02e68e6b770927957ba7dc3b8de38f632cf74e204eac343b7e595aba9b517d331e5f679328586566b3350aee7465a2e0f83b72df173fef68b6d26bb5a5d2fd3e4bfab499d6330aea9dd76eebec7c717e93beaca897c68b739809635989735294b64c58b6b4b1937e92666d9644fa124b8d6f84343e48415da9ad66140c359218db9ca69fdbdbe65caed7d2bf89543f557e29ea82a2062c509441b05917122073461920f8fc861351e8b78d3f66fc2b7489f95be3ac89652e279c30c1dd89d05d69c6e56b5d5fac40f4b1d873d32abb8f05e7a4f1c7fff15a517fd4d21835fecd67d1f8fbda26526afc5d6dded5fc2828a3d4d69869ebf368fc1c1e2f521373b89a98c0521355b8bb26eb792d7875b509bcd8d27363270a0af98bb73d3155add14a6be46b7c44617f2d494bbc372ea220b6349ffb6dbb5aea13efa4f011a5959a27bd6f62184edb9c4fbca3bb52ac291ef3537d0e2b5170fcf0f329bebe7656bf46fff6f456f185f363b1b4845c426d890a798a425a813ac59464340a4916438a3a81221551991494260ade37f12a024268962c747cca848866c9427774707e2c9f6b14d4983a5170bc2f297bceccdaf4fa9ced72fdd1ead98cea97aed5f08f357f594467b313abe002a575d723fa1aa1e86ba8b43729b336bdad32a194d216280c4629beb3d98c22512234e34d65b7587fa475eb7d625ac37adb7b7a49819727dc1d2c5284c2461bbe74ed33cdb5fe91ed5a5a1def9b3a24246d8be2c5f5ee715dca707707778f3381cec6a53777b2dd3fd93daed6dcdf5978f18ce634dde6e39e8afe486b78f7385d246329f6f66f455be28bc527f38f575361a7dd3f198fc6a5187642620ddbd519a4c160a5a87321a1c4dd88468d8fcbb5fc2892b4b977923aa28b0edc750412473041091d919202719582bb214247b41e42858e4849511b2d423b021f87a9c41abee9aab475b6e78f24d6a1e63faea43811935a8fb7c2340994823773a671edee1d1dfa9aac49149c01059db3998c96f5ce3426f1a558dc395ab3b31ddd8f77e945241bca3869f637fdf9770d6b7beb4dd46fbb4ff5240fd5a2ddda8ad68c0d7d0dd71d990ea5b589ce886443e5df760df3e870110579aea5513054ed5afadae696e83de94b8982b39914ddd9ba1f4b1daa29a89b5e36bce93e4db2ee5c7bb4647474452c5bc2323dfe3b7d599dc437a566506043076e55d33f47ecc4e4f201c98e0f457c00e2438d0fde430a7ae0400f6ae0d0723731dd1941a787ee8c40e96b040a33317d8da0435fb44a0f50f4f00385ede8543a7a7839f87d573e737503135baa70d7becb2101ae2d24349909718fc6c50398a75d203594e161067707c31d2cb001217af738fab35fbf66f96df5d6a619ddd13c2cecf8f0183a08f958c086a11c44e042aea53f399c39380e70b88f17d3a8642a974a8d0a8d0a152a43a84c711b2136204d4e0062d0b2fd525e00ffe1863774063349d6d065c31160beebae5fe3f28c18ae1a7cdb180080709c0b003a3560b87be92e1a9870f7292951a0c8a4be284f0887c0970254115aa8d0b24593f535ad51678c3e8e3467fcc69bd9f7a9468ba318cdf042ee85a919cc19c219a4cc80c20c4b6638e23e92584906387cb64f7316e52e1982f0bf388f565f2cbe926490c960c2b86b8d5e7c5f2fc2b4eec39acf5a8a3a5cbb4ba6bde99acf58ac14a74a71caa8c8fd8b7d4e9ab87f44ee1f936f6889fb0753e2fe09ad5e0042040bb8bbf7020d9e05de38e10255e4baa0c3dd3f9a65824bb0c60f2070f7950a669413150052260e776731318026a02eb8224c1277f79af830e569cb0d488071770f871084b09367869c70f78f09a9491b3814c12105eebec28007312418820b1832dc9da68820e6f0ca80dde1c4dd3f364ed043963a8658238
9bbb38c98d5107193809534dcdd2b8306d405151dae9cc113824de06c86f73e31deb4ca4c4bcda6503b03a576bdcdc007416bc648b21a3181b3ffa4d969eef36be0d7b4aea43563bb4c6edd69cdd88b170467354cff045cbb4b4639d29eb4bfcd68dde2932436a260fe5db3d9aa0cbcd59619d7f29b7a36d3373f553e357d5c2d6fae4f626ce230d5eb76fedbb6b9dff8d257bd5014b46152bfe83e49f2acfdf819ef6ea5176b2be0e7d98cd6bdf474bc76df3aa346d49ab19acf1858f359b1d62ffea54b50310ad25de99bbb7dbe7624056b3e6320b566a5b3a29906afdd74972b15984441fdb656cd18b5664cca1368cd4ac15dab610a5ab3826feeb609b99a6b18142d493b4d3a3e1253cc488aea280adaaecd5bd399506d9f333adaa78dff60b554747cbd7bdcebfcf74fa0e3bf7661c5b25995390c71ff84a4e013531a71779137dfdd5a5fbb8ccb2f31b9a3ed5d6da214f78fba7f2110200937457e20b0be0f582998cde8786dedf3c0fdebc0fde3c0fddbe0d3e0c3c0fd03e2fe5de0fefd70ff5cee5fcbfdf3f1f570ff78b87f39ee1f8efb77e3fed9b87f35ee1f8dfb37e3feb1dcbf95fbf7b97f9efbe7ee5e1977ef0e77cf0e77af0e778f0e776f05ee9e0adcbd3952e03e135170b375f84d57772a97cdc18aee60cd68ac5f771d475ba1fe932eb6a0c6dbb49634cbcfcd66a3599e345a5f588a825893fadc153bd1d96c4697ec984097d012a809f4bf16ea5026d4b43bdc4f2ed931edd611c92a864c28dd4ff6f450fbdafaccb3d98c8234fada31edd6a158ace6d991b992b6bc67e6d9b78a33b296595f6c4dbb69252b9dcd66345b8df110d3ee6ca986d0dfa759627146d6a7597efb5fa331fa54fee51aeb8063ca0881918a139c9e98e8d3938cc288d02aa30fe30b3a7722b4ca94f1c988e44d78a80a32a826240a23a24b51c7c333bac4224420442d0e8220d544e8db6a4f29ea60b08bcb128b6148db00ed023430f445b3a1da28b428523a83958a0a047d138b9a5efb54544a7caa10573f87ab2fa5f200058d5ec69fcaeeeaa46319ec71cd841cb6a9cee0c562e80a5f08c7f15a5c7e7de6374f291d933a2f1458183a3d2d4945214509914e88453dc9a4c210c60219701c4932c6c21716b4b83bf8e2ad3cf95d2c38b170e4ee3a167a58f8e14e892885218530589531aa7051c5892a48be3885f550b1c0dda76899b27346c23d219935e75c2bd4e11466d69ca378afd0868faf2b94fe4fba50485b934cb366997e5c95bdde8dbbb1be544c05262af018ef6b291720ab8899f41bbd867297702266dadfb4b193fe4f92dad80927e21c66dae7d328859dc65a548bf447edb659223d49b34b1b3b1d91b47aa18c9f34de9782528aba28a02b8a8ebb67904584e29292b9a08e9891b27973b74b51178eb74e1175000244fd00d3f485475794912813a23ec8c0dd6bdc15e5c39dea46cbf35fa3af17a52ffaa2e16f7c53f84261f8b60360ddf534022835707e2ce42533add99b868aa233b8c56a9658440206f1883f6291e7c92a6a1e97531d2e27375c4e6efc109ffc8798e43f4424fe43f471bfa15905009cb99ca8b8fbcd5d4d22f8e26ada4d4e9a6c98c48478337171a7b0f0bfc674730ac3b5cb64c49dc29ed4f1640b779a0385e5c7659c0b6b35fc04cadd29ec890714a6516083c2340a3a77170a471416ba96c868d752170adb355c1aaabb8e757fb9c4722531ee14a644ba2bb1dc3f1feeae82bb4ee0e2ee221671e6f9d2ee137cc0dd364b9d7f7f892f2cdc98dca779adb7c318597862e8ffda5359db04c2085244169e042200461819bef0a28b1f8e44c085bbb320e03d2048001cb085a74516eece6324c66d97863f62232ddb49e9fc4eaea22feed78eb5a8c8c7b5feaf75119bc2155322c685c26a196a4b29acf639a4b0ff5a4ce71416f51415854485081112c45db1a018902b16c4c31cbe7a286cfc1e1dbe6c088395a2cee5440eca84091327147002c629cce5a40627599c48710fc97acb322849ae2676347183c29a68e16aa280260668a26587c274284861422eb66f73ae4578dbf1c352d411bd41c4868bc80c0a13dd1d0c16ee5e773d8aed7a943f3f96f0e22bbea9759b09184c644c8edc1d28280a509425539640c1c2b83bcebfc2c54ee29b523a9943f95006dc8079111e1137873d2eeede105fd6abfb5108772f0820b6bbf783bb4340ffd77cb835f754d6e6eef5b08507edeeed409255f47418ddfd017ae7e0f9adc7e1eee1e0ee05c0ddbbc1861a001084064f8622ee5f1924248f76524242058395b2d156f342a9968c89d6c42ff5236ed2ff49fba57e6327283811ef8cc9aaf353d90b05e9e6b3088a949971ce483a33b65f2a2bbd943e49339f5251bbb3c8a6f45bd421656c7ed451ae4f1a914631b3cc4af7cd27a3dc3b9965561a5f36be1492ac4641543058294e01ad0144c6bd1475375d9576c64d506fcbf566f4e2c54e3e404041b91a352ee97ebc55ebcfe318
6b793f61dcfd69614861eefdd490cf261df16d7f4977b40170e0b3c0470d26e758733567966ef45573f95d3d6eac725c3d5bfc0577b77161df2f5b7edd05964105fe803e3c3ce3bb402e20ee896e7cc11cdc71d85d200ff7a8f8a854f18706c21ee69c42ce8c880800000000f3100030402c1c90474452d18cbe6d011480016bbc70944e1b8bd32cc9410a294488314604000040404434480318328d1812587d7ed5910f139feaf04c03fd453ed280880f93f30b5645e6f41e5f33eeebf6264831936ff7ce8516281ab4900432f715f67088022fb8dd30bd3a8c11e1fe44ad83c479357b98c7bee4c69ef32544ed8c03bf7c3ca089f8c830f1b23a08c921ae2ce5b41b79e9462ebba8f77a0c3c48718ffc98ee446f1c44e7cc86afa37e7c8198aacdc4611a18360db9a80d47d41017b028a7ed4af6f2eb946b5b17524f96bcc75616fea5ec8875a2b7b8389cec7207945d3a63703314f35e880547134c0502007598381919304b245aaa5c20861e3fa15406cf538b295ea059d88f827be7eaf9fb05d57e45a99728d5de47db8cfa023f865d8b2b3bfbe4934c5057cabb13a4dbe8af7c3009aee06141ece203955fae46369ffddf5a54d21271e16625361b7c61fd8c0ae82899b022accc7dac0e14c2f89c4b8b0338edd8f8a4bbae1afd1de46c09382cf37b31992447d2579a1cd483521386fc1c21fc93357020291194e5a8651fecdd80b251e0b12b057a8b86e0953ec3f8b808dd103bc09fd6621f438838a1088112efa9d31a835f838636f8a885f63063021595b0c7a0d8c83d95e630fb9cea9f262babe8758290c9613d7e1ce3baec8ce8d956f7aeb5c73c30200e0f0611d7319b521b9f51fffab6c73e4e25e23f809d1194528adfe565a61fd0b0f6c1941ad1963bed39a793b8b3e2165ab07623304e763d979f3fdde3a68b18b3215fbdcb47a27b15805668582bb07eb3176fa762866b0c9b2ee3718c30538bce95722d405f786ddee957e168348b6bff1c9c039df7f1a0f27b13d0434ef474ba9cda56ce93209f801c60c0e581629dcc457bcd599bc5b6fc4e70cf26c968cfe56098147ca7d64b86b943333e93955b471e5007c5febe9f4b33d62f6a38ca3f8ccf4dee45e5a9c30ec62cb7b5ec779779685df4359dfb44eafe4c44eba4597572fccee8a89936ff6f13f071863e15e8689b10a4f0a39603355028d80fe81238db0b90dafdcdade8f7a23d0d0935182e39014ccab0fb801f07ca182940166322057321be16b7a2477b68518f98ec6f53dc1e7a07c6bae794a6d5f64d4ce3c47487f5a4c09e1c8a192f35ee2f7db3979d5c09070b34f272d388b7db9d608cf95a7b5efef30238f661f7cc07ea7fb27659265fee6e0f251f7c25fd0023021cd45e727ee0fc570eddfa93134ffed58877463a38bfb789c47cd648b8e5ff8ae6b806165593cac6765738e58dff8a1e1829061eeae81b66c0ef05b929e2f6073848e8b9455a7e4ccde71819826cab9d2df92c861f50121f748693542e146bd25e3f251f30e4a3c1c7fc32cd3839f40d51fa9ad40bed8bafd5f49eb47ffac498f02b1c0327580e173256fee14ebb7753e2ac9f40c1b2939634967251b0c29bf6fd356ce2db6f87f2af75a7b73a5ca4465848aff1046102c42bede43fbd239d8fd3b8f5d525e214a6d2f6c80687359312296d981a4de593eb94e354bfb431cf48cb03d74e73dc7235242c6b102a62bdea0d5cf1da997e250967eda5147b3915a0ce3696ae95c6088d786e0144efe9ea6f802ccc406ab06e01a4f5cad60e08fdcb69c19888fdcac85b0f48d95e020c7da04af68fafe371129d33fc478d1bc05d7825ed88201d49421c0aafc636081bcf52852d00e2d3013ddb6a41bf970a13c0269aa3a09c06ab5cc94688e1786b1a1fb9b7f8c8fdab90a54ba964bb337c94eeac1fa66888569dcbc76304326f7d58a14b1177ccf274e2c61e94e942b3ff4ea20c88e28eb9fde8c89a2666c31edf8b9e948d315f7d3e3bbf86bda06506616ccefcf6e978494d86372455478deb289f20de51bde409f6f96b701961df493273c80147e00175469c3a74bd366f2ac9c602e79b44ef7758e81f9ffcd25ffa0ad85b75db6d8d78ebbde8f7f3a0b546fa957ca4d55965ffcdbb01e0341d24f8b26de73920effd4d157ea67fbc04f7d01ff863693ce0114fc603cfd50f4bf896dbede5bf91ed3a77dc21da92b5339ebc8e3d47acf799042bf9a8fb6558fbe3ff6523a2addf77b077281b1fa9ec1578ff5471246e541bde0d9f7f50a26e125cf92ee0f9f0a7fc601f1bdcf470c2c031db9de4db9b493fc753a87f1e71e38345ec1f62edc1c6a0fd92fbf36c0624f235f83a9ddd032ceeaa0d183de4ad87dc8689fee8f7a969f6a211cd46e02c1eb27f1ebf5d471adae23225700d8f618970a115ed16878757fec360d0a2e470ce90de7fcfda611dcee425d71a8205845f966e5d7708c9c67670a0354b7ff8ec1f01a8acc9c7043cee56daadc339a4f48
f00a318c5ef539460af60ec9f17598ad6355cb1e637bbfcb2e354bca0f707339bb0b8b6bf8dae32c1bf153dae5676c223ec664c3651b52160128719cac70bc5e28a8a1fefb0a3605522e8232162ba07dda2217a33752f7f98b3b7dcdab4d8b379e73a685d564ad2a707a3070f5a41cd2772cbbf700b68ea3d4126d79697871886149b12c700194a3166197ccd2902a0922a0a4d3853190cbd5e7daa3d5acba6f0d5c4ad1f5e865bde7fa1c065df0ddb6fbb69c04b91054847d45c55d3d95ea6464d947ebc1f988f07f5855f980dbcd96c580cf7c695fc3078117cd84d58fe5c4a6782da668b9cf94087fc62298c44d74e800282c792a03056b67365a71b6e6469c5c86ea8c9d3974fd8b788dd69290078783867a50e2d3f38b3ff00298163507efef05cb66eb4029f2da86fea69c6d3193514b5a523de9f850675bf69333d0f340ede61b26e8545e249811d044dca67922b2202edf391866f787f79876215a282db848f0a6f966000fe9f59d4062e39239c1f3e7fb0f5c5434eb8f24c2140d17022ebc198f21c0c9e770157d0b5984193b8c7f7755209791fc10aacb83bda2c1a230eae3aeeb53f06406ddd091b406f224602bd8ec02f597b262172e590ff1ff63b7c189ac37ea0c3d02cf4b78c472ad600f031b884b57cdff97922ae6de9fe1c24dcb4c0763197ba4a4fd046d7fb2bd9d2e37744e392e943c93f5d9ccdc7119ea979c38f60fd43b8c4f4e288dcfe753527e07acf3114a98573736cf5f92be14c2a496e4dd7ddd7eda022f79873968b17de2a0c79c902073990d4fd393eed29c4abe1ff05a780c21062b7ea2a1fe627bedaff0895be3f5e9f6f0f8dd7fdbd40f1eadcb1428cc0395d6d66acb929409785df17721d13b2e533084da35e37fea83d7b5c97c1befcdaefd216f7e0ace087ee93d5332b8961fe9073add42c886d2e3ce7efc53291fac3f3ef65acbd785a2b93f70b6c649e259d6c99b5aa5afa80f508300ababad5d7b32dbfc5dc363a4ded7f5cfd43ccb9b1b5d0c02348b232305ee7b7d161905f2c0d829effea21dc8cbd2e920621d775166efeb097c2fcb495374bb08d0733e79b1b585b3c1b81577c5faa6fa15fd73a218637729363caf93e99e341a7f7dfbc4fb0702ebd992f6d072dbc42ca80f8e63438725a028679e9520f5ef53f5a51ae687807c799856858a9be2e16d707c52b794cf9d4265ae417c5999227daddd3abb7259f72120bde23675fd9fb4c5cd1f244f5dbb2ff05e622814ae77c748643d3f9c5b8fc8c5cba2d333fcab4e39059bd902fa55e9f1ecff27e0fa71bb02a466dd66b6493cac9be46b13ed8df509f6177b80a2bd84c4b1bf7b365c28460f69fac236ae9c9cd92be273b001231439810af37d67e1d82ae205ea122740da4ef872743f4eae865a687c8d2b18f6811afb1007a7cacfa89084b7a252dd26b3ca7d83674d6fd37d79a5b63cbbbb3c04f3a4622291287a647e71d313567899376866de42385af8da22954ea177748832a9ca8484673be8d5dcecbe7c10ec8e72cd913ea0cf0bbf382feb2e20d738720473f053210a8d306cfcf19d23cc474d1cf27d9bb2409df5e91fa6c2f417021f48ad5fd64a1308a60152f73b2bcfe0ed9845dd08f9a867de9694d701be3e7aa2e45176ecc644d1f83ad3cc0306d7cc27e768aa2b3ca5a49df7e81a6125c46de727421b8660583ae698083ece0b2a28ab9c07d474b8124fc408bf139fbe57133315ffed141687b7b0be04b165dfc1f5dc800b9f832a8a3900d96ab86223f56dfaa820515491ae85ef8f715944a90d9d5a022ba0a2650634a1420b8648a7dcb0e61c543f35ce63271540e6a89b0bdccfb7a08473403db4f01230eb969f8f2c089082300e771b615808401b11a0d3dd679bc32dc256bf086abcfb8bc6f051450b5206c0d0aa7e32d17fcd5590d7f37f241499c07e01500b8fe4c57c04a3617ef6a18f32a583f667dab116e4ab3d254e56e42fbdbf3a180bc2259efb103e4348fecd0ea7acda7c316258e51f60ec01cb087cf3f0e3ca95dda71551d204dca61eac6b52a105b7811b4976df0e4f6b4c68615516a37131e1ea7019da74cbb36051cbe8c3071f51dbf753858fa37be3ed2321f2438ed55a0a4b971c5d18912f1820f773042780239cea7006f0c53d1f2cc9ee7c4280bb97676461b8b4da4b93966c03afada44b0d7837b5443ad8bff8bb8bce56ecdc4ca033a0f190807803950342c5e78435cbac019354863146bb0fe8fe529f64175f8f888e9611db6e57f945739f971024493996399ec6a6523364a61d671c5dadde37e82cc9d446d77bf040a02c768117f2626a3b98d985506f9dd6eb91b1ff9654c7a19f450597c04a9cb42d9d10dc25382530ced66b268356d80eb279668ae16ec758398c0c6dff40f0c6414467f14f49d707c4140231c0a7cdf3977d9bb98898657117576788078cd8c103ff55027ba57654d3830c4fb3df19bac1c84657091e61344db6348041b8b943e6a5288
8501df8f40a1ccfc3934e4654f6224e9db06cb382e8000088386e57ad77d53522ef595b8bb7bc9d4d02b144da5a2638104c44021a76f452052e8e49829970578e8ad211291d7b7c2be9c22fd50036c0e5ca27df8fd9a0185b141201584a2f5b713c9abc049a92ed77934a4a04bf89b325c1c6f4c1230d94d18fcfad81e46d1660ef8c6134836182057176695fc03e321183a1066548b5b8403801574dea7e91edcb4954451cff214bb74a4540f5a6e03cd9ec4429de794ab630479380797efa1558e7560cecc6293436914ea96afbf98b111289be50f9cc557fa9ff36b467de91494f2a0b1f5b074e50a274895b0b0b8f53571e180e7de77e7e9e26e1885198fb045e3191e5452647a4b4b46991c590b9bfc807e9a7ddba1040f18bad3ffd383b8f2c109a75102de9fcc4f1e14f2f9f223dcf7e5f7905c7e9f9d18f63558caddc1e277c7e14c0a1794a0ccb65842b057b411e2e4249ab44ebc82ebec08df6900a146e6ba2a3f12640b1f752b3185c19466bd11efe8a98a099d41cd7620e0a83b8e92f47db9a7b228b626e0f4f6dc74b001773da40cff2b0316683d8807553fc0d79a41490ec376bc3473ae75a7a3492cdbc9424c40b6829745b042bcd763d805e070a878bec0c74c28a25adb2f23514d287a9717145bea193916de2e915097d27d7450f60b98e8d2cdd0a129a83b0994826cb60eba72684a88b38b9bccf0e9c18aef84cc748c091f8f1f2cee60322bba8d7e516ac9b42df2b35a282eeec57dfe126fa255c390efb6d88f53135b2e69808843c67eb2fcbc18bc147ad8d00262d6e6d3d1180cc1e963cfe1f12d2f99c30e39c9294823e6d4cf2d7ceb5426a782409e922abe390ac437ad5ce1241c89d40def9c148ffd6ccf291c2fdc8c688d03acebdec8251bcc27d2ecd60325fa5a4588f27df9be329a1bb0e517000281a295e622f00df1d5c9b49c600d7416c075a6c49681529621e06cb3f01f8a5b5254841ef6bb783a8241563bfd5f0f73fe9ddd8943f487f2770f1faa0292240001a96104212a72a6140a31a9e9e9ef8c50618229d7cdcb42ef51733215b11b1531bd8731b3fa71d9d2fc2dc6c521f66696aba54f3ddc8f78b8b95d85284ac9d08f5fbd79654c5867a6ea45fc18bc67ef2e93d2162eb463cc63ae8482cdbaa84c8935c29decdcc23c14b12cab295cf0ac51ae71f6177f130add161d2c41719a53c9605e862c3100caa7b21b29d9339213e4cb1c94c7412ae6d54a189c97398754313757bb3e91765f5dd5e473bb1552aeb3c44a1a434cb2c429761fca9fcb354018958edace1cd742d9bcb773ad652717683c5dc5dc5fbf931820ac39b833eec181ca687552707af069fa0cd944c18bb22b122068a0d8c9d4423653dfe110da88ddd2a27e64c42cae091875d96bd4e000ae415cbb066b30a0b1e830541b6425c18a96d4c8d0b764c5650e94736b6aeaf5156a0bd4322a61adb855a4e24656c55ed671bd2807d006f562fb5fea3159acfb58b034b2381517fcda07bc368e4ea6b08e47d650ea7b96a884aafc276ca0d655ef7dcc9862f00fd2335a4c30db6d2b0dce60848fb56029c899e0c1bbeb4237663b09d840adead76cbb9b04a045b19bbb9ab1c62f7680f6a9e6098ae1171a86a604eeb6888c53bf00f544e8bac7dd05f16b632847b84bdf0bffa619147104c0885ef11460b1bd00b4ffaf89be9f1a47d3070e7a61eec9b515ed3c33912d7790b02ca741fc1b9abad19dc8eb65db52b13733441fbb68036429e9ab35743839f42620a1ea093d603705a4966d8266fadd3001599ce6e43547d2c4737ff1a35d13243b34d265bbcff58d55757e0cc5c9a86585719f5d7446828518fbac1cb5ac8ded926d4287a72a0d2201b10d3d7a8b359c70e3a6003d4f26ca31cf55c8ee949caaa649e9b89d0b0521eb497d23c6ee2b56ef17c9d27d4156f7f1020245f3c33f9dc81532d93863fa8506309431e383b21dd85c127493d9ec14b95b6f9ad2645c5cfdbdc4603e07a0dbb9551d876b7d614d34aa3c0a92a6be59a1a15b52b23d747efef0b3c8c83f51539e3715457b6063cb3bae6815d234b9e99fd463f43c5e00ef479ef24a2c8482476db06ee287e36bf8cbe459aaf27f6d5384a7dec272fba8c48e73edebddc05b2c452ed3ff070b6ea7a2eaa22cf8abb0eaa4f0676eb2b0f9ce567388cbb721fff5ec0920ca6beacf06a5bf8764650ada51e0d76a76c0a1b32c1867338d6409f60b116e2a595dd857e590db1d20d4cfe5ff2652af48df948857745ff1a404bc2e3b692a9999106d4df5279cba05da07fa6a1e6bc75b3135daf6a694709df5242cead25d78365bfa7f5fc93a2e0d7bdcb49e03fcfd7a19d093e80774f3ec639b9fe1bd362932d316bf3ee1105879d5c9780c9eeb2f2352a7521cf271f66b3de6d19666c54f68cadaa080d41a7679d5aa0d86006301ce35290688c89f071e3e66c77a2b0306d5508b58d5f0911c08badfcdd44c9a29a35e4dc96d0b168ff2c8e
44ab91604392134e3565c53e14166ef226ef9ae7bd8f6c7fcd3b47d81bce1b35ce39b36ecfcb255124c803e534129ecb0fe520ae71d6ec0f970b9326e7d9269ec9f49b7115926efbccb9353953376b535cffb95b722b907a60122ed47bc2981f4771eddde77060100d674f06cf85316de55237a2876b2ee42ff875e5c4ac438e3333eae98ee428dfad9f9318d1c7c36bbdff00fa2143af1e48f04e9f3c56ba6856b818687df469cbef16f75b1038fc0fa6f3a08502de4fa10012d005b1907126844ce75da93b1ccf7422eca4d507c966aa523465c2cbfae22a0234a5fbe71e6aa8a0dde467f98b5e9047b4ba763461e6aff2b9fd8d0cc7696cde3cb66b8c79b9cc8f1b09cdc0e5ba468ab36b463ff7ac5a1fbb56de20a7289e261526a3da96b43af24aa17d620b28cd8feda4a53a9419099ffd56b5e0271873c544774288de8e5abf39ccb07a4bc577328aeca239d7a567a26f67aae07eab93763cfeb62fb9adaece9e6da4cf6ac12429ef54e830115d72c4ec43c0b9f13349acf1748036047c3ae76708be97a1fc6bfc55bcabbd3acdb6ac0b76e063af5136416517dc4982009963459c2c68d1fd8d471eecfb6993eb7664fadc4146c99acae96ebff0db53d5d9629e4931da40be8b6ebfcbb512142621c8fcd6070d8882c94f382bcf1ff3d1dc45e3af7e50143b9040e3f133a3d0d8c37fe96967270eea6f2e36a9aaf1d72b34e670fde9d1d93792c2c7c174d3e09ecf3a0f7f9a3f285bd5bffc8b8651deaa4f72614faf7420d47b19c5e1413ccab47d46119020cfe20d1a051267de54c5d53aa580c5ce680b54c4b487780fe29aaefb9526a826b16357612678f9de4e180f33e2d9f9d9dddb0351605e231b18f077ed3538004920d1e66095a722dc1b8a71d3de851faf87b0651c6d2348c01494fa45387554d9bfae076ff53018356924fc07fe0f053d83e8e588ad84bbca8d936e7033ddbff1ffb7694b107dc6c1df4ab15069a76a021b26e229f668968131d6c0036fa5f52a6e64d39b4f41d686eef8b0cd13780ec006c2d51b614db2b9a016d00700d1c1a57324236f42316f302dbc7ed6e9ae5bafd7198ebed49ff1a38cbffce0ad0fe9cd8e11a3d5f2eef9feaa826db9d58de1d492df5f2188da4d41fb3325dfd88a6058d5a0b469b7971d9df8ff3bd811d3cd2247073d7524fd3be6d32173cc841d2f61d0c6371c85c6ba66e66ae79ab1bb43d674d758e6a11e91b3925c36e24beda922f3f446f27c8d1721b96e8bb06009db77f7d8a15c5bdd233cb78e1f2cb7ab2c0be2c7026f34ed9c60848c1146e5667251a28ad598724b80a7f33a1b4d2a4c9d79e4a624d08752ac2a0ffadc41f3e79b93383989489730aafa6163b5939cac80a04a582d71df1d79b2d95900ef99ac212b250aab023038b145f3807907581c113cb21f710c19f3e6c631e9079902e1693dece5756a41137781fdb15f25b377e098a24b44f034c3567b43887e2d8f4f3d0f1e568bdc5f136e9af62848cdd0676dc439a35a37eb0a4900b412d54c6b1600db6669ead4d81cd7e8a7d3aabfbd773f12e0098c5b11c63e41ef33c9dcc364a15e71e73dbb59797265d80b005b7885edd5c8dc2b6c6ba7ce60edea56c6fb8688a37b47f03c201a6e8ee789e16e3d9809880622c7e1128de11c8501e0447de7c5336ac3af5e5487821432c74805731fb81dda238f2b5f3cb7b063c275bccfdda8f729c4d04beecf96b872b6808f4e1f536f24dabd720e6a2570845ee947f68d49085b725e75bb3f02ad6b35868fafd3d4ea52cf9a48eb71de06b7359c04ce73a4ccb0b30a2bf047656d6ed9b49fe2a4ae342b97e9bb3ac86bfe8334f7bd87a3e12ea39617703561eb1dc28a5d3c50e978740c515268472bfbd301034d914e3438af0368963e4d95137aa97ac3e7c6d19f71a116c3753304b17b29fa1d02ae553402919f5007c5ccd6b5f22e73bd5fe66815aa3f87e93ceacd4ae838677f1a7ee30c58be1e6161f0a10fa381969ed0eb7a64f87b10ce89f3581f27266c1d15c57fb3964a6ab6f05b279a1bc6127fa1c86a3aa4cc88dbd6d531cfd6a511c31e43c7afd01e2049834733489f7c06934a0d35011ef107a605e98d807745a180e87a801787b9ccb6ec41d4753238bf498f0a16dd3005d10f30f2e4ad46c803989b10e629d4f845a1fe5a53233e0190f151f68c1649c0b8f32cbc8bf765ca8d46620221e111471001d74d280480241b3ed16394da144e6388597528a96945fc014f406385d45b5a168654562eb5446feb29f1bbb75ba05126904d007921e5703bef5c4b756847338371127c61f67b19c4ce5ac549d237b1903c2f2fbe43de22afcd1e6d02e54406d4206b4a8fc7a6889f23e1b79b940a5896d6aaac5692fbab2c93d2b16ed6c41ccd6fb0a84bf08b35e69a40cb4f07b7c12053f1666a04b0cf5ce880096d453705eec62c474063fa96bbe213d9725e19fd9ebd28b60ac19032b6368593d54a6ae40dd68457429fae18c63
e405ce29da3860d9afd1dcdfdf64f46587c6b4090c7631b8c8551eb141d5de4a34e910521998e25707db0b54a501e97562fd9d5100bda7f7032496396cc7943b03686a559a7beb67a3b9311c38fd0f3d13f358938c72708d815c6108c4aa32285697ed1fb0efb1d03ae034e7893f574ebfc74055690815894cd6c54adc50f51a1633b74f741697b052eb2f327f3c30f5e3707ce680d74e95f376511005129c6e4155f6c02666e48dc99fe7d62529d7a5ea68ddf8fd13edf89719132d0d8457fdb6936ed1706ba901e4f262c1bb4926b5fa1b7ac911f2055de2d5286ca749cf5b1213626c8986d002d62f3d1463cb9434309e4e32a30482270c7586aa3c3c893355d06478affe84e9a1ac7625125bb8f6737b29786942867be43edd4cd4081d55604d75a7d55b2d33163f0c7d0d521ca2b2cf8b7d51c0ab51dcf1fe54f8fdb6e2a6115e20cef382c7993f939a43c66482388ce23d1cc208fc7fe2360a64b59b67babf359709f04ca8fee18607cacd256e88f9d77afae3d1d5eb6def3dc32bdf99e672707d80530523cac61c969cfabda263a9dfca878a699785e3404acaa49171ccf0013c5f2c55bd73a58b46398052250644b5c8009bfa6bcaba512b89771611f8429f389e3dbf5370430e8b09c88a51443c787413b07b03d24059db1a6db73378350778b44fd6eef9d50dfdf2f8ee825afaec9d2fddf885eff0b0f10f29164993e13f216eee8effd9cd7a17a9d7525744842c895a1faa8a8b5864e8e60cc766e171b4609a5743bbb844f6429969e1b0f5d5e8cb51116bbd097d25996ca4a3441250f2e3184ce79418df4d3aaeb54d1fcebcd0fc1190350069708bb4911967e9d12295d31485c144f2d739e8234ffd9e244878b093fa87ebe6718fc9ce94d7bdf9e517f3cf43f719a32f56502cc9cd13d5b0978279888fb9bfc3ad79a2ee7d497464a6489f07507b7c4fed4ed17e57fb5327b06ca9a9faa3713fbdb43b48bed16edea940b345cffc849bb6bccd789121f15ae479b27f0b7e059edc35e336f5b375dfa5b2526dec144839f24553eee2ee5f92c07f435a6100371cf50141ab7039c5fdade7bb42ef7fa0de47e1013058b9b88a9a742e2cd0d11d41885441beb935e16bb7f84b27d696504d39e2c225178ee5f11700c88774f8d4beddd11b1113e5927f93b0dab21f0ed5b2779d1974a6f440780d6788a9bd8c6cba1ead60277d0c890263351c6212d3fe951f181fddcc36ccfed33802504f61004eff8e73941d459c6ae1604422edd6cb6759953c95d77f412643fd265685bb7b7c4d6d5fae9aca3141d6b185156fc1186ef65c410085b3503ab1203a8714ec277a177fb52ad253f041264358a1503a4bd73bc1707ff12f893bcd6b4954fe49c125cdebd4722d1950fd4b4b45dc4f047fc0feae3588eca4ae91da6e3c857cc77d8dd4488163bb5c923e479d37e30c22082b34dfb15a5d61576fec970b49e42dcf0e425cc33ed0acdb9811010ea612f6b9d479e8c456b8263ee1f03123aeeb793dd17cc14a81ee6479c2ee5c8f24ca81a0c5eda7e57cc6b863d39aa4ad2f28a1e4001d3fdc5d10eabd0323803a8eadb8efb57727ea573a82fd8cfff82c8dc8f2ff4d949775a1e1701a25e480fb0a4e33db5154e5a197d42a4942b745374820939f7a5ecca721897c45e3f73f8e334b5a1b5a3ea388e5ac03e696fc931bbe0863da9a506f9c710609e26a3200d88fee5688630728e873031d7a2654c5790b52bf01b3ccef39d5683b856362f918b8cc4ec73ba17ddd14a45639c060e6a2558153c646c5dcc5c8d62eb4c35173677780f40e64932a4502ac91edd971bd4256a23ab13c95ac314193b179442c99323c2296dcf44a4696c9da18267bf5605fc94e716df6317b9e3a64087802d61583984486b50ea5e87e4315edcf2d7e95a6db1ebf1f019c3752a1fde153a8a65e1c2e748af6bf56d190188a20b5d9ab1d86140e1394a562f9c0b5014b7041110491dbe0ac594330d763e0ec10a4050d853e0ec6483fa158d24c1074e691d40dce1ad50be9393212c81c908d11630217be8ca1f36d5642d5962a79d6478ce344c42c1f4321837718148705a5c8cc328b081214fd6b77618f10001356ae33fc2f467fcfc1825f9c5023a070b571550d53baef7da9e9feb935d4c06c03819e18fdeab24e82e0d462a34050ffb98808020f5b0351f23ccd3962deb6cb0c7794584a008f28bbc09e8440abe81fc54c6dc1cbac67e51921c55bf5e55a92bf372272c4f34754e9c2eeb3d5714534bc27a466b886bff8d802256a03211498cd6f5807cace33403fa6546b336dfa4197c1110100f2d68f0b4f8196596dfee821ec17c16515ced37b8c0299931c60ef5f27813038769786a794c8db0d2970a9e293d99a6865c534bb18bc360701accfb0c83fd7099ed1f3c74a6eb8c8624c14a629aa2b6926f832170b078a5304debe84edb75a744aa2903579bcb549f6cae842f7d828620b7c8a97caa8a4b3e28d5dc3abde3
84b993eaff15fbf4e8043f03a9096948786e6c7afe65c24ef4538ace74763cc7c2c0826cfdd9577e7f9042e9891492ecfa972c75081c4fca4708dcdd7e0f1c364f70e5c8f5bbc37b509257c127749f1540aafad67630c8daf27e30eded8fb9e5390e7c79cc4b1d48d8e973ad0574a3749c233a9dfdf992be63b45bfe134768a9f22ddea046f9b07f4628e6c33119dd093f02a39f5ee0e8bd85945ecaf037f9139ae1636722c04a91be43be2933707a3a2c6f006347706532a21cb290f4ebc5bcbe296a9d9cd9df061b8abc911c4a4e822e1f8eb4108189dabf47cdc88af3bcf81e0c848675df06ffcbbadd26b30dbf9cab80fd6dff83d0722b112dc16bcc420cf89a893b3f4f7521a18ec172c0e8a8c748574871b2830f8d24fda77552cb9f88e8ba9660ec266f1d8549081236e0f5edbba6a867c31dc051474625a01d6e5f407742f87255ce3ea78c2a8512bace0b1f4238eb8f38eadd53e712fdffd706d71129fcf15d6ea0a383fe662e4e3b3359f1ca3aafdb4902da0d9331f0df7e797b3ba56b6bc8be41c865bbf9c155a3885b03e9709757ee058a339eb48ba9a888a67372aec0e0b03c14fbd0122c6611af291b1a3235db641d1b20bba5f5c32ba5eb23bba196e61971c341b20a85850e62676062350ff83203235f4ffbb8ccf52dccccb593f7cb74d64bf91eb04dabb73dda5ec7dba5d316ca0f8498bb2ae3269f7b8154f5889116bfdeabbb1607fb3f70f8a422b3669db3d2505c934c3a65ebd002bdc61301843435f16fed5bc54ef3d191df487b500c124074b1d344da6fe3077cbc03c860f7ff6f42008dcfa9f44bff73d01ccdcd4d0a6d83a9883e0aaab6477bf5ad8d2710a6e436ac77e070ba9b61dca6b45e1afa591ad0f136b1a07416e79b59edb33d6300236a6d5cd9ba9668981e0610cc7969286d9f9eb2c87eac183a6e1bf6e33905f78c82a716e67b1d0ec240942432c18282f86fc75675f96d707c117e6f62354e7ef1c7aa53eba1bee0131101b3d7a40e6ae18a32bf09904c55a3e51b34b2a9a5024df658a3fdbd7407067ccad0e83ef69070995b4218585370eb2ce8200af0604d7faef0ef5e665242361dcc082014bc5482cd98bd1aa48e2eccb12701474dc98bdf50926f7d01b0b7907226038b340fc0a969ab1e593d0f8461ab8417a38130ebc319c1805fac6fcb3343506c0207f0a395906eb75271efba53d762490462f5b8c73727e0148ab691aefe5321c1c182ed4148ee95a6a6dc2055d7878258422775f6825658093d6151707bebea2df310f829dfc881d07e785336a0dad8528aecd862eb3e7c4ae12b482ebd0c6424a0d5e929ff172ec90de217c54ef61f5a01dc0c91f0a0faeea1bb87754fe5b9025dfca0b0e9a93d6d48074778be120fd7b8da68311747ceb11c9406f1fc31154a0d246a1cabdfc6ae12d39b1783246c6c914994172660768623f46ea4d65ce0a0bc3a79d974e50d25b945bc9baec61148736ea31e3354fdca815c61800c7834108d3e6cc2bc12bb2808c69366b609374765fe30c75f40abf08f5188d28e3e9e90a515b3ec843af8264ee570a2fd69171eb29c2052525987a9fc338272b749729e42b147e6ef76b7a8cc7975a52264053f6b3ee3ea0b295d269f1ace681f6144f8578407112b9aac62dd76224886cc4e97408c4fdf4c54ad4c998861186d2b3aa110da91f3662354b19a024363316018b58727f02d0a10756719c32446d83d22cb72cf67eb3d412fbb49a30a9755057017a7bcf60d55c60b5493ed5fd0af205fe74b5bf9971b9bd9e0fec257a8378771bc1f5b162acff224f9a3c3e4ec2df767d87d70a45e61b023cd1358b6baf7611a2f5cc662c112f627ae454ea6093a30650a73ced5e0efbce14c6a5c795d6dfa3945c7a60b584c10732899a4510ec103eb4529c22cfc07999323fea7baf4d4c9da6a598c9ef38e8ea5879c6dcc3f9c6af7955c820ba5aec2d12fb8360077a946752ca9da42986d1a6d62f62d1b2b89f479e829d8b99952d18110c532b6b3cf2274a11ac4a46286f01690f9852f0689eb3200d97700e83ed19ebeadfb66cb95c3392547f8097cfbae2cd33b8d028e25e517eaf59d77b11731d6b2edd738f8cf17db86da169cc59d101c138722984c506beb10cdaa957b49c8d319c77de7c4db9c7f919f1ca32f0a39e477cfd471115bd0362ce518fe5da9d9c5506969717b1c1901282721cbd39c16f58855e06a3b27a0ed96ada7f43b96e44828f19dcba012f50293d3d6bc1e0d94102b4360a32a0480c120079cea417236ec960c92c0b5891b1c0c214e892ecb21d70964e3643f718c67079aa3723a5551f96d4181a15607d14dd1123a6ccd07253f422c8cd7e65d5cd822dbb45d759f4414d1a4c31330ccc78c83a8cf9580627c5daebe2291461aaf7b4da15dc17bec28f7ca8db09b1965aa79e45a82273e4a4ae91c1ae01c351a6df030f664daad1c2695abc20648eafca2ef7c1dda744a07b938afaae9e75aa82fd2f
c21c01adfa0a3731c805b598b73698ba60dce8ca14fd0546ad3914ad594590b58ddb04b858dbb8110e52a03312254d74aa3bc54a67dce2a7c86a46c30e1626feb383ce2f4059b302fdc23e0f39679e15f60136c6f5ad3640e03dd720ec365df9df3c5eefe776a15facfe7aa9ec7e0ba8da26d5fc2a98edc549b5a08814074b559c015ebf9eac8db8e8e32bccc1c47e3a4fea2b0d543cf89e07df70c33c100b399cf948733620f8f37f6e0d476f60b636c0f340beec8a4172a2b683b89422a3a95c04e0b2b7a67754bc37c6587a970e9078a38345b54b78eadf29976441ba7e037bedab102b8186a86478ec3638d73bd2415c7d32832276d500fa6fbe31a32fa77d3df7e376588969e832edbfac18c41467dacf03e5b7494eaebb587f59ea0f750b26f2e139aabaec8cc061aeaf43f28831feb075d6e4b90e2e2531c1e5106679fe7d87c88f11cdb08adc81d0f83c3222f800995e2890ecdfeb71ebf5589847ea6044b3cc11a84f0474dc9e4879f997d4b540b0079099f752f008956ac1f33fd3eb066de54aea5a0aa84deadf7ff809b4094aa67538a3ee04017449211a6520d349774a1aa2a6ce43480b2ccf1b5193bfca6db3aa3051818f801f54730ae1e3fc499842643f02744dbbeae3a68b2b90e57e82438832286a1294278270f373d49dc36d38609a90161fe0afd865b96f94507160fbdee88ed911ac1032542434c782b3197c80fae3551dd49f012b51b4218348895627aa767aa28fb14ff1e7f6078a9f74cc77337cb1cc7b02b5434caf2a7d42ca8bc9e9e162b37610fec1214babbf255319ef21d3ded6ec431b735687d28885aecfe8ab3707fdff90383be78765d23fea5e3e198f7b6ec2e201d27904e811824710b5b8fb543c3d95dadb75f67383736bbabc533bf10c4ec0ef841baee2fa031eb9ee0ba44e00168cf9c2bab26e0d5f044eb895a7b86057ece3c6ec87160ce9de83e19b0dd6028139e71269ef4bc47ce5c8d954a58e52af03c9444d6a2d630dddcbc8b89ef52c129491aff6d8bfe0b60cfb1c1bd7f1413fcc60e32f9d35e9fa02887d211a68bf69e87419188a79732606e9aedfb4f86b8cb629a05b2256274de2a00447b75cee916c999044c751c4e7ffb32e644cab21d6dd7063cf1cff257128c2b094b6ddd08e4ed6184790d5e009ddfd1d715fcef2c3f06bd13fbc3cd49be21b067673dce397683ed5b3575a3e5eed21e6859f8a1f81102898e57d72815e10fb3ddac771cf67b8c2ab5c9b575487dd742cfa1ba3613e1db97efbad6321a724701f54bf239e3a1dd3b3903e7300f85e783f43035e07d9e2631a5b7d43a1e0297b44c758414c5ec7a4b23c2930c661fbadc9bc9d40bf56df9a6666fd46053f1107cf95a113c0f05a6c6e65f5a72e02cb55f13fff0a8febb6531384c9612af4c701df67727f30fa0543334c031335a179b0db5e19dcad9ca86a490939b99b65905da6540809667e23162127cfbfaf867edec71a71c503adc6e0fce2e81bcdf6ff18179151621e8190313b9c82f22f7b49220195099d30092795c1548bdbf8e6e1d15a77495d6dec05eead2fe58e3f3b16de1c16fc2be42b53ec28b7b77a5ac323a0fccdda3be499a680067970073563f1a28760f65f478266ef40c821943739f5981d700dd1a7499cfdeaf362a0e9a5e91da92ad5d488f83c10e19a6b5713ee5df641cb7f0d8f3881ccc4bfc2c7fe8df38048e83f88c1b44c3308df6d1b81e0e3d78a84338aad012b67c34cbc132fdbe2ae0caeda43ff37e79a7b6c89047bc7cec1c7948e96d26e72a2fe9c8c05c6ea066a935b1e7d076256cfe2baf7b05fc518539ef3bb3f66da0ea7e2b3b6233c037e604107ec3a8d3c008848454ddc664d88dc739783f1bcb6e7e0d30690f43901cf91d9f831aa380097bd400181957f129f336db2d4c7f954214c8cb29afc2eb189e66bf4b5fff03efb080b3c57c17a1fc053a401a584b4add0e33b34c8056591cac4fbfb107187a3b0ed1b8c4f029b594e53ca25c9b82184b1108ea7de521cd51db82fa635336b06412f8758ae6cfa08ad8501c7e516750d30bc3e81db0713159d5c9f931aa06bca99378097a6a481bd03f3e27f93c66014f030df87b390c05754484c0fe9b446ab7e4461e217814199ad666e00d2d631978135584f1634c97b3682979f7d2f9cc785b6e3e0b4ad51c714d10b584b4bfeb7a2423fb8eb839b76001f701107e3dfdfde1acf95a47106e427a6d1ada9e07520ced70f4fdc08f7e292b1bc394e8a3dbf4ccfc6a223c8cf7be4f36abece5d35b8bb033a323c5b0e83d0f7a2bc5ee173c0b4f4f331d4b519157d55359b19a0711ecacebaf84ef05965ed8dc02b09ba67fdbf249908f6b9401dcce05e063458cc9ac44364712323fad3de6b41d5ab31ccd12b025e14280b3ce0741b507764a0cdad3a16296e263b5e448cb63be8bca15cabaa97417a114bd3186190a3c3266ad897b70a6dbd806795b0fcc8d719ef2dec642eb511dc34f8473b2
ee0de7da20293ad47c24085422470e5db665084fc461699c8a4a6f85118f300430069cbc37c1731ec217e9d98a96fb2297572638d772bc0431c2cb8e3ced4b8590dc62c61e3ddd968b920d261d7878d169b5669b5c565012c5e1686b6449f5ca0d8f37213a231e84d8a9f37609fcbd4c5a2118cdaa3d90d4873447adef531325f87d02954cd0ae8fa30d37385729d78cb6c82954fb0aa2b02224e76f330d881e79572a11615b9cd6f03a7922246ec72fab04f6e3816d93c2a9977d439ab03a2b6b13e18a888a361bed122f28caf4aeee5ef9da6c2a81528c320f6409e6d2f26de88f4e74151e57d6c3f889813d411b6ba02c278814be1c096ae4f2e1f852d2e41c95f86303a225488b6899c79221550084c0ecdce55572ecaeec0b2f7991c29b5020648b09a005673c9bbb572fd63a16ef9bf07669dabbb509d061b3da2fcc3575e89d68a606167fd62847e7a3a23e0f41d3583def749a78b21b4404226aef2b05b287ef126e2daa4e5f21e7be88e9b83ecc34d4e84e9f81c556bcf97898ee96eaed577103b0051bab1d0e277d6c0d487ddb6b88395d266cf4154e591e07bb3f67b50f18de5464b2f0e257b7ae41b3bcc2a5b6f86c41728191714bd48d6a57dc52fb99d4c9a739297164091baec1365c992619c3957e5c3301a15558960fa3ea01fe523e504150ebe24ec6e38cd4fde5f46e3374bdf21c9841daf71eb93dbccaee1fdceb1651f04f2439f96a43ec3e5720802a975f4bee9a9d6b25cf29d7ad54dd7b75c37742553ab93ac0128ad7ef7c0c651e7deff8b24f106b32bb99e6ca6252f9e4dc1dd37a698a3615e46f1c01db9573549cb8fa5c574d7f1b49f9c275f102920a55032ff04a22fb9df5f0559c9f4cca6adecc3f299fd815838890d703b37f28a107e3239b5537655049566600e4e36d5ad14bee46d978ae22d6b8f6e6ddc4552743e48271042c1e0e642ce649a1827b20c806679a2b9ab0f543e1bd6a3f59dcdf6bf3f7f51e6cc54a11832387f8d18957cc7434c06d3b24ff7b90ac2f70023de0207aa45c083fd0b53f72258cd25fc0d5a91e998f58dd715f95930d1206b7f2c166b7ec9dd5ef07b1085099474dd25e9ea09b954e327efe4d57cd4870addf14c51a52ddf78152e2bcd4d43f6ebae0574282ef72fecfe0a19bbb48627c73780dfd16deb1989721d80aa9bfa92d645be0861fe0515c9c4005d54dcc125af115bebc0f0b4170b7fc5295ca3805e7b9f68e50fb4cc14145c60103fb697400cfb8d6ec80318d027c632adffd850c89937476ee05c0943e6f9c89597e97db6251d4a97a97c4ba11996517edbf2879a9363d05b1b3437fd507ca9a990a1631b5c539d847abf157f07c44300c434193c8fb9d0429a53cdc2df1e9093552ca8e0524686731f775b0edcb37708a76320b5a344f4e13083c7c755a2e169ac5af52180ddcadc8cff5541e5a7da370b0934b0c54de4d41c84e94bfe59f0181bd3cc4654c9db564bc5fe5492094d86006127e3f71312ffa4f80d2e16b37dd3ce52c98f35b60f57662302100fd76b70749883b39030322668655654473ca00f1a5801bf09d0bf26c91712dc5497389a17b2ccd36238d886f0266998856a7a26187557e9fcb9d035f1f2430c02a4d4e0bdab19a94e56d3dbc827cc9d585e5397785f0382bcc2031d9842aee1912f121f4b2058d5153420bfd5769da38b5a7b11857cb73991cfcbc87ec13c5022a773e584ddd1a526dda60e68c3c5a99170503f80554b2f15e7729b2bf5f12dc634ba5905470a283d12cf1a5bd3c1c724caa7ffcb4c83a8ca627d72c76659f07c1e9a468228b5c3996610a6b423d2d7285fc4d6c4586ba2602c5a5357f2e967532d4f46d7862978989b069fe207a59da105f419413237536c3e47d5c31be390465e97ebab39f35f3ce4cf202dc412f9f9b3b2180e597418b36f318a892552a945a31d1a882e7c0cd206003862dc693dd7b37c8cd89955cffcf511262293cfbdae0c5580fd0266c196c0960d2835c23d125a15c89472a56fcc102609c569a8a39ca464b4e58aea630cc10d9b935a6140d5366654f2c01489029e82ecec49b8d0dca0d1ce187b69ac8e119a999fab7cc3cecbceb39ddb50847cfb476763ca41bc090c4d82a54781692c23e13d29e29ce45c67685accd6c360faf6b07257839d6e95bbca006d81bcbf3f9b1a0644a396098401195f0ab3e382c44a7e968f41e447965b3bf3c4268527540b213812d60501c10a0669ba928593d8924c0ba4b00396f56dce06ed9919bf1881e647c80296740e600671d41ee8de3130f61474eaee1ecde15ba9933384d84a3a18da15df533d37241e95838e85046d74d9355d31aa05c51a25ed0540fb9d0eb24db8175f9c34ff5f258d4c084e73b4f3d44aaae180cfa115b06cbd035f929bb680a727b3620284299fcb011a33b95756e9fdc47e6dd711fe516372dc81f9b4525d2ceec644810821d4904f4828090146e1aa54add8c13392db3ef98f9f707f9c02
e53fcadcb4128f5500c9bd6a0e584f4c366670e65df0696bb507c5b88f32e5e6225aaff75d1b7963050df6a74bdba4c3ee482802fa86d31104c858147ee0cfe04c7279157ea7ee38d58afb0fffcc6757abe528d201800e3e853f8b14cfb8bf9904a107ad92b2784e72c7de181dcf28f42cd0c2fa3d1c6f5952dbe00f7531965391ad450cdfb5c7846768fa4943dcc9e8d373b3cc2319de128b6a0c8e0c6b8e68eae2b09ab94c6e058fcb89af8fcb6aea03b8c5f011513646cf3f3f9144307466042da53a87436dd1bfd84a5561f39a14cc48cbd8dcdf138344511922b7221a1da61a75c214b087f39e9b84abe0b029d831d7ad6e3569ab2634d2b9c7148c07894b5f2b30e0494b5f4f4fd54374fc0bf2a9d79171a456781ea17e66d8924667cf2c458037e477ea8b1626373660032cfb41437dd3c16426642ab106f50e0653623cac915010f52390a25ed591ac9a0955fcc19d1e525a4053e0120b953a421c5f63b133205eb220312001350c64d0bcb89e2f3145678f39a69072b56b6818c20d8199a701c0237537dfc7cf3e07e889aa2298b18813e7a7b46ebde8b2180c6bc898b77925952e47dfac5ab7f46714f7ee38bec5225b457f8eb86589a32d65f1bcb2ecd665e1d52d04f586a2d72864df38b78646716283870ee81c92810d08c75ddf906862c08e0473b2a5f5d728e51d518d4255ab49ff44494489ad2c042516d6fe1622ab78619cab2f03ae98340d8af81328094160b4c6685bc022fd96b2f128b9791c89088fa3fb0a14bd988c428fc05c8b885510e326685d46ed7176fa4311b19f2e6671d310e10d88bc6900ed5cbe594362cbcba98a8578d0c16fed30c466693428a0e97760ccaab71b0189178390eb9c5a556d1bf46b4eb44fe7dc8d8c112240bf827413d853e7028ed3a3b2d6df0abeb7a3e9c2bf2dd091bd4dc8852c27d5065c4bc9d5c3f554505bfad41b87c699c30f3e47583525fc47572eb3a02123f61350d1a4e854906dc7ad4f3e479d1f94bd6045b7f001e161d7cec5fbbf84ebcc54f7c34f154faab8b22f3644c84d3d70cb8469a713182e60a76d09c45615fd143f1ba57ea613f0d21e0092d4816a57e79951dd443ea8e4c3b1c9769ff08da372e75d11697c055728d82d5c5b143afcbbf9687a4f1dfbff33a0c6411607ac71c3949311fee8f4577dcf1e3bb41aef5a5e67e44b9af1a0751bcef68d4b32f765e8d1c3ad26b103eb2b94c508b042c5b02c18c4dc4522868dc6a799c53c6b51abddf818ba0a89a376c4158fe6844160f82996304ae58d1168f6522f45d52438f23d7a99a73b45a5b0b3ff7dc914567d1f664c32a83f488e820346f849cff560adef1f273f283ccce726ee5dda8faa45a757973c7ec472751853dc562f7ffcb332e8659482da27f1c643522a4643506e9d9ebb8f7c0956e364a4e1345993245e43125f0040f5b3d53e67d7e83b87dbf1399ed428d04a105419f00e88411535d1cb312e473c856dbc502bd381ae4f91bb14231f677bb909155a2bab73c5080ec822309063dcf11b70349bb935ac7b372037e4552476b3b41d13b2d216520cd7a4f00eadca5e32915d4083cef205ddbe10e5c1f87348667e36a6f7b3c16d154244dbd74a8ea3626c025d24285b1fe82780988c23b3e9ade162498653d938f423c004a24b457dc5871aa291648f91097b4b86ffe5b98eeaf0bce2cc4845a846a0a741324b22119259a3a937695150b070edd691597c14b234110a7c5b82eecbbfe27dc3f9fbb1866fc133ae382533f3832c35304c1db1e63f5a1e5fbba5823572ff11a84109dcbd5e97e3c104e175ab258ffee170bb0182848036815a5d51a5fbac2c3c6a41ac1ca5a1885c3c60333f5fcf96ae03e9ffd479dd9fcd631c54ceb9f41aea4361f30fee1e76809ee9faf31fcc55f86e59d72040dc42f6566e2f45fe62eba1691ba86722f2a1692660631f33fc658dcead1a90968ea4f5e974a38986f43a060b401f1d7bd33565fc825eda32857295c8876ce5b48525a71159d28b15952bd6cd46927c028499edc0c71c6fd2bf42acae39ff9f88f45390f3b78ccfec3802200fda30d57c4b5a35bbfd3c44bde0ff91a08a7057f6602ede684e9d386526920d5b8285a4d756756266c683ea2b8c1295fc79572cd14555be0e02ca061b83bc1700d413e11b9638b279f43ee5dba4c9dcb7bc80f2a9f8199f68adf54516201db113fc2c82b86285031d6e7aa0e601292211685077fa1e99d90f0d410a6c3f0d21b6dcc503bd485742cdff40fae189e2d8cecf752d55140180e2bee3eeb9d03d97388a61dd20bd6e85887bcd249aef40c16ff57b34bb060e851c5f2cde8ebb476e05baae1988a7e8193fa667d0425cd904ba3cd0f68efda7af0e56d6d0de85eef4c1eb61b16cad24e0f51d1e5fc7b9ed94e14730df039fe5d2d5f2c9bb9cd226dfa2d55a449a18f959f1f3c5712ea02716bb43b47546cb56f43a93229436bcdf7416d5b33bc457c2b380ab07d3895265156
f5e609e0d72e4b745bb8877d552096a36c9edc0b3be568a187ffb83a23354c2b3f065e6e81e5c00cc68c4673de493513fc136fb9dcef48211df39f6c3c16a6f6044f66648ad850f0e0d6f0aa20fb320ceb84eed296e0df36f42ab246fd894a0f717c54c268dd8358c0129746334df233e5802b7dd9f326dd93586c986c92cff2dccd2005cb30d9afd99c22fe7b121f421d172200520aff31b64e94e7af0c9ffb199d74997ffd0b44d65acc49d39852472ec0c99385cc74c1c121c5720956561042ce482573b9e9498c3e7f15920ee8dfc3496460bc5c8631402c093e0db82977eb3c4bfac4940a1fc0c61f222a785d3db02ed5d9866f1488cf04f57d6e9cac5ba0344dfa1b36677ddd30137714c4edfc5a0f55536fd00e763bc2a188533a2d282386836505c618b4dadea28f2cf1fb591e6025c4bf53afc5761bc8fb7863742b91bf13681603278f1fd0f1557d910ff670ccc03c8abef16d505e8ae9ee0a4c690d01824394a66e00395599eec82a79e53e4323a72e762890f022c67802494ba6ed5bd1f0efebb37605fccce9f9e13a13f705856c39af3640f8228fa1e61ec9f2332c1eecf83164d4fe0fe817d359ae7acadf795c0be30924ebff2c37dc1e1a52e566943074531798d41352035892418bd81b48dac687abd9780b2f30b421eecd57ba75d9142c5c95859e731ed0c74a8678fdbab6fb0202f106cab0e85d8bdd16f4013fecf7550c6d5426bd116712be009ee44b815403f559f5721c683b99cadbeffccfbb1032c206681606543e72e704aaf60f6e1ff5eb3113f7366c7f65151e1d555d0261cf7d832e2bcc3a260519a4f12ef46ce36b4aa883094c45b0c38555a0f194ebf8ce4239bbb27c76c1655b4117a80f2c633d48236ee7f76e8c4edbae887dc6e1c34b2c4df45caa1a4e68d25e103dfc6ceeac475bcaeea266d47e13f6a6a6d85fcfa3af9c0d76461ab78b4591b9385a51ef87c86a49a9df1aa17407e2df1b184d23223e9d647fd07509f48d10d175ef2a03d6aa8364eff81d25a1bfa3e1194990ccd4bdb4d9b033d4524a21e5f83e681d67848d258add97e6b9730bd778b2981dfc4ffde80665844c2c46e12a1c3d301808330f34908cacd38a1ae2015bba174b0b5e9c89ae508578cda45ed2710d2f9d3c09687f765621cc4bb1f3255b7f463066aa270b5c23b0e5b40da726763650f4230a5acb696f3d3b787ca41797ac0c3979753ed644243d44ddc82abc311921fc6eea6a6fdf0c25360eb07c8b8d96fdfeaa8c7737ef8fad80f85633d8b9f71aff6b8992f24f9f8ca72ea77a9e1897d5a715f8fdc63808e19717a149fdb19d7281b524c4863a9d1a6f15e6cef0614094590ff17e21d1ebeee1e3c362ddefab7a68934b33f2efc8f33c12938508ebfc0264678dfd5715ab4996e0a5e745075a35a1c49994ddfec15e4b9fe987dcf17c32c4a6ae68105a4cc9d3372db41388c9c5506ca5847cc239f7e88afe9a4e0048e2a30b79d352997fe9e97c70e57c41597141e340183b1ec6fc826f7b364ba6bea2253b10081d54c30803fca6ee031c3890c4abf1418752fdae819fc4b99595a764c4c33f3374a3640571165a9e13120dc1dec0d6f9a2019a438e4629c53b49f72a77c2ec4bb561bf2a13cece5f68cecab0cebcd111ea7866f1b1abf9db8b20273c3ff45a32767d5431a2d45dafb6cc14a882b0e12e4ffa3975c9716638e0b718007d53006611d768a5517116af5f745b0d7de262839a5ec9a073a39cb450e0f53cae67279261179fb388de4a0274e29733de78067cd05beaae7b443200b4bbecafeb23360c49d61ccf46ffef765e58f1a2433b22946287da5187dffc088dc57fe709ce1e1d5a37c2ab925e6f2291a213cfe59ce726cefe2ee963d4f01a1bf8bc3bcac009b8a7cd22f5acaee2a79ee6e4a8cd220e2792acc53045a656abac5f92e15df2ee709372b667dbd08722137f587b9858511b882e387fb510577372edcc6315c4f10ab08ced682eb5ce8fb756e4cb4d7239bf731cbb40e49ff166db1d9550499d01bdc1b88e613123a2234d0614ae3d09bcad3513eb2b10b19bd1edb14595e419427471bf5570647e11122608d748fe0f65ef394331d850f8e2b01444d83c2fcf86b0d98173dc5879864b5755d8fb9d941f0ffa32f11c478e84791ce0d303830f68e65c22d03326762ad3f68f5d12a65c86c6e75737be1f4b25c207d017524e843ed9335b7d4cf4ab29016a5abcbddb8cf61643c44abfe453d5b38c379d02ea16d3ad4de2af64fa9e9809f9ec9d3187e2501663f11a0bf14b05c691f68c5cb848503983989084121addccb11b8da698ae04bc00bfb579117133bbc44b5a011ac3365ebd9e03e3fe3281bf38aac50ff0e1777f37efb3473339d57f73c0d7c2ece95b5212546f19aebedb4488f51e8c4cdde36cc39a5dbb5c7821c5b8e8b801f8bb71232f5c25b3bf47836fdb11f5c61dc050effb54ade56f076b5347b0e0f47abce3894ea19d034fe32c8728f05
139dcd9c78fdb1f66b213bc34c369becdf8aadad6320c5b3cb42f729fa86731c911850b3c1e3f988bae585c77ac409f4c4db50aed1a2fe03c0fedf34ffebb973ae0c95da225e6189f580634df0f8ae99af478537db738b5397d284254e3171ae48e01b14123219a2a6f007b3b1ca88a74f524f64088099ecc5f75a637890037b940d5ef8d8e675f4c152ae6d822583d5b610c58364b596ca071d03999fe20343babfed2d6f2427a2fe74a9283b8faa63a7401902f457948ea36e16a59312400424b19304fdf89fbd6799a39e5c313127603ee232624b54a2342a4502824c76e660762e4f9670e0017142ee26ddcf3739c7baa1bf45b72f86fff4f4aee8380ed4c4f40e3c1cdfdd9369002e77638a57cab032503471ef437f59445b98c0aa47627e3db815bea20b7a575ea657e493a79acf12767010f0ea19dd2245f97a4fb90de8664f167f0cddfdff016da7966c7d1cd3abc760b67fb1129ab929d7a10af23f6bfcacbe372231182b8bbe2b27db1316ab86f6e5aaaee4725cd03692bb6cee4c88ed98dd423054a6097f47b6960592511cbaf43cd1ca7d33ca9cff923d125146a11b61cf4e64a6c2457a339a2e301a98cf19de57849213eddbc1fcd2b8ae16ce9ebf613d73fc2e704c4b6dc6dab463fcc595f8cf6b333eb179294580683cb658a6336fca0e3d3a004cc51c9040ad50625a8ee3e8cb8b6c353fb1b5214b3f6b68648a291c065f49363e20557ff838602960dd9014d6ecbee53bb22d5bacbf0daa792867e2c7e10f9e196472240782bd3cfc81e6d2aa744cf4947e3dfd3d63745ae39889140efd06985649ef4eda4070059abffb920a446557c618e9cd540fb356f1cd5da9d6ed274059f2a3931e0e5541a71cc5c8851b3804739fae8693d48351e320bed4e38f1760c1f54a835b7554486f443f474fcbb6bcb23d6ae5ce9bdbeb70e0cf6a70a581bf1ce39db46988df9c4be23d1831cb9ee23c430140d32b1460679094fdaf1e297a03e84ee25a114f5b6a27542155c78229c2540c5f77c6d3e48e79ed2a997ee553152b4d62dc38d74c9a2bba1bd8f4c0a03cc263bf1d3474033eec2f4a7b2c5643530c199c4228a179759f42efa211aa65ecb6ff64cf85ff380c471761e371d04a0bd0c031382201388d3b0b27bb68fcbafe38d631896c9f5e50d88d1f5b809ec141df8c219a9aa14215208725f1ef012d14e7ac7f3968b11b02aa583698f53d8c0b091d7ae3924674008e650d50a8468f2ec3e78206c3ee8904e1c180a8840b89784be3e3193b3366acc1c134cc6bc0fee9a9d24d596a1f4113de6e04a0d29ee0fcadc02f4f4d639ac3bb5a519598c7a7bcb834ef7c9c9fda0ed2f2ebb0711f52271b84e6e5ae8f49fb443a66c2b41d69a096311c623b69896e75169fb91a66ce3e10495e4974566d727fbf152547e9680371cb64d7c2ab8f8ba1a0600540301b310efbf73108edc774edda143579035e29f4b98cbb4a24fdde3b0823f1078632db31e5985a41ba896de3dfc576d04ae9cb25347660bf09fdd3f0b8d444eb7665e9dcb703e0cd0a749380b15872bf61e2aaa91749a0f28226f141b90310abcfe2fd96c1fb8ead7056aafd1d978c3add1750b9bdaef2eb2b0fc894b11d576adaf1dacc0c3514d9dd9d4926ee4dccd75b9ec235ffaa2419d3b8db8928d6c420dfa2138d1b98db499f19e7a5d5e24b6420eafc65ba74dc6eebc2d21e41867c9c61d256af1539cb8619fe74576da5d428b1ed2a2cebe60175a00c9f44da128f01f5b119899fb2143bb9278b6e07a4de1297c18a77920808305365ae324b5cc0ba4b5200d06a0257aef6ce29e6bad82e97558af3d69ffe6f575b34c9dc7742a4e953a6093dc3a76788d3c5d3440eb14bff6595c80ec513cd6256c9b7a7ddc940650439fca782064a737691ef51f72b2631a72773b29fbe9c2ebf52c1b2b8bbef634a50d20722588261644e709036eed163dba9d2ce427a3f68bb20b82efc7bfc7dd19d30216b89693ae85bb435c15a01ca7dd85c7bc68ead41e67337abd92b1aa23e4ac1e2b9bd1109bc7bdb3cc03f3050dc780e3dc7bfd57aa5fb3823019ed2593e8c610314f2ea6dd64cf189782c269e0dfd942a7ace5a9f776c33dd6eb52584cd325c4fb58e5cfc646eb683fada58eee886aab8a702d22873c67830309d6fca7ca2cfc6e057a5ba0ee3e8f6377789ae9c2dfe763abb41750d3012359f5c1d45ca45288f192b8d09c2810dbe9e13e68dce26061c6df012e784374d25eb3733df35d9429e190f3d0f9cb5fbb842ecdad199df87fce50e64302f36fc9eb4e6001a4cf2f2b69893f22fce2855321c2ffe3f44ca81e9a742d37386823884cf82ce83e830916d46552a6287ad4e5c655fb5639453780696d1293be20caa09e0718a80d333218983f7d0066ce2069c51beff485d27d5d2e0582235ce27319560fe2bf4979100d4c46b44b468e8b2ca790e35a2d909eb456311b7572abbf8343b402e4b379b80d7ab9bbb60e9b88ba
b88625e691c77d085a9084c4f161480da38d469db7feb49ed5cc65898f1d23305ed30a162ea28d0823e7544a843f5322d565f3a89edae3d5159fd31fbbb9e828d46d61b3302f62d31e696dd17991eafd6c649eceeca0fcc282c75f8948f5ec436a42146065359c0250c179857be2275cae697b1486a34028e3e5a082912f187d5331e4224f29dc35ec8cf0c397d72472cc006d00bd160e03c43b52919f57a19b9eab6e1bf17f3fce4ce4c172eb6687f5233a08512aedcfa735cb66967dec0757367dcfc71c1060bade939d24295f8069d5e766d4d64fb3e8f68fa031b56e6c290392ea9424815f3a6fdc26c08392a05ea68f6bf6bab7dc1f9d19070a9807f8ed6349dabcaf791a67f7f9a021f12299178442bb15c3449877a7b1508588f4948622aa0d98d76df81fb9a26d1bfcaca37ed896d409aed197c8f32fa92218523f96dc24bca6419f5598e35308389928f671742d9e67ca71d2e15f4404beb20434862ead87c4166434875d57316730efd63302c0665894d7f49685aaa87ea6457387501c40b856a41247118f3a876470bfac5edc597d19525cbb44aa075deb606601a3637b30d8e0e8543f900c64fe89d30de3c83357d28b5ae8863b0e7bce8c0e31c9e8a061ea4b802ef09a3c6b902a22e9d35d7f348491ab89de2239e1a18073fa681132db2c9b464c126fa9bb7b87c267a930d6a89d2019a0d793898ac20605170a35722c554aae9d69943484a0a7a70f466d45c0ec2c714f2dc922906fd4a68eeb3a4ed1bac8acc6ec2c1cda0b5aaeb0401840d79b26f44fd0768b9a33d43974fad8378645989a4db03607b1b27d47fbd622d4cbb4142bf86a229e38f41be19721cd685c4fc15634113959e00af431e208754d778fb7b5db8f1d1410888daacaa478b0dff29ef610233e74da791f3ebff268fa2767fdbb582c817e79837b70d253e9520a39856faa8fe8fbf7de9ee07fe99ba7e3dcbcb4452f170f041d6b0be9d67c4ebff8b6b261239dff23329d1e09c8352bef25e91f6d4d36ecda8bb0369a3810a0a3d57855a1a33751fdf3f8d5ce1d806fde29f158e7349340dc471e5e5ac6de6550a93b643007b138d778c17c2eea191154f5c6f13b0534b823e921aab9fc902446170585cb5fded702dde50a9149e520bfe34dd2544e21cbba02b92993580e8d3f17774399627198cb9cfd265af605c88993c279eeebd122c00c8d78d488ac9740b522c305fb6f3c53fb30f3ce757b576808ace3126280a59444eaa77ee18cdd62f62d644a579e4e499339743274745f1ce73040ea97bd3b68bdb35283e6b254c18ac27cb3d79507480a99a6d8ea2eb9930623dfb081ba1b9aa02c7b16e9623dac752841154a160d47f19674157251b6a552b71d241134c6aedb260110ab71539652d52046251377a1bbc8048d3b287495702ad7e2483a8cecc7fa6ef881fe60064f3be181b6b948eb59cc38338025e20d0cde8422a3ef89b66880b49a959a615cc302c38df4fa29105dc5eb6d84144d3ce639f436d83b041ffca42c96c31661f3a6bc4068e7a9f219368b66476f4894eca4b69367f523d423598ab088c73dbfb8d99f5bcc1cc02808dd6868f100a7f93be6a38e41da609e86f396051526e4792866d35818379fd52da904bdffbe6c1d2a14163e6f20c055ca7932e15ac356ee9886bb4343af7bdd238498925894f259199a9d85d626d12279698b12bb9845f1cd0ba962a6ca731ced2ff6a924114ba2eb661af588c7ae48c728c3536ea64f30695ad97b5b4fd5333c5854aeab8775da0651fcdb2ea23e72cf53636ea5db154f507b0a44c9ba3051a5175bbe21d142862135358ea3b1f4fa03544e8708bb1158cb91386d4a5621c08e3fe43ff82072461bd0a42ac2f64169d74f4da815d168c51754f224c25157cd4441006384708c0563225f68d54efabd222e0b4a862e2ce068db0c1a5a65c2e1296ac2f4880079c814e555e9a792c5e7a869a7e81027b0c14196123d3df25ffdc26d281061e1e0e097175f9183de51f2a8210f16d74967cfe73001fb81591c3a452db8400b5c2f562e838c820c95b78f440fdc72420568f049881f8eb7670c1e4a57bf8a663f0433dcbd17359589660fb681a5c06bca9d102ef17607a44ce0c7c69632f81b8f47fd03611c61f1c3969cc9d4bc8c7228ee507fc18707c9f559f9804c3a5af6e0b94cc20a09b11496d18a329b17b80d989e8577bda2d78ec011aaab0a487e663e55700728485d0d1cd8e6f0d4315f20b4c538f5b859a34f09ff21647cd4365f406a05fed5ecf5d814a9e7c9c7c66f32f3ad86d2b23b50de8f7a0386e00623f557b40deb76ec0200f5b85d892ac9acec40f2ff2c9d962c77d8657a666165c325635300d51f9a59f826f4877843e7c8b8a33110d3ff6d18d71fd91a9cb59dbffaf7122dac081c9aeaff4bf8cd5f1ce91667d6ff3700f4d2c9c22e8c2c2a64e8919303c4461e1f853c6d3fb80d7fff6ae345f0bcc3f2945187fd404c44bdc381adec0
3ae621225df03760db5c2e86a7d028370e4fc56cb08145cc688da4aed3da3344dfa05dab5f6d839e188f21a805ff8a6cd95be7f91c3a86517e9482f1aeb4c7d11ca29c61eb6ec0ce6ce3397b2dd7d611dacfe86d9ce33fa8ced947e270d938598ee378e2fc8bdb7bb782d2ca7d36d7b5f7e7ba9aebfe6d90b30be1a4e72448a2d43d3a4b83d0dcc97c114b610d335782926b25b8af39c5fccf09651b908d064a42ccb9d7feb55ad6a55bfdc68dc70cf7789f4afbe97d03135d3ec345df0d44caf3ab42c90e31c441f10e544e3eb04c1537d53aeac441a392a2ce66c8732be5a1c0c3c3232378740b6af61cf90fc35485964183d3b989ad99b562e6cfeca0fa57172d6f906d31f72ef4fe3dccb00849432b0a2c16fbd9c1b99e0db2e0e9eb827b276f03d02ccdece913cb1148b0eee74a294b900741cb8911a238c095d8693c319430729286b4ae2f3bf1cdeb8c8bdf5c9206107ddcb3c2001b03f713ee23ee11e46dc8b36b1856184204964940a48da0467de0453d2d311ca01ec2bcf1ad1bae5900d18b79035f186d4480581900f3a69cedd93409b5ee4e8ca480deb4c7855aaa84c67980b08bf66c96f27811c85cde06189301db748e47e02075858802eabe46f8af10e3c749ed463a96a6065f8d806e01a25a8c9aa356ccfc767036155bfb1c4525672c67a6b6ae4e6138ef1e90de311a3960dbcae5b366516a4866701fdec2f1659dab7e5cb1b5ab9c2c39ad1c23b6f1229c35600c7303b01a5f3dc418a1f195d44dc0e336e198d62cb308bcda50c495e1b828a78abdf83ce88352b5384603ba6e2df66bdf199c9be6b9cc087be14fcd8b70cb81630fd10d307370342b8f619284cb60cb8fdb898227fc6b02e906ecbc09757efadd50a0d1ed17641c27dcf4e289efbe2ddcfb0f1395f68d56b3f18940aad59928255f9154d297d4e9c58741a0de06e9df01897276c11fca2e8abf977c0134f5e04253dc613f9a9b182d850a9aaad36139ec5ae56648e15bbb35d21facd9cea369d8cc9cd8b2f76ce255d4d62c3c5f78df15d83086a0dce96d64b5a1f7c65ab85b965726c46ee3b6b8be2c86e6c2bdcaa4281d0add4a8e6910b59285ac67b262d89bb0c62efae35ada987a104e4180407c0f37bcecc6965c8408464dc99399b89a706ce1d083f3ac75583f1e4d98313481dd2578e58570dc3ac69b13b8fc6052d79f082f12abda8716d1a942cbab552f5f62c19944b1ba57b3081f013ba1da4830e89e5eb71da5e3780babfbbe5179541e87703d9f9ad199a063b2c917798f02424feb55e9152f1fa2bd144cb7ad594e27cc9f6257cc730a07ae9c7804af04e11b4e73481ae617855fceef33c47b53bc63834ae5c3c5f59a210210163ff4f12c108534f6f6e05717adcb786313797e4e6b79be408ff28c6dd9a2bf9cea26132999247447af5cbfb8c798e3767974e709c7bf2418b92b341dd50980faa4c739cf6f89e994bba45f9dfac396600d39ed199f7611f8f59330a525ae393c376fb52824db11011c0523c9ed1e1aec1fc6a20d3c7eb3becb0e4d07240c9d3c84a2d0c14e0a4c24eefbd7ff6ee9e2b5611e2d8bc77cc2d7afd4a933f7a98e4a65ddd81227f747bf913827bb4410db64f97051d32598f969f6565d84654455091a07d24748f4d27934691973e256613296a851357040c52847fa9c3846d3112700b7494358fccfbb757cb20b00f9d21e7192a64535978f3b7906eb88ad261aa61e693532c9c5efb426c3e3380998753c0471601b4ef1b7dff20ceb19cd8143d949ad36a0b0bfb7da2ec066190069995d596385d16e1747005ee1e39be8d61743686c8da3f3a4e0c92cbc40d917327806379f7ebe408b140498ed0f5d6d5decbdb16778d9271106b3ef40de4a7c16879f4f08b218102127f74e8b3e11e65011f2b81e443ed3a79856846641e0030eb5fe44540022a290202a73f432ea95fbed17eb5086fbf976e771d0ec969ff918039e9182668da0415294d7d1e1723f3a311a1222417f48702b9569a2d775101c3d8a7c2b65225b3f6ec9bf11b54d48dab9e04252636dd1addc584247a235b1685ec50083a82c99a697aaeb3898633d3c70e225946dca65155a62ff19fe816ffd3415df9f296fb0c6cd187ecb2ee9170797835551d304ae95617ef456fff5aff54443762974fa40902c0f759be1c8ca343e3cf05ab2e1023483f9afc162577e1b661e0ef92cbc7f873ae387dec1651b082f4dff3d9c16a68f212551fde9b32b82ac9090cd37af15813284eb0551de913a320bfbab92d519d8bcac88d38354eac8f9a0d1f14480113aead8e231b56aa7206e1188099f3b8ccf92fb6aa6d55ad2dd8dece8cf28316ccd78a103ab643d6986ce86438fcbd79c1831814a3067cda4149a677143292c1eb664fe953328f4d2f6a75a4c877bbc5b208383f8b47b4799849a8ad019300ad3d5e6dc35e8a15369fa3c6b7f05c18abb4ec16219baf191c0732238a65d72c2eece8de203d5c480f2cc3072e
e3b04fce04d33869618366b8043431c70ec2d8951f96f5cd0f3467bd04d630d6e19fcf4e125f65e302add4d5e25a0d2c6cfce104124921ba08c299e9abb6b5d98f00cde04246d629461d4178661c9fc62d3367415ba7a77e3dbc7c83acb10fc56214a8f27f4d822262c2f3714c93cdb3aea4247f73b2cea746601594f9ff21b69e6ccc0b5ba696c2c086f49b50c64425565dd4696fdd3d71e8de2ef8ea66e15b6d7900456ebac906f07813b741de873c3696ebf51c7fbf7880d3b039cf9bcc22f7c6c94af71e668bda636c041c497a9e71a8cc76854939f00a80d448c82c3d429e89c0d4a5e240d0f36202f6debc8ffb7a92326a6892f20112b7328448e8b5eb279b260476257d702b0d5ab1a7415dea2a1e000a432a2466db9d40a07475176bd25f087510529650416f250eda8d8c0de35e51fc487ab9abf35169ad2240e8dc21139bf17fdb5f88d2b28774760f0041b3e6a2607e492778eb1a96c195c5527eed557a787545164c13e4524958c22da27ff3bc62dbae1fa33fdc2c7e51968a441582ceeeb895b051df03da6b8daf322b0c3cb0799556b8164405cc04eb615dfed2ec452da99b26b9ad8dbc9504aca0ad5b834a2fb7f9a828824c53794ec2eea6afe0816c80c2f1d26e66801d473d568d5b333ed731c00cb3f5159b3a7f1bcf6c246c3c91b1b4f354765fe82851a226b68930196155d8ff1d60c71a2fb08d9c7c3d5a3dac6be63459d743b2471b5c3fc4d4762e678614d241ba95bece12a599eeca3fa3e69277ca8a8018d74f00d3a7734d0a559e6e07a1a705add129af825c02b61e20a047e5001c50c7686fb3a5b4439005a8dbd8611dc99035362aab297904dd6ab1c9a55790684e7deec6f9669ec59e784463b7945f5a5d9c171826a3470e282612dfbc083f1e07fbac920ed7f320a249b0d1b60a9312a79e0bbdc9e13f26a2d1b628164fe1af321426d6d1149d31c493029df8a68232c589aaadf84ac0ca07686c15f3007670ce135c6b4f6b0672d84029f073026260f3a28b953d7196a46735ce2adb1f58834f9492eee5080d8abaf4ac68eb917e65f71df26f25dbc644fb180f0614c3e67e99c7c1aff7fdcb06b03bcde7ecf277572beba459c6f6bb60607d14139405cb3d9416ea4811149d5c383087be42cebf717c585c6df0d74f6b643a16b6215cfd166fd53b33c1c91497eff3bb7d463e429bf024e0b3cefa810cfcfd8fe67609b77998697463a77e9ae29f524e21d033bc84f96463e2675d9c70cd5af52c6987fbfa16720574c80c96639192800360fff4dcff0a680d447cc349a58e78c5809d0e1bdecaeb322349ee71a7f07c4fc622c4b910934da51e6c56175a1a2ee7680b8c26e2cdd458bf4b8e7dafb0d480a89612ea8676d514c476e820c196f03062b18021bd760f8c5e5b2666d1925a9a2031df872e5a229d44022092ff96ed2fa9824162c1c677224a0c6f1604d3e7a26a62e620af8e424bc603c990739d39ab9c22d65d4d755cb4920acda30b662205fdeab138ec639d471bab7e8203a329ac0bc375ffdc3c3fe791d6dca21d4806d38e646a0c101b82e1bd078038cf65b321dc4af1ef5ac85c0a57f17cfe6cb808c87abcafc5cf699af48a41fc64d8780f7a2e74b44e860d2f743d39d8c8b56ea68e337085b66754e869ea1e85849e27990931c3f68fefa84e45960d00d41b06da260cd87d65421776064b621087e6d98fe7b534210c7027be93490408fa0b51b88c8e0326bdedf2672bc0de17e795f4ebbe00db444b7291a35ade60df48f8be8354eecc88b4088885538a8bfd29cde0f81eaf624591dc4627cdfd04a27acc08c30954d76ed1a9504e35d9d5e0da19d9a5e187369a93f60f564f3233dfde4b49e3d6ab148e485f05f0d885f6fe2b6376f9d67ce449d271f896018621788968ee47e4e274264c52b79b444fd9600170b8acaf9d87ffec521ecd446a30baad7e8f758f279608ed8bc39242b9db34f6ffc19498703445a6cbc28bc1f3c9ee66248a6d7e6a93ba6baaf9a79a4e1cc3cb14909656d6f22dbf307c87e3777a2088f4693b59c3fb399aa33cca8ecc6046432dc958022625680cabb678a93c8e221872104ec32de279f30ad637bff7bab23134329c1dbe41d84d3053ea2723ff00c9c04a7584528e25e13a0f20faa48581fbb11f4e8081acce0b8b20c51f9049793766b55f8637ae5b53da3b784158f9958d72da76f92d1f6ac788a7a0430d98849f4c3572f8c210c2183420c796880f4adae42b9412a989ebfb901b181a66eb7033ee16f063b7a2060e1dc64446a80c2c22405626ce8510f663052fef7e44f5d46d4f3378bd2d2860e71073344ef8354e8fcd2fb561222f58b41f977f2bcbd4bfaacf66bcc6b274284fc51f55ab0d80f0c565d347cc0cac9cb331be740cc3041bd87f2b06d1857a61479e6e6151ef1ed4c1330c0e6bf185b528c03c82142ed3a3d6edd5a2660a17b66beae81f3df5914602b32e24f8a1a67b6c41430da36b27d0123c6
3b140f0ffc9e72d1f3d4ec0d796d74f7f8a3627176ae95f42d45f06636fb8f45d3508eeccb987a53d31bf201d331d7249ac25913290790a32947de6638f9cf01758bf71905cf0eec524fd17aabc2107de89efd46319a6bbee2a2e331964ef82fc967009b8d3e280db38c71797d8b03f7cf95fe4b970ca6ef8443a3f16be22e337a28bc483710f6825dcabfb29893e55c791d55c02d8f2ad10e40b185ff3f6d845fce751f93b0d9976eb174be3f24b8b1a6c9f83fda4f20f03056ff8fac7b4562fbe41e26e0eeccf980dcf8f1a0cdd3d25f78141d7e49e00141068fc63d142708a9b1736556621c6c0ec2693f367f4dbbcecd8deeffc398c202fcffc30ac75f6b327fc2dceb8683fef33482eadc77304adc8749159b7491e1e119e6dfe7ce9353f41a90d3327f92780d81cfa833c666187d07a28d3cdeabe6882e969af7a622cc809101ebb5a69b0c9e454a888678a572972050e41373d3e5f2cc8411d681ea5f18edc332d052fda78bf32158ac66df8e9127b954fd9e06e01a5ac0ee8d1eb3d28e1add3fd35ee9af3742e00f70de2f1dabbfbb02aaffb3f8f31583ad496130a36ae9aff37c5b08274844b61ef534729fde860d45ce2c9cd859f24a3d2b1b69f91f43b8fdbbf25e6aa4b49fb81f8a6635109b692486ff9b9f3ae20a37e2f054151a0c6b24b2d1f1c6312a617fe8cc0ebb089bdcc61f6aa2e3c5bf8ec2eb1ef47135abf75620b4a30c77b4310ed959e6a26294cd6eecaac4fec10920dff8f5ac755e2f2d4a78120f63b5ba919e01e2a5c3e9c89f903f90bb0ea60eaf3b789471454e82988871bffb227ff309d515c4f680fbffafdf61b38ec6cf2e9cb11d114e06edc33af03eef5221bd9f7317f47fe44db378720487aebd04632dfee0cecb5d406bb9e371444a59f39e077cf52b68d07fa2d61a25e0f9f1f3b25e3d659d317fdc85e2817a9713fc806d98cd17321b41765c8773c7f7df74d913349fc0493de8bd5f96b020477f6cff60144c56632ec92b6f560dfac2c818dde4056730deb906be530f7659824ee04fe21c74408ce6518f5a4ddc2dfddedcd48d6190f88f6ef91ffd373956c94d32720be0b0cccbb239c7ce2cd0a325e5c019c47a392be3b40a6eca482c1087641ecbe25c99da1a38c90cc06a5277bed9812d1f6203c6220b02c656f3b8f93c2edb3b1374b433ca5ee487054b5cfc7c56e912d7df504a8a810f2d3fb1f573ca523c7cd22520c6bebfb48addaf2ce762f90796a8f8fd19a52baebee12c95af201f567e62f3e72dd53c9afbdfa0db3fef247a41128d57920404ec95e30bf2f86ec7ac7aa208a7e6f30eedb9f77b8c1517333cef0d2a1efe933b867fe7fd9156d5f82580817becb9cb953f1b286849de139f52a94e9e532c1690a8f285f6b6bd6cf5daeee005a90e973f149d6c64666d95089dce6e60f64a71ab091f09255d3de31b5f7d4de0c3d824079002ad8f71ff73c43b147abe90e61de33b4a1f39351f95f3590146f41c784f58af229e75edd7740325c15c0326bf72df10e34552d7e81fba2b9f8bafda56327877f31c0e9b1129a85aec6e8c1b81c8ceff275557c762eaff5fe2533e77143f202a06ce755b8de3bb854e69d9aa9a7655a9fbb186540bfcff635ddb94dbcf035dfbe3dfb6328509eb810757d77d7f68ff9ee389dbee7d166666f447e3d9b4791e4b9a233e7af586607ef0fd7e9d3af3965cbbff426047f201e1fbf7511de162ce2060fe97430b7d8150512da9874be654e01f1d123d5d3fd43f9946e8ec14bcc2410353c305082bf6c1bbd02e425b1482ab7340f4ff01e1c01a5e7724003b1378695c73def73684c1c3fa8f01822286ad1026911f94578048c3d8e1c0e79e6d4365a6c814af243a08393909cf3e1f88ebba3e640db08fdc382d06398cbf20f06131293808f51d2a3eb893623802110a7f7cf60a7cc751ad08a9c1d1ca73a66700073ce4548b1c84861e70101cbc1145797cd01f4887e1400250561c86930d3f39ba197668b10b1e54f5eb8d05cb1b4bbc4cdb9b2a96720a069ee6a3cc752e90f9f4f86c872d696901fcec783f7e5df38558f0871b22a8adefbfb2f6857e09b90e7e9504dff94a825a5610ee5c4a78f0a067844e901716f8b90b18a54b610e4c483c3da6f6c1eae68968940386a448bb128d00d37671b958afc53f5df4b2ed5de154d73f14e8bf02edb44a66388d440ae1dacb50571b63913f4420de17ddea9d0c91994eb7478b43a8b1143eaeee80f15781b94ccfd868f31b1773ea6a22d6cad2622ec370b91c08b27772191f77df01c5bfd954dfbc0c1196efd023da4652c061b17b04d9a1a276aa87403e125997db41715797414ca16b504faf2a21b718b08fa15aa63d7ca13b19ee6ffdafcecb20244d5221e54991ba6832d900f7b1107030ff0fc04b6cd1935604413e10f73d2aa20cd507a9c4ce0e7bcaef7f3f7a8f6b291dd77493de3d17b0f8fcf1567b443d1f470e280162c99fe346b7b717bbea02c5e85839c
0f8e1ac1d727b69f8628cbcb3bf1874f9008db553acc415c3d318f3c89e981118fef8bb47913b3aafdf03d613df4dd2e9e8640c22556f5055357dccbb5e69619313267f75493ec5e125c0a00a137d746f39695840df850f92682ff2d7bc16f49f4ecce747425108fc5b9466826b8897869b19c6d2516f8bff8e97dfb893843de3e1277ecbaa7892458e5eb4e803ae620643134a1048af246d0c62243000304b140f51ed01606925a051803a2fa80bf4800c1d6ddba86dcd356d47c9954bc7b22391ffed9d80c8f4cf07504aaeecdefbb8beeb5406c6863d7559fef2d724011aea10b410d3c5f558cbc6cc7cdd0845cd299edb74c264d139e343ff0a0894e2b920d09f4c15e53a8bf9e884d1c1f24547270c179d10733b6542595408a17c4ea1500224b0c6791810010e9948674ca4132090a3e38007d80fc1c726e080165a0d90408e0a0c50210516162001054c00859580154e60071760c21a939a540a25c0c77210c038c41e8d9368e84c692442b928e7008c040e9d206000c663aaa1d3a731d53f85428c063ed6531fd538a1d0034690400150271080994ca7040c004d754a2299262000eb7f4a80082984c0a13f7342001805001540600e9082c28d02a2f0a400854fa6942bc0c6014f38146fca67014e38f4a250a8a2463ca1d49a5740f500431ac0c61e4e9ec74b501ad0c6102154782cd108151b4b5ec078b9006a81050aa82081084040e7010e68400e03525880025038c18412107000120c80334201083000018810420000b0c9089a10518da0c9152bb04123840a0f24d0a052dd3032041d4e2c14273a70920319bce1c40d1c2108b9210032b8918010508238042788434e5f4f54aaaaba6244960d543f1c1184020a00c2151c20aa9052021c201410c20f707e20658145555d618496aa082da9d3a84b95aaf2d228a7022c070239a527959e545515ab2a5185e30320692891648c8a0caeaa0f3ce1c4182f17c0e9e18b4d6c34c9a4aa6c3001c14692aab2b9a492aab2f14165473692545e1e8b95aab2a1c46612495227530f7850997848383ca880f343a6b289640755553db1e60b48caa82a1b1d54674a2947917c508fc6532192a7d1234d19386972704e5f68e1c40e7aec604265250738e0a3aa6c1e71c4860d925313294b55d9dc00914610f194dbaab2b1410d10418491441641c40689aa3a3d9a51c9d425352a89297da90c4f5d937a12c9a7ca9f4c564a23534d8dd2a8787c26992fb6f429d493a92a1b4512a92a342713969f2a5565b304223f55c084a92a9b380ea92a1b43aaaab2a1811d8508a9acf0d41aaaca4605363390811732a82a9b180c22081e54e0e11348555550f0305d18005255367f3c8f97d05841135d903a794f55d9f8d147559d7c485e2c6924b26273829f9fe188aab2e1638f9f47b3d8cfcf9086836bde9696a8eca8aa6cf4b0c9a3aaf0a85a20c71d76e08c443675d8b0808eaab299438eaab289c3068e7f2c541a9aea29302a781275d38efd92182fa61d92ff15d38e25f998c83c8f4f12c9074b0f95946987a59932e5cc992f3c758d69474c8ff7ac99265318d43f9633a5d1cef38c9ce4633a95c4a44ea61e1e4660509664d2f224eaa69daab2798344f2c1724a95c4d82f89a92a9b25dc68a20d36d650438a14d8a461df7e498c0d1a2320917c764adec54b0ae5cf2545a5aa6ccee031954e5850a7a73cdfe5ebc834e6e7e483f23f85299946a3d288c75279928feac8fac840428dce3c1792d75149543fcca719a57cea4b911a95c4388fa37c66b03981195555fd94a8b09a43fdc253d7e0781538ae810aa5c394292dcd94296ec5636543465599600c11cc78a06263ccd460a64bc553d7b431a85843d7006b7835b81a5a0dac86aab211836bf8d36b61a14742878401c6c934b229c11724b0f1a28b1188800b2bb6b0428baaaa40b0c9a2b2232c42c03270810cb82ba88954c50a2bacb0c28aaa0a4b1a91a92a1b2afe4a55d978600afb34904852d82791a2b83490485448242a556503854d074a3eb7aa6c9ea8aa0a041b272a3bdad24455d930f124d3ce935a1a1c1c08808303149b2558530272121189cafdf8e90823aa9218146b69a64c61696030ddd21694e996b658a0f45c4ea6d2880786d273791edf59e35d9ec74b9e720b831d854e4fed9629534edfe383e23195def2984a27d3e934512894258dc8c06081fa6346674a3e584a3ed76ea925c8d1a54eeee5c79aaaa44ede63b7fc5853151860602d32f6893e3f8fc6411f3b62cda786296cde7e6a745263fd4f5565536955d9b88d19077d589a109947c81442468f1572a65cd3fd930a64ca54a81e54f449d51f831af550418d7afc4c89d4c333428d71218c1a939510ca090d70c49400474c081640bd359570c21c91a552a19a154e9806802101104168a0872f8b5455e52914992faef43d349876703000080e06c2441f2f5a7068e06233a6089c1d3d426b707680a84c55556da0c2e9b283aa42
9d5c7001a74b195595531aad2939972b3344318397aab21153553661aaca06cc97aab2c1808d97aab2a1a1aa6c76ba5455c580aaa252e16ca94085a3e5920a47cb1a58185045a02a41e268e1a2c2d1824385a3a501158e9627154e96412a9c2c27a870b24451e164e9a1c2c922a6c2c9a2800a074b26154e0b6254382d8ca0c269c10a10b680993fa99118d3cec97a952ed6aba050a751952a4ff214983ff9a47cb0a446259f9c92fb99110a5581112807e54f9d5445a9d388e6784e89e6780eca452b80ce805e851cd4fffd296df1d3fb69bef7de7bcf39e79c73ceb9d65a6badb5d618638c31c61863666666665e6cb1c5165b6cb1c5165b6c3129a594524a29638c31c61863eceeeeeeee861042082184f0bdf7de7bef3de79c73ce39e75a6badb5d65a638c31c618638c99999979adb5d65a6b2d96524a29a59432c618638c31c6eeeeeeee6e082184104208df7befbdf7de73ce39e79c73aeb5d65a6bad35c618638c31c69899999979b18c0d9f6b8cbbf06cb101e7853baaaa3abd0b75747ae156382fd850e1bcb0a5c27901860ae78558e1bcc04285f3424e85f3020915ce0b20543816a8a4aa2a2905b0ff24d38e2599b4a41e85a2a2c644fa2a3fa4120d624aa6d1ac2a1b19aaca668b8d960a65dac962bd8a4d0cd58f7331edfc97d48867c4a5aa6cb0cccae64a55554e6caad858b1a9525595a36c9d6066ca64fa02c6456bfebfa04ea61d2b5ea5c7078d09551a394fe964aa3ed407f54fd5dc91c562bfa4c67b50a7ff71520ec9ad18923f983f79e1f9d4169b182e7085062b3b33f485862f5f666872117385862ea11aa8749932e5537ec67b4ca71e2e696038e361bc847934a630241f6bea1261e0f9d4963326d29746674a1366b1820a4eaaaa7283f4c5f383865028efb199c00a9597463ca353458d2c0fcad59c5e0f5565038294aa1a33032a84c684820186e8e329b7a81e94fdaaaadcb089525557aaca068a8d4d55a1dc0458b327931a14cabd8852ee65cde83e8f0fea892699545595729e93090b95f85060ac5789ef515ec57a95f9a5aa2a3b328d799f1dbc8786ea4f5555e5e0a0cd6196d90226746596d9c285ca962f134c687e111386ca4e962d601c65eb57393886a5aaaea44c24300efa54d5134a2649994860f889504f8f4f9922f93d76424d99424764ecb3a6d080e34218a9118d8c4328292f5455354385e3425555382ecc0001d409840a9dbe0704c281182d406002a78541aa6aca14500f150ea8860a07a45355219e510eea040ad95f33ba1eea41d551ea7944a9d1894a55994e5fe6a78a98d1951295934fad26354f7ac083aa229d4eb39a48242a34fc0000365e54303c89801318544efac8c3491d4eb698400eea04b2fea71f10d1a307453c7287163f88a1010658408c54fb21cd1d8ce0f1802756f8a1ca140c948488011b3f086002282c02c58927362048184514e0c749061be0020c1a6488e1864436c053c80c605451d86ca017b1c01195cf151bb0518026f2033e28c3073ea4b803871596e0e243156a6eca68c00b01f0a14281068e045051011f5cf022c67d22924a7ab0440063d4404962470f7404208c3ae69840ece109109040d347984b7ab0810455b0986004253de814c107163cdc8434b0033421988149e4a20136ae1832040598c0a281226c6af00489175ea0012f7d3cf2a65031030d24a006b3013eb021033c305204141458c8f2030f6634ece101a4e741034a2461268e61e2010b002a00440b6496e061042d32b94988d1f34388b323006a78e8f2e3c5135954008a3b5ef83909808b0b5f68c13f1648c4003fa49a103f5250a8a2070e943859f3821e5e8f0c1938b32604511c710618ab8b35674a306585303d50630dcbc11d0d28801c96a8d9644b0cce0658e0a0e68e291f9803071b28434d1494842bde60115013b209d3022643f4a851a18f344ab8704599343d880105b858c28703a459411c86e8c083b7910689338ce8e08e1d17d2840193420d5ac0b0449a9c2a1b1883cb2681f83862a3492224e011890f1a7d6cb2a30604387c80f0e2438082e78c8f0c2200c4102f6484e073001a0af901b5020ba41011c10c26c70f6a2a8c2c4138e2822ab048f9bc800f31291e4c4851f1d9a1104aee20211502240af000192f38800aa40645c2f8f90205b5c5212870801d293f502410bc01e5142608144c64f971439a3d504f769840f5100ae1b4870c1a78e4120ee34445a905484821091227ba02295b58a10490530b9b9cacc82212303bf86013391070021384ec30077334e0411045766802056c68d2011bd0ec5003142e301512c6ecf0802f20104755c8083d90a04c5638296ed0b3c60a1138c01c6084e92122e7912b571c61a5c74b02c4f8e183eca3e78439dc00e20e1c7e302d728507f6e041801ea6327c983be8e0bc60e2818290c6cd105298260a532680e68a34a6022422850edcf08185d20ca034c0874bdc90521757d8800a4c82f8a
1b4830c5f0cc9e4cc074a15002385d6c2c989d24d03cc9023093944e071011a4dbc0af0d1c373451804b401852235e0b92508a30555687884873560cc142b500f2069f2e4014f54a24693ec703788a10e4bea2041f1041b208876b88094031a39ccd1840842902430c70660082f3c343d789247064c76026854a0030d34f9e1060c344788f981199a36d080515385242a8c59681800813838e0c50eb8e8d0c8234e62114c0440873320712df40840d0e1074672b4d87083d2610b1127fcc1031a2774206164822a778c116174481825445a5c096404468e2102d8634a24a3940d4c1044105a6614831a878821c60e19a300c4718687316430430483334560f2060cca88b660001674932b5a44681e8fd6648a0044ef111058c0430a41444e8ef8830c086a0c70468f01a4100909741e39e3811d4e5006070b40e28c4bd204190f900190332cb0e1032351ae70e0528209158ee8108278e5a086e8a08d322cb94ce8f0834542413871c91ca1070a4ec08708d7017f0452891321bcf03a60800aca6c608dbf1a27fc01d892467e882c441440123b70f03b83fcb1130645c29b3086cf20569cb0621579c307874011a025c303365c370e19637f26095fd6196bec95b692ea811c75d802b0f07cb082288b1a8343c6d45025864dea08ce5883c58a33acf63811051a328a88a34ef9e1cb64848b3e6a14f6453bd24817d48f49c08420c921825ae180147ec04805087d3418f90011550928e740040f7e5446382677fce183339c085ec7142b2c38814320de813bde1b17f0618de3804202940726927804e4000402a64ce630c3839786081ba87862e60d079099a24224213063c48e182023246086992f5d8c51228b2f929859000cc8cc3c529083900d0a31040d00429184ce28811148a69c2181d00696108109393c720a69e911e26711376e0819808907f2e20132326088163108b001991f3250821b30d6ac81a347065251c4b460660681642006285774baf410460600a005cf4d228e9303204090896924c027072dae182de460023a72d0e18543b44cb1084f0e6e932e3734f04226872a8f9e17cc80fac1010f4312e008ce971b7098e2072d6d5099410f3838d3620a3eb62c1c2840847380225f74b981121b1c1a4141ce0b37c471850c60e8a80115372cd18295352a1080256e1893c41552b8500111dce000107cb05243264294c1810c68941b5ac0a24c0a0e8042f7a0b3813242a8d162c95a002bd34509124c72c21d349429810b1cd041821617b0219146de188171821e3698808a0b0448e3c41836ac7110e8a29253243658c1c191475a50210e1b0680c9086bb45079350c72870cead0c24da00611fc9148046285366a30ad3192a4aac2821aa437e089eaa1a9014a1734502d0748c8f801c30a93c861c612643ea00901200941238790b178a02250420e8020b3b698440d03e8dc31975ce1860d6a4c79610c0b645859ea906293311d00210259bca000798cb9218617f4186207670c04f4d8e48537c800c444d29387a580012e20a60d1b84f1c79a14f010c3812f901062c6042ec46060063d40c019581a11a380285248428d1d50616a408809254b24c121cc09b68c010129e6f0218c0f554cc085115980c26411299186e4028130386c0020073b7a52005308253e7490332a18125492432363e0400318540b25e4808c0f228081c1044b9880073e0618109410c10a54d8d1f9f2071737f8350659c0972cd01000002bd860c79791034e3c6c98407e712834a1025024767ca91010421e35c441c2400bdc30248c381e0f1890828a13c6b809dc8101336b1019610e313c1898c00d4e5010458c06bc4c7200105402e50af502070d0e10050c010a5e92c804120cec0093901731544060061270e1072f0d982948a488157c808647b858811f30f8c10d34a000053d4830c2071040431064048283019030020d5cb82861802d6ba441030244c0850f2052746027110504e18342ac9060478c4b464b4c2145d8510362239042ec7861a78a127784b1481676470062a4b04ca9228b2e82bcb1820beee0e1842e5c68d2c600242659ba94281d8a3851c9a94ba4c1e60617b064922e36a79111d73bc0a58f05f874d173050a5caa5824030be8411907e0526fb0c82272f4e00a171708d9a204450d9e192cf1a14b2421304366063a2c0c6516e96280199ef0c2897879a104339439e1063a9c90099519741031e208125ac0870c3b4083442aa404830c6cb0918584297c583214e12f4853882670c880011406c0820d5810c88042063600021e539cd8c20823acc51677a0b698418404ce40430b1cb6f470d3050ec2b43ab6604193821d58c1018b2d234052870d7df8c0450b212708841213eef8d1e2c5246be838440bd772cae1924ec00936d0f2425d21923970e8428b14218c90e40a191690e5057cfc3101046c6146162c4a50c30a99110a59443896f4f193a3489656c54f76741102cb267e58e1c8203dd860b9
c30b451c80a505576089428d34d288522a014b8809488e00a10c1d2c2bc06083d4620947a6247cc8253510a401130e09d8c00506e004319188c10e8b6c42c49319660c4952c06cdecc5162022ca8f0a403571c5963e527062140e34a1a2d3cb103111928b902c40efab8e9020328aecc908045024904122f570e40c602d6a4b1440c5610a96111331d78a1b212461c3c6776eea8aca471648a18448bca0a95397efa10202a2b22a0992402aeaaaa04e2061d293c515555b858573012435555e14980145cb496348d545555e58916a19c3df6782a0421a2e2f8410544041095e830c7114ec4604315834e75811de0e0e440a950279069f42492aa7ab283aa7a02890e9c50c2831b24e28406380c28408593020f52586401154e0a6e54554522f99472a67ccf4f2a852a5268820329005155d50a395358604185147248214c5555554e89c78764aa39395f1af9d43fa19c87c7b4e64539a533a6d3c907f5251369855b1ab5e00205724ea69c1ddfc94999cea890f3a951697473509f323d7d343f8f66052cbc960f0e2dacb5960f0e2ae4904c674a7e32e3001f7260c1cc143339674af575a0a69c520a3522e5f8c97bb0d03095a0c10a25157e4e7e725125fd2637014021070057e0246004a8132807d50fe9c7493da8d1151c54d59347aaea892337a870f0395d9f528afadcb7953a6b3e27e854550e0b279050e1982089990a0445704c4813df042f55856342ac2a1c13722a1344c031a1e2f1293d4e0992543dce8353c2223825e851556abe8453821a23a82a9c120a8053c212550e4e093da9ea2717559c12bc9871a73ff3a9d208a78407e09490003366724afe249389fef8e9e4674a55f5c49042aa2e4f20e08903dc519dcae0204004ef71e12745832785a0bee48852de93432251993205e70007a8aa079c4888509a040d8872068e018ed0020747013823a061ad002c808f69c129c012380558a2aa2ad49aa5030ca038c27680c70d14b03d4fb0b4a1011efc718017cce41148213c58f1820d2689010e29b060c4024d88710a40232b08010bc000810e487ee822820c2051410f60f8827a8290174e552ce1839a03070b3d2d18623e2b8db03e78521488448b0d57a4b0000547941d5830a38c494ee805bce0840f10b0c08b2bc85ca09484103fe06206053d501e3c11a57e11dd30c007bac0410b01e83005060684403004b4240e401820882f42c605974401011606d043420116071821a40f9b1f72269923cd151a30f2020d3f68629015f27001021640418a2d78a012013d0af9621306e88d15c20e7b4c998233802b700620028e0022499d4c948a179866ca94aa824184722f556ae9074505c573a634aaa51f94ff38e96d498dc9924ca61ae2f1298542d6547a52e8e7940ab1e6234af5bc0d7d6a8df384806a3e76cb94290198c2ca183800c8c4420007004fe00080071c00d85055392f582027678a1933392994f7e43c81c11340fe00618d2a890a07041070a4c0c1536bb0796223e589949b1b9c1b35a8e0dc88706e4cc8712a38510ea9aa0a84f2a13851b8b0acf9e044b9555589a970a28c0022915e0dca7358d89293837a1415fb8f12a546644435e555a038112645f241f1808d49957e544b5478cc7fb1a902a6aa2a0954384f0ec1790245054aa1fecc191f140b2817ede03c095569a61317546850cef3831fa04179aaf422356850cef32753cd61314e05e2f9d4962d2493c90bca678b179e4f6d2179c93442fda4d0a03c0cc964f2f224bbe5ab5f9eaa8a8c7d50679c84c52b6ad4e51f0bea7b7e52a84d7caa6a8448463d1e6220075a2d9c3191cc947c4269d098b1a4d1a9859f4753554ff67892071e653429801a3f315063d06aa1a4c674a222948b32c1418e08e55a323904cc9429acf9c0103a7d0f9a6a22913c65b200191858f3b1c01ae7c14403289493d0b8a4a7aa461530133a9942664e20546ac4d38227775495e996b6542124c0921a2a30422577fce953a3108fe9745da89fa69231aaeaff84a5f4684ca90a750291dc0743542010a88e4a4f4af99fcea0489f660ae9d33c2934a5aaaa1128890125825032475599c999221a819220aad4c92442e5984ca9298f3a9d4022948be078a28237aaca927e64835741996e694b979d1ff75255a61dd4db904344fdb897ff1b42fd788f2463940294b98b0c03991656f82ec3fbefbc3184c1a4ce3bb2a58feba30fc86c30c6672b5dd54a48ef1ff365abefcacabc36c3fb82c9edfe3a7dd632edfa2e987c99c678d743968bad1f53fa3bc8efb8368e96f63197e5becea5b58b29743ee6b34c5607a3749612ca3de69390edba7d976c71493da67d86904167ed1bb5318f4919767bfef65a87178f2923bc6c19575e1d63b7602ed9eebebb46b7fcf51dd331e79cddabeed1d6a01d53c2f55c73955f7c095f1d935fed77515e0ba16b8f05f3b947ef7b77dbb2ee8e8e79977c6ec1f66e731fdb1cb3d2d5da2ff7b0d9357b724c5ba13f7
6ab17840bf28b6332637551d9ec9f63bc704c76575cb265f3556b5dc1b4b6af7494ed6d0e4615cc6ef7ecb57defa874ffc6b4bc1e8dbd58f3c6fc6e4cebb045daf8b2789fda36a6bbcfac6b4dfe92145a36a68416527b57f5065bf535e63387ccdc2cbd34be1af3be05635b8c6dfdaf2998f61fbbee1442c9f845144c766b6cba1aacd01b8b694ccaaf3e946efd7b7a1b8df995996cfc5e5cef689f319b33f96e2baccd3e0b4f30db6ab5516bfd45fa183463b2babe5eaeeefbf22d63b28d9535f6a0b4ab3a19b3a1b3ef99d96e6bb909e673bcae5a6f0cf68d6f8c593d36575b6cd145c74f8c593b56d6287d2f63ad2e8c2925376d909b5973d7c098eeeaed1b2fab4dfa6a2598ed3278ebd3ead66acfbe98f0a98cccaa5cebb66512cc7edb8cd95d12da87ecc5b44cb6c81a93cf167b179352791fb247a15d2f2e3782c9cc0d3d562b61e5b69c08265d0b5a192d74363e7ab998b7c55eed1abcd6b2df622ee7dedd77e9fb1ae36b31dd37c9ceeddf7bc1cf62c258e1bba69039d9d062315f85f241f7bf98cb664330dfad27d99215727fad57ccb51e6d2c36e9a07d4fad98b6417abf6137b375140413b66be15dd72e086ffdc0f4afd1b98eb6b5fe58c57ccbdd257fb1768d36a7623ec7ac5cf4b54a5d6bee81b9646bef1b7d91f57f9c627675f7fef2f8afb96d29e6dff6fc5bd3774f5f47317bd115e1b59056b60fc56c4a6b8cd6b2e516841d98ce645d0e3677cfd2c127a6b3f69b5db6ad7d76d08919f9b5cad42d63fdff9a988df9417ef6b670b1c7c48cecd6db564266d69e5b62d265ed5af1c15f7631a7c47ca6b43246a3b3cd4696c494b542e78c9b35242695cc2d8417dae6cc717b72c4e4afbf6863f2b943e6f6c488c9a47b7c97ae6776b9d59e70605e77b91f7c8f3dc9ec7d52c4741eb9426b217411cafb8488799b5d0d99ad4bbbb67f32c45c0fdd955d7f45d99c7d22c46cb53ebd0fbe5ceeac3f0962bebfadafc1bb96fbe54f8098b197f4e5523a3777e50f26abeeff68b596366eb9c95c4fe582344a7aad645193f9ea8235be73685db59dc96c525aeaff2cbb31db98ccea5ac7d860accc2a6d76c96476bfadbb9163fb6a66c98c9172a55f6b3b771db24a265db5f9dad5b2da2621f3c174f88ed95a1b73cbf5314ae685cb7e7bee32191d7b6c9219e36bd065b4965eaf8e49321f8c1d2dc7d58bb537d68359a93387b759e8cb31331e4cd6ce51deda6cbbb0de48262ff620b3cd9ea34bfe0e26fd77ef83af46ef0a1f92699d5bd436f38bcdadea60c2af9575632eda2b2dcdc164675dcb7ecf3d2f4a7130a5b7abd15bede7daf34766bf8deeb2badcf165eec8fc1ba585f455e776186f30613b499975b390dd161b996d2badb7b5d3afb1b50da65c145206a98d2b767d3598dd6eac94fdbb6dcf3d46e662ce2474ca68748ddf2213beb367df36b5ffcf29325764b5fa5acf79ff6d894cea2eae651fd77fb73644267b2e5d7eb3da6cb9c687ccf8ae8c8fddf5da6d6b6cc864ce312ee8ccaca596310da6a38db9d8d8abb7c2665cc8bcd7bd85ac35dbeabbcc844cc72e5783ab356d7ee6194c5665a4ccdf4296c1a4b1dd05b9be15dd65e6184c76bb7b59f962f4ba160f329b3326e563da8bb17a05992f725ded2d3b737e3f90d9e4a25052c797594b1f0653be6c5b992ffbcffd01996b316594216c4bdab67f4c589d57db95b5da3cd217ccf7cdbdf51d5d302b6dabad5bb9ee7a47fd98565af71abdef21cbea7d4c5b19ecefdab0296cc9c77cf4c67fdc98465f778ff9d86bcd57c24afd9b1eb3b26efecf638317b23ca6b7b33b7ff2e9a31cf1988b5da5f7d125dfebb760b27a615cb67163dcf68ec92fad94f07964e7cf8e09a3c7a78f56cb5c755dc77cd8f2ad64b8a263c1b48edeeb18bbe5fc1ad23157652da35bb78fc13bc7acafd2675eeb7deda81cf3df6d943a0bfdcae85e1c933dfaf0c6faec8d0fc77c4fdd9b2de756ca154c191d6b17be66198daf82f99e6d9152f8363eb56fccaf52ca4a97af2723746e4c09a5adf5b2babe366f6dcc7557bbf546615cef6c4cfe6a5783acdeffebd81af35628edd7165b5d8eb21af35e767ca3b7a5d6d114cc05995c0d3617a393bd96a8198b4e182e3aa831cd2fb41805935679a17ddd7ec927ef0bc934266c6fdd527fbc648551f229856a68ccfaf4d9fb3f764b8f2e6ae6319dae44a5e229853a83414e4ec08b654bd96cc125e15b0f3e595d6416255bd1197ba2e6af25538844f22abf966828618111d5636a609c9831adadd25b9392ca7f8c3d547a52f34f8fa93ed6099345b27752c65cb7b137a3dfafb9e56531382163daf8345ae68e5178176b5b7a6782d9d631bfb858a491fd6aa2e6d2934290bd77c5c918f32d29595a1759ac6e99899aa10d3dcf8b7afc74e6316331e6fd287bd1c590515fedfa8431e53fb622dbfb2047fb6f04c664b7de6dff6bf66f0859f3e9e119b512cccba05ddd8dc5762b9ff373f245732cb2642d4a
17d9f1628cd5e8aebeda167bca1cf409a5786ef7f08c9a0c4e4830d78dccc168ffb2abb12d275e4cb7a2ffeb46e9aab2c9276a7eeca48be9ac7ddc2e1d5348ab2f51f36209ab3819c15cdd96efd3d5ee75ee3651b39a2fb5d38339a5c030f7298512c174ef99abd4c5f7a47cef36865c4c57affd16a3647ddf75173587483e3c8bb798ce5957ff5db4ccf9991a534f88e4c313975bc3da99519508b5989536acedd6271db5bda8a81965b28edf29856ad0491693656de71e7b8de9f7a5a2e6106ac4180bc6a274925fb77b9259942bda670e763f2a9bd2f724eab2779c846036bfcbd7fda53b08eb0d9952f18a09d7bff7edf0baab564a53ea492756ccea9e6b5e575bb79d976d06c1bc4f422beb8231f28bee76813b65423838f9c05c6e5b75cb28ed15edad201089f475c19d32211b9c5431d7a25e63ecca56b46f7def94095d2754c848ddf5dbeee58b9aed28f43d3fa92e5101817e4ade76ca84a6130f4ce6d04af84eb27475b14f31238dbe98e4fbf245bbae1329267bdda86c1c17bdbd6e7512c5bc2f32c7ccba5b504c6b3db65569ebea2c593ae9c083d2b15c8e3574ed227caf25abf2b16bf5c6f5908a9adfe94ba52d629c3c3199bdecc647db5ec7ebb627f5f884594e4c77977d0ea1b3c68c6f8b9a1f18e698e2b9cd024e9a98ad63636d753b269d332ad6bd8040a8d347b8532674c60913f33ea7efdcadcaae470865059c2c316ddfd8eed6651b8becbde744097692c494ababac4e32dbedb1a3a81985c4b4ffff6a8b6dedba943a51f363c1c911b345f8df7af98db7b1f6b6345253f231954291a7383162beafeacdd461b4d59d9e298dd048271c9855d2189f64d23de9d8d71abbe54cc9bf80402010aac7c4bd5326649d14316bbdadc5266f5c8bd91bbd987640a093694b1d95dc0b0864baa55128f2e0848819e58d8d3d8dcc1e6dd8a2661f1d2c5f74220c4e869835ae78692fc88fdb8d923a116242dbf1babbd5b51b6b64414c6f8edac590a5ff7bcec19d32a11c9c00c15f65a495c5d67c5942333fb06f74b1f5be1fd62597990da8f161da479bfb5abd5ed8bca91810c8747a1e4aa61208c4e353823b6542b487c94ef293cdb11d64c76a63c834c0d8623142261b3e07e5634e5ab752ae7eaf9dfdc79aa8999d523d627898fcdccbb9ab6bf973921dbc667e66e55a9decd7587cf8d48a9a59d3b266c218af33b4ff97a37b4cd45c32f1f0f08ceadb90291592eca999fcf459b7cb7ac5e59a1335db928fe914fa7136a499775d291bb39155fa28b7ea6b1e6ef199313af6a8b5d0eda2f0a5a839d5630aa5501f92fc62f3f894b8a4a66cba2aa50f6d7cd4ba755b507359a6decbad7ecaacded37cfc5032adfef03a786d961da637e7e8cbeb9c7396dd66e999cfaf6dced5766e0f56dade4e99d017d3e4081bfce70dbed8d8c9a6a534dd5bab366d97c617193b63e1993036c71a6c0c61fbe86e0a3549f3dbadbd62430be1cbb84826136d5ad0cc466babad695bc6f8a53f1da633bccce037a7f6f9b56e3421e51a99d3e79e2e4b5b134deaebfa6aad57ac972d646f9e992dcab8e47bb0bafb8e5e5173a88520963b5d7354b61b5f84feec36332a3f61abaf5d6dbfb4b5bb98a839643f15b2de23f90276feba563efdc562472699a8d97a4f03d5f92ea474b547e1c3ea2e8b9a19163a2f33166393cc646cd75ed44cf2d399f71ec6d1e74b5b25744e5a26997c56d47c7d68a8c74f6ae2e95332c5739b99f956a32efa8dcf3d6b7f45cd2fc57323109af04608bd59bae08d5ea17c3b65423564603a95ccdaf577a9fd5b9da839b5e64321e82286ea938baa039303bf76b23d081d638fc5d66fb9a3d657bb7eadf5fbbef46375b07cd191ee94eae1c16142ebb6bd4abfdafb18dbb8e6b59c5e8d894b066ef07a9dd7d8d131c8ac75517364ec134af1dc98a58c6cdda25c2db696cbf55a2e3af86f9da36519fbbee5a04fe85127ef71bc63c36cf6cb247bf9feabdd1aa6b733aecfb51b19744745cd8e797c4a8bcc84bd5cb537766b76f565a2e6d4c944fd0163a674cf687d92325ed575bcffa5e1191ffaf6a5786eeb1143996d86ebb573bfeabd2c4318ebd15147217d7a596ba2e6c605cc5afe5675f0add53edac678862ff331d90cc67b7db9af1dad35498e1898ac9dd17ae3b36b21b317355bd3142fd339b6a8736ec106dd36b5d6b4240d53d6e8f85d7cf7a56cd02eb9332d74efd5785bc768a97ba266520fcf2864ffe42254ec2ed3c9185decb822a4d44567ac025c2685f0f273430b9d638f2254c319e683ff9ac2b8e8571a592d7d9a0f99784ca71e66536498cc6f73fca2cbb6d86a276aee319d2caf2d932b5dccc255213ba30d45a654632dd3b1eb6adb3ebfea525a51f3974ca41aaaa394a7508c39cbfc48ed7f437fed16cb266a8e55de4e9910c3321d8471518eae296d703532f3be2669f36f071d5cab8a9a19a2e40c571ee3c5de5a90a193123ed9daa3902d7cacbfba63f93c8
c503e5767311f520fcfe8f1f8945a8ae7aeb132dd63c620fb5efddc5a0c048a0f456195b9dab2e4eae8a3927db3a89919c687a26ea74c685199f12d8deedc7d90995d2c6a8e7f325939f9185ac531dc2913ea18267df63dd61eb3effaa57f81e9dee99332b27592dd6ef79a42920b0c53b27f6c2375c8374a0b5f98f09f5bb03ed96f2fa354d41c6a0c21d74fd3b6586036067dd5a65d9db2b55ed42c42f9f4844ea60b994b055c7bf0b9d6da726fc96ec9ee527fcf32b44f634351b31ad3e94c886798322b64f7dabd5ed97bee9ca839644bceac75c264d1a1a26345278c1aff226d90f3c5e6a03bf7dc63cdbe1535879a3bc6e96cbd4d527a7bb9a3b4814020d0833b654219e819a57bd956adf05afbfc899a4f3106389ffc662384f272ad0dada83972bf699b7bbee87b86d0519942b979256cf90c99c76e4eb614d366850cde66edbd5a47e113352ffed4e844864cca8767bd9d322132ccc9b89a64cdd149660c1d37e91252ca8dc62b216417046a3d3cafebd8ccdf5276e7d041a007814e2914962f3aec8d5973b17e774146dd3d336d61b8e8d41f0302d54f030241e7c585496f84ce3abdd239bcf145cd21c630c573dbdb291382201e932944b2a75008002dbc5b485db57c9965b654d4dc9c85c70bd26ecfb5cacb9d45cd3a61b2e8449f5069d4ec94ea697f3255321498afca5adb55f7b8bd74a7639f8f4e181dfa7ca2db2913ea32018eac259411c696cdb57ef03188270d89049a8d3659ab5b0ae3edc7de5708b1c17575c270d1e1a93580402d4ddc29137222451432e68dd663bbd1c9bfd13633c1741cdfc24a23b3d2f5e3316637b62a7c189bf576eb12358b522335eee76768965aa28831e97b4db2eb960efe7d100482d2891051c298ceebb6ad37bef62d99c1980c1b7bebb1eb1c73465909267c8db22be9651e1fb5f68bc97c2f6bd7ecb76b2d3612ccee8fb6be77965eccca56bb177284cc295d74b51744e96256b6f1ddcbd1716bc6d808a66b8cdfb3b1d26f16522782195963cd22954e765b8d713139c2fbdac6974bb2c6758bf92fc6b6125af68ebfc950488bf957d6b7deec79fba5adc7313da26431b935e777cd2e06ab6c8bc5acbd16bffa7a45c65f97b4234a08a63b7dee6fad6d0bdebaae98f732cb3c367fdfef7eb7e858a9238a159335fba674b10541302ba54c1b7b47d9ad8d97a859274c161dd0e9532310e84fa65aa241c70a080402c529a27c6042fbeccb05578ccf455bc584dfee9cf13ba6edfca2e6d49f69b10251a89831ca06eb95b07a7d8fe59b238a076674cf6d7d12327a66847272449962ae552573492d3bf8bf588a69f9b273f1596c74318351ccc81cbdcfb239870ead1335931c04ea1ba24031ab74ce76b95cb15667b703f3c14557af4ae1aacc5c7c62be58bb3ee8f439e6ad4527e6fb2bbfeba2ecdd5bdc2d8e284d4c162f8b5c9975cc7eb9cae088c2c4b4523216257ceb4e2e8e4b4c6b9bbbbd2af428ebb32b311d5c1d9d6bec4acaf03989b9e06aeedeb5e5f8fd8dc4ac72f565d5724b7aed635e41942326d3e63e5229ad75d6fd8b9a2193bc7137de2913a2218a11b3d5f620bf83f7c626e9153508f44ea99e32513830d963b69994edbdd7ad63db47e74f3e2813a9ead0f6d10945f643438c44bd2406040281402012c9ad804020907c234a1173d52a9f7cc8226c0afd89982ea3fd6ff4dddb8da17ccb4d04518698cf3a66adcbafed217f0dfdd0d06227530a0c0a14ea4155d1b1a2a3658240a9350f0239a9228a10535abaae93d26d7bd5d520668390dd4b9dcbc5cfb128404cfb9e5f7cca17564a57ff602efa96f575b449feefb5431bec9d32a113944da6b45c617374f8add2076319289a4cc9cdda6bfb65846e19743093c9a837a3553274542ed658c364725b0e9f377718e5df17f55f81ec03e592c997193aa3543e080402d5f076ca8442502c992f99357f0b769531d25ac95c4f65bb4e1bbc00141fccf5fe3ac2f7daad8b7b899acf9446289fc613668142c98cae2dec8e4e632ffa5ad4fca91f2a533e3da52d8f0a9449e63bd84ded837ca9537b2599df0e5b5ceff65a86eb7b3057848ec5f8d8b30ce3731e4cafcfbf36d61c3b868e4532635d4b7abbdb1ba15ba6526807f3f137f8ec8ab151b7f64232fb999be9e5f7b7bd5e1dccf7f8ad3f53f8a4eb7a73306563f85ecad6a85b1407932dc6b2df5bebfd17bd8f4c5f7d1d5d573e5f12beebc85cd4c5eb586bcb1ef7ba379892462ba9bbaf65db76bf9159d74bfbdae505a33ff936981e9b418f6f357decdfd760ae2a59934fd658db83d432322db35faf6c2edffad8769119e5adece0737f1994cc2a326d65dc9c595d2d996f4d64bac7a06b315ac7a27caf884cc8d22e6aad7dd7b6d5ea21d3176c2be3655a97a5ab1a32d952f8ce9975cdb49fd2605acb545acacf3a7aa5b7906961bded2fad8df01ba384cc774eefabaf42c8fe169dc1bc8edffb828b7a837c5d06d3f66d7d3bae5a
fbb2ea31988bd5179daddc7c9059697dc85e59bcef7da520b3410923bfc8a06cd2bd0c643a28ab6c923675b021943098fe6e7bca2c7ce7607b1290d9e2a3ac46476d7cca6cfc635adbeebd75c166f9b2e30ba6ec47a9a5ecd1455f6bd105b345e69abd37b2dbb6b2e8c77cb9ba456a2d5dcdceb18fc968b5b6fd3df9b0b96b3ea6738cdfd962fe77bff51eb3637db6aab496d55a5beb31ad7dcdc26a2d4366a1c33c2675e6e88dd16629eb86784c682b5b951d7cb005937d735e5f5bd674bd07ef98ef923a491df4f6dab5cf8e19df8bf59d9f3bedee57c75cd12bb7f6ce2464cf1e0b6664d862cb47f9391ad7a3633629d7ead6de61856fbd392694d0dab7dd9a6385d6c931fb46ee682f7c2e8e297d79836e2de9d046dbe0988dfe33e6f7dee275b4ad60da86cdddfbbe36a3d49b0a26b7dbb0dae56ad3ebb2bd315f6b669645bad6622d3637e6f5c8e45fc7ed565ed8da98dd2a5cad52761bfce81a1bd335638b2dbfcec17796ad31e36de69a844fbbe9ba568d5961dbcb90c19a82099f8d2c614708a933a360c2f89edd5dd649a66cb134e6bbd445b8e4bdd655ae684c0be557c79675de56a567cc4b5b3395fd6cadbb7e82699d844ebeb3d432cba419f3a37f8cd5d24ae1bb8bcb984d76a5f2b51be37b102e5173e949a1fa6924bfd04f6a74f29ec850c898cf95bd65ac1764123a928f71804031c1f4d6ce989b64e7bc7a8c79198d7d3f5ee9ae5bedc598cd7f1973edc5762f531ac6b4b1b2fade2e85dff84ce96d9f298d7eaeb0f8241028604c689fff5bd8aafcf6ab4b309fba64d252eec7ee8a7f4a8598bfe7278c6394131840f9625679996c92978ddc373e09a6db2a1d6c175ab8245be8c56c7efb9bcb7779496f4bd49c064d843380d2c5e478ede55e8ec5cb6cfc11cca68c3eefe8a0adefc916c194f76d8decc1668e0ea1a8390d9ad0f527851c8f99e1b1277b40e16246fa5a946fa54be8fadd2de682aebf3d58e3ad1536abc56c1b65af1623d32c266c97e3bbdf2517b39758ccaf9457bdf1beeaf07d0cc1645af9d76d06d949e6e015d33665d03565d9fadae6ac98d55763f5fdfb7db3ba24c94b271a62337442091410ccc678b5bcec32b6f55afdc084ecb26796ed4aaf7ebd8ad92d367bac95c2e54d97a83964b231072854ccf6587b6bb8a8a37c1bf5c07c964f2eeaaedb179df61413527635babdef9a65bf44cd7fc6fee9791e33419162b65febda1aeb63af25df2826bdd55dc9104a17e3650fc5fcaf4e46b71ef37f318ace8c4ef042e9c0a495d95babac456b69fb272683ddac95ec9a6bf73f27e693ff626dfadce3781d8b9a49fe2954080dc98786e1a273a634920fa5895961bc2ebd2ed6e84b7f26a65bf91e65d7fdd977975d62feb2cc4f3e6fde52325562b25faf2fa494d21bedf32426a38cd9ad0d3224a674d755c9de6d5ddcd88f98eda36dce2383d1395ed288299ffeadcf52367dcb9103932d487935b74a1932eb226637badef57af21b46f68898d1b5dbefb73e6fffdc10b3c656db5d969fabf5c6a2e633a510ff193bfdb958d119a3634567061d2b3a299f2e20d0e9fe97b7a61d100804022d666f4d25f7989f5c014588499b94ce3e675fb4aedd0b2588e9bada58a95f671757bd5080988e5fb6b3913a7776e9fdc1bcde226cc922dd643aed6ff5fa37bbd6aec9f4ebb55fb294f67194994c77cc5ede77ac3ef91a93f9eca5b2fb2b7c561fbc6432a4f65de998b2d60c5a32237b96ed776b3442f92a99905d7fd15277dd51f87c309d74ae5d76dfcafab55132bd4a577f55cbda75b0dd49269514d2c8206397fdda9564ca0ad9df65b09db167b707d359bed1238bb07173ecf260b65b66e5aa1ca9b7ca379259fb71e5f5a4b44ce3ba3b9830c27821b3ede073bc2e24b3d996feb4dd514afb7530e3ffea2863b3fbabd2cfc1648f512e0a9f41c7ae3f0ee68d4c593b96153a76e53f32bdc1f87ec505fd76cb7764c6cb6fd1b820730619fc1bccca2e7bcca5b3e6fadd373229dbe8357aafb5a25b6f83099dbbd8ecbb96bbd65b83493f5afbfcdfe87ab732321d32c9f4febf5c64fe8d55b60ae9ed5a2fb58accff6a9da5163afa2ea33691c97ab9bdd4c5ea6c9b5b44267dcb517aadbf766b95f690d91d5dbbc77ecb3a95d690c9626d7f6fd251ba58b43498cda9c38670415bc87454bedff7f7add6d6b3844ce6b6edc3c5f4dd3f676730db5227ed8a6d9773d7ac0c2695ab5f6bbf8b3577ccc66032a7cfaff446ffb2673bc874dade7db7debfedc52ac8b4d4becbe2add1c57a692033566ff1d15857dbaeb4c2604ae7a8d7d81ca5cc415a0199f6dfde4baf5d8bebf3fac7ac1e19dee78e55e8acac2f98ac5a689d8bbd7cbdc7ea82d9d5e975cad8737cbfab1ff33e28b9caf57edd4561ed63367f5442ca5c75513aac7c4c78dbfdf8225b8cadeaea1e733976a19591f5a5fc5cd563d6f85c5b682d7c94bbd53c66df0b9b335bbbf0989149968bf22f67655bb505b
376bfe59a4b68bbc17bc76cee6aedda2af36bffed98962eb9dc467fb7b87d1df3c2286564f43b56d9960593de4a5dbaca9a9f324bc7a48fadcbefedda5db2ce31dd73f23547e97ff457e598f6593e87cf96572b691c93beaf325af72fefaa0dc7fc1a29a5ebdebade656f05b3db6f8514d6fb1a42a782b92c85abbe8f8dbe31a36d6f21f4e8fef032eac65cf0d5d77271bcbf20a36d4cb7ba3e7e6e4cd998d1d266a95d8bb9d95d63d6aeaed5a6d54647d95563b66aeb82ffae73ce3505135e091d649161ad2d5130697477ccfe63d69bfb34a6e3753fd20bfbc696168db9e2720c3e19eb19b3f6bb0f29cbea27980ddb72d937d25e91523763caf7d21d3226eb374b2f6376bdf2d7fb4a9dbb6e27634abbd65bfcebbefaafdd04b397db66594a3bc674f131beef5f42d9558cd96a63bd1c7bc818f70b63726cf19774b4b949d8c0980eff797dcfa4735dac04d3b5f7f6bdc8b5baf75fcc78e37d6e596c906364124c489b25f4f897d696cbbd98ac59638bc1dbd5b576bb98acd9666385f57584f44730b9594709615d4f5ab62298eee3edf637bee8f27231256defaaa3f242e6e41633b6f3a50ddabafcbb6a31ef5df1fd9297ad563d8b699f654ed6c8ccab848ec594eb2d6c2f36296d423099db5a8e659450de25a5cd15d351ea64a54dbecaec8dd2c68a5917b5d61b840c9be5286d4030ed6d6e0beb5dd5d9f7a4cd0766334aeb5bed2bdfd89eb4a9623a95ee9fe4759b75ce491b2a265fb75c658bb2e5df9ab4f1c0a4adfa72ceb107af7d4cda4c319d5e291b6516decb77491b2926b4bfac74b1ba6aad5dd2268ad9be63bdeeb2bf4fba8f36504ce9b435cbe465df8bd268d381c96cf33bd76ebfa8d368f3c4a434327edf1e7b7749a38d1393a1a4cc725dfdcbad479b2666b3aeced96f90ab7c196d9898ef647bb3ec36c88fca68b3c45ced3a6c767631476b8c364accbaae37a5ddb0618b31da2431d9436e46696cefcf31da2031bdb165f9246df5cad5d1e688c9167d0e9943f9abad8e36464c67cfd03ae6e68cd6176d38309d59e72073ee2da6b4459b22e6ea27e3ebb8aadff62cda1031d7b7aecb4577f6cfb26833c494fc6ead8fd67af9498e36424c6f9479bcb43e4b6f73b40962feed0b63ebc7ed41c8d10688e9607b7bef4a7769ac2bfe60567a57b3f01b3607afed4d264b6a2b4b471f4ac86c6b32a38ddf4f5f4a6fedb53399f2497fd47685b5a1ac8dc974faeedfa67e9dacd6fb92f98dfaffa2ef19bbd0db92c96c75b71a6cf532bb6457325d7c9429bcce94798ced834939727db0b95b1785b12999d7d9b22d421ae353177b9279ab8b94995df7df7add924c28255b77b5283b5eeeeec1a4965d65cacf8309efbf6fc62e57273d92e9ec8fa3eb0b1f7c09773025bf93abb2a430ba072199efd1bd5619b3f64fea60f6755f9bec55393af9723065eccaff16b76d973d1c4cd8ea958d2dbe1fb93e3219572b6bfc48bff93932a96dfdef7a4358eb7237988da12ff6cc6fa5be5c23934268af5fb6567683cd06932f63eba5f4d88f3dd760baf5056bafc86daf5ccdc86427bb39bd5edbeb5ebdc8846e311957adf625745791795b8db6457aefadcd2632695c2eaf7baec2bfd02232d9d977ed3d6fac36670f99f65bad8bf15b94ef7543e6f7e57fe676fb18751acc87d5c2e6dabd5adfcb42a6b5dc913e496ff5c6242113d2d8b7297b8fcf1f6730bf3578bbb6d8ba326e194c86355e7b7fd5665f753198ed5a65b23bfe62fa6d9059affcead119f576ac0932177dcc3d731b5d48a9c1e4761fc628f9b9d8dcccc86c945a669f33baac7cbbc8640d32daf8b91599b1a333d7adeb4d64c24b2bb56d1f5c0ddd250a0462d9d230e6420a22f375b3f2ebb76575b98e2050e890c9bd2057daf65e199d7d513373e3c62d0dfb22c5905969fb9254aecbde627c5133c947a70703029d1e0502bd3d3d4a86a4d06042ca96450729745d9f5da2661e7ebc23a59049d77f95911b2e0aa18c64482164b6bdd6ddcbfc4a681db2d642ca0c26abb73de41bddda6b5b930173ebfe9bbc5052d6f17918a1e433434a0c9af4d69a475a237cd68ace169d305c747abc0b08d4e33c2110082586944126ad6bf9bdae55575ddb9269442279e84fa6fb54b22e241215100885c6641281214510c790b9968cc5e8626c2c25bfea1874cdd67fcbf247a9117b7ee226a40432d76b86ce36b6f6e22fab63e5d15413a90a08f4da045260303d2e17fd5977ff366350d4fc4c274c96e6b65326e4640b2980ccf50b5ae6cf19b21b5f133587fed4c3f8530e6590f2c774e96cbbca373a2f7717046a8c5a704879c164f2bdaef636eada4bf92e98fefc2594b055b65c76f563b28ef139f9eef55fccb58fb9246b8ba9859252be4ff998f67dfbc7f69e84ee42b9c794eedcde0a5fd463b6aeec565bb25d8bcd751ef3ab8397322aadf46fadf198d5b5f7926b7dd1f23b6cc1ecb5985d4b9d85aeab75c8d69072c7a4f7bf6d83cd
51c2eb6d530293ba19603952eb91b1d933632a094416ef1ba7ef4fb43afb4a7eef472cef3a3f68a1901be51d7a88eb62887567124c162402ecd6dfebd9827d95f062162b98de5ca19dda5f98403d2f6e1384a67b1e5e0491f61701f1dbeb645eebdf1fad1435dbfe859d4d21dfe41ceefa93ec319d23d9312c27d0848064c7b09c088ae6366586ca75a2559036b5bfe70193a91cb69e7ce7a6d1d1e3ee0953a76f17658fc539f9f8d36deb173533b3f952c5be78f4351a6e8a5ee3b9bf4b2cbfd52b1c48dc825c910b3252d160c3eaa198a7b6ce258d4c73a6ffa75c08db6ee6540546b2c7a543a08f1252db80a5257e53776316b04a512eefa50ceb974d1ef5c327ab988c49874e4fdcd7dc881c3b4077c860dcaf45520a22ded2f47c09bc2385c4ebc7e40cb1e9b1ba241f7c542ebdbc5da267fd16639f603fc0de8f8212b3c8ab0e76719bcdcc121b3c726d88b5ddaffe5d3cde001bcb0d0ea528740c520a98d58a9e85c65e4669c7b59609fce565068cb0e73b4477ccf5af0888c959f44a8b1cb6bb216c0c285a815a8382ca0510a69ea851764339f69cd1854f13a1d6c04dfce5367d026d9ddc10610523e0e60800856666d054b03c85808fa04ed77a0789ffcbfd6259547ed7c3d14d5f72bb00ead4172161d5a212744378804a82a83103104fdaacc960c91fdf18b5ec2eaad67fd572d36cfef8834463dc9bbd311da158bd0c027224ae09869633c3224aea1f47502dfb017b2bd6b1188c3de087bdd51b947d34ef34b3985f73c2442185e6c2cb00f267084570ba80308db6da9c857c635a8dae1b4209e810c44644da01dadff717f28d50ee0630515bbf0092f9ff9167c6df0cf3c1cf77be65a800439d9d6703c2839168bd60f4649e2979f5894f4fe84ae680fc3d4274463291dea745949e2b5a03f77c1ccda0eb337768d3d0e352e8c013b6ff6c8f5cd509b4215c0a7d6a4503f648daf9ce402404dac4de3cf173e20a70c51ec43e9d9670ac45a6010adc81d1e8cf243ed08a58cdfd9b615a8293a2946c91ebe1c949a47ff5561f5c26241a927f18260f0b2c94741ac5b5318cbcabef72c09f4bda85d45017a3a2a748182d060ccd9648567518fb2651b76ce714ec96d31b0e2eaa69a2eb9654ff8e506fa71f414cf41ee4e177cc7ad5a66086f7d19c86764251ae75609ae00b37b6a4765ba7005dca4f16302e2175b65ff37bbda7b813919c98da27c57bb901546352fd33c50f6e1d2ac8c182e4c8919f81677c110f128d02afe9bdd52a9c9e274c47f5f11fe586bc1b8952b4e1d14130a67b49ddf66a7fdf7b2c2a0a8169a18be545df81dbf8223821c5c0a6c9a78655cb2116f9ac463faa3676941c069718b5652f8d59b8b0721303c544b36a21a3093463ec014b3d4dc558c0f1c40913d46ca6d6cb265e1ab95ef7ece6952a0070a2bc008ae0688e939df62896fa5346d21578b25e884d9fb1457cc175f03807b1a5c3ea301b3150326a8b9c03186f50a6a5c380fc79e46d199700568929c4960cd7a98efe92ff58a47d7331e7d835c223cb9d58765f1e8db13e67b5bf188f54b46a01648a838403f139c6bb181e813ceccdea0223e3faf30b4cf53d55e5631c8018c4f1972fcab182c0d2eb25920d71708190ac4b94ddb884ea6828bb14b666989f15b5496960c3fc5668ae3e6b5af09e980d915ef8d911f2e42bc65c544dfebc6e420572b4b1164c6270dba54b61c3c6089c0a605bfa75902ec64b166bf75e5f3419e327a54e94fdf2391ec9877d3a345377a20e96cde2090c076ec0afa0ddae35d67016431118fdf433dae573a10ae2fb364e8ec3127912629b2398b6a5a91022cdef2078b37ee1fc98d1d42e954b98faf1b8f455574b0522c00e1551adba281868932148ee8f9cd1d30ccc9598ad81e88e8fc1ea7717e28eada21739d9a832ece0e76a297691f9b6f9b81fbae3acb839b9f041276507d9d73e2130a9acd907550b00dafbd1d981016fe214bde7bea2e8a116be31d8526357eb96cee54c3e82ee5dd5e34a0bba612c14a9048a93fcd26751aca82de6d843b985e073bc7d16f7eb04e64cad38da5c751457833a30234e5f862293814947a245621c45b186438fa70aae124608a15bc6dd5b0402d1209bc0b1ae51292d03daceca0fdb6970a97246a23022d511351232235e8e5890c88cc435da4d8ea7306d3101d44555f87dc3ddbb72805d534d2d1232e0aaa66bb108a3420a85a8b0d0aced40961fbd5c4891d805323a4d60a0ff605227b01955bd4d5e88b0e89187753f5289d28c0e4edba7dac33a3c8adfb8a0b4874a888abe797ca6fb7fb86141403e48852c1295b47a90e4e0ec2db18df7d6328c9f382e1e60889d9257032f108b4fe1fae9268dfb00f10672b8fc2c5c47f25da613492768e6b1a3f317a8258bbbdc8fb5a699e61b39577ab4a288db1e1c53397e16ff1778273669ec16147081df47a1f90d4478c8e127c6a0573589f47f615736e5083c4904ed10d4ffaab23
20f6ab9f3cf3534ede14cd91f8118c81d166526e66623a415d4a3d823619936cf694dfe1a65c4cf7abb8978bb6913d487f6090c2783b5cd5e4effb98f682f7453faff6bf3373a8124d79540d14b3148b59343dcafd393dead3a9490d855e88fda2e8bede526c86b96a7fb4d96d9a94d1f1ea4d9edbe553058fb7ea2c73663b4b5553a7688d542349582de0ea5055f37c0a919ec0c0c718494bd38e419e2dd467c28828908691210a1c398e812dd20c4c444243429ae344c02086abea2dc19c62e64b0660c7cec55733c2fd742820e4443ca8615927106ec63b0b97b372df562dc9b69d01702cd8d7e0b1079f0b49fd3aacd5ac841698c1eb69096c9c379c8607d7dc691b479ec8ba2ebdae5c2454eefa3d91a013440b493e4d0f8c8159d00c8e4a7f0753149bee26d9ae8ccdb99c8f4d006d2031bbbc2aaefd6a72a40fc5512a1da29270df5aba9478e29c78b08397fdb2d46f81077c817e4b9489d99339f8255ec13c20f2de1ab44d6998d43830e048b00aa033d028d809f00470f39b1f6a46268ada2e268081c92bd0f0deb10cb70f312ccc8752d682dda0459ee6727a7bd55d9ee7bc856d625417f43a3590dd648423488646ab5cff83aa9a0d918cbc7908b3ed2adb90ba2883d506e3fc20e883bfcce2e6b464aa1fd9d14c57435df633dbd75b91a100b4f72551fa7a47af6a583740758af1c91325156447b626b74365a53b5f937c03f8602c8595e7fe5bd679edeaa43135068eb54661bfb701cf0c5f93bd92847d9980b5c3a534ca16f8e09ec5346d99672a64f9554dda8543e8bf9cbe863c6f42ac4501a17a9d28d9a552d99569a073258d4724a490ac6a5eff1cb3a89c15b1490a567b9ba14f0c17e61505d80cf1fb2f850828a09726fb321c1a1dcb795a165141286c35b6ac0310d891c9591d9d7770062f4732f749e8788fe6dcada9930c4b76b123941869d4bb252365aa478a98bddbcead03c3e29e61121e0f1ed61396c8e0106c02f18cc154a32e2e7ee1fce32eb73916f57868722a000519b3aa550c02bb0610bfb7dcdd18aad8ec5687494c92155ec6a8bfc33fb0979cb7c23fc5cb5d9556284eeaabde1f7ba9c6a6930c4c7df2a288151cea0d95bbd37420f316d4d84bda02ed1dc4ff3b2a8676578f378f9ea14dd89312930e5ca1ec96482b96c968386f4d338b6043e5f6a5fbf4df1153703860361c8c7784ab30c293627c2faa695d63c170d980c2309f17836f1b39cb84f486d593eb037189e23330359fb2ec940ab169a5e02eb6e889210788feff1a9e6f31fff481a39b249563c918eea19d345eaa180a4a2d4da934b91b36824ed1d6c4866e2d97247a711b6283756181189585405385a85a51ba5ca8cc3cc6af989873e85093a525a121e0d244d158e2242a7a85bcf1070019a2958a0a58add3dd53d6ec8d667020ab9d07bfde6796e884d4220e62c9a7b1ffa87b70070212d313cba777ca091dbc985b06d88c27fa8d34bc950a94aa34167b823dc51070b73a083f8a61104ff04c24cec24910e793b1e3399a0e9a792d1fce5101cb4859d54d7713eded17dc98dcb40d521db46c6de15b1debb0ee84dab5281dd6b045c0e38085b7358b7db1abb3b1bc82c41f2f17b22388acccb0fe0a8c82cef10c4c8eaf9575b9600aa717148e17805b17a2643f33841d99f4d5d3e56910c64c0c933a03abc356b065c6aadebf08c2cefa396cb3bc84fab79324b6e9f1f8ce1f9c696bd8694b98b3d7b4994a01cbd6255069d8407344eee029baa31ef59b3c0e610fd2dc33dd428685d5d6e7878d193d8a82befac6ecdf320bd0499db169105acdd0fa4d5e1dbe3c55e23a3ed677067dfc88235e84686168ffcd9017d1d93fb37210469f300220a9b0ca7cf1a20d963678f949895862de81b06f1d3dc45958363194b9b8fcb64a488c164417ba15a2ec4aa010602c0732d68bee394401f20cd86612b579cc6c1481853ff1e9a00f13bff307e75d0ca124cf1ed7c17019dc3f0cb47bc3a0d6ba9dbcab70ddab5d3d90b7fbbac868973cfd89b2c88c36aa77e43d83392caf359e38398861e4a00948a29d8daa71a4b3bff4a65427894d09cd70f82e6310163d2ef43906722d06b60297f50ef427b5af030a1d1ee92c667e485927a89c9b92c3a8f914acd08eaa40c837493abb915786ecaa6cf667fd21121e6e1988dd03cdc4b081e96118684758492108c70248c5038151c6af33d041843bb4807ce2cc794a54349667a5df76af7ee5424df7122eea24f9e677b0b566f8a203a1e90637cf27dc2bb7903509ca4992980b865ab615d73fe72c85b708da54e5ac0cd3e65bb39dd05cd063416a7513acfa0d3283aab70b2bdc49a4e75061f36b1bdf1b426db2f62a92986f8aeec4ddadce98bd5e3b4395f94eda2ecd9924d38c6eac139561659092181b66e5bc0490e6bdd3482ca2f6df8a995e41b21429e49ab3c7511b4c88da7d8788dfca8005950673bcd370b874de8addf80b89af2a51253a9880f149ba7
229ac76a5935ba49bdc805e6206f08861ae45eb2981678ee52e307f8097a5cf59bf2145401c01b1a028a922a854ba80109e6e924b2a722bacdec4b8e2d7430a99d02c3780b068919eab9fb980b425fbbd358727bf5a40cbf867ed0abaa2356912390bee28434c5420454a8184ea6999a0833883357f14d753512114ef80a4c2b9f530f2ce8a640090881186f4188d711058cd4ffff3715cb863ea8e14011ff1a74ab7995a30dec5c316ac8446afc206141fc4a7b23bf519f5eb5e9d5f38ca3758c6c5e32878a512d5bb1a74fe86a7769c7a34d70d95995a89ad05413872d0bdd6b2939a8e7b287fc4e014b9f929a51124cc1e788f04c357849f065ca836562a6aca6249661ed0f42998a464970cce5f703f64c82693fe24fd526160a8277421c3fa86146bf87fd16401749aa504c1c4de7e07a30f8a39a8da67c244b0c88e247232428219495470b08747bb10d42c222919bd24bbabe478df7e6e86c1607139db9466ed0b2de98ad1fd9da94a516f3e0af479b30f01014450fc2455454362bc2c0ec6de2b37001cbb8b5819887eb9f4244ea298cefad682ee85cc91145c8b01059a56631a0ae98b58bfe08de85de6e8bd4cfc64561cd55e03b40bd1460fb69334803dca2020afe63a63640b2a4d001ffffffffffffffff7fc1daf6ada61ac4c894a4f4840945a26a76534a49a694b28fe6859d2da7029c23a411d267fbce3d082e0ab70a0c0a9b8911266194a9d5512de15b5a8449507a357478ca7b165284616f4f7c4b121d22741261aa52a97c54c922c260a7bde5e4b152823a0f61f8987fc15a43986488cf65b5fb3929b310264187ebb809da316b2384b976ebfe4eac0661dcf79363ed4df8b8980e4198c4d2751f7425258927e14247204cbeabd5f92e3767eb0e4098b28ce988ac7e0f1d7f304993baf54949aaff543f98424e654f22323deab95de8e8834992d27f3f4e90f7b9b3850e3e98ee2ebf8853622925dddc8e3d98ebab4f0ab57441d534a2a507937e4bd59ede3ac7b6d6131d79304996f2bc5c4cbe946d192b3af060ca761e848c9cec6dc71d0ca3259b506d535db58ee8197630e5db6ad10fab55624645471dccd152b415bf0a293ae860f66c3a96b29b34f512cd82c73b8a8e391877de4b3b54758ef0921574c8e13ae260e5d5faf324a59f6ca0aa4f74c0c1e067a5a4f0299a6372de60ae1f7de91f56d4cc6de870833989e1b1f5b1c289f9db604e6f16ec8387c706637fb59959eb9d94eb1accdf772995643626680b183ad460d4cffda9d29e74f157313ad260d219e22be8e912619655071accf76e3907257bfcda07018e37e30c107c114607bee88005120844123bce60ce0e1a1fbb9228d5d78856411d66084107eed92be5d8ad9738cc28a3830c468f27badec5cfe73d99f1397274c718cc16a4b5a76849aad32d28870e3178e14c5ae7dddf894a18cca7a66a2ceebba9f4c1e09f8eb0504af6787fc16cad65eed7a73c7bdc0ba9865272f8d7d855aea30b26f9bb63c9ef38de1a56fd1887e9d0c10593a0f7dea424d654fb5b30775532edfba816ccfdd6256855077d25990593d2ab607eef2e527e2c182b94f89472aa916ee257306e0513f782e7b8cf8f15cc3eda72bcd0e92a98b25f50a9b29824c252337450c194aa43ee7cfabd0d11193aa660f4d3d1ff1494c6812fc0f8a2c6d0218576c475127d1410eba0f3be89b7a23c50d84ff49895e4eafa04d463dcfd2e4ef8a46e93e7f2496264d804e4f55d59cd042ebb28b18a59f376772ce1fc1b0db57cf2f6564a784fb864fb96325a064931a12309e6be522949d2e3888e65b407b23a90604e274794f6578f6092ea7485cc8f270f1b093a8c608ed17d4298dd6913fa11b53a8313061d4530e6c5d5f370b989a18308e690b39dc73ddf2941fa868e2118ae63774ef276f26793112d3d4d7408c1b4a7fd4bea585797c900a1e013117404c17432cfd25e4a61be8380602cf14decf493fa64ae5b858e1f985267dae6e98a9b5d6a42870fcc9ded82e89febf84c8d06bd30eafa65594b913f8b9b4083172699e17ad66795c64d68ecc2a493e0c9726e87a343c04ca0a10bf3e8549383d858faa05c1853f3a4b030e2cea48d062e4cdd61a34b0efb2dcc6969c54357ba1b93b685c9cd84130fea5d2e8db53005b9ba7f6f528dec244aa0410b7314f9eeb8154abd476fe06454408f406316e67f13ff2fa7912c4ca64b6c88bbb4198d5818f4436e78ce932afc0b0b538a32eae2af1befbea2cf2e32c6fbe4010d57184e6cb93d4b63d9b5d40aa38fc92e9b2709da3cce0a93b070ab3bd135d5b55598565e453b9c7a0f175485e172f3b442cea9308738c1a42494d434c9448579940939a2ab941cb2e414e6349ed6948ac514a60f1f4b49bf7f639f2f8561439cca3f1b614245a430edefc9614cdd44b59c51184b52a792d0beeba92ba23056c5f48ab585c2248fe8a44b49f13fd40c0a635f0cd3b2237a7bea4f98a409fae6c4325e6
6f484613d9c27d9e9ffba279d30670fcaff65748c2d35270c323b634dc4e2a9a56cc2a05f4faa144b4b3251d684c13ea528ba24b982754e26cc2e9f4a4c344bf92a8f09a3e79c27a860be255b7a09939c2ce327e46409932477b23d0fb312e6f061ccdb3e67cae72961ee6a91f339dc93305576ca29a792306bc936614b9023612e3593b4e2f476d61412462b498ea6dbe26c75fc08c38686126c46d9666e8e308c7e09656129abfd35a22ee5a76f79324698c2e8f29283091f2dc945987487df5933cdd65129c220d5432e96b475edc9449852f98c275951e5a94684d1545e4be73f9a12d621cca1f27353653684b9f48caa696ec95b5f21ccf65a9e657b1bf2530861ac36795d94990e26cb41984adab8303af59d8ea1204c3d5772f4d8268130a96f159d3c9785271310a6170feaa398f707939eb8ce1f25d5e9f1fc60aa9274de5cb33cd6671f4ea2e7a97c30a8746b6a4d0eefc1305af5b5f3d383a9d3479977adf2606e4f2b713c3eb787f0605251353a7fde01a523bf75745b763025cbe964892e23bce33a98db93eda7aaa0c4d1131d8c27a7b5cfff59e304d51c4c226e3fa5bade9d6b7230a8b86c085d495279eb3898c229b94b573a9d2f7d7030874f734bb2f5a8ceea0da611fa3abd93dc9f25c80d864f9f73925c1a27e46a83f9b6530cdd2394b43f618349f6d0273c8c5f8850adc15cc283853f25a906f3e67fb231494beb527ea09106a3558a7a615292795930d04043baa6222fda98badd67704c92e5822513fa4f3683f93dc9e6e2234ef4538d68d128c39b236a3dca43ff6430f5094f3bb12d4f2ccc53a03186e5c47eb5aace99a3d1c00641430ca6fb71533ae9490e8d028d30983be9c9398e99d8f97f3018bff533f489de13687c214908a1272bc9f60e99818617b89492ece9953c2e991c63a5e47212dc353b4bcaba60b094240b4a9cb15026e702da4912b682ed5b3065d7bdd07f37b1dec446190d2d18b3637b4405bd2575b60b34b260eeffa0b3f4535de58c05837ce927a9a5f76b562dd0b8825174ba7a7ff9bd5e1b1034ac60d6fb18f2a7d62509bf88486278a0510563dc75ae3f51b451cb081a5430ca69cd577eda8c34048d299853896f95518286144c6b7931de5252e5f161e00b30cc17607c6101309640230ae6b461266459f2246ed5808dc740126840c1d47af77aabb9f7571d197c6a1288943ed07882d972cefcfd4a97e9222798464b9824f9a60553e588ea075260e3068d261874568ba5f35d12ec438309e611baae32aa74c9f89760fa4f2d0f1e461a4a308cfaa97473311a49301a48307e9fa89ce4c5cab4444444442e30822faaa040e308348c608505348a60b2e049ed8367f9a93c114c3a5c0ef7ea9097f46808c6124ef89764e194d7470826f939a5f3a6e4f31f04530962a4e8126a54740c0493c9e50b93730c955f7e2005366040e30726cf2642ac97e8dcedd2f081e1e754fa7a5637f97a61124d2c4194ef8c0713e585294f3e59ae4b29954fd98529c74ccfe96bc99a5117a6d8b9427ae9248f342f176613c5a4d071e473fc24b83087ea52269c901dc200b7306deced28c14f34a97f5b987c4cd78d9df4935faf85614f8997cde3cd0851222262a3f40861005a985fe3fd439ae8502545850166614aaf544a494a34662c1b9558290c200b637c8df8f825be418618381043066298510608104b14068885e13a590cbd99a16b550211113050f0051826a0c0ff05fccdf817e0e8305e61005818438c0a3f7a55a9332c960703bcc234a264cb922c8647ddd760005798827de815ab2ca5ef7f800d066885b93ab8899d7f56984ebea4a3b4a75661d0f1e2659bbf785f160d065085d1535ab2a4d36980541835b6e4d1974c4f3ce50d320296077334b17f314205551be1c19c9f4aaeef50a94451ef60160b969277653bdd693b984f1253729ed1296e9eb38f3a183d868c3c1db70f3a1844a498aac939367e01c2c71c8c5ed5975ad3a3dfad363022223936081f723029b9173a9d8e22e45c3272dc200355a5f107526063031f7130bdbd78ce6d117ec0c154bd21cee449552384fc06c3094b928c2825c5b9edc30dc650395736fb102faa6e83f154123c89a53d7f2a668329554ce55bf913aa3e35f8588329495290ab5522567efb5083a99320cf44a94d9fd309e3230d268b93cdc35c7bdb0f34983ac9b94dbe6061bfc42b838f339892fcf94e2f986630a8bf2911b1d79b8c31f2a30ca6cfbefbf4e95bd265561f642891187c8ca14460f0210663a7ecfeff498ef0eed147180c3ade7592aee575aafc008329a8ebb4aa612599881f5f48b4f27e4ae6bea32b5a840f2fe047173eb860ba15a1e399ec5a52098d685d60045f1c181f5b48e892f2896b269b5813317c68c194293267abd42815da187d03471967d8880119648ca12602a6c147164c27e5d6cb0ff36ba5c682d1441351b264fc7105d3a5c9bf1e5633a4988c688d314623bc7101fbb082415a
124a0a27287d54c164275b584917199ff44105b30942dc96588b9524f5630ac6d39d2b5e3839eacb924344e44a0a1f52a8c7ae82be34eb88368e7c44c1187e1fd4e8fe934f8a65213ea0604efa3a29e1640b15d6673f7c3cc15c39fec9f6217f4b0e6a1c3e9c6050426d849aed174b761e3e9a602af9bf4e559674693e138cae737289fae4d17d4b1f4b3056c8ed85f720bd42c7820f25989572afb636a25583c6513e92607abd51ebaffe41da67442b89f08104935e37113149f824bb5c107c1cc15872d8cfa54fa76ce50bba0c2bd6c087114cb29975ca39fd051818858f22982b8b96515af10aa41b3e8860f2f0f5b1e1630826155392e93fb73a1d8b883c0e32d0d98710cce99fd2c79a240e313a046747f8088249bb969d7025749fbc13101f403008719d72efa71f189428793cdbcb97b6cf870f4ca7847b923acd4e54bb17a6bf573dd3ec99d59917663f312e876917e64bb13ba920530978e8c26052ec0ad282147de9cd85e156c45a7f0aef26453d7061521b5627f6a4bde4f72d4c4a34934b8f96106261a5c2c316a6d1962584164fdfb8bf00e38b303af0854802005878d4c2e06954f44ef2040b045f84d1818267a2f0a085c9049d724e4df474f929ea310b53e99c93e487125e560a030cbf31867ac8c29447bec493f3071374ec110b73e849629fa699245d3fa295b030899e2da785cb7e498ebfc2bcb29f4d87911fb63d5718a45eb8ab987a51d4ff2fc8c00538f005185f64805b61ce1545890f7ad45f80f145181d288188c8611c3c5861ce29af7ded69932c9f5661b85092b4f52699c8e7ab8a2a4c66f1df4cf2d23c52611065bdee1b7a5498ca2b972067654918a15398fff39aa443d54992200944444c61366d1f43a7f830e9e788965b99a5c0a31426b126a2a2090b8b14a6ca5797a44c4bd1a35f1ea330a8b4a42e7774f5a44daae0210ac42595628685fa49d9300b85729234b97584b034081ea0306c297793d26cb6fdd4fa82c7274ca394f0ba31a663e4670b1e9ed8cf47a8a464ffd87aa9c4cc071e9d30c8755c96799277799ec18313e66afb207b46c96a0f1e9b30ba7c3efd57a2ac08b5268ca76a9f522aad15fa66c274aae41c637205134b1413264f51bdb43cccf7c453f0b8843946e6760e4fde37264b183e09f73032f54a9843ac5e6c9c10254ad9f14e279dca491846d6f96913f737c6e22109e37cde9724fec6b5c7ac8cc6718248184c38153afc2f48984dde9224b1baa4bb6c5a2c783cc2bca1c3ad7bedb7e5bc230c7a3def4982c93a7fb5110655a6a4f43621469884ab9c1115d7744aea22cc25b62725469d28153c14616c99d19acbd09fa64e84f942a924f2fc74493537c10311a60ea5fe63895a34213b84516479ae9f4d37d152c510c693916b2dea62f12984e19289dfb72bb94a8e9ac18310e613e7da565a2a8e92833c0661ae4e9ff7fc5dbbf534a265b7678011820e7c11820e5800ace021880493d546784c4bb9dc2b7804a2ecc003101e7f309b96937ac99320aeccf48349124d921f562ce7e6e4c6196886471f4c2aabd2c754fb45c7235a37cc7078f0c16cd9fa7d4bd212f5eb81c71e0c624384d0173ffec9ab871ecc79679f42ad524c8747b4c8f0cd83f1477f1615ab2c85c5ca0b3cf060d4cb5de27429614a850d1e77302529dae4947e34a255061966e4c03578d8c1b417ae9ed54e2749504d0d1e75309874822af9a4242cc8fd821b62ec1b3ce860b89c427dbfbcdab527c1b5c71c3ce4502223f08883c1bf93a0723033554a64c3030e1e6fb0bb2aad4d904b39e584cbc30da6be156136ba010211911b64b4c16462d6ad9b563af1f488561964e0b0001a3cd8605e1371d23fe453a9ebb10693f49674d8a8550de69c448c49ba5aba59a3123cd2603e216fa16efabbe38d06f3e9f777cf6c57caca19cc56e2976a879cfed3ac081e6630e7522fbade64ff90bf0c66fba0b166c2278329492b26e80e7fb2fb670cc651eaca934a9fc3e92e06735e92aa7ab6a38d523d0efc220c0c9471c3230c660bbdd60c133098c49ab413e344bf608e0bdb3eaaa41691534484081e5e30e9dead10b648e2d10503787001011e5bb0f18587166c3cc0230b8907166c5cc10a1e55708007156c30c0630a0af090c2023ca290e3061f83067840c146033c9e60e3011e4ec8d10406783001011e4bb091000f251cc0230909f040820d0f8f23847818e1c3a30805f020420d8f21d808f11082c95a4f7a12b2e46852a4038f2098a4e705d3d5713988fd8607108ca74d5cae8a4bf13f1f5132ccb8a1011a78fcc03c52c7d327dd4620228270a4870fcc734af8af98cf883f9d8e5e18466e87fb9fb99342e78539f55d6a48d7ebdcc6c881830259e8c60d7260043a7661d6fecba3d639268b98820e5d98f3f78c5b124f9ef4940b833ce563c26551554a1e17263923b733e696e5e4b730e7f076b2ab7633efb185491ef527a59c949dd95a0bd3e68aba4bd2c51354d
8418b8210f9f9f41acf8d68ed143a667134e89045a23e4b5ef1b2a0d37c33e888459da43f8bbe57d24a28830e589846e909757d7f1e2c7e85b97410ff5713a54bb4ae30086996bf9753fda3d90a83aeb499cb96040f156485e9931062a3e53eeae7566138418d94bf2df79c63aac03aeb3d493756b18e541884fa0e27523f548a860a8328d145f3d55abd730a738776095993c37a25e9308579bd2b55d29e3ac90e8e60042222ff171831438c138cd16f0111911cbde82885d9dc53f44f82520f1da430999c6cdc3f49599416f3d0310a93fcea1039a51485c9a4247d50e1aaf21e33ab1bfc1a133a4261348b22e71b4afa789503c7a71c37100a307580c26cc924d5c934dfe4dc56d2f109837035d3124ab77cbfeb095392f1b11d6d7a2dcf4975c22ca255169eede1c39c305b08e16b9265790cf9a31074e00b11911c39c01011112903f843c7264c6243081d53673fa2f239d0031d9a30e99ca4e7d4a64bd405c7db28a365a0863b326158136d62e9e94b511b699830987cfaf1612978d2df20a38c1c8f432f61f0f4248d49e9f1a292a8250c4a495379a652877974a4a31285bcb414b6939f508e0e4a18632f49b3b7fd6fb28e6889c19330a69e1cab249355cf82235a39ce193910051d9230cf8d8e234d8f5b6726e8888449deaa9343b4081dcec006196640c224b576fcf0493e95e47124f508537fb8ffa4e289122607031d8e309b8ff4183aa9967c6a84496ba99179aae324695878bbf6fcd67fc6602a55f1fe9292a6a76f31984c3f674feaee51f40e83f9d409b7236f3098d2fb8ca5e5efb8327fc12409637ae5c2980a232f9862c83a1da4c9254577bb60ae8e1142b6c75b2571c1e4bdfe295f0af22eb905b3579292a4ec935269542d9864cb2bb2f55d9dd25930082576d6f4bc56a6050b063375bbf93f3a27a52b1847c7e7cf355ac1949456e8576a5530bc5d9864aa51c11c94988a554f313d9e82c93d681345acb943009182793e5a7a5039e51027140593e7f4f87e5279d2412898ffc2525e7d47035c04902718fdce04213f6f37007182b95e94144e928334c1248c300bb794da43da39468a4198600e972f2975d05982714dd6d1b6a54db6de4aa8d24413fb241527c1e01e3eac2a5a4a26854830e91227874dd1114cf2c3e79ef017a552ca08e6120f25fa256162c24911cc31f37d6dcfb23beb67d80d102298046d52b7deba7e978660b6a4f36eae89eff974211894ce30f9fa92f24e6f100cfa4bc8ca878c31690182b94dcd82be24efa9f87e6012a6bde1da83f8c0e021e7c45ead17c61033a6a266c60be39ddc2edd33673a55bb308cd03fd5d176aaf7a40b53e6f8274f594f12940be3db6986ac786279bc71614e62e456a986ca6ea2b73058f6c68ffdbafc96b630281d77846ee911da83b530ac9afa3149bfb3f3460ba388b0aa30a2c62d9d8ca8cec29c26c9739e265998d494cd78bbdc79b13069930fb28359a758725898525ef74e5afac207f71506add559bb3d4e5fef0ae3da57ca8b55d5688549c3c43c196b6a4c8cac30a51c2ae53c44c74f921aab307ca585cdcdd15fb3aac23c2327543249eecb3aa60229a673f4f31f1526b5d649164b5e7226750a7369ef98b0976dbf630a83fcce27e7b694c230ca344d7c2b15b498235a5a17a8410a2e4cffe3a834374f8ec294bf7766a14d8e373aa2304593f34fbcb71b62b4198f23a9120ae37aa7dc97f67fdd6441619e312929c1b3a738ef3e612cb73841ffae930a560d4f98aadf2de7cffe096a74c224cdead3a42931257f6a70c2b0253d4b14d9ebf994d4d8c469a244528d4ca07396a96f2575500313c68b272ea684d2392b4f70d4b844d6b084d1774d292b79b6cd4b39586b54c294cff34fa87ad9b84f9712352671cea34b92d3237a811198246a44c2985b6a7916ae4cbebd06240c761f4a344d5a55a5a850e311e6f393ccd4f25f8df838c2e897e761d34ff887bb11460b4a2935a6a48c30ed8c878ab1e139afba08b3d87fe810a322cc21a4f7294949fffd5f228c5a6dfb22564498d724f1b5e4b5f8b8dc214c6b9f3abeb4aa5a9219c2a4a5754dae871a0b15c258fe223b959cc27eb408618a7731fa6249d94f3c0853ec9c37bf54490d41984635c5244bf741cb1fc97146196cc61965987106086a04c238bf7b79dfc9c3afad0108e325b1fe6dc6624972fc07a3e8b513912608ed9cade107e308134c7cd0a1e4ccd40753ae3d39b7867bbaa7f960b6cb9d6c4daccb4927f760f6ed284a1071e27f146be8c164d2af3c2fe5f150230fa6ee9cf3c3c9192bbd6fd5a1061e8c267fd031babbe5297e871a77309c0c31d915cf945fd80ee6f31b3952fffdb2b53a9873fc93caffe470f724235abc861a7430497174dbdd44a7cdd2885685e161ccc1248cefa7924c3041462a0793955a4fc25efef01aded5888359f5d4b8992eddd58083d1920569e25c8959d1dfd57883e945c929c775bb44e5abe1069397fc501375
d406638a6ea5a446a5125a6683313c4c4aa3d3ef46ff35983c3b9d98e8e4e33bf2420d3598b388680daffdf417a7e13dbd1d65e5a4a0c1b4a3820eb26cf694eccf60ba9445597d9520f34433988292e4b89d5b77654888e5bdf86825329832745fb85ca5a724f93118743e4934a99a952ae5c5604e92ac26bbc975188c25c84c5d914b6697dd81c1f8a1ad2c9fd0cc3b551b6a7cc160d2bff5a94c936f7d2f98732fe95d9243ee96ac0be63856f9ebb76235d4e082d1848cb6d87b3d19f7196a6cc11826c5c595ec4b7d590ba6a4829e606e29ec77b250ca25c9e432374b96e5ab8105f39a78d9f5b2bfa6ac47861a5730a8f8741f537fbf6a5bc17479a5e6b3fda3685905e3a9bafa31b1256534b030d4a082d9e4dd855512de94245502337a8c0b8888dc7833cce831d2136a4cc1a02ee3c54ae5d3785f0ae6e46e626b3b59ce698f42a7ef45a5ba1287824157e542cbefe8eca12718df534a2654d79b748f130ca33f76f487c50a5737c1f4ebe2394ac5e24b9809e6f81216daba4b087997600aa74d9e3e69f9a754259844f7c3aa754e8a154d8249feedcd13364582b15cce6455ff39a5cc23983ea628e192340fffa5114c72cf52b789563fa35204737ff0a04bcf44308ed24feda252d8d786606a2bf9a0b673496e5284600abf3dad2caa52888260b0cb26b8291ba5210482b1a28d5e2b4950e1ffc06cf295ba134689dbac860fcc27bf27889272744b82bd3069da28513cc70b6305957f17a61c4fad9c989393587561ee74229bf5be269d960b532cdd494e62165c987cfca3b5c5935b18e6f38285314b2945df1606f9be9cea738afae0a31606333dd9246dba4ed22e2d4c679f25a857b22cfd631606db39f9c35bece0264916a64aa224a53ba59cc6a4b130cb89d3a5df3f2ccc1657be4a99ff0a735759d261d29624e8387fb8c21cf369f7e7cff4848b76e1a315466f11cb29f7092b4c5762d99e430951959509c428a3590c1fab30965c7f69534a235a6080718324068e378107ecce48138c81decbc8b104b8c2872acc2687cad29fdfed4d38a265e3ec2315e6d3baa3fa3e16db3f235f8481011f438c3350819f850dffa0a75398c4e56c41aef7e9ff20539853bf9f32e5d9a475f69ef0510a839acc7a68cba7925c2105fba384970fa382430c1b55da828f51184f3d8cf819a5bd8479440b0c1fa23095ce282588b0f1d1ea88960572f0110a932c73ea39948bc8523f4061505a82998fec75733ba9c2c7274c52cd57c9755017a935e1c313e6ce90190d7957663989a9e2a3139e97289f5192a5d4dd233e38612c41585aef961bcf95c3c2c726cca6a264e7ac75c26e55868b0f4d98fd94523ba993144d4b69848f4c18c34c5fcfa43d7f193161102647c74f5f6773f925cc259bfceac5d612a670a28dce58748c316e80c38c4a18a465d1b6aa33a7ae82e3c718a30b143e2891ec14a35e9754b2923309e3b5bd5404f1210953b83c16a77be5d54f9130da9f8e9734f793feec885652f7010973c9561e4acb4d3af9f8116691134ee73fbbcf159bc287234c1b5ebe255e0aaed553e1a31185111f8b28910f4554216e624eb893b4e1de222262348e302e161f8938251fb177693c6b1bd192011938928888162b3e10e1c9cbbf24a9a9f40f515ed591711ba2bcd4416e7af6a51089124fc7bb6cebc10f42243d679b9e7cbaa3083e06810e42bdd4633f43fd108439c6cec20969b2cfb64098cf94cc18cb4e76150408c3e9b3b43872c29b29f9835a31535bd62a9c2459874977ea6be342c1871fcc1fca4a7a3b9d4e905e1fcc7d33da3d7ed976447c30c89f51d99eb64d7417868f3d18adc2c99cceed430fe60ecac27b7b0e628401f58114d8d841c82d4ceaf7dca49ab19c64ef476a50467ba092625b983d6a5d9ae0ba9b6adb1b426a61b292459f2494561b3dc9a28579ce723095ad47cdae112d1411a980c81f4266610a7f4af98b3a65617e53d9b2a4b8204a9055482c4cf94a2c8d9adc1a426061ae0aea4eb614a542cbf58a2d9cac899924271be20a9328256de55abeb69f642b4c97735049105f92841056184ef57e0a95157659648490559856ad52cea7a454753f4142882a8c16224edb270f4af6ba52610c1196432ba9937fd4235a050e21a830a5c88a25c4ee9818ea0d9218669071e314a6bb4ea3ab124388294c7a6e4a6e543ca2658618fe38105619424a616e532a888f9dc7b2a46d90916e8861e3cfb8f134781c6721a430e9b42cb173fbd500055e06629861690819854907a579254f68955c9510220a836953e23e09534444048961c6cb60c40621a1486aaf8db4b424509845ee5b2bf6c204f33f6152398cd9be78189f8f270c1ed7315466a713e6d58dd16aba93cc4b73c2d827e7123f7729db4e6ec268ff29ffd60535612ad124415d86d225f665c214d5fd42ef72929f649830d656101e6fa54b187ecdfa749493f733b3446aa724217410cd13c7e7b8125209d3d7e6c5c
ea92476273672942aa33d40029b4008254c6652f427d5d2d0df47542761ea0a71399a509df97b44ab8c1049e029ac49254cbf7846b442502261f6f8d919fa450d8440c26c62dcbdabdc0811d3234c82f4527671438eb0cf24cf49cdc7eb819046983a4efe4995d629df87119bce7711864f1e4adf7618eff028c29c336f17f79599fe26c2207fd49a98719bbd154498f36a86d598f010c6109692a9b8945ec7378429345b94ecb9fb4ded429846cf887117096112f3c9e329395ba9f106614a25d674572f47574a1026a59360d28d055d154e42026158bd202374b460b27e96e0075260430077080184d12f089d4b3ed3bd7fb27308f983a93a7ccb7786129b37c40f66fd90163d7f56d2392c88903e98947fc79bee0abab3a61b62941c8f1005c848373490630c217c3049b20513642f69442bd9aaaaaaaaa47857005e08d98341fe828dc8a8a454a79422440f460d9d517296d7ecc75014217930c8e9da93555b1d45cd89103c984d9294855039acc70e22816222e40e864ffd6582b29ff7940bb1c3973ce5189627b7c812132175403fcfc949591e997a6144081d0ce7962c85559d7f9768089983f1baf4ddc99d24662784089183298975727d3dbbd1f90e217130675513eb66c74d12ad1d42e060d6516bf2bb7f2bf2f406d3295137f973dd60186592243c96f4b0f7cd21a40d06cba5a2ddc929d5796183595c4f3ac984b8793743d660109fd54f84e5a8a70d7341881a0caf6aeb3ed28418ed7f2005365610920653baa8ef615cb48cccd160507d25447da666083983f17432dddea5548710331856e365e7c652ebfe66b40742ca60127e57b9f45a4e525a158490c11c8486689c5092204ace066f230521633085eb28738fab188c97ef66a287c1d8b39e2f76bb6f554e02216030f98a90f539f379f9f305bfea2c4797fa944b8778c118aab2466a977039bc5d305685cb9f55eeec20a48088c87e22840b668f53e99d2a878fd43010b205e397588a7572e9d8fc46d44688164cbde3563a9f73860c6ce00c42b2602cad7dab35d163150eaf1b2158307a8aff96693dc93f7805732c2d71a25a4b4f05252683102b184374ce3dfa28a5727c8c902a1877749ea062373eab09150c5e39dd54b38429dd4fc1f0164a46cd2d947c926c0c42a460166962394a84be5130ea9fe04957f09b99110c42a060aa0f13e32edb04bb31758a11f2048396cac552c1f166dc782718ec334477b0ff8a4fa5902698ed44d99bcf3295a4f94398903eb77825e7c32529214b30278bdbd962bf4bd4625a1ea204a32925cededbf478990483ada528a1bed46949428241e65625a9c352dd3378f4264844c8110c9eebb3ca5d30955337448811ac9c4e652a8414c1a0be6c438e1cf378417f811839ae6c541e428860ba946ad22e88d2592d22225f08198241e752234b09937bcf179fd773e81e5d164282605a114b23d62516428060f4dc2a2ab337ce83e807a65c2b0bca736a4979eb19427c60be18efad7029bd30f65b8774d13103082fcca3a490ffd1ff7e27681746f9ebe02587105fe6d185614bacbb7a13e335d53180e4c2ecf9ad92fc25579b1939ce701b2fb821c61e105c18434e9e2ce9e46c80dcc2601fefe4cef3d3f6a609406c611095217fdd4d7eb58db100520b93fca9eda49b574ffa314e9b154068611a0f5339dd6813543608209ddc13f17902882c0c66e7694c454b712c4cb12788f87776655b16781c645ce002144062e008c1182d8611119101b2c121406061ba132d5b392f496e4a90571874b77b988f67cfc7415c614c533a75ea6cf6275c405a61f4b426bffe4d320f2b082b0c6f97e4ceee55722d02905598e6c4662895c28a18f35f703300518549f989d696f74ab23bc9f022032415067d4a2721ab2f367288711a302387182ff81188542f4050619027bdae8a9e9347791c8cdd2f905398f7649de7e422105398f39c8a7d0b3247ff0909404a619ad53091a17320a4f0cdb39e308e02b71259a74e681193bb358088c2248d5e631c31d019fe38d40cf46588884c012414c9793372eb2f5a308d68bd67523a0001854999f6a58f936f4a5f595f84818130ea4a09209f30990ed1a6db664d8d50d69e274c715775c6ffed8439fd42fbc6ab73a5ce0973d0a233672a6813466d13624e89aca4f6b1268cfa27ce8441df69717d4bb227264c58a6b457d609209730bcc5106192785a3d9896307b529245bda036dbca2e8054c29c7358f98dab5d476b440b475a008412c6ce3a9fc451b2c792a4500032098305fb2cc2430802104918d6b4898e9eff9ba41dd1da178044c22444d6961c4eead0146d00020990479845c8888e95b63d3247183e28c1e4e82f96e30bd208dd928c2b418e52919719234cea434dd0bb25c9d7e58da7c12fc2a0538cee0bdab8d1c8c6005184414ff46c2ede9ed7e28856810148228cee398d5a1c2585de685c00418441abcdc44fe5fdb1191ec27ca99314bbcb452ea9ac00
62086386a598a173bcbf7c17c26027c9395e095979da11c27482b00f254a8e1ab90dc2ec39de9d78955fea910119382ea005104198d7d382f212f40409842645b7aab65c5e5e6a6e337e3a6ab6f25a667c8e0208836849fa94e7e89c251405903f18cc2d7aee93a44991cb81f8c19493ac5ee27f4ebcd79800d207f37d7ae750e92e2995ddc2f1390e02207c30f969cdcfbe37d96ddc8339975b4a9fdb4ebe7c7a309cbadb7e92aa630895f200240f0617578d2dbdec652a081e4c424c124aae7f8f27852077309557e8f057b205fd41c50ea6917e27c9afefa4ed4e40ea6050d91931b3e8609274ae24c7de2b07afe760be2472b1928a96b58db22d80c8c1281e6c334e922a8eef7130a75373e9e46e372d4946b4ccb0c1dfb0510aa140a40a2070307749c9dd544a81bcc1f85d82e90a9971eacf1b642c1540dc606e93e4bad7cadd9877643d90ba206d3096a8649dad55a37232a2555f84d1016c02081b8c96e4ad2b86aadc8fa500b206a385351344895592d8a906935029ec84934ec6749804903418afd4766e4bb50926090de630c2ed648f3b1df3cf60d879ad1326577bd2de0c9e24c4a993cce3653099783ab1f3956345880c268ec5a8f451c632511c0c4641108441104322cb05f3120000001018938582c1783826d5057f1480034a36264e323422342616180d46e2a130180e8442e150180c088582288a813894c87890ec04f64e6744f1281f7f4440079df5fa56c4029dcd91f8cec1826433e3e1b17891a4f29c7d76d902aa8c1b8743432e591ade0f0256d4bcc0b03cb557a467c5748d0066e290f9b58ce807cccb5bf0dc38bc206017ea853212228d35782d4d514d2394ced2fe0e90679227e88cf3f3f7503c099b500faf130b09ac4affd61d425c8ab1f3122ac78ce2217c7fafac07726447dca9862ee79b3c78c12a1b718c4bfcd02de0a1d3b5723a9f2397c56d60c3f7f5edd2c6f2db8b276688f22c9167cf3613836af38670345e114b85670ec50f5a92bdbf64196e0bda9e24eeda6f76b3f05b3553be6dd67aa9733baf40129a538c53b412181bdfbc2186fe01713576f63ea8b6c59d17e82972b6bbc0250f3019f79387ee76288892e289d3793677047cda414f4619d8c192a807051ef841266650da3cae93d41664fb190b81809c1e5fd29eaed0589f393f3ab95336680a67a147bc327ad6062e98bb2330b087e12e7013dc78886fe80b5ba882870a0ab679a6810c826704026ada98df4907c4428f6595775acd4e76bfe93878cc3f065101775944e7359c336d397ddaf2da186e065665e4aee8511a3a1577d805bdc5eb45e695c8332d07cbd74847d6bdc267cd784bc2039322e2056411e03ad87d6738bad4be4fb9bb3a452f8bb46014235060e8183e6e43bc92779bb9331fb981fd3042843cde1787bb648640c14e1cc992e803c96c087431e885db63b2468df0b9dda7fc84c604a1f3a4a3f2aece529a908ef9bfb3325460dca9428e19d8e165fdb4c8e439044cc3c90206d18a21e3ed624df0178315d15705311d75113693750d81602b61c4cd135c19e64fbd9a1b6168856852c70e009a52e60fde69bd3c5340c09be31043d74e7332b03a01aef398c0c2304b940dfaac23456620b4713d9588116d5e0d37dc805e3ba255b55ec39ed77e28fac13e2f39d615b9e96124c029bcc1bfb352bba8bfc66db0e2757bd8ad79c189c697f4929d292f05298622029917b5aa3bc5c3b521ddd75f1f8fe9d3cff7de95623a7d53672aed744d887e7991142c76abd3570ed81078f275461762f1856d55548b3fab00e18c2d4881ede46c4021f1c08b3f26b1fbc56ad77663fb31d91cb980039a44e0da1952b88494531d9c9e1e9182cbd7a873af010310dde0245856bd6775bcaa684cad72cd02aa65166819cb0239ae2c5084057ce5601a5fba6938a8d7ebaaa7ce1214014e35aabe6c2a333aeb9bd05bc4ccba0d68115fde854abb027eaf72e92614ad0dae3144bb7b6851831ca55c6484c5370e36156a18159ddd902160edb4ed7e710c56006c46cd84b15c424880ea6e32eb7cb77b89d7b11620ecbfcbf8609423b62ed4ad2aa94a4558fa0947c5b31e54d4187f6b10a9b99cd7164dc7340e81ece15ecd15543874cca05fae6c7609346e17d590a871d01b33cf44fed400aa602d75ba775764d42d77cad02aa55ca9575e64502d736415063f12afb65b1aba80c4db559e2439d6e5e0786ef2d4ff1449ffa2e8a5cf2ea9e3d6d9d106853ec40d5e5f89aec88a11801875f3e0920bd828cb6a271c305eb5c41d4cc08b268834c018d1335d715952b9141a1ee65c2e39f05ac2a29d146acc41cde469bf6d4cc1e9d94745142d0f42702011b8b070744a8b9dac87c085cf7cdb25e4485bf847c33b7817fc821503f9791b8f85cc2387087d8c42d6c0c5bab13e60cc948873863163df74f5540dab050acb0ca8ec0d2f89b889a27480099fb397d9b4c02c5fe100a8c2ebc9c5f7f42728ca841b7
c07b22eb748bea3324dd7c56114a1494781342ce25adf41c3702520cf6040c5b9da6b8f57989c74b1d999067c766924b2b03cbcb09c1df27e561d6e34083d06bfa3b360a37b63b8f570dda0713046e378110505b450d0ffc43c09f127303e9a1d69fd46309681e9b848f361877b57c421d58c90edaa78246928b0958a2d58d4bb09e66021ea808556854048489956ad82999c9383474cbce81b2218e11605a76aa350b0f1a4f031d9de5c9a9d90ab3b62b563145c7488b757652cf8ae07870ba36f1e43d714cacf4dc9f966c57fac67ffb3fb9f7c319b736e80ae55131b09865d4a0e22b1fbcd0e13a193fe3639d705f733822b9d8fbeb5553f838a94c8cf28d1bcbcae44495b79f4e7d43868b95ff54558a36323325022fdbb44d2926a86a8990870af0a51f0246574b0cc320972d860e059d98484e8c8aaacde1eac1fd7d8c7cff32a15657bb663ca15b569b43c1a65866f0299fc58789466b036ed0503310e5e56a5490d2cf798930301c8f0b9c11708ce3d156fa16ef1a51739f0c0b7e13c16dfc428569f486b706d8e83105940c8170815946931d8258e14a6938cd7954c6c0509b7d3e3736b7f3e416a3f43fc664d977061c076c365a31861eed46b229cf55f01ef291747e5206bf13244742e66d8c924d0629c9e918b8ff22b149611a9f51a486c3b971ff62bfe0b3592daa8c52d4233b3708ef79a6961ede6345c1420c7d2b155b8746a4c6e8d139781c8acdb8d65008c075f7e592b28fd47b11e2eada0f719ca2379501ba28347943d0d80aff5bcbf3b6bf6cf74363c255ce03f57b5aac090642389b2080b273246db5434bca8d51199d0f932092c97a0f54a8c23907e9f2b51db9d299f3e26410dfb488fb45c0ca235614a57d2b4194829259bd3471d438efc98a14ecad3e8540729650e23ad2d932614917c0949dd55e59ce82703899f7756cc3dcd38783a2a024199960cb1cef4925074e339ea0280d8381b02134ed29c1f57ba06036cb25a688af5bebff927862a948d8977ad84b30a59620c222ca43dae7877e4ec8fa1a8f0023c23dd258c0547180abd4ee31a2be35e8fcdeeed800ac4e1f09a911b6899ed4aa07b653c4ae82786faf2e13da51e201309166237a40b4fcae22e57b8ca0306051897b0a51ecae62d8fb07ce14655ae09c87ebc076dfd3ccaa8137a54c482a8368402a61106689876261286afdda2b0c716a88a238e6c0edd075272d543300bbaf5416b4a9c8a503c33c4dde01c66468726a6d0ae20421bd35c334faefe47019f85d9d2a773549960da2d7dcfede59a9c9626f7a4a886f7fb98d9695c445b6d499e4d68f4c34dd6e99d1024275514411bae86b4681e1b90e02d1f04ea8eed9ae670e3e18445a511016505916f73f768461a3554bf98e383bf08d89eb6c6128385dc751d262428333459ca7e97e80fe989b20744d219f61a06a13a06216e4b9a21761fd72022126d0cc359714af861b004ce2e0d5a2a0d1924425c11f9834f1221081f418d39b7acce4da84cf00eb40c78db62f6f53e13231e03832c984d11182e5dc1c878818ec39626ba6dc66c80421726f3d6596c912e11a86d8adf6f816647802a0f819f765cf80de098f5731098a435d1e269d63e70744091f4eb27927029f48abfb3d9b79e7ede87e61e4f228bd5ce405561d0678a68d03a210387dbbaa6769ee1f9f4e846eaeb165ed59e3ee248afd3143c6c1abc837c3c77fa3bd4921837824c3197b1c7b33e57889ad317811961b2ec7b574066cdd9126cf5d6c9f3127b6d9b79ad2f32415b7eaeb500510c6fd84fef3823c43403b9580b12858af1515e59521c5917ca8110c73b828e25238ce53837179441d65355c8264c5f5b9b0f99524eaba8465d5c67529f82bb31152cd2c84444b2c4bd449280cce479fad3fbca28af42baf7f7521579e9c4ed235e8b3458a0282e46ed5fa816d580b62926563868cdfe8145d1013f5b497ce3d4d9ff602b7102c5083f05b2d8845fdd70bb70e38bed6abbb4c698d11f96b8b413175820bce871629fdefc3f18380af0b97ce15d973b74db6a0ba7229099387bf616741b12a4d210fb1da04463f96ca92aa033306e9eedf8788e08316c3806120242c507caf6cb33d405b77aeb9a4cbbce23234f50a9b8ff5793953b2146e0e380a54254c53bca7a36e126b5d0f2acaeb1b27e1916178ba1172384892355ad8efcca5007275d64d9755164674147a261d46af2fb8e9c7f02c508a42f848c2de70f6a68b108d988c53c8be00f36f051f441c3e36b0c2f7b07e397c12011f90c13976f563e6d23288f1b3c6f4679cf95171d1ec654028093a074240378e40afff6aefb0290ba08b747ceb5b8b6876e1e2270ba05c145572966b7b7035594c04dfe7809dd866c827e66e94aab1b98a3669b9069fe63f93d2b76c0b0966490136b93789281800446ccb1e52c8662545af7fd92cc20ddffda7f0eaa1401c2d2e39acfadc50494f9b7979b001c97b3c6b2dd12520736b8734f24
9696c8baa08575df0d6163aec9873a42a309f1fc1c9e74f99449ec804914bd6818a5b0d42774876a282351f5502e4393f26957d58755091e95cde9556a0b966a8e957e6ad74c81a816c5f4b5a232bc36761bb537ca304dd586730cad101ac2bafb67ca235819492818c8e1a8add3c49c1a2aca63405704dabcbd39d13992b5fd01711dc62c6d5cc69106c41fe130943c2a8f48b2ae65987099470655958026486e3ec4bf0cebfd9c05a16cf212af4d5838ca8d7e7da89ef85c488358dea661007ca6e9d6bb53bf698495d6394a1fc091942aae0569158727975f3b6941c361cea55ccebb8fa986f5b427a6c77f3c23647db8fb88601dcf1a9df047a2ee2a3b0c0725768130ce25cc45fa3dc13c53d42ab7c271181341b2d4962556d85187624306154d54fb23699af9052484e407009ea042807f058357804ed8caa90e62969feab7626832dff727314fe3d4e2322dff86348992a45ec35c6a679022682ca42038e6bd53543335dc059c09249318e095dbfc006d729fe3ebc20d9ac5c472fb1c7bc146ce9b092a13fefb8f705ac30dbd8233230c23a9aa770dc96c1573455f91379545a8ff5b1af0722c4bb3726b8ecf68040de72765fdde9afd33090555aac71b504204eeab7e7f892de5def96190540695ded4f7864d40adb0eca922a159fdc17ce4f25212169311b9394850497a42c000c137cc2b0db29e8f6e1e15eb2652a27f565b5c436532a0f2b51425fe627d595db22919b960e291ef0bbc3ed1f391ad82e75f621cd796f22be093054b7ca0f0249aafd87ae354728d4194a8763f6e7e9854c595dc4b1fc66c67856e6812f7ebac86e67a56f2232fca7c0e9d836ab1d102810643026c1cfa04d6c5c74ae17a6965a8435e8a2d8a2d3210fa1fc8eea17f866e51166ff226d2ddbdc9c04bd7f9726ecc2e8875d0ea311ecc95641a989554ab790f39fd1acf9b7dc82044bf3d4e9ad72ac547b650931cda7cae046016d4e26b2f653b7c3c7a1d67ec855ca2e8df22b593ecaf576e6f6a799c1a5f163c1c07f7b9a51b2324d5321479aae8da3fa5e87f9e296be64896fe9b28344aee66d8379543913d05cd5188e85089d7fb8b914d85c31a9352d4df54deb75164686742ef2e1c16df4e40cf420e5c4ee78d241eaeca2dc48be2c3fa652b150bcf2c6622859ef79ed05c00ffaeaa18a724dc40430e9a2e35ec4ae882cf6876d1321314262dc9d649501cfe9c43d3f4a9696ef1095020f9062f7e160f5b56f44c76631b8abfd313ec4f4d0661109fc46d378f6c5a15917a3d767edee15b2492470607f8815f68299b8cbc75fc8ae7f8856e9a1b559bea70e0d31c415094a6c851e0bea8133147e2ad676f49b0654b5d1d75fefe4828459d8024533b040e6132d7dfab964485e95294d05e57e26b2e883ad3789c74e9391fe5afb65a3203937ff12f1e1bd9f9d4e6e2b0f31642f6cfc3d2d8f07e9b59f45c0ee84c0405116eb766dad89ad7c1c0462569c09e210316d75c575a3bc14b6a7d545a087c0b46ce4f073832c619c0037bbaed864d34b399bd86ab1b5f8287bea865a6552e99e9f451ed70af122389fe7d4e8ff8d78a16345b691db49614b52e812842b75face39630fb2c983509be5aa22a867d29b0b274918ca4b2fd640bab3487397dccb4608dd08f4a9dc3192e8a84eda38c68eb497496e12cab7879f9462890ed8cf5795bf2a4eeb1d3cb8407afd795f7d17b1320a66e9133e491cdbb86374abfcfdd1909c51216112c8a1fb412b61470aa8cedfac460dfe12e718f1add7e8fad780353e213e5cd673cf0b5e91dbc90d53462df60b6ef37333d8133fe0951e101f4ae4285515cb9db2d3859b0f663b72130cdb14bd5c140766907b4c30ae0fa5153977a6eda730a4f6cd17748618872f00ef906a95fb0927b63d747d8642f76070f5adc8a1120ac1a763b32778158f256487220ccc7964f5cea4509425c03b9e8731b6ab644a42f6f3598e8859b3c33f2107561ac879ba0dcd63933ed2eb669de00d569fdbb28f11b1370beb99f4d7a499fb86592def9f994263dc1a36195037d9a1949e8aa9905e2ddbf14d9adfdb780b645c6805133ca93c69024245674c2896d23beb0c865c4ae571c43c865d0746bfa04eb96192e9da10edd24411aa1a66119be74a2ce84ce3b6bc21a12bbd63a0402532c5f7b37d1b731551388d8a0e08b474fa3f53fa3a671cc6e6a62a0d5706c5535adb58730361654f3e3132b60d145f94d0e138c3b33f3dee3534300edc9189bb1f02a60c640e84c3dd10cc80d8031b4c57112e6167daaba1080a2e50f9c1c66cee04921006241f788e6fd03fc6e92a8ce2a224258dfb3b40be86a9bcc0249008e5b01e2f3cb02168e110b06935aa8ba010346e4fbb5b5e03af9773b3448bb2ea10c57ee7a6bedb6b53d6323345b2ddfe9a20f6e4404e963b0e300b74dd26467c5978df4549ffd1286c0aebb5e2475bbe027330ed27b014a47516b4fe52d71711002455fd6e115b1b073fae
2e73ece52f177d5c45ebfbd4db4a641a57a33b9df236b9409048186361339cd35b92fccec2662d1b9c48335cd4fab51848231fa675d6ded31b34042c859fccd3148d8867e9e0fb8beac60519e33e2cd13562009cd68f56d6e73b0a8d9ee95a9b1ce31bd3859fa0955451e217da80dfdd0f8b156450f5949ba15955ab97b689bc1d4c841e1b5e00fbab441645b2d9d2dff15472fa5de4c6494a041e61c18530b781b069f34973d76b8b293dfb38f54d18814c8588021427a85818c2142b59842cea4739586722fc34d9ba37ef909a6507619047c9a65b8ca53e18fa54a66f92b59c109015c159853620d26be3042ac61934b98e5392596c60a8a9bdce9414d4f661f611338bd08724f83041f6595e6e486120cb89dd5a71ff674681796e5ed922a9d321791d97b0adb9419f5cff546cab4450d4ab745812c86e79a7423f15f3648acf9198eaa8878f89509050d30cf0041a179cd8655d39148779e1bbc5d0413f17823d43533755c23b7f7f6a2da47ead5c03aad7678e68d28115284f74a54271bad4d2a3e78511792ce586620ae17a25984118e11ed0e293f867efa55f6d2317d51df77c421aa72939f47785b6e12ab9838c308d27ac20cbc04be0898c70c79845d6d2ad288d270738acf2da92ebb559f1a58266974132a3d8ef71e03b8dc11c4c63dddc243aa4c4416248a4e7745bb9cb23180c7df698d24b9c86c49e4962ce8eccb552e65117850b36153d3baa905c075884125b13ca1301cf7cc0606c7640879164a735212dce8a106a6da8384ec4d9fd18610f408e9e57a0346d435a43bf8c1962acb29401ef1c51350736f4e8961bce8fb38853c8384f32cfe62afbe324dce266cedb4a5d5e9e02b13bdd591205449fc72d1570bfb32ecd6cd71e75fd774fd9cce14d404e8c1336efc572a1329dd32901472da2d7165686ed918571c482ed3bd82257fabd48c1be9f1255e44cb2717626f01f1a273e741cf82e39eb77d108ca3fd6067996a77125b14681661769a27003b4f331aaa5a5a95941b6bd5330002e73937e218008f67fd1b84beaaed73d3caed27f136589c5312bc5e09b4642103e5f7b8f7d9191ea51c757da3198dea3b0c5ec6e695577fa598942bdc67fd00574499e6d103d45e0c6489e1a294aa9de0c87507816b33cc4660f1f4dde6b0fd0d494dd2f5e575f760156d2bdc24e9ef67b43e28bc51a9488a2148bac30c1e0f2fad0909b5913b58b95c89c2bd3a49767fdbe3347cb19b328c4635c334fa438c467e212dcb2faab771e4a1e16c3405762411a4de0371c169bdd0d4573d1efd9f4f4b3d4849580f2630a4d83f3e6b91ef0c592ddb6e42813869e43c141cd35e638a1b8564de494e0e8dbeb8144a966580f606e78972181a1c12d151df8fde93ab51f3204f56449de6be905f174d366fdc71d989dd08361c60a2dcba705fb8389d2a9d5881d12481f7a4a35ce18128a9226e2bae84607eb20ec9a0ac10ff84c0f0c14eccd57dd452b1582a8d5ff4a566ad9b788d7c20a36dfb112cb7c6cf5a193d0998bd184bd555e2ead734a447648c394d8bf8b5010f2f4fd5403542337cd40bcff0f5a39f8cf1459a4d44e5057ae5c76902a5c651203a09a3898ff46716aadefabe742a1b16cc789f6ba8ca312e4ff04f0a1a0dba3756c8c00229ec01dcd689d67983f7889e70cc38985d5d0b05e6eb74ad949202610c824be858a7afc40444991998d14bd8fe927e1dd959fb3bb2a1c16ca7044087af04c01992290408d81f18b61bdac9e636251aa91acf51077353d928c732a615c33d73ea3e49f7dadb4ba25537fe301e74201c2a455b0b8094a59f6cd15e1dd7671c9dba433d9bb110764221bb295ef344c18ed7e52f015d1ed254594accc1d981e29fe6df2008bb5e0d2c39cd13e9edaaaa5f86c1f4f2a9027492580f1a58ec6827ed451afa63f576b419e4048b0a9a29343c80fce85d1ee8ff615ee82b1771e1e69def6ed870beccdbd36e0b800555522cb752c02e556b6ca1a093580f9f7055a627f660ba9c725ecbd575f9935a2eb7727691c744f08781486ef11b4b290508cae811719c07a4dc03d82265a700b0018be902ae082c77a12264e323ae352da9e7daf662126e5227c51ab180a228c27ef0c270cd12982bd6eccaeabd4f9a5a77c2f1ff7a16ebe1aabecd42f871c72f537b9929d1d9b118047f0826bafb88cd6d9f8649649d132158e933476ea47758881d50e2a0d0460cb6e0d82268859b62458de2fde40223352c629c546fd3b47603df2be08b4b5f60247fa866463440d804d997a3dc196d2234ef13c49f591a3756c88651227fd6d4ddcafefc2220a3ee3a64df60683caf972475c8a2d2222e5db8696b6c91d61454594406c4f51806e476011edd00e4a964a9fa58e7d7c098ea4145d61d6f37b3c2e24689a0f6ab28ef947efcad209dc0a50f5dd9f897a0399fb102c978a5af76e166a281730868330214fcf817eeebd00cace6c3679182b7c5e24b01df3562448840ac9d
ac4435903f8483937e9d08b4dfe78fcff5b1df250fa40fa98c2eeb434aeb10d9bc09942f79234ee2c8dacf36e85a1786758bb52f547d4f0bac0fc34b8b27572f4cddea3576a443a3c5cce755a5e18732b69ebd14a5df1b04d72ad5099f0b42c8f6708030940e75e1d03cdfd8e9874b1a4bf29f963a3c0902b1045cd9b8b281e8b5b9354c270f1c5c739b6c6128f806c8e1323a687ac272284a1126a2e5d4eaceab6993619514a5c9c88bec718a32d264268dca10239b58f4155ed12546b4dc23f3a90b3df2c97656d9f14f9d23e360a002e73c20043ca9c0cb630b8d351a27d454116c2350961238c41e72b98b6300c6fb475d267d92ea70677610189202e8a6802b6048e931616a0086ac89b8090992af4390e2f4f0e8bc964028acfc34051018f7ce79e1afa4dfefb0875103ac6512c8dd2a367b0d2a024343b2fc01f02f9f13689df9a7a5f4503bb2fc5f5aa19a9002d9038838a8a94e9863f419e6e6b357bf4794e4914acd207ca09b6863194d412138add0b963f582d930d345e7f7edd6fb246fb117897e498c68bad335a0f475ff30ae02699e269279743e5c4e19be41ba9ec0c249a5e71f95d7d90b78fd53326df1402bc7c49be220ff099fbf6e773989d71aa85d813585d962661a36dc5099d6dd442685d2a958f581fa10b8d578beecb8449e4256766eb36ff71ebc4e9c03e5aabdad3639c524b798680a0df7760cdcd2af5b7763e20dbd3bd690a8164ee52dde7e86505c84150892f77bbf2568ef207e5551ecb65f0952351e25b6721b670373e6c4f1f4467f3cba3aba1e3ad123ae029ba9218782a0990fbfbc82d68fc89544f58a5b1804146e3623a823a591f089815671ee4f54541a579adb307d6ca8a2a2e88a663ffd5a74a5c08cc101604fcb6e3250aab42df8472f01cff67f5a3671f10b51fb8caeec14e46cbdf5eaa554429b61e7cf57c159126e8c8ea8a1f3c5a9842489934fe1078debddf9c2152bccd70a966878f1a04c7d33d8b02246249350c16d8640e10f222954968afb94ee9a3ec7c6678efa741e359abd57311e1bb4dbf4c4a4f9480aa25efb90bb14309c1c3d4037813bfbdb51f9fabe9f408480350268bfc34e22f4a7009620a83186746bfd99260509b2f1315113c853a2a545c8c03fd3c7ea18eea0fd6119c2476356ebba0ee4c462dcb70c3af3cf279f221e9901a3d605f313f4fa1cd1d7a19a803414b9ff31b7918f4e5c85886c5790d2e1f83cb78d5cf94682b3baa6f94422e0016a7ffb327094b73e8ff20ff6075987b34c510fe139e81456e110809a4a048aeb0508eb3af51c7533785fc33f166c681aaa85d9fe14c0d5b8333e450c842b7a8801338cbda091300dd20c4092a8f36a320e7bd84665ea44a325b5817acf213888d7e4e9f0b486a94eb37ee75166570f4be11dcb25647253671be33daa52e0d534c4adade0929d0d9fe6c12337662a269bbe1d1cc30b5e6105e0c5f3d05509106e40bf024235586194a91f7a47404ab281110d748ab524b5747a947a4d448424036d4672833c285ca3cae9e9a9d9455227206744e70a8bb746724167e02486a41c9c4702bc07114a7e080879666c110449e5f872a93bfa3494ad89395c0a6c53cc175e0f1dabaee65ac2aed7e5388d812410384935bb069bd2d162f695d061830a8c040db563644b859e189491ddc62a997b32c2e54300d531552e68d7bf9461f1f1b098fb29bb42c03e69d122d9413091be04d2930efb625520dfeede4cc93a64929221fc8dba323e7b661b95bc960da50e102ccffdd31d451a11b411435eb07499382248633dabae2192b24961235778dd049e8439ad848f828e6b5b2fc861d0beeda6396af7916d46908d8890a6a36da71243c7fad17a051f266dbe770ae44f457bd5b310eeadf501566d89c217e600d7d3aae9da196d23f234a96f594029d416e45b78370accd7bd165ba2e6b186f3a8285334b506124099b44f6176c9e89161ca6a8ebd5524fe7e939c3b1ed418ef658968d72b99c4501b4a2d1853c081675aebd4d27d94248906a8e4d4f9ebf0ea051e55fedd270e76ca6c805c3a6055ee55cd4d1571fb4101d0f20af820c6277953717c8bb83e54794b3b26e9080f4598cce26687a58e8756e2299610c05b707108cd5e9fdc88a5bf02689b4272ce62e90f15bd48de613812712630e342b18f984b46ce08be96c2995c26067501d493b00056994d99e0f7ae18a3d09a538c57343796e363bb9a8dcc58d5919783b2c290a6005eb0a2f0c6f023af5f2113111d3f3f73fca8d28d9f97cd0585c24f501b17a660aabe68cd546f09f1e841942cbfc5c810226b6202ea7f3d50fc122bf082918cc573e11cda1eaa1e041d016b6741a77503c28ae377b5aceef6c31b1035930e8cf7d60382028942024943506153977f75b84a6fa113b4cc408120e13097eaff016cd36aa0c1aa443b6dfbc8b92e642b560f131f74d942050361f2c313f605aca291e32eb18b49d77366ce2433d55a77eb22c
9b785fd52477d8ddaf6560ddc9ab95885032514865237682f7a45444c526bb4f0b65c8612d21d4925d9109fbf26f9fdca4e1a81daf630999bfed9fe3286c0dcf01faf78fddef7a9cbb4f895c8692d254b2bac320f819447ec4ec145ea1d4ab19e8f9af591c35cd6b21496ee488c3720bfbc42a7a813e5be0ba2a14e180372493094203a6994abea0f670639a1158ab69394d9aaadce4a9046eb57fac67be8bcef1947509ef20610ab458cc757525323ec13275fb94c09f2893e70dcd6119a9c25e2427d59536a062f4b36cda4d110e06b1d992a150754b37256c63a9e38d8e67cc5d0425b27b265f2fd1a21d06c382b4c13486f7254d92fff602610db56a678fbee62a7464ebc65079cc1695cdc82cc56cbb0e71be0eb48dd154890140cc01336c7267098a554dd7f89386628d3246900f4af865c2193f44dba82004a87b17ffb58dd0d656d1c1fd1d69ea25e2c4dcb90eeeb4de1293ad25a1962d0cebeb352a7205b8781222d7dc7c10c8922298d80f764de12f17c0cd536869ae01367c3fec0239262bc9a98c5b4a2a2a266add7b4698696da469326abc519db0aaa28d78fac71fd94a2181f3f50e23425b44ef74d0354efee583d36506ceb65fe7c7c1ebcc2ec9aea458a369d02c006046faa22b85e42984b3fc363d8f22378b4111615273e68748ebfc08d3ff9bffc911cac72587ac500cc6317f5f98d7a2ca4ef66e2a7be8926016c3c03eb943393e44b2f6cacb05ac559b2c1b75fd4084c56ed79ae4e75678ed12cbf62ca32611df9a029bcac9956bdc8d4854cca1a73be2af4926eb131054fbe82cf187938039ab105341a72243f03862d37909944ecd7a87d72b9438fced3db66d0ddbf7e602897a64a83a98cfe59662cbcf6aced813d181aa8cc674a904e1872784ef75204fd43110c95e1b9689a580fc34c6e406d60640926431a1394c314bebb9efd0dfe936f65313b8358a1c9fdcc8fc66b5fc3faa8ae07ba12215015effbee97be2a2296936b83f64e33133dc0fbb6e488d477cad39c74503dbf378fdc341d47618f34efc044042cef49841d238540cf34d76d74358a098cd9d5835a8e1e4c443b3c005b01da08b00434a87ae9354b14793afe82f0de94f6e15a3e134b57015be529c7dd377c1af9ca85560735f7f32f08cccb424d80ff5c5e832d458589e3402c023ca338c8e982987679d57fe2962f5c5e43c7b5b2a92dcc95fcf3f87fd68aa2b23a471493305e7b3e759661895ae91eca0a4d40f003a5a36a1e8f24e8ced48a3ee52ac113350c09c30432af361f630faf1e4b8775b0b05efe386944bff1ef3ce8a167670d3e5ca8526e543e42647f6e202192efaefc0d2816d350a0382c08ec6cdda8741a74e36cf7bb5406230806c56a74d247194634a6a6651f16caf6ec120406c6e6d1c07031fd86370e9506c14fff7189d3c26991d53b6faea50dd5dab160f1040dc38bcd9baed85a8d342a37b8e29ae3aa9cd1152b1923c2dd496399d853bdead26eb74051a613dec60eef54e1a6445103cf01fc248d3d926593aa2769e5972252c7deb718c74ee1ca737609892ea9171b774e605a8c34aeace53b6d2f0448a40e11e5a211aaef5c7403178253b766f9988bfc3daadbfc6fbab19ee0d1bb6b80573e042b8f33ea79434466f327e20effda9fc38146060fa8e78402d4fbaadceea47166d1b329de5e05dc35677d3792790952dc7710838f6549a9859cfa415558066ba72ce3f12743318268f2da84910e2f1ae50d2da1ee812e7fa75fb1bdcd42a48792c8a3c92cb6e822edbdb444936fb47d9a415e9ba1d9d8d8df688f2d888f567b35baaeb027876818c992bffe8bbc6c2d99af688b0d0eb0605afefd087c71f0fa242a9fd5dd4db66c9df8a8920f962495ad4bab7663dee34c425bd14a725a24ffae78f5db6b68f37aa2386f7ae7f5eadd8bd0e075bd299c098bb7174faf695e8de7259d7c3dc102cbfe16cc48aff7791d7d9d57507d7a41e145ac575c5eacf7ea4f913e90045ec747d8aa373cf65aec6b6f5278b9ffbe8edc92eb939b7cf561d90963f0d624ffb62a488b96d7bc5eae78f1df2b69af4d2aa954495c5e7bf592ecc538af1cbdf062beae7fa4f33d7aaf3cbcb8ead5c70be9f73a36c98020f2781dc183573da2dd50303daf2f7bcd2f92ff6742244f9d0ae9d6fa3a9a6a23b26bf575dc78b073fb7a608bcf1f6873f00b3f851d4f374ac9d8bf6672b6ce2bd6bc2e7921b85746af6778f5ea65cb8bf9bd327a61caab33afefcdebe8b839939e7a1df76307e3e14d5d61e554bc8065466504987725047b2e1c156a987c23e55ae368a1024330e9d9d766c537daf31765151b8cfc4984f13a96a840ddf82ce30142f519537f36bb2a17525b918b46c18940305ff477345908593d3057b508654b9ece6648548a7b11b1687b93c5abd94efbcc19191373d443394b93013091c81ee64f7d221509c5f878feea45cb53892047d17a150b2af27fb3a06f1a1e4eda0c533a125358e649
7952b9e6497b8ee349298b48ec26aa43a89e0255e91c7990452a8a7879acc1feec4767e69858db89056c31aafb55f017c3bdd56e9279ad40c48dc941a0148dc8f5e9e2e28526ef26e94c0ca7104374e9ad1ed6f12148d1b1013d05e1904516db5838d3bfe711291cddf81a1c29542407e1edf063f5b88968b09c26310e8ba307a4fda71eed18ab03d27d1aff34b59ba0759ecc4741d36fea56183763a70f889d63f6e0aa678abf5b0fcc7fc304d80e0131f2f3d833c70093f511db4bd437f75005d0dbcacfa4d2324de76bbc08150c7d3e7d79154e6aaf7a254c87cfd2596c367c978e3a728dc55700c5aa151f381691f44f6ddb262eb94597b074794d23d748e8c4ca2106b75d83260df287267b8c39a78d20afaa2859001c1aff8c3a3a582b913229509e9e9662e568c4ce8ce9b4334c92d72f1561cc884f18614877c32920ca00aeef57e1cb2ecd63f79367b2c31f1cf628a966266d88754ca7d1abd1ddbdac7792d1b3034753a4a764a6a8dc38a126162d0be8dea54e857787a560c46d618ff63a379e726552d7b5900854c941e3c5623e6ecaa3bae07a78dadd99bb2a408372f88b500038328ce8703c1c074d01a941d087d6c641a9d83c0c91d9e775c9ce30f5f3385807d492a7d67c5628f7072eab3d21e4cfd7589b5277cb3c3711d44c3891ce597fd5a9777e69f8e4bc278be201f8590f965aed19abdc9dab2338d5b3a825403e6eb9ccc4a9ff16138fa7b328f430f15b5a47d84a8820f37bf227b59776481ede844e8e39135600a33c2555afc70fc89bbcef8422a7e07bd875b8e9438e618900898a019ab0c084d09f7046361d24545ebe6b795911519a1148388af2663c8c0fef86a786d8085f0b81336f9dabbaa8e336c979d521e2980bd594c7e349b00b5a1cac2b91b6e662213ca9079f183a3cb89ec6a1857c0320b241238d7c18e6b760b52819a8d092f96b5193983040bdbce799ff40f36e3dd8c4edc4012482fc4efaa44b4e10f3eb423e77b4f102ae59c59b0d3760f0140a0730333333333333333333d32c7bbbc3fe17feee44b65c801caa26bc2033333373aaa73303afd8afdf5a6b3bbfe01dbc3394127b0db90dc10d7cba8bad1a5d2089fbb6ab6dc5189773813c1ecd7b7afc6b3d96b640c8eb2496c2c72b997e2d904435e5e7949dbd9fcf02313baa37fff8c73137850542ef67d20b3b2edd952b10af62a6d9fd690572e5cc122fcd39eefe55207725f91f9456a54f1a2a1063bf2b58de13aff4990229e78f9bc3f4a80f6d9402593a65ebd8f13e38cf4781309a47e933e37a43fc8002c155653a5559e7b8793c8110391b7941baf2cc740249ebcbe3ba46f8d8d804a2682cbfd8c9670239f87f9fe5c80e79d212c8975fb72eada2046299e6b2198ba665d64802f9ae4772fb56116bb7156a20813c5dd9724e9bbed22d116a1c819c437bf730e6a189cb8f20d43002693d87de17e93eeb941a452077fd387d2b6f22f896631ed63b1f02b1ba3ffe47353dcc03490824cdb1f92ecf0581182a57fa98f361fd0f0602295aaca2078df90139a36ba7638f8278f73e2065ec4b939b3646adaa07041f8f23961ff180183dcc94a1f4db0131c798728e621d1d10548327b9501e634339207be8144a4b2ce3a58503b26c14d7faab0bd9c3dc806ce723ad1e688f1a3620555e0f795d9e542f59a306848fd12f6b31f578ac6ad08038d7e1ab2f645ef741b320dbdb8885f655efff210b72d8d85d88546241feeee1da46fb8a317360411ea7f614d69dc7f942f30ab2d75d4b45c6d5b0cf15a46e599ff7e453d5d15690378a77764c2b2b88b3bfe1526c69861ebe0a72d8559bdfdce3b0965505d1471f2e594c7277e3231584d759bb1e8f9229112a4859d4b37b4c218d53904a7b26eac7937a6898821c63c5581b3cfe488568948218aefc5c368c597a9b06298857c94731e5d2b61e6e688c82289d3ce5e0ef83acd8838628c8a398eeb0d4d60d2ed20805e97326531ff5f03dbc070d5010a353ee3cd2f08c67368d4f10ceed7a94b345258f320d4f10d3adf457eeef4e3d9d20f5eceac68593062748ffa1f1e381846bfbb809628f7fd831597ab49ac8d2af8768970952bcb8e5595fc74f4f175b343051a6ee6ce7e31265bc6dabb4d4556c6be7dcf6389a8fb2a7695882f49ac14726a6c7ef1d1a9520cbe745a64f7950829cf28fe26d9bc4e6cc684c82b0a98779c3be5e3d5f3424418e7a29b652cab1311909b2c764215ab687449f3ff6c155770a08683c82305e9ee9b734f4dfef08f258ee59ec10f16f3f1a41cad1d49a267b30821833b747e279f3d3348b206565dd983ee426ad2882189b730f23e5335e9a13418895a8f0314404b172fe518ad9e8107d494dd69c479f7b661b557302340c411ee7ed74bc92b14e69214863791c1396e11ded13826879b859f7f120083ff451c8beeabd987a0882e4e371fde7e18687ae8120c798344cbee59b110b0d4090c75d31c86deffad8741a7f2
0b4dbae85595d960a711268f881b03e5829ffc165d56f68f481984286652bf977e99a061f8851aabf2f69d8fe386534f640ca213efc14b4a3a10752ca6ca676e8e481d83125d1d5a439cd7d34f0404c56e69a9bc79de33134eeb0e7cbd947375984a06107f27bea817ece090ded69d481a03f0c3eb81e590ed16906d0a003b147e2e7dad952e5a58c31f841630e44f3619b8fcec35d5ca12107626eebad54413566e94d75001a712066f37fd7d4a3500234e040487fe9141fa3a51ee60d64ebe4ba6d496a366e2054bfa5e99dbac90ab581783fd8bf5b2b8d4e3e1bd67a17954ffdb5f951dd9cf966fe0334d64052cfd8bc8d1935d04843e50083061ac8234d1fe6c92bbdfc511a041a6728aef5052bbd8d86194811feab65ad29033125ab14f5b1460642767f5a28bd688c818d91d48cfab0a9b3d37cf7dbb3e7d21003a1c62e8a25fdfca3580268848138da9a6c3c5f87580606f220bdc3c36f1ef5a844e30b044fc91c1ed5213e8fa2e105f238a67d8feb525ea142a30ba48fd1de3c3a445e3f5c205f1a8bd64e493577ce5440630be46aef7cd7101baae98b010d2d902dbb564cf7a3b422791608627da2e279c6023977ca1433fbb87b30e71508af3dea6b1d0b93e27b81823e2b90346590eaa1898c7eca5e40a30a64cd638ffdcc95fc3dc7a00239db4b584cb64e8168d7ff63f360953673291053bcd4b710f7f1fb2c17d08802d1c771b37d347930a00105e2597476fa8f213f1fba16d07802315910ed1e5bd6e8bacac2a0e10452a8efb94f6a8e976c02d9b242f438f69536060d26107dd803bd731fbd04628fc6cae569bb1f97128851655f6ee6db473fae53402309a4e821bfd1b17bf369132081987632d8adec6d4c513d018d23102f7bb81ef558534afa19238c2eb65040c30864b71ffd55d8a0a6f2e9b01d348a407499ee8a9363042270a2a0418487e3766b266a75e7c8713bc810230060a03104a246f7bff8e3d4c12e4743088669cada96b2ba85081a41d023ca42bbc3b3c6a3b66253ac8b5ba56ae1d52068008154afb9e37a6bce686a061a3f2068b6a7fca6dafe838b860f085f3ecabdb63ebcedd0e801693b672caf289947f9ff828655b97743788ff72cd0d8c1d1311f5966ee573d7e61cbda72347440c8b8def1a278a4402307e488fda8ba1b7f3cee130744bb4e1f7a627e14b37503820f36a776c7d8d947130d1b70dfe1251a6ba3d6d743098b9cadae2c950b346a403e8f17ae745d54b73b30d0a00141572745bedf638fe3071c0990616ac62c486de1a5c382845298210b42df8ad66c8e8b3c4b2c889e63cca5f76baf6bcf800521f32873eeff0f1eef7e05796813ae797efca31c72853a7da969c08c56905a3d78a81efc335841568ffb1e9ae7337d9bb10a72eb571e778e0566a8825819ad2d6f8c0b1f196f82aa1c39749f3023158433cba6c9f20f437f382a881b533434fda019a720e6e023cf3fd48a96c29b610a72282fd14f9adb828fac19a520a5d0d27bf7a94d3c268319a4204a6e56e906b51eccc522316314e4513bbbba73cb192f470e4fcc10453ea88b1e328e0846185ed485827093e9e3ba707269b6c10c5090d273b6680ecb91e9a38bad8bc18c4f90d33cdc3e347c0f6ebbc10c4f105aab2ae46f7eec98c9667482982aed47cb87496f3b9c2045d7114ff5a3b8ce238560c626c8ef6eab321e2bdbbc5dccd004d13fcbcc044973f8a185f5aacf1f830952d6700f3a9df6c30d5e82ec79dcfad66df33c34c312a4fd610fb6afaa4a102c3ec746c7fd876acea004a1c264875a0fd3b9e71e306312ca9bbadc898c47866457566ff2e18f742273ba982109a2545d2cbb5cc13fe6448254fadb3dd111769b870471c63be35554c3db7f0431d648f448654790edc7a31e7f5ca6bce3a311e4fe909ed96a33821ce5433256cc8b2056a8a097f663cb7d5411840d3dbccbe14490aecb479772caa7a58a08e2a4c7a8f0710cef340f41f0d0a3b0da9fc7717f1882f8f25b2e1e7f60f94210e463ccca342321c81d3da351411b04d9fa2cc694b3208816e671a3ad57e52b10e43c1ef930b65dca1aa70204a9e3d975ecf156beff40f2a1b7f64d4833fc40cab1362d33a5b49a9d3e90f220c3060d5b492c6bca4030830fe494347e1c97310f538a99b107a2dcf720d47f94c447f10c3d903fe6514c5bd987273f74461e4892b7a1fc2653d6bd66e08164d2e39443e7dcf71dcdb883f252d6e5d1e5526933aaf94efbd7871dc86983d7271f6dd6b64d1f31a30ee44c7d5d9bc2faf0c7310008c40c3a9047d3a95356ad2cfe3855cc980349622eb57efc23982107528ebde5e39c72c2e40b63461c4852a7a23115f2eb553d33e0401ea9f7d094b17b3ef733de40ec3beb30df963794c70da4d4b360f2f9e94aac196d204626fff35ce1738e73061b881afb47d6fea1975bd70ccc580329e54e7b0d21ad2be20c3594cd480339f3e3be4ff2d2788606c2c60eb5153b8a86196720
a7b3fd70e17c3cb465861988d1d22b8f69c9c7adcb406a8b397cfc87ce20c36a6359f11a69a3a51f43a8c4add514983106f27850d2c33c9a67bffb19622086c91c7fc662f80221d0b1634618885953e231348fe4e76680a1f4977c28b3f1c08c2f10c76f5fe35df70221f330e551dbdd5d48dd542bed6aacdc7d98e1d775f3c69cc105e2e738aa75ed31554f5b2055ba982236fcfca6390733b440103b4bdac914f88fa1a38c1959205e956dca145fa173dec5968e14f8ef2063031708810e15ccc0025bb142459b28e9788709b00c15bc03cc7ba0eccb50c13f74985f41e715c82517adebc7ed6275ec20430caa27c3c9086387e60c2b90ef5ed593890fcc54ad02f1c2c58bae381563ee5081987236ba32c5986f3953205c0c9e87b5bf763ecc4b819c5cf23d956b51206a9a5f8fd933502096f547911ffab8b67b3c81b03a2e9ac7172790fdd267e7e18f2aaafa26907e34fa3efeb730817c1a2e78887509e4f199bf4bfcad04e2fa5ece19bb7d2cee25815815d37e5ad97d351909849eb5e93cf60f97c27904c26cda8c9b53f6ccc3a411883152a3295a5f7634452854dc4d2210354a5abe5afb564943206cbc60df7918c38f3b42209abf5685f2ea81faa82090f3cfbea3e61008e4bbd06b3eaf3e981f46c6f1a28b3174a43160c60f88f9e1ecc79df29d0f13ecf8145091b1238c3170860f083f1e6ef28e8a5f99355de40e32c498d183193c2096e465064ff7d83d7c07a48fa987bb51d9e394e60c1d90d6a27cb86dbd3e0e9919392094bfe5d0ae1b0d15e542ccc0013957a5e07b79bc01314fbb7da7df67d88024f3ab1d46db9d5103728a1f7bbcd93e8306c49c119aef5d9f05592de9f8a03e7eafc74316444f7e7f212e0f3aca1f0b920f2a7cf6b59aaa29bbd85a3206000b42886af759cc3cc24881be82f4bbb3393b85f45ed51578ca9f4da9fca3790f9c05e710b79faf9b0c37f2407c75971f9fae48cf748110e808e3061ec8192bcc85ce146b7503380e37ee400c6f5e1d9db237ec407a6f114faee3bfad951762b88e148ce12c600bdca803d1bab7b5fab73cfb78d081a4f963e71f65fef1d09a03c1360ff3c7e3110bff961cd0d43a8ff35237b374cbefa431a5d0e24050758d6183878fb407df8003795a3cb26cd3e60af171e30d44f9cd2dfb43931ea96e20e7471b4b33ab142cb636907b1cfcae87eb581b6347190de81b6c20aa861fca95e65bedf4c61a14ebe1bd0ff5f2fc316ea8c1b0724d730b35b93b7b6f198b5ef12b46fbd36037d040d254efa69daef47255000e40e0ffe4c831831b67208f879abdf23ed60716aee0b1001c8080172908030c177c114606fe1fc06606a27d86ceea622903317dde1cb422f7543b6420691ebacaf8a0d3a6d9c640a6b8aab8776b6b7bd8feb456a86e5c0ca4f3b0e9c4e2dda55b37c240ba4c4f3e1edc4c64fe71030ca4f67e8b973206effd715f20e88f328385a9aa05cb156e788154296e90b855db7876156e748118ab25ef9962d3e53c178823d3dfa75127b487d942b9fe639a966a37b4408ea125f7a5c3ae7dec4616b03c6e317f7b1cbf8105bda44bd25d5a367c503dcee8b270e30ac4b09f964973361f5e77c30ae41efd70e3e445f5c1843a6e54811456bdb37b2a966c26638c27c32b70830aa4b4f49196f1c24a7fdc9802396ef77e503eb4fe6c6f48c1f3d6cbcd1abf41dc880239ce6eaeb03f28f134df8002d93bc4b847cf669dec1b4f205c6cae8ffe9129debe7302395e7cd439658abe8f6d33dc6802a93f5ace15d6c304a2efda86ad559740c8cb9d1ff22f0f73484a207dea65e817ffbc2a2581a4d203891fed76ef8f6dc20d24102f88c7b021fcbbb3d3831b4720070fbafe5f29c55ceac7f0ffe235c0841b46207b7a78f2f3686df18a40c88a71467b3c28ad6811819cb6e29647db48f1f1b831045232d94efdf79174b26e08819852c5ac9765fd3afeb8110442c6ce3b96073f1e0f422520dc0002616c635854b6ff78e41f90e2ed8f07fb12ea61b71b3e20e6970e9f32e67b40b6ffe1ef858c1f64866ef000352deda8cbb4abe0fe83fdd9cce8d4ded80131fe78d53c7f7959fe8e704307ba67cff960f32607c4aa105121b3be7e7e714068b3caa13fee61fc51f4c60d48a216cb62f0ef710f7436c20d1b90c721937712cd91ae9e1a10c5c7a1f53ecab97d941cc20d1a102e8759e4fbdc8aa4b320e9efa6309d475ee99a3e820d599072d0dcfbd1471f22be58902e78f4e82cd60f6cc082f895bdf93faa45f6fb0aa2575fb296f008d5141a6cb882103f48d13bd58bde4c2b08e9e9c7e17c7c5fd124561052328ee447cb680bdd2ac8e1f61b3b8fe62bc4b2a10ac27dfaf8c7c39f8da4a92057aabdee306d217cb6810a729cafbac87934f7cec62948f7365969d7fa2e37364c41aef0b1b0eddc11efb214844b5b8d939b349655c008630c7f0d3029087b29cd5d570e1af3c02ed60b315c870e1d778110e8c8616314241
14bd6263fa88c2f4e4ec08628c87d93ee5a3141185f9c8d50903f2ac5f8f6368c0a8002b3b0a92af30cd90a2fef700f99a1094c104605c408e302397298208c2ff24860e3cbcd797c55d31f4f9082d658cedccfe0c34c2758d55ed1e8bae104313f84857f7e59546d13c40f5339a9c7ae0992871e8ae7714ae129a799e0ce2b2643b5233da2c332b6472afbcbd7c404f947ad9ea36bc536dff312e4b04ee15bb35c660a36146c58826c2f22216ffb793c288d60a312e4517a0f3e4347dd543f3e40051b942084e64197875d4aa1c52e6c4c82b09ea1b2343f15d021041b9220f6f83466cffe3ed231777c5b2488d1cfe236c6bccf94b2528344ea29377612f1b1d39ab9b9f53cfad6c5560abcf8a25960e311a47dfd1fb567854cf5700449ca7e64dd99f6429c8d20e5d894792bd3c4271f3eb0c108b267ded7ee7dc6f02e32a0a35c606311640bb7a2eb17df3f5a4590a2f58fdb2beffc4474051b8920c78c1fef8c57f6ed1eda3610414ca9e787d172986f5c8720bc6b2abdcf0c41aa3b8ff61e868f5d3f6c1482f43b9a5a513dc3e76c8310241ff458f2f051ae3cdc0641ce1e6692aaa88aa1231d250862ca1936bd2aedae37660c2f5280170852de9ecd3af27144e4c70e2f03025f380d6c0082b0637b239661175b6f02ac3f10b4530a3d789f0e96930a6cf881fcdac30c2e15a79b0eadc0021880810a4040c968c16bf58150db1fe6f9f2f03f8d08b0c107c226b9d7d23ef7c178f640d6a0691f2cfcfc3dc6861ec85e39259b3efa98f36a230f84fdf1f94a6ad4ab6a6ce081946cc3e957f45f6c8fbe03793ff3879f3fbe876876208c25ed88cd73126cd481e0939da95e953f0cee18fe45035a0936e84088cdcb23f3b8c770816740c71c4879c77b72b3fa47f3c88110b739fb2878d66afa8803295e69e68166af89dfe140480b95f7f34d4f63be8158a3797de203cb958336dc4078fb91bfc694b2b3b636a0ed5e2d2f3365922d61d5a2b2a9dcc3a35d670379d019c7bdefb6b10652f4e7a1584e8f77d364430dc4eb3c7e8f37379726d94803c93d567a1ec76a030d8451698f5e7741828d3390ff572ab3d6b55bb81460c30cc44e31f22d98d4a6efcb05cc11b05106a246bb92c8986228cd6c9081649e95f7b1e279b81e63205518dfd49e8c0c1b622059cc83bc98c6ef7de36120c5b0e983d1a8c140fe921193cd7e81b0215396e9c9fc619b5e20d6fc690e6b3988f6f8228cd305e24eaa5450154dc92a965eb0c105e2f8efa8f72879aa5333b740ce3c3d8efd0bd5c30e6a813c7ef7e07d3a666616881b7ae54b3d9615061b58206f8f2c683cabd91e557c05e246e7ba71f930f161aa60c30a494e654bbfa2651548a9a2dc458fbd20d8a002712e3eabf6775daf933905f238e7f1d18f432c58c4b8226347181908830c30249023870b769031861736b0210582261f4c54f620f9b26379801b60230a84f9b1db5d4e4da51fc70614482e9e4723719fc63c26030c5731d8780279a8993a7fcc3a223a81741d9f3f4ced8fe9c6025e88b180ac1564ade0c860a30904f71ec5fc41f89be5980d261043e6a98e79e0e1ef4d55d8580279984245f591de0f7d90d5c08612489552793eb3525c4cb19104a268997ccb74ca9f920d2414af7afc996cfe873fa80c1b4720879afbf0edc903397268c18611c839bddedfe738912a478e31d40a368a40cc83f471fba83c45b041047248b1cab36cf3f3967701192e20c39e6063087c7c597cd5956af86e647ad3cb5829ca820d219055a35ec5cb294ef84e43b0110472343dfb78335a3ee81ebb60030804d31ee6d3cd16333fdcc60fc8f903ad3c0a5bf7c1751f903c3e6476f3e8012998b8b9be468f2d65367840bc965fdff69178da9c1d10f63d5ab24b345cf0a103721e26cdbb3b8d3fdad9460ec8316abe9a732c6e5c36705057d46cd4adcaf7289d99885e28b5acdd803cd6fd518e6539c50aae0d1b103f57f2abcb1f366ac0c954d5889ca787bd65a4f4445cd4cae162830644afb78b0f6a91974436d49805e1b3c48f92a5f3ab546d0d59904df24393b7cd8962a4604717554b821ab120a51ebf7cfa1fa987fcc16aa8010bd2e51ce723e2aff10a42768c32fe95ade10a4a42cc53a644c45ee4432f6a5faad488a85a41d2e4e3ebdc66d77d790d56907f2b66c6ddec2a48e375c1b30fb3e5f6ad0af2c4489afc69fdc0c2a9206916f350f1f93d980715e4ee142225aa9f7a29354e41f4a8e615224353903b59ce9a7a7a9029a54b411e7ff8981faac7588a3548a15579c95d5c5d5b946b9ae52ecd2d056a8c82d079ad072e139f79dce5a08628881a3d5d6e8f29e38ffb5010e47330fdfc9a3fe7020ab27488df6896aaf109c2e51e5d6dcad7e389ca126a788254271bb23aa56b748278e51b3d19938f7dce1a9c205516b50c659e036a6c8258b92ceb53e8c6e5144d1036a64a3f15fb262cac9109b28d7c0f320f2f334c9052d84f96437dcc2e41fab0
993277bc66962044541e7b5efd289762b34ad4e399fb62c68c1284ce0ead8bad15418d4910938796f6518e99efdb4c12e456d5f31eecdd841a9120e5e1bc8f52bc4743823c0eb7a0d152cafdfec550e311f8d0c7e3f881e651c311e4e1e5a43dd278a937741a41ce3d1efbc6876a129d98508311e43cacdc9f347c49a8b108f2d8ddc77f67c33ed5a408724ce907bd49f28286950872cce3cec3fe9135104188bff7778d3dd639adc621c8b7997f74ebd16e79ae610862678cb9d62d6f59b66b1482649747ae977b3c6a1082fc692e7cce103eb8ba0741fce15d18f34c6b088224f7e9b747a31a81208f26acf7524e6df13728811a80208fc7034f1f73ab68f47f2045a5e8396fccd5f003d135fcb8c664cdc3af35fa40f661e8e581eba53c29d5e003692d6ae6610ad526ebe931d4d8839652d2b6c150430fa4d3d60af328e99c7b1ec8175a3aac7db09cdeec420d3c9063646a35b4d557ec8ce1a4851a7720dd677f0f36a5fc6e8b1d08de7b296f878e293d531dc81b2d899ae7ac061d8872797c29552ed8054b8d3910f3567cdc7bdbe3e07e0d3990ae2bf3f87cb4661d2e55a81107a2c71c239d2b343be70107a2450f32ab5c452fca2bd47803295ef0419cc798877a3b156ab881fca943ce2dd666f4f151a8d106d28e6b6c1e44a95fa8c10642f99474ddef5ff8516b200f2f43c5f0c3907ef50935d4405cb33cb458af4903317fa6ebd1d7d8a68f820652dae861fd503635ce40100dcd11d579e0f75a0d339053cef859a3767f1834652025f1618cdd75cfb1c93ad4200329f6fca7d7a46f51cb1a03f1ce65bc4ebe738faa1a62208864d6597526841a612059ce0793d04edef1d5000339c5ac717757430f3e58e30b64a99847ffdad6f002f9f2284da9018658360952fc0be91e8f5436452609f230585d88fd4482547df9a3989722bd870439d487131efcc287318f20a6f44fe9b22f2a174710264cdfc71e2f8f2f6f04d187716ea422834bcd08c2ce061fb5e6cf19ee2288f967ef9f26bed2aa08c2e7b01ae4c207fa0313414c4944d5a5f507222282985391a79a51babe4390f3bf627c9c7d53aaca1004bf934b3d531582b86751eee62db6868a10a4cb4a316ac37996cf2008a399467d328220cc6e8f62064be99d409045fd63da685a3a0b20c8391d6befef3f10936bb25cf1f981947ae4791883db07f2f83b55c3e50379e3af58cc21d77bf5443e4a58f440f00f8fc99c83849e79200f35768f7e3d33aa880782c7e960d57721fbdd8198c34fcb69ecca1c3b90f3388f5a79faf1ca5607527b676bf6cff9c2559c5cea90d99903c1fb7d6cfb192a6588b49003612edee5e7de1fe6b3681107a2547ab9f9caaa1bfe2eb674bc09768471ecca1a0ea42d9f58dfa459af2c262dde401ef8a5b3d7706ae106f2b887c9c7438d1bfef325b30dc438e3e3f0b962bcfad01db46003597374e7c7d1b9e3470960ade07678a1438c31b0b45803318facffc71abe166a20a61caaf1d44b630a3fd24068f96af5bad01653d040728d1e7d7be93390d2f969e6d1545a98813c1e8af4a8f34285b129a0451988239b69da429eacbe263041185f9081e463f71e074fb93110a6479dc962fce0444b2c3190d7233b59da4ffae947175b675eec2003c340f6b5f7d90e69693b060662d2944abfaf3bedd9185f2046e98ef47be95146ec0552cccf12a1167681a03d7ddeba52f93d1068c105b25b8f5266cbb7403ae98e52a5be96473db440fe14522b6db806a988045a64811c3f307f0b1763f2f328f38016587834a2b17eb0b99a9a362daec09cb5ab7d46dc6c988d5eea3e59f5e9fd61b4b002295a73e5971e5f000e40408c1448c0050c18630ccf408e1c39a4a04515c8315d2c9dde78d629c3022da8408ed1de0e6909afb7bbd842dd2990ed3ba85d7eb9c58abad8622990eec7a3b4ed695a14c8a3fbd016755a0b680105d2990f5c42f30fac457e02b134debf6abecccdf12eb6b434a085134879d463b3fad9f93c32bbd8c27b68d104d26c7a893c53f5da942eb6b032a00513c8e1afe246fb71c7983e38a0c51288da9d339a7b8d664e8ea10462dc54a85ef5dcab314920b6b959fa581ffd27ff8016482077c836f5718c1fa0c511083fccd9ad53454620e9271f4d3fd1a208848dd0307d251381f8837c5d0faee9801643206bc81e9e7cf2d1ece74220f5cfb78b8aa5e74c09012d82404cb6911b7d2a108839b77b32b45f9431c66be08b2fbca0408e1c5afce00186172c00638c327608400b1f34a99e9e292a566dae59a5bb1a44756c2aa58b6580163d2047bf2df78acb5fa276b19502e50139e6a1c7f3d135cb1f3ce8bd03b29e8b5cfe8928bf3931a0850ec829e3cf7aacbe6e3532c098801639206c88c5b5a560e947ebd0a166c6f022051886173ac48880163820b8f63063760ade805c5227d6b731d98da5850d889d271d3cec7ece030f4a408b1a90e2b53c48a754f0cbd18206c4ccbcea779966419
4fa9c721f25ed20e39005f9a287a4c45470c4829462671f6b282ff5d78b73b0caf8c23d2081073c200563380b72e4305890c22f2a79deffd4fc385e41c8fea91ffa60cc871ebb827749156df16e59efb668bd4df5d988edd18af2771ee70ea63dda9215ad9cd6a9dbcdbde47cb48fbdce2a48b11e7d4226a30a829dc7fc507e31f94d3852410c6e23e1b53f0f70a0821c4c2ce7d8f571f3f3b05310abb23a7cc6d6ed64e13005793574a7d6a51e06cba314a40c7e123ea26e27d3250e529053cad339ceca5ce841cca8011ca320de59d6c7f768bd634441d23cb2f03c08d1ce7b8582bc61d3e2059de82c220e50981ddb5631626ab116bbb69a7367fa031c9f20ce4fa6fc3f0c3dfec508c38c71f5001c9e20a6bf8b1ec5f2f09375db099225cfadefbae9284e10d407193a67a9a6b8dd268872379947e11c630c36000e4d90b463b61adbf874ea23470e1d9c635c26c8e3f8a311fdf17bcee30b26c83df27cdbdae14b10d3f6f02c5e2fbdd468098284bf7dfd48e3357795b046c366b6b552c2dda35ad443741e9f1284b7eac1649e1f5e1e665e84e1014d018e4910b3846b580ea9f55bb100872412f10fd5171b1c91207c7e65c9f48338fd2c24881e69d93efe0f77ab7b04d1838fadf6e2c5fc147304a9ea07fd15dd6d046933bed34df744f7cf08727d7fd2f30a8de531bf8306381641b2f851ce1b51d96b7be8a8c72f0e4510b3a5dd987bec8e32c8f0d5513ec6c37cce00472208e5f2b1c7b29f41730811a4e896d94962b358e5380e41b4241f34e8860d41dad91ef940a6db87d3168274712a4235c45df5303808516c887dbc5cd9d75859774e9b7d3a8a5a0a2f01c72008395f953215f53f353bca70c031107008821832d3558c4932c2f862033972e8282e1c8120de7df661fcfa419bfc3800411ece878639cd6c1a1f8e3ff01e7cb7e647fae3fc80894c657894c7bb6d287bf7b1a76d18471f08f17a5ad64945a3453972e4c89123878ef51d65f8e2e003b136e6a98fc7993de0121f1923db211b595a6ac9b2a3a5bc3f4c0fa4fe64d6831cb78bad1578f13807345b355e00471ec892b956dcc7232501071ec8953ac5783f1fe7202f8e3b902bc98fc7079647de9683c30ec4541b73da7a8b775fe3a80379a4791c3d7ffe30d6331c7420c8c9584a9ad258ff287320e47a92e81832a6efb81cf24a119f5d358c03d1531e9dad6d4a316e0a1c08d2799ff2c7e3b08bdf1bb4f796330dab89edfe1065ee973be5e138dc404c6fb3f17d1caa5e934dc0d106d27e5946a88f827bb21f609411c61a2a171b4873977ae4e371b8e9e6e4c8a1a3ae70ac81d4bb933de89012973b6a20c9696ca8ad283d80230dc489dc98ed473490075a193ddd65a9ce401ef4a847fe66ef425ebad83a61bc8e1d615499811061a9e6c37f07e6c05106f28fd43ef8d042a3db82ef62070e3290a4c7a3a8cc1cede133ed058e31905c43e8d5864f97428f2b307088815029cae4f9b87afc68d6807fb1811c391efe3bbac0110652beb0142ed6c55cf2708081347e165f69f12f64ed165ea211ae35d755f1d63a0e2f9087953e3eb7183bc6fb8cb1c33360f50feb0229f430c60e9baa47fe55185ee0e00269373c5b204ee8599a7069b73869c17a4949a90338b240921ee58fd35dc779293f405820c50e6beb316e8f4e2ec47105d205350f4d8dcd8b3e7058811433f3d8936d3ef9a165f5c05105624c365bd757213cf628f01450250e2a183571a731a6e536f76a9a631ad9395780630aa41f756ca850bb4280430a84ba1ce536aa3fb4f4c1110542c5fa5454b37140815872a9f5c3feb06e185e98e178027918971e1937a694f732021c4e20858af99f87d7b5bfaa0e1c4d20688cce77dd23f76198190713484956fe522a362ab3811d6488816309c4ff0e3eb00d6de936b7830c317028819091fe43950d061e0c0c7000471288217ec93bea42162f10021d15c08104f278fc033f911fab66f77400c711c871f724dab234023946c6d82e15e228021dfac7a92af62602b9bc2a7f3aafa495c63104e20f35f328a5a3fe38c72104f2505cab3f0ffee3e9cf388260f098f9076f3fb48c2f7e8718cb388040b2bfaebc3deee8a183050f86160370fc803cac6c1fc7d0e3bb20c333805d90e162a00bc830c4e10334b32bd66e252e43352d8afb289fed0751990ccf408e1c5d90e1628481a307e4ec69fd3ef6ece371965b0c1c3c20b555caede6ead6f45eec80986b73f6d83ea896993a20c7944ad263793f7a640e08e71fa56e638789ed7040d6782be72db71edf2a44018e1b9037af99b90fef3b010e1b1036c54fce66f58b30c2285fe0a801393275764235ed9406070d889f3963f251786fcc82f4b292a3a9ee2a26fb862c8e329b69f352330ff13abd14158d14bf18db11c60372e448c18e304e8e1c3eb8110ba24f68f7a065f47d24df8005615b34fce8737684a1038c1dbfb5e3c1e057907cb8be6b3e14d1d5cf1524b194
3d53856c4564ad206a6a77d6d37079b0176378c135c6175e90514622e0062b88ee9d63cae389ce3b6005375641bafff21f4bcd3bfe5441eada641bef72375241f24f7b3a95ba662a0f549077a7ab2a5849aae614e4ec51fa6ad3e0a9a9320531d8fe28374aa19b649d49c8d6785bf7a0cdc7c163a58b3bda0d521043061f4545e885197f14045fd16e8fd79f21be28c8631f77b556eed1a120aad89f8bea65ca5041410e8dedc1b5d64f90ffdd524aeedf9eedea62d713e4d823e21addd3adb54e9043975e5ecd3fb139dde00429bb5bd38546cff2a36c8214633e6bd4bfc1d0045953fac18f5cededa27e231304eb5099fe7914233776a35a75adcdff918f4b10339f55bea46143f2b70439c614da3c9c0ff305b71245d46ddab7975b77ad540f7df4630dd779a7a604e9f262866bde7302372641181d0d7fd9ef6ad6bf210952ece81836742cad8faba3b22241368f694d4a76eb7f50086e40e276abae756d1771af77abb8abe918ad2fd2479072f861d00f3fdc1144bb0e5a979d9223c7b9e14623883fbe9cb6b0594690d24c93798c1ec6f87c6311e4d1d460e6eedf5004d94222e2338ef7cd8f1b89208519db941e6b6ceeef0622c8977ed429e73c4c9d1e872067a6ecdd1e4755851f86205b871f87ea515b084267ca1d77232404f92ea69dea6c96e73e064150d7f6a1b7e51e550f1704daa2f21e6ed3ad66f6173ae5d7cb3d812074858f4f4376c4af8696700310844a29d463b8876633f10937fe40c850775d7731633287c20d3f90528aefc3eadce957a32bb03e106e2bf8303c6a7c2068c6f656ef3e5d8d970637f640aacd73c993d8d77ef5801b7a208f8f47055f78d145039ab0d0571ea9e5a05d814eb9942b39c9b2a97acd1fffc26b4ad9372b5c39c5cd97b5b762efe0a20a6ab5a4454b96b9aa4b45868ff3a38f373f57e4820ae4e8db90b9b6f2002ea6404c3e4e6935e4fb0779a540bc19ebeb41ee20e0220a64f5af0e691f3767cf81c2b12629d9eeed71b19217abe6e255ccc51308d569791d3dd4cfb9e4c209c470318e87be938b26946df2b5e996eede59719d5b172d9359f041ef5c30c1d49d7edb43ac25e06209e5f80b779e21ba01174a20b96ff07185fc73e4c89143471838e0220924ed1e07bdcb951ee4c80512c89ed3624a0f7b24e23b174720d9f59d56b694fa2fcc8511c89fef4f3ea79c2e2f068d8b4088cde1e1629f7e7ecf02ae015c1081dce15efff3f0a5477a2e86a05ca9d78bcab89de5d77c52cb93192e8440d058fa837dab4fdb7f47183ab8ae2ec0059bc729323c53f8c07030c07031c6d031c69be0f13a8e3f9d27b80082511da3e2699e628851c60f889f5f611d0f30bc386662b88003056800173e20e5dcf88b3e776e5972d103d26bfcc05793e6d83996082e7840f8ff3caef00c173b2057fef140738a9dae6e73a1035246cfdec6fdec71ec6570910382688ca9476e3fbee8c1e7020704adf2d7edbcb9b801f946bbf230dcc5d81f37011736200fbbe57353ce99927e0dc86556e9c7597e704103b2a55999a8d2c890a92d6671f9783c1e8b885afd78fc175bc8a28ab7f5cc2a11ad3bcffccb9da9be9297b7452c48656231a58ff6610f43b0388fc295c792cf4c0c5bbc822c153d743853ed4e5ba7610b5790cc0762be3f4c1d5a652bc8c9c7d1339715802181316cc10a62b67f6cfc4e796f1c0c2f4a0ac23056ab2075fe285f69f5a0b25c0c0e5ba882a8e97fb12296328f3ca9207de8b1f9308ffd3aa76b07c991c377fc16a8206e3e8deb31bf176c710a52c8ad9baecb182f96a620680791fbbbfcd53fac14e4d4f38e596aad9f551b6c410a425a071dfd097b991e5f94f1080361b0c5288857e7ab55d53635b5282a472888299365ccaf99ed20438c2d4041d0f861a818635c53b35c068a608b4f10d5f3c7d7f971223ce80992065faf13c47026e76515718298111ffe2defd2c3cc169b20ff4a4bbb4ec60b992d43055f5e860a086982a097924554e6e4e3ae63b0452648df837993b28cf7f14f5b6082a8e9e73598ba89f9580df42548799ca52e64757a8eef16962027fbfeb99aff3c3cd1b6a80429acfb30c4a728ae67a304b6a004b1c4433f7efe51d4b04c82d096c7e31fc5edb1872d254128fd28715dc12d224190dc743978b0982cc32d204112ab50e5a145206cf108f230e5611addf89939ea168e20d7664a27755537e96dd10882f8561693a9fd71ec2e0c5b3082a49771c5ce53cac3b4590429670ef53edc1fb39c2248666b3daecc3ecc636d8b4490349fdfce97670b4410defc34f538050d339d43103386f5503c74b41cb521c8b943f89f8fdb2d0a41caf959a9e3e5ac94b72d0841ac142f95cff641906a3bc6bc9ec57a34b12008f3a1c37c3c5cb708846a51251a73163ec7eacef7d11680207c9f86f04b3955f20fc4982ea1da83d91c743f90d38b8679e5e1fce47d2059cfe64f75e7d9663e90479bfd2ffa677abc7b20c514d66d2
2d50329733bfde353ca21cd03e1f2770e9f72070f241f766ed3cf3dbaf3ce1d089d69fadeeae46277ec40da9fa83c2a4f1d4821bc370f2e4786eee940dacc7931bac7c3d079cc81947359fb707c9807992207428d8f45d75b938ff38f03799452e61f5c0d07b2f7607c46f524777d032957920a73bf917eba819847e3be949c5cfcda406ab78e73971f2d78cf06f2e6c568d62333f561bf06d266f30cb359e252bd1ac83e0e1e4d457f186bfa349087b92da1ef3ed88c1e0de4d7313fef5859b97386d3b66f6975cc40d45a59d5d5eb99bf0c44b1d3e8bb7cd5e33cc840683fbbcd31de663ec6404af7d1d33c0e3e4e31622079d4868aedc240d2f9147af439eb540e06c2ff28e5a19c2697ca1788f3df99e287e97cb617c811abb2f539f2c4ee023163aeebc8c505f2c6241aedc739b3b75b20c6907db933a905f2f8c795acbda5939e5920ccebfac057d5653c2c905b3adad786d58cdd15487d793afcc03f5d570f2b907f947c9cda4769a3871e5520b407b7ec331544a607154a59c45e2a8f3305a267d74a55f3a6f99102d13a77f97810bddb9e28a8dfb6e9a1030582e496e69bf8c4f64f20f8c02f5df5a8626e9d40fe1fdd4d7898072e1735f4600239ba0f43329fe9af6509a494cfd53cfa9440de0f9ac7bd5f1f3a4f02692a6e8f6791408cdb3f4ac13336bf7904d2e6d9b0f659c7dd3202b13fc47670ab08a48ad59643b32de5c8108154674143646708a4bc1e5234fc4220ea48fb67fa78a4ef0781e441af3e2d4020c6f879dd3be739f51f106b3d985aca398ce63e2056f264791c2dd8c6bb0784b058152b45b6dae6012986acd11fcdde23ef801c3cae776387a51cd501f936868f6fbf6167cd01516b3e8f43e6c8ff8983d7d2451f1f9f3720c8851fbbe5616c77b16c610382854f2671f153ee61658b1a10243ea84e678c1963650b1a902c7ad01026eec34c9905494e93d7c94ca5b9c882e89aa1eec6df7cf25890ae73ce6d16b9911a16a4ea4e8bfff115e4f89ad28f55e357fc5c41d84c29858ef6a8d1f356e4316b389b4ab1821443c3874b692653e4ab20fb60e2877ea171e957057963b0a031e7910af238e554eb372a88317692492da71e1fa720afa7ab203d2cf5e8610a627a1ee5e17c8fe75c978298d3a27a476fdd3093823cfcc1b7854d9907568e82b4fe63cda52956c75014c48a37cd3e5e28c85a496e5372bbb81e2888ffe36495befb04413de3d39de7615d3c411e64c6764c35e6f14e10ed7bfa637cf1fca839410a1b1f33c46713c45d971f555813a4b2f471dfc9953813c41e8d686ff5402ff79820e6e8d96ed62532f7b80461734af1e36d2c41faf4fff79fa90429ff789c3de7d15ee70b2508f222f5b7d1e42c3e09527af6eca16d5e695d12448f399d79ae2341b6791f7986dd3fad21418a9eb5e41a3a43cc8f20eb8bf82865eb08e2b58591e8f7a8691b41cee1769bd3f25e5746104c35268fdb64f9d54510d536d4674a7b16464510d7a4475749adfac5449073f2fa514dc88820554c71c6f5920fcee343903d6dca1a4732fa756c08d2468fdb3707938c1b17826c17e55b39c486d2981084f4e89cead1fa402c1e04d17f5ec3560c3b5bb120c89efc55a2250e04597b53d8f20cdaeb03827c2978dd6afc9cbeff03c9c7c1ccd673268d3ffc405a911f4d1ac34ffde803f1e5ea637fb0758fe10379a02f9e3ba79bcc9b3d10f3b62c594a0df38d1e88b9935fba596d8c66f340941e7f8a678e88de8b07c29d0589def01d8879a2fc623fa6fe69079245573bcb61db471d48b1de3eb955fca8071d08e599e6f52f5e8f3990722fc7c88fe36a5c0e24531f47adb294c7731c489f477f5821265a453810f3f8c703cbe14cbcc71bc8956254db64d943353790efd3424fe6d175b6da40ec98e39e553fb66103395459f8c1c6b46dd1ae81f47db9625d68aa1e470da44a0d3b1d6fcb2c0d8ff240a3b2557fd8bcf9e30ca40857cfac1b1bb5310329059f294df58cb59481a4f3fd038ffd71ba0a19b2908a8f816891bb566ebf297431903ad60f7a942ff9833c0c641f5f4ed9136b5f59c140ce5176dd077df6b9ea1708aeb2d69b64325e552f10362d7d6431abb7a9da05f2b8ff8c7e568d29a95c20da89a779cc4c994eb740ea49ed7bab540b04890ca139c6b95748b340c82c73295636cbf78105d2e40fe7626327d9942b1073451fa5f2ed71658a158812abf147f7f9d3ea5520a73c8befefe1b2d95420e4e8d94b4474e57a0ae4cd6ef29e950259f2f79266578be98c02d953ddabe9e6b80b428178dbe3f1502a7f1c36e613cca318de62a5d009c4f051bb3949a7f8964d20e730177f95bc1f5dc9045267e58c9dec9346cf12887e29eff129811cf34f53fd7893c7ac63ad692c0f35482049fce02be484f7384720a61fe60f161723906aa335fedb4520fb259b08848cbc92acd4aeb28740ea114b3f8ee619550b8160a571d9d773d29e8340728d3b157fd81a6e060239
5c0f4a3ff503d2df8c64e5983e8ad60704ab9adfbd8f7e7d0f483deeacd5a5f280a89747797e7cee803ccc3bbdaf2012400784cb7253499fb21091003920cd854e6ff1f2fb7848001c106f3bbca774c107d321016e60ec81f4d0520f23800d88a2fd3e36f951e7b021016a409e9ab1986a5d735f48001a90b32735e6a1f5d83a84b320fa20a2b7c7e30e7d1f5910ed538633edc482b43eb098ca3ec46d1e16c4d0e9f0bc5163ae98579066bd93c6dd15c44f5154255e33eade0a728ffa52fccdd7d69c15e46c3f27357a29457b15e47198290d92bba5b52ac831661ef43078d8773415a438d3f0d2e371a8540f15e458b22b623eb2a2de2988ad49a4bd93a62077bcc8d8e09b3d3eaf14e4c145767ed9780fcd2305c9c38bdb56ea417f78a32067d7b476387111ed4441d6a0deb9599bdcff50107b2ea7e81bd5f7634041ca64fe1ab6f20952f6db541f16ec87ba27489d3e886cf8485bf44e105f2ffedaad4ee69b13446bcfca435f374194c9b1cc3dda1f79aa0992ea554be5b09e8765268851348f433efaa50c62823c8a95152d638bb678099269b56db4cd2c4116f3f1f87cbc49e36a5609a2feff7824221ee6995182144f3ec307d2098d9aa8542ec5c2d148241409c441712814089e301f931308001838200bc562b160441286791400035a2a1e3436241e24241018141a128984e16018140685c2403018100a0803819040f896083d3fdc3bf8dffb8480378b1dd1d7614c1076c6920f927bb3c798c778e376955f365202c99c28b5524207da1f73814b014b2be95b1a98e594f71186de9e6707db6d5e20d2e2ad5dea4a1117ab962c94c6bfaa98c1b7a4def1bded74c16e72997c43bf5bb47fb83262beb324ee42e28d606d6ee8ddbd1146f9dbd050d847a023c58fa69dd7a645efbcd524a4977f38ee06f1b8618802766c2f9d1bda5adf70784e0bb90dddaaaa4592c1021eca9668ddfe2a914e865f49b39639a675b52efb295137353264cdafccb6cece4e10c6cf659f9f964f6fb6764e0cc534549306cb493f1000e85184ab7456133412dc9b259a61de2af41f69e036706f245c71bb446847cb57b12ab9f0b9c457537d51a29f9dee22bfa77a395f6b5c25382a6fb3a2c87638c0687e9e06994e3ace6d25175951794a97c43fcc84cb33f761160a2e2b1be594bfe76298ad091a7a5f652acdce98abbab4a7a81535b6f26b788dd46425925ae941a6422bf1e195d2f4ac5c7f8a64b210d8db39f373cc2a7caf55b8f2b1de10611bbd0f6327a797cf194fff17d5f3ba9a2446ccf95c71bab83a68264b355d6e1fe44abe94de96fb20f4dc9c8e7e86bc045c93f2008e07bb5bdc491b5957f0180a04ddfff73555d8c669505810791a3f4db52c0e4cfea286ad40751d7f22f8f40e8bb133e9f8a116859f476aa0138217550db93ef53a12138c43f761bfa384eab1b53a74851ee276bc1ebbdfac6f384fd9c592e90ba89042c73e1c162c0cea14c56d63408749602522d39e6d33b8722ccd8eb3808a643eaa64ffa1c5f979dcd4d7ac42a2d941530ef49efd019407a5e8d08ec325f4abfa579ed28546dfe49e7cbd1d530cdca26be0c8a7a7ead10576a0e2fe834ef9c3c56c5537d28f6868394feb8dc7bc78af94fe15088918b866389266f299210359507623ee600ca5dde9f22e31fed866e625bd1ea334867714e5c8e7c257e0f9818d79c4be017d2d157f95a13b335484183103829c61ff8b5ceef86bbfb68cc374f534642245ee2c46dec4184c009d02a6f98d9ef65a8f568e18af8f5dcc7b650d816c21723fe62e27bf44c9dde6f01d60d8283bbfd6267a12b3d75461583c3f4f9b2229c8ee3b45e46518de002e74c63d58731dffb0b679245d834c3ae5ad73f3d520df8df45b56e1d257d6e437905f628c4119e22157358e9b8581d90dd69a8701bb17c6561378b52aaaa90f5b5afe501a2afe475d6dee971dcf3e48270ef26d850b40762ba10759b1fb22dcc6c5bfeda94d38542da0c0e1a86aa5cdb4236822641a86857f15f8b2f47c5388970f14482e343364af7ba4a78e098b61017413cc0d65f8749bbc0a9e4b32f06e9d926ce545821cd43ff7111eb0621e450b88aa95afe493c6bb538efbcc3cabfc5c03141b15acb72a2fc233499d241125f0abcfd9be6f39f7871193fcdea7c15ade95169aa295014413ca59792d8f062f12866cb9fdaecbb07ca4dadc3001939223330c3aaa856ffd5577aaa602944c563ba377a1b8f8376f526446254444c5960867b63c493362d621bfd567e629a833c32bb8e737cbca9d456e932cf2161be2bbe9ede83b81b90e39e1e7fbfaac9b6a53d5843571b61f0a9c896bfcb0731919172b919b5747c5da7bb94709ccaf459edbbe4dee4b5717298b2a08798438acff20e56c7ba74f302fb246931fdf1aac9012337919fa799182e923017334f004555ae73c4a0ed5a57d48aeca5379064efce97d917e86dc4018e2244e85287f464ee4e9f361fbff40309c43b
652080b6edcae3ac3b77ca529e6959eb1af8c5e811c235719fb24bfb247e49a3c01bc88c5a732b61fdfda47dfb52df66dc07117a58094b043297c11f7d1c5b16b0061a6842bfe53ae320780a2fba0021aa609a7ec09397d3eec7f12c5847628af095e2b494329d67618aefe41bf569f6c1efd1e05bbf7133065133595210d5252954c18ec859b8bfbf93e3f2feb51bd324edfa4905089852173e8147ba0731ab445320edd8252a64c8b90f0af7133450325a8a9444579705c7cb98fbae06bc41d8deba27b7fccafab27c8056e42ddb7ec07e805bf5ee3a05ea443562447ca543b8019da00880094a7d7e5785e9d67e194ff3ec4258302052ff7695af5ebe385f879801b431255aa7c9167e116019c521aa41c5f185ca5f2360a62bb9f5306c879f7f8ccd6086fce9c672d7971b14fce2b7a146dd8e8d78eb5a4ec56d45121809ba1e470ca1d40d7320aa7c8099223d5504cd7e0982a7f1e55a802c67ffa15bd1b36ded0e683bf12d782b7e33873df84ce31be6e9f9373792a664a155bf4ffc42853ac21511a55a45475f204f03ee2127c562617ef621f093fb3d6442333ff605c8d17fd2daf99092cf23d4e8384492d24f474755c92aecfdf891378fecfdff53eccafdd93723a4f822929611b40b28ed388129386dd821abf478c782aba2972566ba0ce9c22784b88514c3142eaa70060118e700af9dd0653184633425aa0b7f56c98fccc77d015962bfe79a8e4334c755c14a170b540e26e3801692296d69003d1ebeb0a4556a1284fa787c008c795588b70cce9f4f278f6beac0343215472336a84da9d664ff7502835aa1130f2b739b11bb6682b7f9992f0a97dc08d9b6407c22b64d6418ab951182d87e575724b781d96274014fbd61546a2e739caa8391b1c694d9da68b3b14863a487cacbe8112e1cc9cee4ca0e4cd2c48beb3f3b986f69984e6f24e1b7719522e9a951f2c5740b4165b14876bfbc82298ee5775065209285c04d19f31b9069738630b1160bb4e2783ccfcf04c500407dc281e36531dcd308dbc6da69beb5e70d398fb05d8e0060bf2b1e643d2d1f4841339f9c50e05bad1ccbc505edf02b2cbb99360f83b827bc504a4df2c50ba0569667471168ce5599afa65e1591408be4f536656cd6074df5239be32f868e0df1ba61382fabc3c27ae0b8073408b95e37c1c19f4d9c4914821b7dc078a5f94a3db299fc5fbcd086b7a66000558b914560fcf6d3dd45aa66071f70bd58eed43a11daede9fa7eb6474464d77311991877547861101ab4f7ffa299d92ed62ed99a438e29bd29d40cdcec921ddd0e37613b35026b1fa7195f1564697938ea9e1f661744d57308b6eaf44b7bbb3864936ec26511eb56b8f8ff0cb0911f6ee72d12c693a60b2ec570319f027b48874cd78610ad12a34c0e2962d9b2710a6d1bfc19ab8231fe3c169439d030e1edd7e40235d4542c60d6b8e2a4b64837657fb5f6af52fe6d2fa32e87d2a20a13ad2418904021e118a6257f69b5105890759b67b0520b438481a3d8c1784897b49187c8acb8104adc980c3772193a6d109e53c1ff8c20d9131eaadceb6d55dbd06e49df6d47eb354576a42de6e3f84224bfb0ab62ba88d896817909038da8aca5321e475ea3467d4d3920bee6be2e5ba97322046910dbdac3cac0ab1ae779f3d0a79ef04e6d84f29a9783838eac0d241112441cbb1de567ef9385723fa3a08b7a2e2dfad854e401b8dae14628f23b709ed47740c42d7cec87eafc7514396a78e48d2a3dffdcdf7f23c7cae7fc81650aaf5fd0733423a5270c0aa8d8594b089b94b024c66b0491c0db038858503ac1c7778160d2790c4fe5f5a2233ec0dc7f4f5bf31431f668894530efcc60eba97396698b402191d6663f289e0385d2fa864df14277e0154fee9b783d0e5f8e47ad6ad01541a498a14b3b9b1b46065609da28bc581199a068a47198e2be633152da36581a4897558314798d849a150206e88efd5f8c70581f3ec3f9e407aead4072cb79ba4160174cd0f1fa56b041e244da15d81e06fbcf217cbb8027370a0ffb553a18eb3e881ce031c6ff559207363617d67aecb06d802e50bd6cf69efb2b0d1f35d0d37b3a7923458d9fdd8b7b0b16328af456656814be419136c92245179f7f6aa8c951258d4ee2a3e1473314c0d818583f426df39b414c47dd0252d4b333719c2b98b58fd0dd425dd111e1bf495deb7fa53607244ca619a3f353ea31faf4d691343792f9cb46193bbd668e8d631c98b746b4630e0403b0e6dbb83232f29712ab9b7fce7d35885f32d86886eb0207cc84dbe34517cd9121ddc3807c1a8d4eb206acc94d91bab3acc6415c1325ae841190f2e6a7d27637061f736c298cd2ca75181ba75e78fd03db9e1102fe20fec1c6a6159f074cdb198802b7e4326883cf6d34f9f24589a6284aff5927a48b7d344316a9a199da30819c84df622e9e611087e9785372da65ddf9d332210068901d9150a64c33f5f2acb1bbb27122ebe918ee732
e3603d4d46cc5a5c30ac04333c6ec0df0e45d24f9a45bd326b19b91e74c7de46a7b0a02be6aae8c78708098e1f60519058c7528796c78a93f224b28ab1400ac2e8e6f65a88e576162433dd2311df7eeebe21c95bfab17edf66f16bb41e2148646e908b2a98a9cb0ac17f27ff7da60b6a58c6ce1a889bad6ec4cd4d57ebcb7d9bfbb4f56018c6ae4c80475292a82c3cc1255ba789adf9c520edbc04031f2a975a7778e8dc03f9ead09d1e4054903bff52af36c6e026a869ef04f5e00b4c00e26de70bec024ba2cab051c378eb345dded8591cc96455cb657c6d357dc90d626789ed778e34228e7af48c1023b591aa822836b136ca958266aef64275c6230047c67bb30ac2b7151a35ff934c72d04ee949216902188cfb455f98d631ff0961ba1a45a1f6dd3cf5777b35e46e13fa571e2cea6b57f5451cb7a0312bc1bcdfc483b5759402c74cbb57799a460c5d916a4f077b32a45d4c6d4c23920ac73c3dba1aa5a5f20a4dbc2d93f6f78b66bf62ec3cad44484f4bf7c167d979f7c73918bb1a0b53cfab51e5105a11e8821e16a438c99b83a996e3e88cacc5fd4bf18b6e4a13399888cbec931f1406c0af3bb4efc6e42d5077df362a11c2a7ec4c164e33e436634a8b19094c2897412f7c8554ff3d74fb5321cf975ff96511e808574b0f1db7948ea899cefd07766b31976d7f1a7b4408ebd2498176a1933177196566933217623602fd704a09b227a0994c6511b97f861c26b24856642c4d1ae32f753ff845994a364849bf47fca87ad893852daa23881ede0b10b8db1b8e3a8b700e90b14c7a1c6a6273cb0053a5052d2cc7884fba579d520db2ce14b7731b9fed7502a37ac7eff6cbe92937e46ef84f9d3e42d422a3174e09901fe4491fd13ef1f8028d9a1fca93ba780aff0578e8f90f4e16e2028d2489bf8ac91d989fb2302c3b7d802a802a85e277d4959a844563e1514eb9dc1d5b8148e7a3b4add4ce72fed429f839969a7ec3a6de36596de3542bd8d319b0048336ebcdc4b96870fe499fb07a6f8ce79aa9df5c223eb1ceed533b5490fcadf949a4eb55b79eb90537e672896115b757f89c0df5ae24e02cb1eac32b2d6f4efdf106ef67d67bb9b1f5ca49257448d5b35429d83e9b71ed6278d2c0104c85c55a1daeac71463fc012d798e3114f9f4fa8bf71c17b281f9c1f63817c9d6da53382142f43625f509b183f449e607529f279b8524df39340759d05c327957fc23ab98129eb2494811f79a61126890c3a7f0d46dd2558678f1861ee5c89f0647eee42c3d3257c4eccc1e03f7ae3c719b1a541d406f0c8235a9d6b46042cae4223c3feae2ca000c2ae2e03aaaf2b6b7da9a9e043e4f1373af8cb72878e4d6019240c22ca6ae9ae42e5120d422ad0aadb6d17b066bf036b0b6ad544da0d3b95c964663cabd29cf0012afcc0c10e2f786c115bfe734734140151ab336dd782fec169854a91dd1eae54e11bd6194599153b73b6be0eca9d1aaf66e484441c804ae7badfc0ee96c0de13553fc000d9c066e63a35636c5122277046670d105dc65136111ddd015658af5790d3786fcf832d81e3a18d74dcc3dfc6b11094d8f4b10fd11cc7674445900035a4fba594abd1406cd79f11839362f18e015da130e1793feeed3e61c3451db05b20d1951ac1072d78a11626d1fa073a74d02b85c831a2efc0b994cdfbad1b5ef4238668b2328472030106f0dd394ed1d4f7ee7e263b17deeeb982018554cdb58d21d54ee3c46eee344b6f474c6c82f2adb5f2253e13038896016b10841bafb5ab164cdc39b5590e36246326636f6675d33b9bbc633e21b5761f53319f90d1ee3b30c0f189eb9a5cb8484afde520a05a4ab604a2a68f7cda3900d43f85670550a211518c4f08d1b2e61d200e63bab03f251a4111b1a4ea4d11264222ee5431ee0030a6ea3e4dbde9e025d5367aafbcae48cf7314d7e0f80fb378dda69cc63322f32790d05e83df64e9cf859e420b6e68004754ed76364b0485b44841e1a0399e0d38788577ab9a806d43096fa54de9ddc6d183d1f8378ac598681484ad4c716dd2896171eedc2e7477ca32f8ee30bcfc01a6af70177e12b218f9cc38ba31aa3fc7f38efb174af72fe0cc9ed3c33b5aa7fefbfad726ce8a5c1e23362b95174081a3ddf74e13c55e1d72fd86e1320a6f87ecbdec29350fdc59ec2a069f2d1ba88f2988557990b8619360e7cf47d729d603aa5b6225c466828c804b63ee8c3bf347407d9ec7e75c972ca44903644dc086235dcdb49b44b4111179a548a0302a4f46b61e65b6675879779c6a3770047b8356354095be3c9ef1678143a8b9ba43fd02ba6cba93d85d2776652e09bf66c7884039c5729e5d716858a8aa3c190a1f5e9d9c75befabb889b4e342024e9caee94828c32370042c7112acca2116d8c3c672ce55618db81c50b2ffaa89fcf422c75d2dd3ff0531f6237e1caff2c8f82418195274d0d4534c5249eab857a809ad263e917788d4018d6176fb94c44ee870
02f9d05514fb1549d9275dbbdf16f2404565a1080cf2831e35ba495e555a969b64f7fd1a0448408ed40a4ae135d983c14aa8ab8ee40f3d7a8b20c453f035d11db925262a151f04c57196b75d84448880bb31c755284f5ca3c1fde4d347f80ef8f68042e140936be9da80d6f4758fa4a3d7e732b90a543b0e4eef3e19fb8a295e009079aec589ee43b2232e26793795e8cc3545da9c7d1f8a0dfe26972078c8b98456187173fdda1d8eafd6656df87a2f838d3b875951c8871c33dfedcdbe1c2df8227049a056f5890fe62560dc1444d827d354e23a24c60a369a2824ecb9a5b503b0336685c5c6a99b86d86cc59304f2b3a37e6d5ca8c50c22421cc5e062412fa321c0fdcf8993f9eb4e7d2e8fa80e84b9040522240a4c084d57423c061d20e675e8d8e26fdd64ac18eab85df121da8a7fb2ceb3672a7db2bccbc91440608cc29b04ae5fad5960a5d454e8c83e92a841aa271a8a80f8a09d4d92fa86978a49839ed232a99f15dfcfe969453ce54acf76db9039b2deb4ffb7e8084fd9351012c227ccde2a91715349974d7cf195c153d94104f88a85ea3c8529010db5493c4548238d85186c2cad10fe3662fa9410bc33ce8f86b2b570763701cd04a0b96922b6073928e543511eac0d09f109e2b722073c00fa8db62271db0c4a16d241c334a8c0056d1e750834a2f59fc8194a85f089247bba652335daf4ad0badaa5feb0c3e2cbb4b3d46126468c18e87a486e350f35a454af7c05c087f57ac9a4f5941239f2bfd5df5829f381981305d1f0faff1df7e3d232b694622e1c2dbc8ceceada7d9504bd0adc68cdf186511aeffc8efdc125ee945c94a4916d8947815750523a21abd33f6b4fbbca24cf27f324721012ec3a21cbca2673e96c5e84f025d08fa8f7b782d71bad90a24705de68653232e2e01397537d83bb39df5354aaac6e59a9c32db4e7f2ca525deea9b736adb32028aa57038a6cae72d0fa38fae0096ef58d10d39c995b254362fa7152aa8312138a3703d923c97d8ad64b8b3eaa1315d2c79f4ac0486d07fdb523b8cce88af28a949f9e993988f149b147f8488b80b5e69650de96d4894902b3deaa333a77eca68e24a87d9e86ed5afec9219aa61d65581d61f9e1d35f4e8c98aaa7db16253ea4d2e7d88f86900563f0f40780e7ded512a2bd5cffc2e51740856a0d1ec7a17921ce2d6cf057e36e5d7f45d1735de4f7c15c7900e194ef8ce4279f61511c0ed20d6ed5174570242a3c3114548e1c0a4db227ef4f0473d2c4f5f88a0a5b46cdd3f0968866ebe5d2e2c0d7c276cd248a71521dc4ec7c207b946fdf0353710ca712136d2078847671501668ae6385c9efeb7b39558fc0bab9f5462ea44d55d557eab4bbe87a2e72ad50d4adfd6f44cde8b40dbe24711faae93cfe69381054bae528ae8f073cc54a9f678493770ce83875325d26b4cd11a28dcc6d16581a64e1a9231c913deba892d1297f0ab099520917b2162ab5ce3d29c94ec6c62bd92b6d19c6a89347cf1b22983f011bcf1912b387255122387fba2e6565550d47f82de4ac7c2881d992a99fdbf99a0a6e98721b7282f026d169e237ac23b0273eea2d9e4597d7247a26066de027f419f483fc056a258767d63f76c49dcc879af66007b3b83a3a098dddee189c13646a1027941660e26c7d0495c04ee2e6a7e3a5bba463e8e0bcd3718238276abad9ae8adb614d7d136ae16f4061d971ae213e21c07f2c7a9354c0549df34fe8c403c5325297c2c1e394cb7d83cf3df59ce3feb22b1a884a6f4b564913e818a9f5126072d943a97ef6a38980aa937227f1e81b03c50b0c79c6026ab53c14ae30f0f3af1b0e89ad294db8c712dfc5b01753229ccbd362004f9e6b72320a8b3594a54f511a14c2a3f5bb0c3615baaf57184ac8f2ed8838ee073f6059719cc4a38c1e9949d28d4f2b907a35f1e4d1d98e3222d499078d6b680e3bab244731b4d814cf09c246ba32a56ad98924ee837e2968d23900830e08455099bab9121302aacb7879d531dadbde465dd0bbc9007fc3880bdc492c3679a118c2e55617b6d2e82bdf31ca28db83dd48280e587bd45fe0f1b9406b6d9594cb16ab3f1e185444c649908605b6aadd81ac08daba5134dba13e0a26f92243ea5279f33c97d2e11c30a7a292b88ca2e90a0f7c49871abc43132218ee52442b1e1e4e79c99c35f545b5f57c5532848dc86ca028a5c965ef89fb76687e86f187f7b399e0a9864b66f5531806d19c84eafb0c19f076bd68685c58391e129270977acf7788ad09c26d1eef68bbebab3065d8c80433d0f4d26186dbc0223a9c1d27abcc516324dff97b4052838331f77fd947ee8f1914fa90768cf780a98b1bc330b4de3d39bfcb83ab2c7737734d851cb052ad1d146cf24f41ba832bec69ba89fb3c16f1d049ff06333ab18e04e3de87de58e1f0444af961f9b8f1fc6e7e60cf079265ab5f64624a5f4dee9fad26e6c9de63fcbabffa95dd4117408525372ff8c4027eb78097e598ffa0
289829234d4a035730c46ba1b232de52a0e2e41f60bffe91018d5175d6d2e15c1bea8cf286fd58ddc107d11712345341b1917957a235af1fa771f2cb44800d39df859e8545cd620cebde30a37697812d4667732e36b35ad54c71c91dacc3b42a618e30525a27dd2f4789d25092a91bd5fc03f38098ada9ffc76a28e666bfd032bc9c271aa687d821695cc35e43c68dc64c7ac7f075f68167ae76c37b6dc5bb46d1021ae3a4809fe505dd7f2fd78b7a242bf9605ad738bde3dd72f81b2b1b849a6c694d6a6b0e76215af486911d601cc24b3e9a986f8ee1c625470f22297388e3ef4fb8e5d3bbde14f0310af47f527f07486d7207f341b2968dcdf9fee5e433e520afa74033bd854d3d5fedc906e260cc1e5045126192a108afe89cf5799ca490ee2872ca4b599dc7be0a0eb21e1baa66eb4bfe64e92c2f7b92e83bec61446822ba3513cb7e882a90c2c66776d630cfcc538a2a788178be90d6582df6cde091d5b59bd573f03493d9779f7434f77e602c75482cfb3d4653885f6c2c67f2a7df63b5c494b4893c6efd7fdf0675114eed2a4994440f2f62d00749b4f31295ee17e38f23cde8f512a14a618bd6791f743fc542821851391ad20b39bd996006474ce78f7e7bde178fa1dcd937f65ca2b15e211943b7d09f034bf1db5469e8bd4321903ba8ed8fe2581af7fc62c82a8bf8539d39a73082c8e897c12bf2a85a5a096c93c32f039e2190f6e7894aa6bd007b154023a94507fee0309226f8d21a044b666f1121c7de1a0eb4b6327f49f983921c9d038a3a516efe6f3ec8f7e726af4fab1127dd257a52ef6ad7fa840f105f4fb7e6e6b7f1f5acdf3402a4d98fbef584b674f6e9bcd211c7d7cf5ec3dc80312666351fa566ec6549ad6df9151e056ce5df55d8080d371d370e5bc9422c5baa8333de3a810c25173399b3c407634830ee3be2c27fdb751197daeebc445ebe90008a0118b6a40515b5249275da6574fd30005958fcbf783befbff77568b78210e7aa01f50b01f766a52e6604f55ddfbed1b3a0f64051c60152319c45f98b909dc5a961d1c225a4bb24a3d716c85423cf6865f5a45920e992ac5bbf4747a9d35d5ed2a133d1359d8ac0ecf9d1f150e0ab0c357c4f49c4d7c49373d04dda4165a3f6348e519dd47fab0eaa5290a7833bba420aba5c64a95fdc2abf50bfaa89e7541530a5515cfbd8a1819474f7f92b66686258290a409e612c120e725e8c6ed39762ff3e4d0bd4f1487708ec4682fa9a7db08c74e1830ba1efebd7604defcc14e5fd45901f29c267b495893a4618282a81a4c09037e55a15f25018fe8fba50a28752fe57db7a32dcabe08caae315db34f89350f4a149dde6fc9d8ccd52f8e837a1c3ced10f415b6932dfc35001ee8db836bd1caeae6c497abc0285fcd85c43d0dc3b9648256b96cc649e8f44b9c754e9961dd10944dbbdcef24ac0ab2a5e2d79ed1c5eab13bb0176175e217b15e395865795bc5af07ac64b2a2ff9bc6a21afc52756e4959a17ebfc751c8d338034fbd7b1575f7c30c0378bc7fbbe298864c926c401255e82e1ba615df7b36e9c654af30d4401c21a9f6ff9818058112db094fe1ec2bd3006ac737d2fa586666df20beb7c7d04d587e21c9464844af945f5a1789652834a1644e9604bc9ea00f50c145109050ca6a26d551028e083d91b1f23eade04816b2f83a8b2a412836a35a280b897ce7794a7ee9ffa3f850ed5a240d166511f8bff41610fd53d147fa88a0e14edca7cf25b34285aa07282c20eaa2e4501e120406c207c8a662ade278a9fda7cd2f149fb27ef9ffc3f15fa043da889094c4fbd4e828579abdcac65a2425aceefc10f0feecf594bc25d82435f794d1d74f1cac1a902fa8c4381dbe63dcd7bd31034c68c429ec707269743819779eaad6e64c0fdd57a458fc92aa98910b01ffa9d276dcad889e0163a9a4dd594afb849c6050a010606793538b4c56fbcba0d57dea5f18b50504df5e53e7123a307e7bb0074d3024931513f229afa88ca14b69504eb8c9764b389c5da9e27697fe03c7cd3cde704fe4e697030880571b4df925237e31a824285b048af45d1e254a7a570ca803e884f9386a08351eda0c3d05fb83cbf65801accd66d9482ede3e033c0b42bac8c1b989c03814a1d3fb132ebc31e3de2dcb861b9a1c241268dfdeaa2c403dc38548ef9c463bab6ff13de84a08a89966a67aa6810408fcab5997dff264c23ae6c253ff008f5ea418416258aea646cb3efb0f1769e284219da08f92cabb708cbf5848eee795a2c202312bc79520888bbc18307cd25f00f85e7ab1a4c99272036e06e7b638d32d1918e7fc5e759704511e604f938e22a4eac32668830d613673be08451681e7795986b2467ca4b8463192b09494926cf7b0892051ee23103f4ad234ea22ad2c297811f347fa9fbba1df41d5c0897ae699eecbc842268597923bff0841a7d835ec51bab45e7eca69409c5bba0506dd555c19c5db7bb891f0722c211ece968207cf
3ac8cabce1e54e373c66a43b04184f0fc7e3ea0a374d0b4d6a9c1c465f0dba2d4dbdbc06b583056c953ba05f0882dd7e319c42616220c3b771efe31407f22fce860926080f2a78d770834346ebd2a5a18acbd99f0829b903b3b45df30504bf3fc0788a59810e80d06d590febaa7993ebb527d2b31d192a28028cbbb5ea07808b126459e4c9c041aec71718924c0630a56e25305474ab3383b51fb5638b85a093de653904e1c468a31140d88578b78a55bd9a322a6be1e7225b33f7acda8a84fd7358a0c91fbae63050396649731d600b59b260f268518ab3102cb551a1822212771156b4ba2822d364849e0cd8294445da897877c353fc51ab50f81fcacc3a0d3548cd6601cf05cf70d59393762401617721ffc41e3f272316d22e46f81d1e35e8d94aa4d851f63643e265070aef06e80ce3b61a0e7cf9ef2371466588311582d6682391cadca362d7060d261a377842188527491291efe9ec1076b8128cada07ee48868363f63b60ae108576c34442efc52a3ea7f99bbb76eb173a02ada3fa16170e6baeb5d102bc5e020e22cbe3a130a42b917b84db6945c4731d8fd9573c3b97ab5284c1e7c0979495c7e1d855810494782637076ffb453d30523d8b262624bee38181286073b4eeb49ad0d58314b4194df07df9c702ec096ca84391ddf50866508741443196e1f52b0a8e27eca4b17128c8b0206e0f753fed16848cf10e1cbdd487be62f15e54878053cd2a294fa7ea23da307ab2804ef88b85ccba95d9461b24b38d037b4be380017bbcc5557bfcb629eca5f6d83604640f6eab91539a60d62c237893f092b124849cf3362dbc5f8225c4f213f1230ef3805c1a5d5d0379e0349a45c4917a73572c15037944c3bc399ee1c8560c8fd503996656716ae780e0914de1584115524fd2b65782ea42332f6b0426bbe3605c5db8dcf3bca85282bbc1de2541cd1c535791361b8c430ca8d738a512603c9ff6f0a26cea0885d9b2ec536e0987db0f0edff20f37f12c78a158581cb680fa32b328597a26306d9a180d42f0d6749d87dfb75131db56a077d5b81243121ee7bc1cfc3c9000a949ba44a1b1a3ae1b42e5e0f5fe47fdb05e3cd703aa4ac18b8c93b91c3d2a60fe47a979b16f246122d52283bc922551bb0cc13b1dcec1a20c3ff665e17030a5af107e7ce1ec00452a30c2135b0ad3904995e448a27956fcdca0376684b4212122ad2d21c40547a2356d9e0f8620e8c67ddf8513999f6182869cb56041978872c7d6898542264ca796053fbb1bac57da9979b636dae81d4d370ead84bb11ac5a2a477b9fe7358fcc1d59171c35b2e5a9271a495fc61696554962004a11cd9bdce69c413f78bf6fc7b0762e42071cb374e616cc79c97986b45e81a95b88e58c39b85eeb920970822710cbe369a77fda7c1cb95f4ce236b9cbafe6ceb18c054e5ef9baf87cc947a964f76d92256cc30b269312c8f627f731d64fd8273d3b1a95a3382c6a856f62d8115263add0b42b1ce3c638391f0915e42b476f39f5cae1e7243bd40bfc23365205a2ce2d29187426f0fb737662be0a7261a69cc7f4ee543b5dad61f2c69dfe29df8477accb13b95567bfd30041485ea98591f55a855c77acf392a1d2ac7683be203af3851fcec7c05d47dd88dd78b0643d10bef40c49d1e40e82534e7a2150ecaab146889214b8cc0ff50f9e0f144ea3e8f41146327e9d11338eef12bf6074d777e33c068b0d821c6b077c8d77084b1418253fe9c5445312147d873a244c32f961a9c7a2448868f98e631579040ef5cc90fb6778c142e12f2a1e85c38c83205a3a2fb3970a4a7c54ae3ade662023e5ecacb0e919a980ba9b260ccb53b08ab60e2ae51bfc1c10129cb8390777fa927a321900c87f1291f6f4ee048be846426e84ffee9bd4c5b6d1f903c6e5d2ae1c193385b69d1594a38d9b2dbebec6179e0289f5393fae88c79fe437d6014d0d26cccff3fd1d7926986a4c247663de0761a12a3f87024143aa41ddf2e93e3bc0a4ebc34fa3b8557c5da2019f8e56c394155a019ffd6fb2a297590e6c2de041ae00dd610e2c295cb9206596849cba59e4ffe31c0bad472c894d7aa2e0d964f8e385b5b120d7a764a6bde9a6af1f37fde9775e401b20814b2d69ae30f907c29ea02529a8306b8dbf1aea6bd79f791325375bfe36374c55f141a1b62478857a8ccb948b407299acaa38b4c41bf3295e1025064b2643f13c8ac2f5a631b328e5fe46a2ead6bb40df6efec5270b78b461b14c8da546a7bacce25be23335a9ab30aaca5eaf11e4939f9dc9926d76aac34be26b9a549c6ab0c8c2cb7754300f8ed39c200917748c440e2e0d5975af23c3e368b7af760b9d325d562d91327fcbe1fa213ccb7d6f738db539fece53b4c3fcb4172db504a8720fb9226fe15225a4f53b7ad8b4e7db936866ce93dc3f023f20371e74c74721a570012a76ca653650ae26e9cfcff033fc0c3fc3cff033b00dc2f7fcf63fef1f3b255936e6761d4a9c9b649229a5
94349d6cc01ae92a518070699ad96902110632063206e8a8df773663cd7f074a2134cc590c3cde703439d1bd4e2bb496bc871b4ef62749bb977c4db53978b4419bef8fd7fd930dc7decba4d2cb99b6e5d9630d871f0d67de15e3a18683a6b7a45cee9d81471a4ec2780962e3eb5b2f083dd07090d7a92649625f6d726864f038c3d94b96eb11af994f30790e12c48881a346f230c3694b95d89cf1ab84914719703b25e4650a327ba345071e6470e46f6c93de4e76788ce1184c3229eba7a2a99811c3713ef3977597b4f187e1b449e764b92c49b866c1502915ce74693ec964fdc2d9a46adbf4ebda68591a1e5e38f9da9b496ac394a89c47178e1b4e30213faf964db8860717f8cc3196645cb34d533398b85027d7edb185a39fb68a6c13939ad1c22996d2d512d7c4799a85b3de69559714ed259960e120c4767ef5945ce12832ce7c540a266dfe56389e283e325deeab704c4af69331c8f1d32454388dac97d61c279d964ce194154799a04a96fc2e8563bcac105fd393778fc2b1f56ac4f78fa58fa07014978b19cbf29c89fa13ce5f67d2c5d3169662ee84a3578bcac52871a279138ea1a4f813355b6192890907e9a2b4f55c6909c7bf349b371b21464909a7934bd782e9ae5752128e7a2679a620249c548a3471eb949c5b1fe1a004cbe0a22e6d84830c35267dbb568493a04a45c813fafd4c887098d3e12b32e58d500ee134329849169460620be1242b4db60d96959b0bc2d9a26d97a6501d25209c3d4cd8f40a2d2f0dfde0e83abaf144d4e24a3c7c7012c636b3ba3faef25e9c64dad31283e64beef2e2242541254b9b239420ddc54964d2b12e0e5fa6c49015e5e2985d83b4b618aa840e1727d9d2ac28b1424db6cc2dce656bd1d425a91b94d8e2acf3953cec7e2fe9b538f895f8a1ccad448ba3d86997f6ba8fb967714ab57982b26d657192a4b02b57d9c6e29826cae42b65720513581c946a7d97db9265a977495a1cad6bbae224c65fef5392f012bdd28ab36d9828abb12bea841507f9b8af507731bfafe29474f55cbe8b163c551ca368cd1d5a59b4965271ceabf45fbe79948a50710a4b267b6b49903b4f711e5d3569e29b7f766a8a934aad763156f68df9521cfbdc548c0acd1a6f529c6be36f4ccf5e74cd288eb15db284b59c6a9614c5f9c3622e6136ff8c341427cf1793f4a5337934509c2d26656342e3ed55f889837a93434c9c9e38c528fb5633eac449ccefdf94a7e7fc729c38a78595e6f80d16d6dbc449d59728d3f45b539e268e6ea2d564bd9889f3f56acca52b7ea8ea30719273a3f2d4252555c9250e6229fd55e6b168bf254eb14d2c21574f492a5e89d377d5c9341a4a9cd3ae33bf44757f651227b954d42471cc1a774f30db344b2d12076569bde47d8a9e689038ee9cb79dcc3de29464c75458d2923f33479c4daadb93a562b14d1a71de9f8d966f348d1a31e2f8a164122f4f9ed1f0228ea6762a946b65d18d228eee71b1d6e28893c5441c94ace86942dfce34449cb4f4892bf237995e05e310273bb143c45b28434708971dc0304442f7869dedd05acf582b844936e3625c8a0b8fad0c2b1563180d06210e3e3af3953449c59c248c419cbe749c5ca92c99c6de24018620ce1bf6e5629693dbdc610462af9331a97e2be9372c80018872e4c91042ef6a753976e040018c3fa8494bac2aa7b556564aaf926f4568550b0f861fce5b2629214d0cea018c3e1cd305d1fef9101f9651b5b2d4f6ca666e594b4ce2b48eb987735849ed19e787a18783df9d788df6397ad330f270aad821635dfa11769615e0c0c349869755a9d5a9d0228c3b9c7c339512b24dceda0cc30ea72fe182ca345e1d8ee9b4629bf20b1d8c162e6626512db1ce20effdb171eb42b9fd3f8783d41c9d173e4c3c49d20d6e6820468c1bdcc891430ea72c6abbf154ff4b19051871388c189927f496e0704c7b256e4931fd86f3cc99e4a6bd59de24dd70aaf78b69c3b1b4090d3ba1574d746c30261311f1b294abc2abe4e4a5cb5af1f5326b38d589dfa2c44d42e6b960a8e170b359e3db4a30d270ca4c61c3c5ec5fa7250c349c945d9d10f35f991b867186d3c64b4d796242e425d101c30cc75496d74eae71c65a829e86e18051862d97c746cd189b3494d63869b38961fa92030c321ce35e2c67b43171ac1ac369358634d9d69772ff3490f13970d4dd486c78c003cbc6b50bea0f409c820aa2622f579296d38f3f1c2bc923bf72fcc3cf67217cf8e1183b949c58924c7d21f7e124297917d1a5e6c349871c2d4998bf2e710ffadee54dd1b27f7a38ab6a4ad157f904bdec471e0e223a2bbb45ce1e7ce0e118834a6295f4f4ebffe30e6715d1694a2a4b3b615a207cd8e1144a26d5a4be44fb8a7dd4e194c9d3ee67641264d60bc2071d4ed962da132785cdcef4c71c8e5e272fbb3567840f39f071a2a9e9e695bcdab382b6d65e13e4de861f71b0180587d36ba90d8b23fb78c3d1d3d47b7
d849d20946e38e9930493fc46491b0e9ba44da222f36c38695026b85826d1fcf51ace224dc5f484b3379151c3299f246a122da9a808b1051f693899f60d42f484f6bb24349cfef2c9af98dfca323ec3316826f1131a2d663848ab8ecd5fede625e8a30ca669ae0a964ad3b4ada48deaca07190e328810df27e68f311c35ac6fccde202dbffc218683d457d3cc76b9330f83c5b804c3d904db1eada27fe124f466c9f687178e6ea6c404eb0a27fba90b073d7b5e61a6529baa3fb8704c31e35a4ac7d7e4928f2d9c62496e4ad84a62503f7f68e10e8b15dce562d0ced8e5e1e0230b27293447beabc9a6ce040bc7efcaa16de76f42eb57389e92644997fe46ab3f2b9c35c5047542ee3546598593bcb07262bbf74185c368123406933e97335bc3c7144e4ad37aceba4ef49748e1746d92a59d3545e1743195ae3cf985c2c933dc06fbdba4d7d28f271c84922449dc985e0c1f4e38c898a4a4aaa81343ef2a7c34e1181773c4985a0cbba50f26e4c712f6071f4a386919bdda96622dbeb3c047124ec9a46f09e2e652aea075e003090793fceae40425bc9f78dfa0310319364266ec08b9c1114cb2c15243c6bbb2a9b55df69758a6ff0c1d5fe058239c35a7b926b9ff4b8ade8c3519f8818f229ce428e174976c2705798268c8d8114203878e321d4c84e3c69790a6458f369d3f86705c17ef754b267e08e17c72c656d355927c72e923082725e9cb70b5bc64823e8080122e854d7d9ea224c2c70f0e9b32496295f651f6f3171f3e3849b3b7aa4b772f8e9a66f4d586b36ee0c18bf3283bed3771a476571ebb0882872e30462e1017874a4a6f52fab25dce78dcc2d024acf5dfa9dfa0331eb6b018b5b018478b93c75f9c8bfeaaab9b199bb33044be32e533ca55b5bbc5503a73c4a9cb431607258edca8788232c1e4c3589c74e5c6bccc26581c64895362ff6cce6432f78a937029ebe6c4902a67523678b8e294643651474fac4b52da8a34633ed1fc79b0e2185b1a2b8c652ef058857ad2c8b62062831eaa4853996546b1d02315c7d12074e6fc85eb3931a838df683b41da788ac3baa8f72c25f4f47da6389698bbba5e64ee4a713cbbb698a2aa4656c991e2d41b2ec79fba68f7f26e14077dd972b9ec668d5772278ad39f98aa67e8192f2d14a75493ae366fdecaffdd81e2649bb12d5a8506b3f13e71106ec265dc147aa554779e3849e23325f1ebbb63fabb4e1ce424ad7e72ae058dff71e26c82a78bf092cb6f43779b386950971637efa18953d093448b964abe5159c9c4292ea3c5093ac4c3c4c162ae92b1fb7497384957c2f867bfffdf7d9638b856b86bd829e9b9c1abc42996c5c9a725d6fbec78943887da8a6abf9a2a29cb794ce29c266ca7fd326cdcb0214347885a5d093c2471b88b1f4a526e2978ad1789534c7a275c839a89f51d240e4a491d3afcccf4db8bf7886f6b941cf51a94e788638ea814175f83759daeb847235095974fc50b31ea63c4d9a41c7a7e6b6ab4c92fc2121db2aed3d3c4f1157112dcb43e84acfc6692127130f5253208a5f716c543c45994f07aef25e610550ca6d172c865caa19943c958e14ade919b214eb25f3eb18fb15349ac10079384c96e505af98207210e17946c2bf2821293707b0c0269328aaf5fefd9050f411c6d4f5968953fd92499409cae3293e68d8e7faa1e8038aeff99f6dad259929cb3e0f187539f9874d60966a6645bfc705e112a06515277bb8face0d187534cf2e8d3795149c5e9c187838ca1524e634c3df6703ad9b812269378e8e12476ca6cc9115b265e6478e4e194fe827f8a2e8d5f490f3c9cc71dce9b4fb45f2d5f34b876388f3a9c64cf897b17378e3e4b03c7070f3a1c5352d1f3820c7bcce194641dd127a27ae37df490c3493dfe84cd0a190f78c4e124a7d7c26748cf0c5f060d64a6c3030ea74b3ad612114d5c260e13164346e41287b9186dc454bc1b53472c71f8ac502a6497384193229538451dfb3069c1ed4d2294884ce22053dbd6871a452461314a1089838a68aa9598311148588c47588c1ca80411471cb4726494dda6110719d3ac9af8907e23150025228c38a6cfedcd6ac6d034892ce230a3ff36c92b1b4afc238ac033cc8565d3168d150b5a1b4eda6e5d791049c43955e3ab4997f2ebed88388cf0cbd8279b5bb658c944e4102779dd67f304552bf61ae214e47987eabf184b2b91429cacc7df35af28a1e28910c70cc22df9eb6d89fb06718a316dc4e56efd4a0be2b0493cb1224ca574b21509c4499624572c650b4a8e13409ca27505132fa3d549d2c81f8e9d229732890c722ffbe13033ca6437e1a4c6fe46a40f67cbe921a39b341f4ea152542a49532589ece17c924ce14c4f87799bf470d228a6e234c8913c9c2e09e5fb3d7aefc7040f2731f14d34c952913b609aadb343b5b22c5c56ed70ca5825ed472f7538062dc14c859acb39a1c33147358a92c5391c4e9850797262440e270bd17dd1d3a3c4a8
23c6e15c323a33ac058763aca85d9b2925ca2411df70d05c52963fe9c45d5311dd70dc5ccddb4d6a99826d386ca86a8acd777a4e8cb0e124e442eac92a665df22944d6708abf92b4a5927693d40ed570bcb85757e2dbedc64fc3419558ada8ab092177349c6a6c84bae7cfe799ce700c26dac450316d866312d3dd2da7455f539132588c926470f38790ddae203206319c20120630e017225e38896749ccea49bb60342d99cdd4326b19aedd2c7e5e5e4a594f63225c387ea9f5896bca4db20b26912d9ca48b4109d26aa3c7a55a3866dd2c13adeef4c61366e1544ae725a5e726d48d8553579013735ebe4a51bcc229e82df3add60011b1c2318692e315b382e99a316260a40aa7de9326f68a3ed14b898510a1c2512c3545ef5245d58d91291cac42ecbb645e911091c249f7f5a9369d7da6718c44e12409424f29ad29d65e1781c2e992b8ed3379476485651421f28493ffc9d8705192573263c44083224e38c7a9dfb5f355156f310891269c7b556ef36610baefa1469870bc185bb77c336cf64b0a91259ce45a3b49a6d3d223f58b28e1185432b1e26b92144b393442240907b3f46f49457b9369d6420409074d57f2759eec8cb53541e408cc488b79be971911239cfd4a90d39ab1e5e50822454026d9d2b3a462ea678408c6552c77b335a5191c22433826b9bdf4fb6b1b1f82af814660be8808e18b48102e0284f37e48bb0dea2fd6c617f9c149b86c628b78932bc652c407b76c98930f4d954bac5b5eb73f7a7150c2050bb55bb7126378715c51416e58261d77e21e3e767196b7a83f96f75d2d881fba38c9ec33a52f9f204e9df8918be36f2ebb58d9dd60273642d08c94a6820f5c9cf7c49b2fd33ee14c4a7ddc02992d2955fcec69e0a811a2c38ce0c31607a1f74b9b0c26d8ade573e038147cd4e22cfa4a65c92ce1a20cb78164cc483b500d5dc1072d8e17b7b4649fec85533f6671781f8ba1f46978c6211fb2b8e294757b5d95626131ca072cce23329bb8e4b64156828f577cb8023f5aa1770613ed8c62950f569cba33b7ffbf8ab35ccc9c90dd93a7e4aa38be897e2ae6afe46b522a4e92e978493a6afd320815c76ff9926b7f7286864f71eeb5b1b63d298f5c6c8a8365e94bea2449259131a538988dd2215e4b9b4a0d290eba24a9ba2eedbc85cb28ce16e236b687298a936f58e8b812e227ba501cc36d7a5ef6cc2364509ca498f54a189d2bdfd4270e327c5d8a8f2ccfdb3d71d0e3633a279e96a9e6a31347dfff4c0bdf95e4783f38614c566aaee1d5434df674ba5d1f9b38a5e6536be2946dfdc28c3413e793653426a1946dc935260e9a76222ead0453499738baf89b2e3953d6d792071f9638a978a31575468ec7ef361e051f9538881227aae6af786ab7267c50e2182757d23bbbaa82e56312673d2b59ae94f887240ee2465f6f36e5cb4c23713a619e6bf716426dea03121f8f38a879893e31e89d3271c4b1ed94f78fd2d7ad498d38ee58bcb6da3393c266c449760b564264aead4adf224e27444fca7935677aa488d3b9cace2825f5918893dc6341683b2949310621e22093e558cb54f626080f71909d2f57c24f269d9e218ea937a6429c828a2f374a9c10a7375b73eb12eb4cc643f818c4c9f794aaf8a3b913c4f1bd2edf9218a3862a05e254f289320a11c092409904f0ea3c4a4909021e150337863920edd0f1458e020400072d01050440478e4f5ff88d1c381250011d2138900202b005000400000000c000244081056400003a74740c020800ed401ea30001403b90dfc811e30000184003127003870c1a24b443870c190b004000187080c3975882d5c95f688e433871ca617ea3626d219b385fbc2ef9b45fd20e436000a289834aca2e64de352b00c9c479cc4c883275a25c7e1e72031937727c0eca3a709838983079d352de845ce21cda26fa26f1379b128658e26031af63971fe7bd8754e2a05f7776654f438f8a1267178d5df162ea4a00328993a0b4ee775645838e326ed028434924a8be74ce580b991189c39b1c2773ce6a534e67acdda0711ed8c01790389af86c17a13b32678f38f708bd21f4b454b8e68b1a218e388eaa9ebc1d4233d668ccd0216306364270b800a411279f9defd3277dc86d860e1934500a99c10d1a662324460c1046e0a8b1889358a344cd0bd76b2ae274d6e29b24cd24b3a966acb10d42fc8bdc61232411c724b26d945252de782522929517664eeb3bc461af74ba86958b973733d6b6c6d730c431768cde147c76cf52e36b2848213064870d429c10090019c4e955a4da262554dea09463860e1a82389d1cedec0da154e56fc61a6b200e2fba474f0aa3d484cc8c3543e38b1a0888b7c18d1a35bec60140fe80213bfc468d2f64c84800881f708023f50165c84000081f1a25c5a42ee9a533d69c7d7156e373c8b04188cbf090f43874b0edb011823a6c84ecc8b1207bd8f13a6a7c0d0380e8014376d098a103c9909100903c1c9
3b28ca25577a5064a108207a84102040f684a4d6f8b555163062dff5af1cb972461a3207738e5e93841083fd92284300062877389da77429650d7d6c8005287d3f5a9bc951b42367c1b884609de6f8cc0bcdfb09108428753c62097b2bb994d5c41e6908aa8866acc56592b650a0e10399c54de4c73668bba246b6880c4e16473d9cbd449226a35d681e3868c3210389c4a53b0ffb98a19179037e86e5dc9e233a55c3963447449d5511604e286b3c811956bb60569c3416d4c5946530a990477c61a0d74e80310369c049917b62e49a36432d26c80ace168a6fcbc444bbd248b1a8e2996df2add9a90a1a403d1b0349cc44a9a2fbf60d725050736081a0e16cac752c916f97ecf584b6f030639c3c93c2d99b4564a52da790e128098e124e48e0a3aaec15ffa15a001a40c674d3129952e9dc9797a1bc8d04186d39f4a1d624b775a6d3903640c074d8bdfefea0688188e49ba2caa7e441a021286c3959d24c99d8d392f08738080e174adaaf94f8ad476807ce198c48fff26bddfd831821831cc40bc5040baa0205c308666b08aaa9a29a5b8dd368e71d4481c00d9c2c9fd3537ac47338068e12c2236a9b2586aacccc241acea4cc6909939c3c2d9db339b55953c04902b20881510a40a074285f3c927b65ed53c8552ac2de62da7a6c568991d8048e11894863341a31eb0e1811cbfc3033f831c2f02bc1cbf23e40520513859dc7658e5be94c3af0008144e42a3daecd5a5a5993ce1142f7576359d9f986c271c3e4f2bc72f9e702a37e17872bd64baa076ba3b261ccbe44fcacb6d092741c925eb7e089db1b9196ba70510251c4dc8a815266ac94926130049c2498987d8ff2db16f924090700a9a4c8ca7736d172e1fb223c7e3c801c8110e6ea3b26165fadd82ff1c8811a3868cff04c408c7d896ad7498e4f88447e002193a0029c25947fb57500b9d257950002182dafb23b24af211a61a204328bb9b56fa295100220490202ca66519642ccd459750d9a54e2b4613198000e130a652e3f34f69b6fc0707516729d357ff4365101f1cc44c2beb89b6a05e8af4e2acf35b4163366da62a115e182b5959556ab1768d9a4a6813756ebbc44a12d9c5a7b97354183525c67591a6940f954f7c4f915c1c7575356aa8888bc359ed499964ea16e7b43649a78b2c95e4d41607d54a61ba527c4b76d5e2ac9a4f5027ba49df8268714c79cb5e4f2df99b91599ce3efe4b7d87b268b83924d44cddc26794e38128b76dfc3cac4adf0c1a296b72c3296b9b2585967ce9a553a5d9716465e710a3256f8a6b799aba8191322ae38a6aeb74d82106219cc569c4d5a1295c48f93eec6244458716a8bd992cc264ce3b6c82a8e9535cebea464e965a9e2fc9a4e9b5cb5793536426ae00522a93889a17194ce32296bc4196bfd850eeb40041525a57adb9de264a527294d722999521e8288298e5da19b232765de126e20528ac365d2fdd3b8cd585b0d44487192f6c3d751401019852996d4a6ca1222a28884e240a1914f64c413e71211a797d29c1de944a9f1daaeb1269e56d539fabafdf6196b344029881143c70d229c3856c9ba8b7e258f327f13c756f3912629f90801871ea833d75196a3288ac12808622086d2b5747313502088441c0b0603f238cf75f103134001ca22c17020108c83a180401c8a610cc4301002310cc2200803518851b0836a3dd9adf0a04fc5096d9ee3b92ca713a4ab03cad2658f18bc9f56a00a7b9b08e52d2f40002e5dacc10a3d5b8be140edf780822a76605c439525de350163c0259c5175384c1468453863f464b6973fcb2a464fe4c8baa0184e4f10e86681975b552e0a154eae782dfcd1eaa476926329a78be5a80efc6798b71b401868448348c9565b10cd8c885ea5daf45b059df4d1457c8adfa498d05f60d427e38669a5cb664e12a3efe2342edcfa533e0c54e52e22e91c67755a01de834de37abb108ad4df4e284437f5a2bda27c8ae4816277b638ed4811a844eccfb0e5b82ba58c504c030d07d156d6de20285d92dbf26cbb0ce8a77defbefb94e959100effaa8a486a41795af3aa22159933bc7cf0699451082da286ea666b526c0168ec7b28322319a80e7b4ac8ad62bb957ce02f45139584d023299a5de7ac38bf329c1ca2a4c895cb0dae543e23f94e9623dce88234bce1a7dd938e28513ce3089a9b82a8590893e1f2ec161858b3858e7d300bbfa5970826db00dc56e93072c11d722c32fdddda96154f24c95b720cc1ad2df1d121cbc0ec2a2ec34d156af1dc3ea4f234c52fefc8d4788115310ba1f1980d5d7b2cf03b46256049167b812ac061b98845c771834eaa586b9fb597f828101c322dd9626acb68229f03cd18d1abefddab2b719fe48de722b281cbd3182dc556d7288a48d23ff9730c04a0339b47fb7749ed3f9306760f78119a2016c6927ae4a5f0b3f65cd4d555ea31a445210699c48855bea200e12d54b7d64915ba91810739af10323d348bbdc0d0a2ce
11c07fcb6f604b135234cdacdaad20e916ff1abba0223e845530c1b705d11fefe0334c5df6340873eeb2de735801db7f9292438e574e206135a8248f6ad987acae279299f500b449ce537f4334e4ce3e6443fabe9bb40c62ff6ea9ec5120423244ea55800055e90f003860493bedcbcf3550a33a5d08be9a5f8f213c836c72497fac052a218d578b2fa0972097b5ec6408dc846c5d087d17565492ab031d33e359fe90d0cfe990253a8f6e11f0150d229cf1162ca739e814e2289ef09ec35814a028855fc640a18d08f166807e690e6d3ee499d73640668f9627c286bfc37ef8fb601fe8f438324360dd917530e225340beb729f1157fc83ca6891bb1e560353cf76926ed4f21c26f8812057c31d69e6b197450f32ac3e3f0c7fcab37e90860fca061e4ecb3738bd2ca7ea2e8b3a03d0e6919d716185618a7e5d53ddf4fa6bb36bf1b62db23a2918583dacce5b10ae3315c1d8ec4538f42bc1286aef02bf30e86f7db846d11b203dea22f1c6d998ccca4a50125aa89e390b6066b19929d7e8a1824fc3cf3ea6a8aba515bf83babeebd30d423b351d0c7e7e136d1104d4a789b4fbb9ea1e025686ca8d170db7c88a9e2d104fd0205fe76052dffd1adc0456b32869b2116cbcef56e5ac70b899d78b46e07661830df978949ca2ffa21b8f09517c1c4ff3ad9d9067021003527ac9eafbbce05eb2344a7ae3599b60c9424bf55a6ca86791f07f48fe51ae2a16ffc2c46e7f99b8ba90811609aae6eb91fa8d5e35f8dc65613524c95487caf3bc76c602d798ac66469ee8a6e50312e7cd7da07014053507dc481678acad02906015e1c08e4aa2e1f70c9c180a520374df7d11cf9c55a7cebdc9257f7e2222dbf779a920a416296466e238e611fa6709d7b308ff53f31072abff7d25debe6075ca8d8c7958425f189e3d65aa9eae3c0598bd5decc3a6199b0f9baed3962e49c538ceb40d75892e1c01e17c2d1eb5c6826dfc69db31285a73c4f60ae5e5ce48b0aca4c69ff514913364507a218a08506509a783dd30c730e3898162de29de54f003e565728c7dfb6a2bccdb969b50e40a63df1ea432aa4d6b27629c45bbfe7a9a1346f1dc95ef79925488d33b2802059022fa73b6c89f0c66c8d3f3621466bf30817f08e118808cd2bb416e411e81159092ca664d73b6c3cb30b2232c1e0e27160c6938c13376ffae3b176f145b3cef8ecd286ed103b3d69fcff6babdd4e950adbde081ab76cb225123ee132db502c528be0d598d1f06b29bc2a586229e1414cfd16471b58dc4982b3dd06704f039c50e31b65109996812391f6d6a19604e9a9fb5cdad960817504e89172eb185fde1fd2701cb1b65fa516b2ac089bf04e49728fa80db3bf6eea6be249c2ac7eae1eb100d57877694b58c4a60e83cdcc2ac102da7b854797e7ffa99a504ca2d0f4429896907832ac9f5b8ce33818a872a5db8033fa9ee95a5a365ba88b97f7ac924daa667c7f05f4521716b764d0b30901cad65814c71e16f47ace9601bfbe347afdf3cfff3cfca189d2d1d94f2f548b8d6859169e4eb4519a802a76df6424aa35489b85af852339cb9c0a1e3f1eef25384c0f9851ebcb70336cdae72d7fd19cd840c517745803b137a76811408db68cd60a7327738337f688f11e0b6fdd9f43ff93af534e16853aa84944c3025de49184eac7de611ed99894f9cb210990bfc506dc0b4575804bbeaf4e84bc30ca8ecdc2f6f7b561c3aedd6928d835deed93989ef522c59da791653eb1762196f5d0c170beb9b0342703213229bead81dee2af8b9738078c57746c9eee77f34188c7b42e69782b9222730976ce1b4e2b2a1672e4cbb8429a80faf696c43a092835ca6f7da068b604a5caeaef1aed66e0064cd1076cfcd6da7d70b4819570430ec895f0c3150096d2b92351eb5915881c7109ae3ca15230849c4f303ea6cf374093d3042bf175c97d5bb6108069277c71012136e8e110952bc4cb11479f6db03efd664270e5c0d0fd81aabbb92626c0fdef8754c22f23f8c81c4b4ad9f32beafbc6c44e04ee261b06cc233ccac037d317134a9a120d4b66d418cd4202761e2a1d06b69d220b4ab28c506f8b093ed18e142bf15a213f99e97b3a5de0097df185c452dd950920b41d6307966aacf3b81309b7b9889741446970560cc93b2488b4201369d0de54cdaafb2fe4209530aecb7dc694f746e0548c7245f90e4f6d26901aeda0405a3ba4c6631d45e5e30891d7b4950ff53660849b72c8382988702bb8eabb5386f22ccf0e881f6ecc813c57bb484950e8c371705e513a7ea76a83631fe5f8c5042427f18ddd1059b065c8b1cec60232680acfcb362485d0622cc184d617d030a76e20e91b7e29927d42a0b9889a7193720554aa6e95a086e2f213b07b153a366f121c005fa65813ad9103a454f0d6d098ce062a8f531caa09d3741ba6b9eef7be90b513e8b186008fbcffb3e06fe02023a27516b309942b261766dc050ba2fc117845d4597b61f9af9986d8
8d38dda76739df3dcf071af816c5ea3bbcee3a526baf412a8483c5ec28eaadb0eb0c5f81cbfdc01ccc48218aabca942fa0d846fa40410b621ce57393c726dbc7df1b68d3f02f2e84db0648149a98a3a7009244d128714cc11fa050efdec20d85de634e6aafe50e7da8ea13cee23b5f649f170e771b83c8214372bda04d137a2577ab86ed4703aa1e32ba412a744d4f0d1c734a1a2b2b12a71d33dfdc10d37da8dce8818fc7491522284ca9298cbe4203d2c31a07ab999c29180ca1b43d96819f7febbb11bb2d7007868efbf9156cc90149f9c53b445cae68f464097f879b0a49f958c2c8425a5678a11206bd8cf82b7f1c0760c0f913be6460efb8244d6310335197d25e06addd317e6840b5f0f9c89700a02f9be20eda04b140fc06e6c71c5d587bed34e0007764d6e58507df0cab801090c2f578fbf9e58447ce3d2513038cfa3cfaf4d022acb74339a98a7ee0227ca70382e52bf571913f544a1d65d3b99dc702864fe18bf198e6a6870c86018e83e0786ae0fc060da19687f48196fff14d7dbb5ab405379cd2e33d45bc1adad4ee7e05b0f75c624fc9bb93b8c4c085049edc743b079e6f2c8dcecc9362da0313d03c5d7b09f7ba652ed2152ec47cbaa1b1655d5e278db121a5d476ebc3a2a9bc40fd709a456a92c191f5c654249c7a871a1f9fe2cb86457c8d3d7a9bb2cd88097cddf271df9b0c130014daa081aa448a4313ce171aa08a2e8935aff93d3686c5f849d08c9d6567cf55c87eaafc3c27dbd26012f9812e70706aab5441fc98e300e1994a87e0194c6be02baaabe8ad4d459d42e8908e92bb3c9c066c56ac2e3bc8595a79f8817708c8c7f1750ec30beb4bb8a8bf3dc672de35669aa0068f12a295b6a1ad2a12fbf1a9ac2c540139c3d4e8c248566a26df81281c0a375a50bd3d3fd165fa0ef5330392ea767f012babec33a193a413d35842c93462dadb8a9186381a11c81f7010a0487182e72b16e5f6a31883434063179c3729a1b2b2de6f995f9d52d34fc1a5e12fefb0095643ef7b6afdfe08fbd0a34adbc13666b1326b0a160bd98363b68941af17068b82900a327377fad601548050ffa0eb7e49c504aeff0693b8f42c23be0d5bb42ce60e6dadab5eb6a4bc6a1f2f218c758da431a64efb7e10fecb075f2873d37c715818c58a6312bac9327ecf3ce4413382bb113ece60dbd6959b41b2e60b1e8825451a3618d31eb69819b47fa363782ea65a2d88337380cd0f319841e27cce2dd6d2b6acf1542ce71e31f03e88ed0b60c149efa65dd8abb7a08c5a39ba3e169e1217be28a5c335b0f751b3664b46394b1f0074d62598104929ebaf49296d31b10f2e157ecef2e6bf4c0fdc802c9f566d8e1aee6ae90141e6eba97d6b01022f64aad4dd12c70b056fc715653d341e8895ab25523bdd78ab8c308a921ef85b873abbb9d74d6430069bddd2fbd9a142b16a0c92350d8cfb90fe6f89c10baad417583a1dc9b89965f18da64f5411156a39996b7d8ace5b3aa653643f7429c82cef0eb67a739c7429ad236f0d8f1948f0726700204aa1f1945176ca255d61f42979585dc0708dc686da5c98682e217050a691626e486c115b348a8a83308ebaf5901e2df0e20833c94123bf46c394939f725e37b33d5083f0a0de23f8d64f113b0eee6494a565e80b8eeb1dd421b86f4088b839b3a6a73dfb4a50d92dc69a9fb4f70c5f3fd9ab008f29b27c92d05e957ece98e1fac9b7e741dd74438d4afc91c392d764e83b41d006ad3dbfbf2239b304b0004fd016b6d697e93467793aa50a183e12b6d479da9962bcdbd384a3286d63fbf0e7dedc987ae5ac0ba3bfd6416dd52112c84065cfa69af2af0e2e87764488509064ea447561560cc9941c377612758fd73a5c712b511192ab160cfdaf0e4c6b9a12f3d3001f2a49cc39070b64fa5b054c3b8fe3d5d6c4ae1d00b75337721ff29d473cde5554e236223be6ca0a74a74234b8a295002b1f4696fe5a407455507f0da285cbac2b116dad360840c26a72f90b74b578857edd7f3e5aeca175ccbc75594cb492ad411204a4e3a7402fe43dc7ec8f301b4b01ac4a07a0ee72e39a07129a1c1aac0ce099d09bae58453f1d7985b81c0f26ddb92d53b53040ee42801b1e90ab682ac48b3461ba39c1191aaa13c7d943acd8907ab0c8aa90815a88b7df700fda8d78b3ac0022aee1b1530e9e167c38ba2aa3521749a769bf26cf0600d54c21adcea764261945c3da63ccf8c23095b29e196642419ef35f5267e3859987dc545d36780d7e74e86e72b1546f91b7c7a7453ebf5c5e52ce45d01668d14d0369e16d5bfeed590ff0f7d74106b7a7e9a053f3b4007370867105d493e3c6d013b270361420a33e930a12b0809e67fa767ce8827574b87f0fe685e22c25db8ebdf2e77cbc4302e26bd2f12ce144d43db9533673f1b63fc69a010875518eba17cfab71f97ffefda2a1b735b49546604a69dd4e9433ae043cb192333875b93cdc5510f9b1a37d2db2eb866ea
06fa758e40bfeaf131f089a2b939d791da382a83a898e7a1e478cf61cf8b5959237fece6028ed22522f05c01b879d2648220a3361b4d55e6dd614d8488434710d547d29a6906409ee201b81cfaa247dda0e0385a8c51506511a17849f2e3907a8c8a673cf4cbd0238dd0dce7d23e2e6d198e90e296c91ad42770e44680dbc83c6741c5251051a9a327398cb20f026012f16cf0c5c8aab836e2d802a579926d86db8a6be794ea74543cfc066ded5930dd12f54ba08ea03826508bef4fb3bf5de3c9a8ed56d02c5c41be18c5d063c8e2277fc7bfb9a9c7cac131c5f68e87ca9c07f1a8428e7530417929876d28c71d870fa132d8496321f9b64d9af23d4b45d8666e7e7079c29e701c9b3c1fe64bc012d746534f6df17bb7a4417f25301044a6d4a25652edd1448724577fbe0eb86882745a19dce7187faec033538271ed12f3cd38d299afd045576a61e7bc60d5bb4404eb1bd178a7f33f9fbeb28137f657a471e866c9c55217f87df5135dcf388012e20971a1f32a4ec3d2e4a3032a63b6ba1c311226c5821c4f731900cab8a773b3e6fd405b555b1f7380f7472f7899f574349282453e8d5a058c9abe1f7f9f4806bb69cfa162be42cc72776c94a685ffc51b54d5848c68e06bb0c59b22495e10b745520a25bb8285e0345e6f0e2447da03e1c118089a4f42927210d6187c11252a36a7228bb81a2530584e9d3dc5d14e458e2268470c16671f85b55bc7b69c48196d88466122b46f53a79aa6b87fb30822cbc5255ef46e5f33370fa791a155b4b944de26fd2253d830b1ad8d81c21a2e0cc3f42e948f369812d2426389556e6e24066d12e1c570a36cee02d5917320195c05ce4d32c5558aa328e16d761f2a973e5bcd0ed1f6444891d9d9e3cc3d5aa56e7b0f1e745864e672505aeb3eceb07ca70d49bc20990a90dc8481d58e382050b29962b59e63e4687b0cde1d4e27535c6ff9ca35cbc92e36b22f6e103515be5225e4b9503b4d8de333d5bc0745b7833d7fab2f5868965059015fa9fe53a60be6e954e6f6937de30f8a8d191c835d9db40a5ce57d8daa1924bc64e3281f3d8344a0343495b96dddde983d0ad179aa188a5c98c9d6fdf022d659fe839ebdf84c30ffeaf40f359181263b7b5f96f22f4088bb6e3ed69ec1bd01bfca2f831e053e08ec9f8bca05b52d595b013a390b79274f9394d28b3bae67576453b0ccd3e166f44d863f7cc01c5e4cf0a03143df54dd3bc85af2249b6462640765aca4d46d05614017ba9bf0e7f880198048174ab5b1fbd190484b018a4148bc2b2d601d4140742bf55e3d4b2b604c0162eeefdd3c0969d9621ae20b607ab11d71481e33df9bc6491b4e4c52b5ec2dc4733915ce77b28cbd16ef030caf6713731f91fe046001e65426cffee98124d1267a80c10bb7091c846b7687b50311f5074290e0469ddddc5da774a4543a0b0d790b9a1ec84e608aaeffe28d7ea7c24a4f487468a6d08bf8775215150eb021a9a04174d468929e1673dfbf13c75a8efa5cf442f23fa36b5cae02", + "0x3a65787472696e7369635f696e646578": "0x00000000", + "0x3c311d57d4daf52904616cf69648081e4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x3c311d57d4daf52904616cf69648081e5e0621c4869aa60c02be9adcc98a0d1d": "0x0802f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38b25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b", + "0x3f1467a096bcd71a5b6a0c8155e208104e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x45323df7cc47150b3930e2666b0aa3134e7b9012096b41c4eb3aaf947f6ea429": "0x0200", + "0x4dcb50595177a3177648411a42aca0f54e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x57f8dc2f5ab09467896f47300f0424384e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x57f8dc2f5ab09467896f47300f0424385e0621c4869aa60c02be9adcc98a0d1d": "0x0802f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38b25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b", + "0x7474449cca95dc5d0c00e71735a6d17d4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0x79e2fe5d327165001f8232643023ed8b4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x7b3237373ffdfeb1cab4222e3b520d6b4e7b9012096b41c4eb3aaf947f6ea429": "0x0300", + "0xb8753e9383841da95f7b8871e5de32694e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0xc2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xc2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80": 
"0x00000000000000000000000000000000", + "0xcd5c1f6df63bc97f4a8ce37f14a50ca74e7b9012096b41c4eb3aaf947f6ea429": "0x0200", + "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb3274dc1bb854565c3b25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b": "0xb25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b", + "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb3d6e4cff6e22a77dc02f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38": "0x02f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38", + "0xcec5070d609dd3497f72bde07fc96ba04e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa195089a0705a664955c36175726180b25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b": "0xb25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b", + "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950d6611bd6b7ff46a3617572618002f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38": "0x02f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38", + "0xcec5070d609dd3497f72bde07fc96ba088dcde934c658227ee1dfafcd6e16903": "0x0802f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38b25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b", + "0xcec5070d609dd3497f72bde07fc96ba0e0cdd062e6eaf24295ad4ccfc41d4609": "0x0802f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a3802f40601439cb3765ef8e2a0a5770a78fdda8ea3675f0d4262ceac46fe9b8a38b25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4bb25ca9a71a8570a05814e75eee9eab0757d2c98e91b24c1fa2e3eb75f7b26d4b", + "0xd57bce545fb382c34570e5dfbf338f5e4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0xd5e1a2fa16732ce6906189438c0a82c64e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0xe38f185207498abb5c213d0fb059b3d84e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xe38f185207498abb5c213d0fb059b3d86323ae84c43568be0d1394d5d0d522c4": "0x03000000", + "0xf0c365c3cf59d671eb72da0e7a4113c44e7b9012096b41c4eb3aaf947f6ea429": "0x0000" + }, + "childrenDefault": {} + } + } +} \ No newline at end of file diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 4aad4dec236270cad841a54e45f2265c67a2d24f..dcaea40d2da0153e3646dd324415064064316bcc 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Logic which is common to all parachain runtimes" license = "Apache-2.0" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -13,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { version = "0.4.19", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -num-traits = { version = "0.2", default-features = false} +num-traits = { version = "0.2", default-features = false } smallvec = "1.11.0" # Substrate @@ -31,12 +34,12 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot -rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false} -westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false} -polkadot-core-primitives = { path = 
"../../../polkadot/core-primitives", default-features = false} -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} +rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false } +westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false } +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } # Cumulus pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } @@ -45,14 +48,14 @@ cumulus-primitives-utility = { path = "../../primitives/utility", default-featur parachain-info = { package = "staging-parachain-info", path = "../pallets/parachain-info", default-features = false } [dev-dependencies] -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} +pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index 81d78baba5486933ad4667648cfdae95275a3f73..50cb1c7f3e8a1b942a035338de656763d56dd0d8 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -122,7 +122,7 @@ impl> ContainsPair for AssetsFr mod tests { use super::*; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, FindAuthor, ValidatorRegistration}, PalletId, }; @@ -155,6 +155,7 @@ mod tests { pub const MaxReserves: u32 = 50; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/cumulus/parachains/common/src/polkadot.rs b/cumulus/parachains/common/src/polkadot.rs index 744108bce2e5f539e17dc71725a7c1dd7af52675..ca4138303421f573ae9c383218d84a0e77a7ebd6 100644 --- a/cumulus/parachains/common/src/polkadot.rs +++ b/cumulus/parachains/common/src/polkadot.rs @@ -31,6 +31,8 @@ pub mod account { /// It is used as a temporarily place to deposit a slashed imbalance /// before the teleport to the Treasury. pub const AMBASSADOR_REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/amref"); + /// Fellowship treasury pallet ID + pub const FELLOWSHIP_TREASURY_PALLET_ID: PalletId = PalletId(*b"py/feltr"); } /// Consensus-related. 
diff --git a/cumulus/parachains/common/src/westend.rs b/cumulus/parachains/common/src/westend.rs index 9d3e0bd1a0e2a51d2dde47b5be329be0bc8b5d38..2bd4d18a15eba8fc04f0505439d55cb56062f67a 100644 --- a/cumulus/parachains/common/src/westend.rs +++ b/cumulus/parachains/common/src/westend.rs @@ -13,6 +13,26 @@ // See the License for the specific language governing permissions and // limitations under the License. +/// Universally recognized accounts. +pub mod account { + use frame_support::PalletId; + + /// Westend treasury pallet id, used to convert into AccountId - in Westend as a destination for + /// slashed funds. + pub const WESTEND_TREASURY_PALLET_ID: PalletId = PalletId(*b"py/trsry"); + /// Alliance pallet ID - used as a temporary place to deposit a slashed imbalance before the + /// teleport to the Treasury. + pub const ALLIANCE_PALLET_ID: PalletId = PalletId(*b"py/allia"); + /// Referenda pallet ID - used as a temporary place to deposit a slashed imbalance before the + /// teleport to the Treasury. + pub const REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/refer"); + /// Ambassador Referenda pallet ID - used as a temporary place to deposit a slashed imbalance + /// before the teleport to the Treasury. + pub const AMBASSADOR_REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/amref"); + /// Fellowship treasury pallet ID. + pub const FELLOWSHIP_TREASURY_PALLET_ID: PalletId = PalletId(*b"py/feltr"); +} + pub mod currency { use polkadot_core_primitives::Balance; use westend_runtime_constants as constants; @@ -21,6 +41,7 @@ pub mod currency { pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; pub const UNITS: Balance = constants::currency::UNITS; + pub const DOLLARS: Balance = UNITS; // 1_000_000_000_000 pub const CENTS: Balance = constants::currency::CENTS; pub const MILLICENTS: Balance = constants::currency::MILLICENTS; pub const GRAND: Balance = constants::currency::GRAND; @@ -44,7 +65,7 @@ pub mod fee { use smallvec::smallvec; pub use sp_runtime::Perbill; - /// The block saturation level. Fees will be updates based on this value. + /// The block saturation level. Fees will be updated based on this value. pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the @@ -110,11 +131,11 @@ pub mod fee { /// Consensus-related. pub mod consensus { - /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included - /// into the relay chain. + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the + /// relay chain. pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; - /// How many parachain blocks are processed by the relay chain per parent. Limits the - /// number of blocks authored per slot. + /// How many parachain blocks are processed by the relay chain per parent. Limits the number of + /// blocks authored per slot. pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; /// Relay chain slot duration, in milliseconds. 
pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; diff --git a/cumulus/parachains/common/src/xcm_config.rs b/cumulus/parachains/common/src/xcm_config.rs index 4b0215d672b2e3f8b5602976524edfd01940725b..7a63e720b0797f9cbd05018dbdac6c36926cfc14 100644 --- a/cumulus/parachains/common/src/xcm_config.rs +++ b/cumulus/parachains/common/src/xcm_config.rs @@ -100,6 +100,25 @@ impl, Runtime: parachain_info::C } } +/// Contains all sibling system parachains, including the one where this matcher is used. +/// +/// This structure can only be used at a parachain level. In the Relay Chain, please use +/// the `xcm_builder::IsChildSystemParachain` matcher. +pub struct AllSiblingSystemParachains; + +impl Contains for AllSiblingSystemParachains { + fn contains(l: &MultiLocation) -> bool { + log::trace!(target: "xcm::contains", "AllSiblingSystemParachains location: {:?}", l); + match *l { + // System parachain + MultiLocation { parents: 1, interior: X1(Parachain(id)) } => + ParaId::from(id).is_system(), + // Everything else + _ => false, + } + } +} + /// Accepts an asset if it is a concrete asset from the system (Relay Chain or system parachain). pub struct ConcreteAssetFromSystem(PhantomData); impl> ContainsPair @@ -120,14 +139,31 @@ impl> ContainsPair } } +/// Filter to check if a given location is the parent Relay Chain or a sibling parachain. +/// +/// This type should only be used within the context of a parachain, since it does not verify that +/// the parent is indeed a Relay Chain. +pub struct ParentRelayOrSiblingParachains; +impl Contains for ParentRelayOrSiblingParachains { + fn contains(location: &MultiLocation) -> bool { + matches!( + location, + MultiLocation { parents: 1, interior: Here } | + MultiLocation { parents: 1, interior: X1(Parachain(_)) } + ) + } +} + #[cfg(test)] mod tests { - use frame_support::parameter_types; + use frame_support::{parameter_types, traits::Contains}; use super::{ - ConcreteAssetFromSystem, ContainsPair, GeneralIndex, Here, MultiAsset, MultiLocation, - PalletInstance, Parachain, Parent, + AllSiblingSystemParachains, ConcreteAssetFromSystem, ContainsPair, GeneralIndex, Here, + MultiAsset, MultiLocation, PalletInstance, Parachain, Parent, }; + use polkadot_primitives::LOWEST_PUBLIC_ID; + use xcm::latest::prelude::*; parameter_types! 
{ pub const RelayLocation: MultiLocation = MultiLocation::parent(); @@ -180,4 +216,19 @@ mod tests { ); } } + + #[test] + fn all_sibling_system_parachains_works() { + // system parachain + assert!(AllSiblingSystemParachains::contains(&MultiLocation::new(1, X1(Parachain(1))))); + // non-system parachain + assert!(!AllSiblingSystemParachains::contains(&MultiLocation::new( + 1, + X1(Parachain(LOWEST_PUBLIC_ID.into())) + ))); + // when used at relay chain + assert!(!AllSiblingSystemParachains::contains(&MultiLocation::new(0, X1(Parachain(1))))); + // when used with non-parachain + assert!(!AllSiblingSystemParachains::contains(&MultiLocation::new(1, X1(OnlyChild)))); + } } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/0_init.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/0_init.yml deleted file mode 100644 index fdc1aa258d42ac681c10930b8e6b32858462944d..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/0_init.yml +++ /dev/null @@ -1,145 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: &assets_parachain - wsPort: 9910 - paraId: &ap_id 1000 - penpal_parachain: &penpal_parachain - wsPort: 9920 - paraId: &pp_id 2000 - variables: - common: - xcm_version: &xcm_version 3 - require_weight_at_most: &weight_at_most {refTime: 1000000000, proofSize: 200000} - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - penpal_parachain: - signer: &pp_signer //Alice - decodedCalls: - ap_force_xcm_version: - chain: *assets_parachain - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version # xcmVersion - ] - -tests: - - name: Initialize Chains - its: - - name: XCM supported versions between chains - actions: - - extrinsics: # Relay Chain sets supported version for Asset Parachain - - chain: *relay_chain - sudo: true - signer: *rc_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *ap_id - } - } - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *ap_id }}}, version: *xcm_version } - - extrinsics: # Relay Chain sets supported version for Penpal Parachain - - chain: *relay_chain - sudo: true - signer: *rc_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *pp_id - } - } - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *pp_id }}}, version: *xcm_version } - - extrinsics: # Asset Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 2200000000, - proofSize: 200000 - } - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *weight_at_most, - call: $ap_force_xcm_version - } - } - ] - } - ] - events: - - name: 
sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '1,019,210,000', proofSize: '200,000' }} - } - - name: polkadotXcm.SupportedVersionChanged - chain: *assets_parachain - result: { location: { parents: 1, interior: Here }, version: *xcm_version } - - extrinsics: # Penpal Parachain sets supported version for Relay Chain - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: polkadotXcm.SupportedVersionChanged - result: { location: { parents: 1, interior: Here }, version: *xcm_version } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/1_dmp.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/1_dmp.yml deleted file mode 100644 index 0e207e632a023b83e5e5fac5b374eead16b74e61..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/1_dmp.yml +++ /dev/null @@ -1,263 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: &assets_parachain - wsPort: 9910 - paraId: &ap_id 1000 - variables: - common: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - wallet: &rc_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - assets_parachain_destination: &ap_dest { v3: { parents: 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_account: &ap_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - assets_parachain_beneficiary: &ap_benf { v3: { parents: 0, interior: { x1: { accountId32: { id: *ap_acc }}}}} - ksm: &rc_ksm { concrete: { parents: 0, interior: { here: true }}} - amount: &amount 1000000000000 - ksm_fungible: &rc_ksm_fungible { id: *rc_ksm, fun: { fungible: *amount }} - require_weight_at_most: &rc_weight_at_most { refTime: 1000000000, proofSize: 200000 } - assets_parachain_account: - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - asset_id: &asset_id 1 - asset_min_balance: &asset_ed 1000 - decodedCalls: - force_create_asset: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - -tests: - - name: DMP - its: [] - describes: - - name: xcmPallet.limitedTeleportAssets - before: &before_get_balances - - name: Get the balances of the Relay Chain's sender & Assets Parachain's receiver - actions: - - queries: - balance_rc_sender_before: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_before: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - its: - - name: Should teleport native assets from the Relay Chain to the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '764,772,000', proofSize: 0 }}} - 
- name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '166,944,000', proofSize: 0 }}} - - queries: - balance_rc_sender_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_sender_before, - after: $balance_rc_sender_after, - }, - amount: *amount - } - ] - - - name: Should increase the balance of the receiver - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_ap_receiver_before, - after: $balance_ap_receiver_after, - } - } - ] - - - name: xcmPallet.send | Superuser - Transact(assets.forceCreate) - its: - - name: Relay Chain Superuser account SHOULD be able to execute a XCM Transact instruction in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *rc_weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '1,014,103,000', proofSize: '200,000' }}} - - queries: - forced_created_asset: - chain: *assets_parachain - pallet: assets - call: asset - args: [ *asset_id ] - - asserts: - isSome: - args: [ $forced_created_asset ] - - - name: xcmPallet.send | Native - Transact(assets.forceCreate) - its: - - name: Relay Chain Native account SHOULD NOT be able to execute a XCM Transact instruction in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Native, - requireWeightAtMost: *rc_weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: system.ExtrinsicFailed - result: { dispatchError: BadOrigin } - - - name: xcmPallet.limitedReserveTransferAssets - before: *before_get_balances - its: - - name: SHOULD NOT reserved transfer native assets from the Relay Chain to the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedReserveTransferAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '750,645,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { - outcome: { - Incomplete: [ - { refTime: '1,000,000,000', proofSize: 0 }, - UntrustedReserveLocation - ] - } - } - - queries: - balance_rc_sender_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - - - name: Should reduce the balance of the sender - 
actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_sender_before, - after: $balance_rc_sender_after, - }, - amount: *amount - } - ] - - - name: Should keep the balance of the receiver - actions: - - asserts: - equal: - args: - [ - $balance_ap_receiver_before, - $balance_ap_receiver_after - ] diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/2_ump.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/2_ump.yml deleted file mode 100644 index 2a0bb88090e928dd51db3f92dadab2cdd4eb93be..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/2_ump.yml +++ /dev/null @@ -1,191 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: &assets_parachain - wsPort: 9910 - paraId: &ap_id 1000 - variables: - common: - amount: &amount 1000000000000 - require_weight_at_most: &weight_at_most {refTime: 1000000000, proofSize: 0} - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - wallet: &rc_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F #Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_account: &ap_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - assets_parachain_beneficiary: &ap_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *ap_acc }}}}} - ksm: &rc_ksm { concrete: { 0, interior: { here: true }}} - ksm_fungible: &rc_ksm_fungible { id: *rc_ksm, fun: { fungible: *amount }} - assets_parachain_account: - signer: &ap_signer //Alice - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - relay_chain_destination: &rc_dest { v3: { parents: 1, interior: { here: true }}} - assets_parachain_account: &rc_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' #Alice - relay_chain_beneficiary: &rc_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *rc_acc }}}}} - ksm: &ap_ksm { concrete: { parents: 1, interior: { here: true }}} - ksm_fungible: &ap_ksm_fungible { id: *ap_ksm, fun: { fungible: *amount }} - decodedCalls: - system_remark: - chain: *relay_chain - pallet: system - call: remark - args: [ 0x0011 ] - -tests: - - name: UMP - describes: - - name: polkadotXcm.limitedTeleportAssets - before: - - name: DEPENDENCY | Do a 'limitedTeleportAssets' from the Relay Chain to the Assets Parachain to have funds to send them back - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '761,173,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '166,944,000', proofSize: 0 }}} - - - name: Get the balances of the Assets Parachain's sender & Relay Chain's receiver - actions: - - queries: - balance_ap_sender_before: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - balance_rc_receiver_before: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - its: - - name: Should teleport native assets 
back from Assets Parachain to the Relay Chain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedTeleportAssets - args: [ - *rc_dest, # destination - *rc_benf, # beneficiary - { v3: [ *ap_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '539,494,000', proofSize: '7,133' }}} - - name: messageQueue.Processed - chain: *relay_chain - threshold: *weight_threshold - result: { origin: { Ump: { Para: '1,000' } }, weightUsed: { refTime: '298,716,000', proofSize: '0' }, success: true } - - queries: - balance_ap_sender_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - balance_rc_receiver_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_ap_sender_before, - after: $balance_ap_sender_after, - }, - amount: *amount - } - ] - - - name: Should increase the balance of the receiver - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_rc_receiver_before, - after: $balance_rc_receiver_after, - } - } - ] - - - name: polkadotXcm.send | Native - Transact(system.remark) - its: - - name: Assets Parachain SHOULD NOT be able to dispatch 'send' call - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: send - args: [ - *rc_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Native, - requireWeightAtMost: *weight_at_most, - call: $system_remark - } - } - ] - } - ] - events: - - name: system.ExtrinsicFailed - result: { dispatchError: BadOrigin } - - - name: polkadotXcm.limitedReserveTransferAssets - its: - - name: Should NOT be able to reserve transfer native assets from the Assets Parachain to the Relay Chain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *rc_dest, # destination - *rc_benf, # beneficiary - { v3: [ *ap_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: polkadotXcm.Attempted - result: { outcome: { Error: Barrier }} diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/3_force_hrmp-open-channels.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/3_force_hrmp-open-channels.yml deleted file mode 100644 index dfdae028f00d0d62e8500335d4c11f256a01462d..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/3_force_hrmp-open-channels.yml +++ /dev/null @@ -1,122 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: - wsPort: 9910 - paraId: &ap_id 1000 - penpal_parachain: - wsPort: 9920 - paraId: &pp_id 2000 - variables: - common: - amount: &amount 2000000000000 - hrmp_channels: - proposed_max_capacity: &max_capacity 8 - proposed_max_message_size: &max_message_size 8192 - channel: &channel { - maxCapacity: *max_capacity, - maxTotalSize: *max_message_size, - maxMessageSize: *max_message_size, - msgCount: 0, - totalSize: 0, - mqcHead: null, - senderDeposit: 0, - recipientDeposit: 0 - } - chains: - 
relay_chain: - signer: &rc_signer //Alice - assets_parachain_account: - sovereign_account: &ap_sovereign F7fq1jSNVTPfJmaHaXCMtatT1EZefCUsa7rRiQVNR5efcah - penpal_parachain: - sovereign_account: &pp_sovereign F7fq1jMZkfuCuoMTyiEVAP2DMpMt18WopgBqTJznLihLNbZ - -tests: - - name: HRMP - beforeEach: - - name: DEPENDENCY | Penpal Parachain Sovereign account in the Relay Chain needs to be funded - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: balances - call: transfer - args: [ - *pp_sovereign, # destination - *amount, # value - ] - events: - - name: balances.Transfer - - - name: DEPENDENCY | Assets Parachain Sovereign account in the Relay Chain needs to be funded - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: balances - call: transfer - args: [ - *ap_sovereign, # destination - *amount, # value - ] - events: - - name: balances.Transfer - describes: - - name: hrmp.forceOpenHrmpChannel (Penpal Parachain → Assets Parachain) - its: - - name: Open Penpal Parachain to Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *pp_id, - *ap_id, - *max_capacity, - *max_message_size - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: hrmp.HrmpChannelForceOpened - - - name: hrmp.forceOpenHrmpChannel (Assets Parachain → PenPal Parachain) - its: - - name: Open Assets Parachain to PenPal Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *ap_id, - *pp_id, - *max_capacity, - *max_message_size - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: hrmp.HrmpChannelForceOpened - - - name: hrmp.forceProcessHrmpOpen (make sure all the channels are open) - its: - - name: Make sure all the pending channels are open - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceProcessHrmpOpen - args: [ 2 ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/4_hrmp.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/4_hrmp.yml deleted file mode 100644 index 02e53da75580a95c35eedb7f311eb68758b195af..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/4_hrmp.yml +++ /dev/null @@ -1,388 +0,0 @@ ---- -# Note: This tests depends on the 3_hrmp-open-channels.yml for opening channels, otherwise teleports aren't going to -# work. 
-settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: &assets_parachain - wsPort: 9910 - paraId: &ap_id 1000 - penpal_parachain: &penpal_parachain - wsPort: 9920 - paraId: &pp_id 2000 - variables: - common: - mint_amount: &mint_amount 1000000000000 - amount: &amount 100000000000 - require_weight_at_most: &weight_at_most {refTime: 1200000000, proofSize: 20000} - amount_to_send: &amount_to_send 500000000000 - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_dest_routed: &ap_dest_routed { v3: { parents: 1, interior: { x1: { parachain: *ap_id } }}} - assets_parachain_account: - signer: &ap_signer //Alice - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - asset_id: &asset_id 2 - assets_pallet_id: &assets_pallet_id 50 - asset_min_balance: &asset_ed 1000 - penpal_parachain_destination: &pp_dest { v3: { parents: 1, interior: { x1: { parachain: *pp_id } }}} - ksm: &ap_ksm { concrete: { parents: 1, interior: { here: true }}} - ksm_fungible: &ap_ksm_fungible { id: *ap_ksm, fun: { fungible: *amount }} - suff_asset: &suff_asset { concrete: { parents: 0, interior: { x2: [ { PalletInstance: *assets_pallet_id }, { GeneralIndex: *asset_id } ] }}} - suff_asset_fail: &suff_asset_fail { concrete: { parents: 0, interior: { x2: [ { PalletInstance: *assets_pallet_id }, { GeneralIndex: 3 } ] }}} - suff_asset_fungible_fail: &ap_suff_asset_fungible_fail { id: *suff_asset_fail, fun: { fungible: 200000000000 }} - penpal_parachain: - sovereign_account: &pp_sovereign_sibl FBeL7EAeUroLWXW1yfKboiqTqVfbRBcsUKd6QqVf4kGBySS - signer: &pp_signer //Alice - penpal_parachain_account: &pp_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - decodedCalls: - force_create_asset: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - force_create_asset2: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - -tests: - - name: HRMP - describes: - - name: polkadotXcm.limitedReserveTransferAssets (Asset) | Assets Parachain -> Penpal Parachain - before: - - name: DEPENDENCY | A sufficient Asset should exist in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - SetTopic: '0x0123456789012345678901234567891201234567890123456789012345678912' - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '1,216,703,000', proofSize: '20,000' }}} - - queries: - forced_created_asset: - chain: *assets_parachain - pallet: assets - call: asset - args: [ *asset_id ] - - asserts: - isSome: - args: [ $forced_created_asset ] - - - name: DEPENDENCY | Some Assets should be minted for the sender - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: assets - call: mint - args: [ - 
*asset_id, - *ap_wallet, - *mint_amount - ] - events: - - name: assets.Issued - result: { assetId: *asset_id, owner: *ap_wallet, amount: *mint_amount } - - its: - - name: Assets Parachain should be able to reserve transfer an Asset to Penpal Parachain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *pp_dest, # destination - { # beneficiary - V3: { - parents: 0, - interior: { - X1: { - AccountId32: { - id: *pp_acc - } - } - } - } - }, - { # assets - V3: [ - { - id: { - Concrete: { - parents: 0, - interior: { - X2: [ - { - PalletInstance: *assets_pallet_id - }, - { - GeneralIndex: *asset_id - } - ] - } - } - }, - fun: { - Fungible: *amount_to_send - } - } - ] - }, - 0, # feeAssetItem - Unlimited # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '679,150,000', proofSize: '6,196' }}} - - name: assets.Transferred - result: { - assetId: *asset_id, - from: *ap_wallet, - to: *pp_sovereign_sibl, - amount: *amount_to_send - } - - - name: polkadotXcm.limitedReserveTransferAssets (KSM) | Assets Parachain -> Penpal Parachain - its: - - name: Assets Parachain should be able to reserve transfer KSM to Penpal Parachain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *pp_dest, # destination - { # beneficiary - V3: { - parents: 0, - interior: { - X1: { - AccountId32: { - id: *pp_acc - } - } - } - } - }, - { # assets - V3: [ - *ap_ksm_fungible - ] - }, - 0, # feeAssetItem - Unlimited # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '679,150,000', proofSize: '6,196' }}} - - name: balances.Endowed - result: { - account: *pp_sovereign_sibl, - freeBalance: *amount - } - - - name: polkadotXcm.send( assets.forceCreateAsset ) | Penpal Parachain -> Assets Parachain - before: - - name: Get the asset balance of the Penpal Parachain Sovereign account in Assets Parachain - actions: - - queries: - assets_balance_pp_sovereign_before: - chain: *assets_parachain - pallet: assets - call: account - args: [ - *asset_id, - *pp_sovereign_sibl - ] - its: - - name: Penpal Parachain should be able to send XCM message paying its fee with sufficient asset in Assets Parachain - actions: - - extrinsics: - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: send - args: [ - *ap_dest_routed, # destination - { - v3: [ #message - { - WithdrawAsset: [ - { - id: { - concrete: { - parents: 0, - interior: { - X2: [ - { PalletInstance: *assets_pallet_id }, - { GeneralIndex: *asset_id } - ] - } - } - }, - fun: { fungible: *amount }} - ] - }, - { - BuyExecution: { - fees: { id: *suff_asset, fun: { fungible: *amount }}, - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: SovereignAccount, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset2 - } - }, - { - RefundSurplus - }, - { - DepositAsset: { - assets: { Wild: All }, - beneficiary: { - parents: 0, - interior: { - X1: { - AccountId32: { - network: , # None - id: *pp_acc - } - } - }} - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: polkadotXcm.Sent - - name: assets.Burned - chain: *assets_parachain - result: { assetId: *asset_id, owner: *pp_sovereign_sibl } - - name: assets.Issued - chain: *assets_parachain - result: { 
assetId: *asset_id } - - queries: - assets_balance_pp_sovereign_after: - chain: *assets_parachain - pallet: assets - call: account - args: [ - *asset_id, - *pp_sovereign_sibl - ] - forced_created_asset2: - chain: *assets_parachain - pallet: assets - call: asset - args: [ 3 ] - - asserts: - isSome: - args: [ $forced_created_asset2 ] - - name: Should reduce the assets balance of the Penpal Parachain's SovereignAccount in the Assets Parachain - actions: - - asserts: - assetsDecreased: - args: [ - { - balances: { - before: $assets_balance_pp_sovereign_before, - after: $assets_balance_pp_sovereign_after, - }, - } - ] - - - name: Penpal Parachain SHOULD NOT be able to send XCM message paying its fee with sufficient assets if not enough balance - actions: - - extrinsics: - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: send - args: [ - *ap_dest_routed, # destination - { - v3: [ #message - { - WithdrawAsset: [*ap_suff_asset_fungible_fail] - }, - { - BuyExecution: { - fees: *ap_suff_asset_fungible_fail, - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: SovereignAccount, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset2 - } - } - ] - } - ] - events: - - name: xcmpQueue.Fail - chain: *assets_parachain - threshold: *weight_threshold - result: { - error: FailedToTransactAsset, - weight: { refTime: '152,426,000', proofSize: '3,593' } - } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/config.toml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/config.toml deleted file mode 100644 index 1ec06b3fa10435e5b865e8da0683be24214c426c..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/config.toml +++ /dev/null @@ -1,71 +0,0 @@ -[relaychain] -default_command = "./bin/polkadot" -default_args = [ "-lparachain=debug", "-lxcm=trace" ] -chain = "kusama-local" - - [[relaychain.nodes]] - name = "alice" - ws_port = 9900 - validator = true - args = ["--state-cache-size=0"] - - [[relaychain.nodes]] - name = "bob" - ws_port = 9901 - validator = true - - [[relaychain.nodes]] - name = "charlie" - ws_port = 9902 - validator = true - - [[relaychain.nodes]] - name = "dave" - ws_port = 9903 - validator = true - -[[parachains]] -id = 1000 -chain = "asset-hub-kusama-local" -cumulus_based = true - - [[parachains.collators]] - name = "collator1" - ws_port = 9910 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator2" - ws_port = 9911 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace" ] - -[[parachains]] -id = 2000 -chain = "penpal-kusama-2000" -cumulus_based = true - - [[parachains.collators]] - name = "collator3" - ws_port = 9920 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator4" - ws_port = 9921 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace" ] - -# [[hrmpChannels]] -# sender = 1000 -# recipient = 2000 -# maxCapacity = 8 -# maxMessageSize = 8192 - -# [[hrmpChannels]] -# sender = 2000 -# recipient = 1000 -# maxCapacity = 8 -# maxMessageSize = 8192 diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/0_init.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/0_init.yml deleted file mode 100644 index a6d3fb3ec83439bc9f9bc5e9ae42293573dcf5e3..0000000000000000000000000000000000000000 --- 
a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/0_init.yml +++ /dev/null @@ -1,145 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: &assets_parachain - wsPort: 9810 - paraId: &ap_id 1000 - penpal_parachain: &penpal_parachain - wsPort: 9820 - paraId: &pp_id 2000 - variables: - common: - xcm_version: &xcm_version '3' - require_weight_at_most: &weight_at_most {refTime: 1000000000, proofSize: 200000} - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - penpal_parachain: - signer: &pp_signer //Alice - decodedCalls: - ap_force_xcm_version: - chain: *assets_parachain - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version # xcmVersion - ] - -tests: - - name: Initialize Chains - its: - - name: XCM supported versions between chains - actions: - - extrinsics: # Relay Chain sets supported version for Asset Parachain - - chain: *relay_chain - sudo: true - signer: *rc_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *ap_id - } - } - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *ap_id }}}, version: *xcm_version } - - extrinsics: # Relay Chain sets supported version for Penpal Parachain - - chain: *relay_chain - sudo: true - signer: *rc_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *pp_id - } - } - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *pp_id }}}, version: *xcm_version } - - extrinsics: # Asset Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 3200000000, - proofSize: 200000 - } - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *weight_at_most, - call: $ap_force_xcm_version - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '1,019,210,000', proofSize: '200,000' }} - } - - name: polkadotXcm.SupportedVersionChanged - chain: *assets_parachain - result: { location: { parents: 1, interior: Here }, version: *xcm_version } - - extrinsics: # Penpal Parachain sets supported version for Relay Chain - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: polkadotXcm.SupportedVersionChanged - result: { location: { parents: 1, interior: Here}, version: *xcm_version } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/1_dmp.yml 
b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/1_dmp.yml deleted file mode 100644 index 36b296f3eb1f3f8ed93b9cdc4354fc89b692b50c..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/1_dmp.yml +++ /dev/null @@ -1,263 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: &assets_parachain - wsPort: 9810 - paraId: &ap_id 1000 - variables: - common: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - wallet: &rc_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - assets_parachain_destination: &ap_dest { v3: { parents: 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_account: &ap_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - assets_parachain_beneficiary: &ap_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *ap_acc }}}}} - ksm: &rc_ksm { concrete: { parents: 0, interior: { here: true }}} - amount: &amount 1000000000000 - ksm_fungible: &rc_ksm_fungible { id: *rc_ksm, fun: { fungible: *amount }} - require_weight_at_most: &rc_weight_at_most {refTime: 1000000000, proofSize: 200000} - assets_parachain_account: - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - asset_id: &asset_id 1 - asset_min_balance: &asset_ed 1000 - decodedCalls: - force_create_asset: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - -tests: - - name: DMP - its: [] - describes: - - name: xcmPallet.limitedTeleportAssets - before: &before_get_balances - - name: Get the balances of the Relay Chain's sender & Assets Parachain's receiver - actions: - - queries: - balance_rc_sender_before: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_before: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - its: - - name: Should teleport native assets from the Relay Chain to the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '166,944,000', proofSize: 0 }}} - - queries: - balance_rc_sender_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_sender_before, - after: $balance_rc_sender_after, - }, - amount: *amount - } - ] - - - name: Should increase the balance of the receiver - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_ap_receiver_before, - after: $balance_ap_receiver_after, - } - } - ] - - - name: xcmPallet.send | Superuser - Transact(assets.forceCreate) - its: - - name: Relay Chain 
Superuser account SHOULD be able to execute a XCM Transact instruction in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originType: Superuser, - requireWeightAtMost: *rc_weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '1,014,103,000', proofSize: '200,000' }}} - - queries: - forced_created_asset: - chain: *assets_parachain - pallet: assets - call: asset - args: [ *asset_id ] - - asserts: - isSome: - args: [ $forced_created_asset ] - - - name: xcmPallet.send | Native - Transact(assets.forceCreate) - its: - - name: Relay Chain Native account SHOULD NOT be able to execute a XCM Transact instruction in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originType: Native, - requireWeightAtMost: *rc_weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: system.ExtrinsicFailed - result: { dispatchError: BadOrigin } - - - name: xcmPallet.limitedReserveTransferAssets - before: *before_get_balances - its: - - name: SHOULD NOT reserved transfer native assets from the Relay Chain to the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedReserveTransferAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '2,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { - outcome: { - Incomplete: [ - { refTime: '1,000,000,000', proofSize: 0 }, - UntrustedReserveLocation - ] - } - } - - queries: - balance_rc_sender_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_sender_before, - after: $balance_rc_sender_after, - }, - amount: *amount - } - ] - - - name: Should keep the balance of the receiver - actions: - - asserts: - equal: - args: - [ - $balance_ap_receiver_before, - $balance_ap_receiver_after - ] diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/2_ump.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/2_ump.yml deleted file mode 100644 index fa84d4b006a7cf76019ea895a5da08dc109fb3ee..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/2_ump.yml +++ /dev/null @@ -1,194 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: &assets_parachain - wsPort: 9810 - paraId: &ap_id 1000 - variables: - common: - amount: &amount 
1000000000000 - require_weight_at_most: &weight_at_most {refTime: 1000000000, proofSize: 0} - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - wallet: &rc_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_account: &ap_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - assets_parachain_beneficiary: &ap_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *ap_acc }}}}} - ksm: &rc_ksm { concrete: { 0, interior: { here: true }}} - ksm_fungible: &rc_ksm_fungible { id: *rc_ksm, fun: { fungible: *amount }} - assets_parachain_account: - signer: &ap_signer //Alice - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - relay_chain_destination: &rc_dest { v3: { parents: 1, interior: { here: true }}} - assets_parachain_account: &rc_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - relay_chain_beneficiary: &rc_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *rc_acc }}}}} - ksm: &ap_ksm { concrete: { parents: 1, interior: { here: true }}} - ksm_fungible: &ap_ksm_fungible { id: *ap_ksm, fun: { fungible: *amount }} - decodedCalls: - system_remark: - chain: *relay_chain - pallet: system - call: remark - args: [ 0x0011 ] - -tests: - - name: UMP - describes: - - name: polkadotXcm.limitedTeleportAssets - before: - - name: DEPENDENCY | Do a 'limitedTeleportAssets' from the Relay Chain to the Assets Parachain to have funds to send them back - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '166,944,000', proofSize: 0 }}} - - - name: Get the balances of the Assets Parachain's sender & Relay Chain's receiver - actions: - - queries: - balance_ap_sender_before: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - balance_rc_receiver_before: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - - its: - - name: Should be able to teleport native assets back from Assets Parachain to the Relay Chain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedTeleportAssets - args: [ - *rc_dest, # destination - *rc_benf, # beneficiary - { v3: [ *ap_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '533,283,000', proofSize: '7,096' }}} - - name: messageQueue.Processed - chain: *relay_chain - threshold: *weight_threshold - result: { origin: { Ump: { Para: '1,000' } }, weightUsed: { refTime: '4,000,000,000', proofSize: '0' }, success: true } - - queries: - balance_ap_sender_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - balance_rc_receiver_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] 
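Every weight assertion in these suites is tagged with `threshold: *weight_threshold`, an anchor defined once per file as `{ refTime: [10, 10], proofSize: [10, 10] }`. As I read the parachains-integration-tests tool (not verified against its docs), this is the allowed percentage deviation below and above the recorded `refTime`/`proofSize`, so small runtime weight changes don't break the assertions while the recorded numbers stay exact in the YAML. The recurring shape, copied from this file:

```yaml
variables:
  common:
    weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] }
# ...
events:
  - name: polkadotXcm.Attempted
    threshold: *weight_threshold
    result: { outcome: { Complete: { refTime: '533,283,000', proofSize: '7,096' }}}
```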
- - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_ap_sender_before, - after: $balance_ap_sender_after, - }, - amount: *amount - } - ] - - - name: Should increase the balance of the receiver - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_rc_receiver_before, - after: $balance_rc_receiver_after, - } - } - ] - - - name: polkadotXcm.send | Native - Transact(system.remark) - its: - - name: Assets Parachain SHOULD NOT be able to dispatch 'send' call - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: send - args: [ - *rc_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originType: Native, - requireWeightAtMost: *weight_at_most, - call: $system_remark - } - } - ] - } - ] - events: - - name: system.ExtrinsicFailed - attributes: - - type: SpRuntimeDispatchError - value: BadOrigin - - - name: polkadotXcm.limitedReserveTransferAssets - its: - - name: Should NOT be able to reserve transfer native assets from the Assets Parachain to the Relay Chain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *rc_dest, # destination - *rc_benf, # beneficiary - { v3: [ *ap_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: polkadotXcm.Attempted - result: { outcome: { Error: Barrier }} diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/3_force_hrmp-open-channels.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/3_force_hrmp-open-channels.yml deleted file mode 100644 index ecf344a073b4c55243a087af6704acb554710341..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/3_force_hrmp-open-channels.yml +++ /dev/null @@ -1,120 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: - wsPort: 9810 - paraId: &ap_id 1000 - penpal_parachain: - wsPort: 9820 - paraId: &pp_id 2000 - variables: - common: - amount: &amount 2000000000000 - hrmp_channels: - proposed_max_capacity: &max_capacity 8 - proposed_max_message_size: &max_message_size 8192 - channel: &channel { - maxCapacity: *max_capacity, - maxTotalSize: *max_message_size, - maxMessageSize: *max_message_size, - msgCount: 0, - totalSize: 0, - mqcHead: null, - senderDeposit: 0, - recipientDeposit: 0 - } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_account: - sovereign_account: &ap_sovereign 5Ec4AhPZk8STuex8Wsi9TwDtJQxKqzPJRCH7348Xtcs9vZLJ - penpal_parachain: - sovereign_account: &pp_sovereign F7fq1jMZkfuCuoMTyiEVAP2DMpMt18WopgBqTJznLihLNbZ - -tests: - - name: HRMP - beforeEach: - - name: DEPENDENCY | Penpal Parachain Sovereign account in the Relay Chain needs to be funded - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: balances - call: transfer - args: [ - *pp_sovereign, # destination - *amount, # value - ] - events: - - name: balances.Transfer - - - name: DEPENDENCY | Assets Parachain Sovereign account in the Relay Chain needs to be funded - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: balances - call: transfer - args: [ - *ap_sovereign, # destination - *amount, # value - ] - events: - - name: 
balances.Transfer - describes: - - name: hrmp.hrmpInitOpenChannel (Penpal Parachain → Assets Parachain) - its: - - name: Open Penpal Parachain to Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *pp_id, - *ap_id, - *max_capacity, - *max_message_size - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: hrmp.HrmpChannelForceOpened - - name: hrmp.hrmpInitOpenChannel (Assets Parachain → PenPal Parachain) - its: - - name: Open Assets Parachain to PenPal Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *ap_id, - *pp_id, - *max_capacity, - *max_message_size - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: hrmp.HrmpChannelForceOpened - - name: hrmp.forceProcessHrmpOpen (make sure all the channels are open) - its: - - name: Make sure all the pending channels are open - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceProcessHrmpOpen - args: [ 2 ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/4_hrmp.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/4_hrmp.yml deleted file mode 100644 index 681af698c16da401e580bcf2b12f28f6ba5043a1..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/4_hrmp.yml +++ /dev/null @@ -1,388 +0,0 @@ ---- -# Note: This tests depends on the 3_hrmp-open-channels.yml for opening channels, otherwise teleports aren't going to -# work. 
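The channel-opening dependency this note refers to is the force-open pattern from the 3_force_hrmp-open-channels.yml file deleted just above. A condensed sketch in the same parachains-integration-tests schema (the `*relay_chain` / `*rc_signer` anchors, the 1000/2000 para IDs and the 8 / 8192 channel limits are the values those files use; the rest is illustrative):

```yaml
tests:
  - name: HRMP
    its:
      - name: Relay chain root force-opens the channel and processes pending requests
        actions:
          - extrinsics:
              - chain: *relay_chain
                signer: *rc_signer
                sudo: true
                pallet: hrmp
                call: forceOpenHrmpChannel
                args: [ 2000, 1000, 8, 8192 ] # sender, recipient, maxCapacity, maxMessageSize
                events:
                  - name: sudo.Sudid
                    result: { sudoResult: Ok }
                  - name: hrmp.HrmpChannelForceOpened
          - extrinsics:
              - chain: *relay_chain
                signer: *rc_signer
                sudo: true
                pallet: hrmp
                call: forceProcessHrmpOpen
                args: [ 2 ] # number of pending open requests to process
                events:
                  - name: sudo.Sudid
                    result: { sudoResult: Ok }
```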
-settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: &assets_parachain - wsPort: 9810 - paraId: &ap_id 1000 - penpal_parachain: &penpal_parachain - wsPort: 9820 - paraId: &pp_id 2000 - variables: - common: - mint_amount: &mint_amount 1000000000000 - amount: &amount 1000000000000 - require_weight_at_most: &weight_at_most {refTime: 1200000000, proofSize: 20000} - amount_to_send: &amount_to_send 500000000000 - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_dest_routed: &ap_dest_routed { v3: { parents: 1, interior: { x1: { parachain: *ap_id } }}} - assets_parachain_account: - signer: &ap_signer //Alice - wallet: &ap_wallet 15oF4uVJwmo4TdGW7VfQxNLavjCXviqxT9S1MgbjMNHr6Sp5 - asset_id: &asset_id 2 - assets_pallet_id: &assets_pallet_id 50 - asset_min_balance: &asset_ed 1000 - penpal_parachain_destination: &pp_dest { v3: { parents: 1, interior: { x1: { parachain: *pp_id } }}} - ksm: &ap_ksm { concrete: { parents: 1, interior: { here: true }}} - ksm_fungible: &ap_ksm_fungible { id: *ap_ksm, fun: { fungible: *amount }} - suff_asset: &suff_asset { concrete: { parents: 0, interior: { x2: [ { PalletInstance: *assets_pallet_id }, { GeneralIndex: *asset_id } ] }}} - suff_asset_fail: &suff_asset_fail { concrete: { parents: 0, interior: { x2: [ { PalletInstance: *assets_pallet_id }, { GeneralIndex: 3 } ] }}} - suff_asset_fungible_fail: &ap_suff_asset_fungible_fail { id: *suff_asset_fail, fun: { fungible: 200000000000 }} - penpal_parachain: - sovereign_account: &pp_sovereign_sibl 13cKp89Msu7M2PiaCuuGr1BzAsD5V3vaVbDMs3YtjMZHdGwR - signer: &pp_signer //Alice - penpal_parachain_account: &pp_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - decodedCalls: - force_create_asset: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - force_create_asset2: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - -tests: - - name: HRMP - describes: - - name: polkadotXcm.limitedReserveTransferAssets (Asset) | Assets Parachain -> Penpal Parachain - before: - - name: DEPENDENCY | A sufficient Asset should exist in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - SetTopic: '0x0123456789012345678901234567891201234567890123456789012345678912' - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '1,216,703,000', proofSize: '20,000' }}} - - queries: - forced_created_asset: - chain: *assets_parachain - pallet: assets - call: asset - args: [ *asset_id ] - - asserts: - isSome: - args: [ $forced_created_asset ] - - - name: DEPENDENCY | Some Assets should be minted for the sender - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: assets - call: mint - args: [ - 
*asset_id, - *ap_wallet, - *mint_amount - ] - events: - - name: assets.Issued - result: { assetId: *asset_id, owner: *ap_wallet, amount: *mint_amount } - - its: - - name: Assets Parachain should be able to reserve transfer an Asset to Penpal Parachain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *pp_dest, # destination - { # beneficiary - V3: { - parents: 0, - interior: { - X1: { - AccountId32: { - id: *pp_acc - } - } - } - } - }, - { # assets - V3: [ - { - id: { - Concrete: { - parents: 0, - interior: { - X2: [ - { - PalletInstance: *assets_pallet_id - }, - { - GeneralIndex: *asset_id - } - ] - } - } - }, - fun: { - Fungible: *amount_to_send - } - } - ] - }, - 0, # feeAssetItem - Unlimited # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '673,627,000', proofSize: '6,196' }}} - - name: assets.Transferred - result: { - assetId: *asset_id, - from: *ap_wallet, - to: *pp_sovereign_sibl, - amount: *amount_to_send - } - - - name: polkadotXcm.limitedReserveTransferAssets (KSM) | Assets Parachain -> Penpal Parachain - its: - - name: Assets Parachain should be able to reserve transfer KSM to Penpal Parachain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *pp_dest, # destination - { # beneficiary - V3: { - parents: 0, - interior: { - X1: { - AccountId32: { - id: *pp_acc - } - } - } - } - }, - { # assets - V3: [ - *ap_ksm_fungible - ] - }, - 0, # feeAssetItem - Unlimited # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '679,150,000', proofSize: '6,196' }}} - - name: balances.Endowed - result: { - account: *pp_sovereign_sibl, - freeBalance: *amount - } - - - name: polkadotXcm.send( assets.forceCreateAsset ) | Penpal Parachain -> Assets Parachain - before: - - name: Get the asset balance of the Penpal Parachain Sovereign account in Assets Parachain - actions: - - queries: - assets_balance_pp_sovereign_before: - chain: *assets_parachain - pallet: assets - call: account - args: [ - *asset_id, - *pp_sovereign_sibl - ] - its: - - name: Penpal Parachain should be able to send XCM message paying its fee with sufficient asset in Assets Parachain - actions: - - extrinsics: - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: send - args: [ - *ap_dest_routed, # destination - { - v3: [ #message - { - WithdrawAsset: [ - { - id: { - concrete: { - parents: 0, - interior: { - X2: [ - { PalletInstance: *assets_pallet_id }, - { GeneralIndex: *asset_id } - ] - } - } - }, - fun: { fungible: *amount_to_send }} - ] - }, - { - BuyExecution: { - fees: { id: *suff_asset, fun: { fungible: *amount_to_send }}, - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: SovereignAccount, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset2 - } - }, - { - RefundSurplus - }, - { - DepositAsset: { - assets: { Wild: All }, - beneficiary: { - parents: 0, - interior: { - X1: { - AccountId32: { - network: , # None - id: *pp_acc - } - } - }} - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: polkadotXcm.Sent - - name: assets.Burned - chain: *assets_parachain - result: { assetId: *asset_id, owner: *pp_sovereign_sibl } - - name: assets.Issued - chain: *assets_parachain - 
result: { assetId: *asset_id } - - queries: - assets_balance_pp_sovereign_after: - chain: *assets_parachain - pallet: assets - call: account - args: [ - *asset_id, - *pp_sovereign_sibl - ] - forced_created_asset2: - chain: *assets_parachain - pallet: assets - call: asset - args: [ 3 ] - - asserts: - isSome: - args: [ $forced_created_asset2 ] - - name: Should reduce the assets balance of the Penpal Parachain's SovereignAccount in the Assets Parachain - actions: - - asserts: - assetsDecreased: - args: [ - { - balances: { - before: $assets_balance_pp_sovereign_before, - after: $assets_balance_pp_sovereign_after, - }, - } - ] - - - name: Penpal Parachain SHOULD NOT be able to send XCM message paying its fee with sufficient assets if not enough balance - actions: - - extrinsics: - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: send - args: [ - *ap_dest_routed, # destination - { - v3: [ #message - { - WithdrawAsset: [*ap_suff_asset_fungible_fail] - }, - { - BuyExecution: { - fees: *ap_suff_asset_fungible_fail, - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: SovereignAccount, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset2 - } - } - ] - } - ] - events: - - name: xcmpQueue.Fail - chain: *assets_parachain - threshold: *weight_threshold - result: { - error: FailedToTransactAsset, - weight: { refTime: '152,426,000', proofSize: '3,593' } - } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/config.toml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/config.toml deleted file mode 100644 index da53cd0ad4f23d5405b0c372d5531022dcd8bd2a..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/config.toml +++ /dev/null @@ -1,72 +0,0 @@ -[relaychain] -default_command = "./bin/polkadot" -default_args = [ "-lparachain=debug", "-lxcm=trace" ] -chain = "polkadot-local" - - [[relaychain.nodes]] - name = "alice" - ws_port = 9800 - validator = true - args = ["--state-cache-size=0"] - - [[relaychain.nodes]] - name = "bob" - ws_port = 9801 - validator = true - - [[relaychain.nodes]] - name = "charlie" - ws_port = 9802 - validator = true - - [[relaychain.nodes]] - name = "dave" - ws_port = 9803 - validator = true - -[[parachains]] -id = 1000 -chain = "asset-hub-polkadot-local" -cumulus_based = true - - [[parachains.collators]] - name = "collator1" - ws_port = 9810 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator2" - ws_port = 9811 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace" ] - - -[[parachains]] -id = 2000 -chain = "penpal-polkadot-2000" -cumulus_based = true - - [[parachains.collators]] - name = "collator3" - ws_port = 9820 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator4" - ws_port = 9821 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace" ] - -# [[hrmpChannels]] -# sender = 1000 -# recipient = 2000 -# maxCapacity = 8 -# maxMessageSize = 8192 - -# [[hrmpChannels]] -# sender = 2000 -# recipient = 1000 -# maxCapacity = 8 -# maxMessageSize = 8192 diff --git a/cumulus/parachains/integration-tests/e2e/collectives/README.md b/cumulus/parachains/integration-tests/e2e/collectives/README.md deleted file mode 100644 index 9c4efe7c9504835814e1a693dd8d57710da1bc3b..0000000000000000000000000000000000000000 --- 
a/cumulus/parachains/integration-tests/e2e/collectives/README.md +++ /dev/null @@ -1,26 +0,0 @@ -E2E tests concerning Polkadot Governance and the Collectives Parachain. The tests run by the Parachain Integration Tests -[tool](https://github.com/paritytech/parachains-integration-tests/). - -# Requirements -The tests require some changes to the regular production runtime builds: - -## RelayChain runtime -1. Alice has SUDO -2. Public Referenda `StakingAdmin`, `FellowshipAdmin` tracks settings (see the corresponding keys of the `TRACKS_DATA` - constant in the `governance::tracks` module of the Relay Chain runtime crate): -``` yaml -prepare_period: 5 Block, -decision_period: 1 Block, -confirm_period: 1 Block, -min_enactment_period: 1 Block, -``` - -## Collectives runtime -1. Fellowship Referenda `Fellows` track settings (see the corresponding key of the `TRACKS_DATA` constant in the - `fellowship::tracks` module of the Collectives runtime crate): -``` yaml -prepare_period: 5 Block, -decision_period: 1 Block, -confirm_period: 1 Block, -min_enactment_period: 1 Block, -``` diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/0_init.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/0_init.yml deleted file mode 100644 index 33f4d603e2a74b1275186fd5f237099d0c531648..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/0_init.yml +++ /dev/null @@ -1,166 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - assethub_parachain: &assethub_parachain - wsPort: 9810 - paraId: &sp_id 1000 - variables: - xcm_version: &xcm_version 3 - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - accounts: - alice_signer: &alice_signer //Alice - decodedCalls: - ap_force_xcm_version: - chain: *collectives_parachain - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version - ] - -tests: - - name: Initialize Chains - its: - - name: XCM supported versions between chains - actions: - - extrinsics: # Relay Chain sets supported version for Collectives Parachain - - chain: *relay_chain - sudo: true - signer: *alice_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *cp_id - } - } - }, - *xcm_version - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *cp_id }}}, version: *xcm_version } - - extrinsics: # Collectives Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 2200000000, # 2_200_000_000 - proofSize: 200000, # 200_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 200000000, # 200_000_000 - proofSize: 0, - }, - call: $ap_force_xcm_version - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - - name: polkadotXcm.SupportedVersionChanged - chain: *collectives_parachain - result: { location: { parents: 1, interior: 
Here }, version: *xcm_version } - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '2,200,000,000', proofSize: 0 }}} - - extrinsics: # Relay Chain sets supported version for AssetHub Parachain - - chain: *relay_chain - sudo: true - signer: *alice_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *sp_id - } - } - }, - *xcm_version - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *sp_id } } }, version: *xcm_version } - - extrinsics: # AssetHub Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { 0, interior: { x1: { parachain: *sp_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 2200000000, # 2_200_000_000 - proofSize: 200000, # 200_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 200000000, # 200_000_000 - proofSize: 0, - }, - call: $ap_force_xcm_version - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - - name: polkadotXcm.SupportedVersionChanged - chain: *assethub_parachain - result: { location: { parents: 1, interior: Here }, version: *xcm_version } - - name: dmpQueue.ExecutedDownward - chain: *assethub_parachain - result: { outcome: { Complete: {} } } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/1_teleport.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/1_teleport.yml deleted file mode 100644 index cda04859b195a6158f7ca97a0d34721d3287de76..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/1_teleport.yml +++ /dev/null @@ -1,168 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - accounts: - alice_signer: &acc_alice_signer //Alice - alice_account32: &acc_alice_acc32 '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - alice_ss58: &acc_alice_ss58 '15oF4uVJwmo4TdGW7VfQxNLavjCXviqxT9S1MgbjMNHr6Sp5' - checking_account: &checking_account '13UVJyLnbVp9x5XDyJv8g8r3UddNwBrdaH7AADCmw9XQWvYW' - -tests: - - name: Teleport assets from Relay Chain to Collectives Parachain successful. - before: - - name: Get the Alice balances on Relay & Collectives Chains. 
- actions: - - queries: - balance_rc_alice_1: - chain: *relay_chain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - balance_cp_alice_1: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - its: - - name: Teleport assets from Relay Chain to Collectives Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - pallet: xcmPallet - call: teleportAssets - args: [ - { v3: { 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { v3: { parents: 0, interior: { x1: { accountId32: { id: *acc_alice_acc32 }}}}}, # beneficiary - { - v3: [ - # { - # # TODO use a separate Assets to pay a fee, to receive an exact amount of assets on beneficiary account. - # # a call with two assets fails with an error right now. - # id: { concrete: { 0, interior: { here: true }}}, - # fun: { fungible: 1000000000000 } # 1_000_000_000_000 - # }, - { - id: { concrete: { 0, interior: { here: true }}}, - fun: { fungible: 20000000000000 } # 20_000_000_000_000 - } - ] - }, # assets - 0, # feeAssetItem - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '4,000,000,000', proofSize: 0 }}} - - queries: - balance_rc_alice_2: - chain: *relay_chain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - balance_cp_alice_2: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - - name: Alice deposit check, balance decreased on Relay Chain, increased on Collectives. - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_alice_1, - after: $balance_rc_alice_2, - } - } - ] - balanceIncreased: - args: [ - { - balances: { - before: $balance_cp_alice_1, - after: $balance_cp_alice_2, - } - } - ] - - - name: Teleport assets from Collectives Parachain to Relay Chain successful - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *acc_alice_signer - pallet: polkadotXcm - call: teleportAssets - args: [ - { v3: { parents: 1, interior: { here: true }}}, # destination - { v3: { parents: 0, interior: { x1: { accountId32: { id: *acc_alice_acc32 }}}}}, # beneficiary - { - v3: [ - { - id: { concrete: { parents: 1, interior: { here: true }}}, - fun: { fungible: 10000000000000 } # 10_000_000_000_000 - } - ] - }, # assets - 0, # feeAssetItem - ] - events: - - name: balances.Withdraw - result: { who: *acc_alice_ss58, amount: 10000000000000 } - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }}} - - name: balances.Withdraw - chain: *relay_chain - result: { who: *checking_account, amount: 10000000000000 } # amount received and withdrawn from registry account - - name: messageQueue.Processed - chain: *relay_chain - threshold: *weight_threshold - result: { origin: { Ump: { Para: *cp_id } }, weightUsed: { refTime: '4,000,000,000', proofSize: '0' }, success: true } - - queries: - balance_rc_alice_3: - chain: *relay_chain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - balance_cp_alice_3: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - - - name: Alice deposit check, balance decreased on Collectives, increased on Relay Chain. 
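The balance checks in this file use the loose form of the `balanceDecreased` / `balanceIncreased` asserts, without an expected amount. The DMP and UMP suites deleted above also pass the expected `amount` for a stricter check; a sketch of that variant, reusing this test's own query results (the literal is the 10_000_000_000_000 teleported back just above):

```yaml
- asserts:
    balanceDecreased:
      args: [
        {
          balances: {
            before: $balance_cp_alice_2,
            after: $balance_cp_alice_3,
          },
          amount: 10000000000000 # amount teleported back; fees come on top
        }
      ]
```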
- actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_cp_alice_2, - after: $balance_cp_alice_3, - } - } - ] - balanceIncreased: - args: [ - { - balances: { - before: $balance_rc_alice_2, - after: $balance_rc_alice_3, - } - } - ] -# TODO (P2) assert Alice balance before and after teleport (see example in kick_member test) -# TODO (P1) test: teleport of non relay chain assets fails diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/2_reserve.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/2_reserve.yml deleted file mode 100644 index bd17f07524a2ddff5e26fe06057fa7dbd5a288be..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/2_reserve.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - accounts: - alice_signer: &alice_signer //Alice - alice_account32: &alice_acc32 '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - -tests: - - name: Reserve assets from Relay Chain to Collectives Parachain fails - its: - - name: Reserve assets from Relay Chain to Collectives Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: xcmPallet - call: reserveTransferAssets - args: [ - { v3: { 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { v3: { parents: 0, interior: { x1: { accountId32: { id: *alice_acc32 }}}}}, # beneficiary - { - v3: [ - { - id: { concrete: { 0, interior: { here: true }}}, - fun: { fungible: 20000000000000 } # 20_000_000_000_000 - } - ] - }, # assets - 0, # feeAssetItem - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '2,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { - outcome: { - Incomplete: [ - { refTime: '1,000,000,000', proofSize: 0 }, - UntrustedReserveLocation - ] - } - } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml deleted file mode 100644 index 17a16d9ccd7da8e0d7fc53973987bb0ed117705b..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml +++ /dev/null @@ -1,69 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - assethub_parachain: &assethub_parachain - wsPort: 9810 - paraId: &sp_id 1000 - variables: - chains: - accounts: - alice_signer: &alice_signer //Alice - hrmp: - proposed_max_capacity: &hrmp_proposed_max_capacity 8 - proposed_max_message_size: &hrmp_proposed_max_message_size 8192 -tests: - - name: HRMP - describes: - - name: Force Open HRMP Channel From Collectives Parachain → AssetHub Parachain - its: - - name: Alice calls hrmp.forceOpenHrmpChannel - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *cp_id, # sender - *sp_id, # recipient - 
*hrmp_proposed_max_capacity, # proposedMaxCapacity - *hrmp_proposed_max_message_size # proposedMaxMessageSize - ] - events: - - name: hrmp.HrmpChannelForceOpened - result: { - sender: *cp_id, - recipient: *sp_id, - proposed_max_capacity: *hrmp_proposed_max_capacity, - proposed_max_message_size: *hrmp_proposed_max_message_size - } - - name: Force Open HRMP Channel From AssetHub Parachain → Collectives Parachain - its: - - name: Alice calls hrmp.forceOpenHrmpChannel - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *sp_id, # sender - *cp_id, # recipient - *hrmp_proposed_max_capacity, # proposedMaxCapacity - *hrmp_proposed_max_message_size # proposedMaxMessageSize - ] - events: - - name: hrmp.HrmpChannelForceOpened - result: { - sender: *sp_id, - recipient: *cp_id, - proposed_max_capacity: *hrmp_proposed_max_capacity, - proposed_max_message_size: *hrmp_proposed_max_message_size - } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/0_join_alliance_fails.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/0_join_alliance_fails.yml deleted file mode 100644 index 9aff8b1db1023ec8bb011d60d2eaf2dd76886f5f..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/0_join_alliance_fails.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -settings: - chains: - relay_chain: - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - variables: - accounts: - alice_signer: &alice_signer //Alice - -tests: - - name: Alice fails to join an the Alliance, since it is not initialized yet. - its: - - name: Alice joins alliance - actions: - - extrinsics: # Relay Chain sets supported version for Asset Parachain - - chain: *collectives_parachain - signer: *alice_signer - pallet: alliance - call: joinAlliance - args: [] - events: - - name: system.ExtrinsicFailed - result: { - dispatchError: { Module: { index: 50, error: '0x00000000' }} - } - # TODO assert with Alliance Error variant - alliance.AllianceNotYetInitialized - # issue - https://github.com/paritytech/parachains-integration-tests/issues/59 diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/1_init_alliance.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/1_init_alliance.yml deleted file mode 100644 index 1e01c701744a82cfb6ff1ad716bd1bcb91351108..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/1_init_alliance.yml +++ /dev/null @@ -1,256 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &coll_para_id 1001 - variables: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - accounts: - alice_signer: &acc_alice_signer //Alice - liam_account32: &acc_liam_acc32 "0x3614671a5de540d891eb8c4939c8153a4aa790602b347c18177b86d0fc546221" # //Liam - olivia_account32: &acc_olivia_acc32 "0x24ee8a659c6716fe9f7cb4e9e028602aa12867654ca02737da9171b7ff697d5c" # //Olivia - noah_account32: &acc_noah_acc32 "0x9c6ad3bc3aa2f1b2e837898e6da9980445f7ef8b3eee0b8c8e305f8cfae68517" # //Noah - emma_account32: &acc_emma_acc32 "0x8ac272b333ba1127c8db57fa777ec820b24598a236efa648caf0d26d86f64572" # //Emma - james_account32: &acc_james_acc32 
"0x9a52805151a0b5effc084af9264011139872a21a3950cb9ae0b2955c4bf92c18" # //James - ava_account32: &acc_ava_acc32 "0x348ef0b8776adbc09c862ddc29b1d193b9e24738e54eea3b0609c83856dc101c" # //Ava - mia_account32: &acc_mia_acc32 "0xaebf15374cf7e758d10232514c569a7abf81cc1b8f1e81a73dbc608a0e335264" # //Mia - decodedCalls: - init_alliance_members: - chain: *collectives_parachain - pallet: alliance - call: initMembers - args: [ - [ - *acc_liam_acc32, - *acc_olivia_acc32, - *acc_noah_acc32, - *acc_emma_acc32, - *acc_james_acc32, - *acc_ava_acc32 - ], - [ - *acc_mia_acc32 - ] - ] - init_alliance_voting_members: - chain: *collectives_parachain - pallet: alliance - call: initMembers - args: [ - [ - *acc_liam_acc32, - *acc_olivia_acc32, - *acc_noah_acc32, - *acc_emma_acc32, - *acc_james_acc32, - *acc_ava_acc32, - *acc_mia_acc32 - ], - [] - ] - disband: - chain: *collectives_parachain - pallet: alliance - call: disband - args: [ - { - fellowMembers: 6, - allyMembers: 1 - } - ] - -tests: - - name: Alliance initiated with the root call, second init call fails. Alliance disband and set again. - its: - - name: Alliance initiated, founders and fellows are set. - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *coll_para_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 3000000000, # 3_000_000_000 - proofSize: 2000000, # 2_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 1000000000, # 1_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - call: $init_alliance_members - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *coll_para_id }}}} - - name: alliance.MembersInitialized - chain: *collectives_parachain - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: '1,000,000' }}} - - - name: Alliance init call fails. - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *coll_para_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 3000000000, # 3_000_000_000 - proofSize: 2000000, # 2_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 1000000000, # 1_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - call: $init_alliance_voting_members - } - } - ] - } - ] - events: - # TODO can not currently assert variant AllianceAlreadyInitialized, XCM Transact fails silently - # issue - https://github.com/paritytech/polkadot/issues/4623 - # Next test with a disband call will fail, if this call does not fail, - # since a witness data from a disband call will be invalid. 
- - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *coll_para_id }}}} - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: '1,000,000' }}} - - - name: Alliance disbanded and initialized again. - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *coll_para_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 5000000000, # 3_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 3000000000, # 3_000_000_000 - proofSize: 200000, # 200_000 - }, - call: $disband - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *coll_para_id }}}} - - name: alliance.AllianceDisbanded - chain: *collectives_parachain - result: { fellowMembers: 6, allyMembers: 1, unreserved: 0 } - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,321,495,872', proofSize: '181,779' }}} - - name: Alliance initiated, founders and fellows are set. - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *coll_para_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 3000000000, # 3_000_000_000 - proofSize: 2000000, # 2_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 1000000000, # 1_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - call: $init_alliance_members - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *coll_para_id }}}} - - name: alliance.MembersInitialized - chain: *collectives_parachain - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: '1,000,000' }}} diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/2_join_alliance_fails.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/2_join_alliance_fails.yml deleted file mode 100644 index 2afdadae60224f67b5aede4798bc8c48387b6336..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/2_join_alliance_fails.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -settings: - chains: - relay_chain: - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: 1001 - variables: - accounts: - liam_signer: &acc_liam_signer //Liam - -tests: - - name: Liam fails to join an the Alliance, Liam is already a member. 
- its: - - name: Alice joins alliance - actions: - - extrinsics: # Relay Chain sets supported version for Asset Parachain - - chain: *collectives_parachain - signer: *acc_liam_signer - pallet: alliance - call: joinAlliance - args: [] - events: - - name: system.ExtrinsicFailed - result: { - dispatchError: { Module: { index: 50, error: '0x02000000' }} - } - # TODO assert with Alliance Error variant - alliance.AllianceNotYetInitialized - # issue - https://github.com/paritytech/parachains-integration-tests/issues/59 diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/3_kick_member.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/3_kick_member.yml deleted file mode 100644 index a5941cb47234ee2866a095feb7198ed7b88f6860..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/3_kick_member.yml +++ /dev/null @@ -1,175 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - init_teleport_amount: &init_teleport_amount 20000000000000 # 20_000_000_000_000 - accounts: - alice_signer: &acc_alice_signer //Alice - treasury_account32: &acc_treasury_acc32 '0x6d6f646c70792f74727372790000000000000000000000000000000000000000' - alice_account32: &acc_alice_acc32 '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - alice_ss58: &acc_alice_ss58 '15oF4uVJwmo4TdGW7VfQxNLavjCXviqxT9S1MgbjMNHr6Sp5' - decodedCalls: - alliance_kick_member: - chain: *collectives_parachain - pallet: alliance - call: kickMember - args: [ - {Id: *acc_alice_acc32} - ] - -tests: - - name: Member kicked out, deposited assets slashed and teleported to Relay Chain treasury. 
- before: - - name: DEPENDENCY | Do a 'limitedTeleportAssets' from the Relay Chain to the Collectives Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - { v3: { 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { v3: { parents: 0, interior: { x1: { accountId32: { id: *acc_alice_acc32 }}}}}, # beneficiary - { v3: [ { id: { concrete: { 0, interior: { here: true }}}, fun: { fungible: *init_teleport_amount }} ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }} - } - - name: balances.Deposit - chain: *collectives_parachain - result: { who: *acc_alice_ss58 } - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '4,000,000,000', proofSize: 0 }} - } - - name: Get the balances of the Relay Chain's treasury & Collectives parachain's future alliance member - actions: - - queries: - balance_rc_treasury_before: - chain: *relay_chain - pallet: system - call: account - args: [ *acc_treasury_acc32 ] - balance_cp_alice_before: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - its: - - name: Alice joins alliance - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *acc_alice_signer - pallet: alliance - call: joinAlliance - args: [] - events: - - name: balances.Reserved - chain: *collectives_parachain - result: { who: *acc_alice_ss58, amount: 10000000000000 } - - name: alliance.NewAllyJoined - result: {ally: *acc_alice_ss58, reserved: 10000000000000 } - - queries: - balance_cp_alice_after: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - - name: Alice deposit check, balance decreased - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_cp_alice_before, - after: $balance_cp_alice_after, - } - # TODO (P3) set `amount` and `fee` for more strict assert - } - ] - - name: Kick Alice from alliance - actions: - - extrinsics: # Asset Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 4000000000, # 4_000_000_000 - proofSize: 2000000, # 2_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 2000000000, # 2_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - call: $alliance_kick_member - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *cp_id }}}} - - name: alliance.MemberKicked - chain: *collectives_parachain - result: { member: *acc_alice_ss58, slashed: 10000000000000 } - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '4,000,000,000', proofSize: '1,000,000' }} - } - - name: messageQueue.Processed - result: { origin: { Ump: { Para: *cp_id }}, success: true } - - - queries: - balance_rc_treasury_after: - chain: *relay_chain - 
pallet: system - call: account - args: [ *acc_treasury_acc32 ] - - name: Slashed balance appears on the relay chain treasury account - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_rc_treasury_before, - after: $balance_rc_treasury_after, - } - # TODO (P3) set `amount` and `fee` for more strict assert - } - ] diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/2_opengov/0_assethub.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/2_opengov/0_assethub.yml deleted file mode 100644 index c53efff51fbfb218bd8bf57d4876084bbaccb30d..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/2_opengov/0_assethub.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - assethub_parachain: &assethub_parachain - wsPort: 9810 - paraId: &ap_id 1000 - variables: - proposal_index: &proposal_index 0 - chains: - accounts: - alice_signer: &alice_signer //Alice - bob_signer: &bob_signer //Bob - decodedCalls: - set_candidates_ap: - chain: *assethub_parachain - encode: true - pallet: collatorSelection - call: setDesiredCandidates - args: [ - 3 - ] - send_set_candidates_rc: - chain: *relay_chain - encode: false - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *ap_id }}}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 200000000, # 200_000_000 - proofSize: 100000, # 100_000 - }, - call: $set_candidates_ap - } - } - ] - } - ] -tests: - - name: OpenGov - describes: - - name: Set desired candidates on AssetHub from Relay Chain OpenGov Staking track - its: - - name: Note preimage from xcm send set_desired_candidates call - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $send_set_candidates_rc - ] - events: - - name: preimage.Noted - result: {hash_: $send_set_candidates_rc.hash } - - name: Submit a proposal to set desired candidates - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: referenda - call: submit - args: [ - { - "Origins": "StakingAdmin", - }, - { - "Lookup": { - "hash_": $send_set_candidates_rc.hash, - "len": $send_set_candidates_rc.len, - }, - }, - { - "After": 1, - }, - ] - events: - - name: referenda.Submitted - result: { - index: *proposal_index, - proposal: { Lookup: { hash_: $send_set_candidates_rc.hash, len: $send_set_candidates_rc.len }} - } - - name: Alice Vote Aye - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: convictionVoting - call: vote - args: [ - *proposal_index, - { - "Standard": { - "vote": { - "aye": true, - "conviction": "Locked1x", - }, - "balance": 200000000000000, - } - }, - ] # TODO no event to catch https://github.com/paritytech/substrate/issues/14687 - - name: Bob Vote Aye - actions: - - extrinsics: - - chain: *relay_chain - signer: *bob_signer - pallet: convictionVoting - call: vote - args: [ - *proposal_index, - { - "Standard": { - "vote": { - "aye": true, - "conviction": "Locked1x", - }, - "balance": 200000000000000, - } - }, - ] # TODO no event to catch https://github.com/paritytech/substrate/issues/14687 - - name: Submit the decision deposit - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: referenda - call: 
placeDecisionDeposit - args: [ - *proposal_index, - ] - events: - - name: referenda.DecisionDepositPlaced - result: { index: *proposal_index } - - name: collatorSelection.NewDesiredCandidates - chain: *assethub_parachain - result: { desiredCandidates: 3 } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/0_init.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/0_init.yml deleted file mode 100644 index 1e4b2dabe21178d3b7c1bd6087c9015665ae93e0..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/0_init.yml +++ /dev/null @@ -1,209 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - proposal_index: &proposal_index 1 - chains: - accounts: - alice_signer: &alice_signer //Alice - bob_signer: &bob_signer //Bob - alice_account32: &acc_alice_acc32 '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - alice_ss58: &acc_alice_ss58 '15oF4uVJwmo4TdGW7VfQxNLavjCXviqxT9S1MgbjMNHr6Sp5' - decodedCalls: - fellowship_induct_alice_cp: - chain: *collectives_parachain - encode: true - pallet: fellowshipCore - call: induct - args: [ - *acc_alice_acc32 - ] - fellowship_promote_1_alice_cp: - chain: *collectives_parachain - encode: true - pallet: fellowshipCore - call: promote - args: [ - *acc_alice_acc32, - 1 - ] - fellowship_promote_2_alice_cp: - chain: *collectives_parachain - encode: true - pallet: fellowshipCore - call: promote - args: [ - *acc_alice_acc32, - 2 - ] - fellowship_promote_3_alice_cp: - chain: *collectives_parachain - encode: true - pallet: fellowshipCore - call: promote - args: [ - *acc_alice_acc32, - 3 - ] - send_init_fellowship_rc: - chain: *relay_chain - encode: false - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { # since batch_all not yet allowed over xcm, we have to send multiple `Transact`. 
- Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 1500000000, # 1_500_000_000 - proofSize: 10000, # 10_000 - }, - call: $fellowship_induct_alice_cp - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 1500000000, # 1_500_000_000 - proofSize: 10000, # 10_000 - }, - call: $fellowship_promote_1_alice_cp - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 1500000000, # 1_500_000_000 - proofSize: 10000, # 10_000 - }, - call: $fellowship_promote_2_alice_cp - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 1500000000, # 1_500_000_000 - proofSize: 10000, # 10_000 - }, - call: $fellowship_promote_3_alice_cp - } - } - ] - } - ] - -tests: - - name: Fellowship - describes: - - name: Init the Fellowship - its: - - name: Note preimage from init fellowship call - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $send_init_fellowship_rc - ] - events: - - name: preimage.Noted - result: { hash_: $send_init_fellowship_rc.hash } - - name: Submit a proposal to init the Fellowship - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: referenda - call: submit - args: [ - { - "Origins": "FellowshipAdmin", - }, - { - "Lookup": { - "hash_": $send_init_fellowship_rc.hash, - "len": $send_init_fellowship_rc.len, - }, - }, - { - "After": 1, - }, - ] - events: - - name: referenda.Submitted - result: { - index: *proposal_index, - proposal: { Lookup: { hash_: $send_init_fellowship_rc.hash, len: $send_init_fellowship_rc.len }} - } - - name: Alice Vote Aye - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: convictionVoting - call: vote - args: [ - *proposal_index, - { - "Standard": { - "vote": { - "aye": true, - "conviction": "Locked1x", - }, - "balance": 200000000000000, - } - }, - ] # TODO no Aye event to catch https://github.com/paritytech/substrate/issues/14687 - - name: Bob Vote Aye - actions: - - extrinsics: - - chain: *relay_chain - signer: *bob_signer - pallet: convictionVoting - call: vote - args: [ - *proposal_index, - { - "Standard": { - "vote": { - "aye": true, - "conviction": "Locked1x", - }, - "balance": 200000000000000, - } - }, - ] # TODO no Aye event to catch https://github.com/paritytech/substrate/issues/14687 - - name: Submit the decision deposit - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: referenda - call: placeDecisionDeposit - args: [ - *proposal_index, - ] - events: - - name: referenda.DecisionDepositPlaced - result: { index: *proposal_index } - - name: fellowshipCollective.MemberAdded - chain: *collectives_parachain - result: { who: *acc_alice_ss58 } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/1_whitelist_call.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/1_whitelist_call.yml deleted file mode 100644 index 5991c7ae2f8a2d44c87ab7d4bf8d8501b790f254..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/1_whitelist_call.yml +++ /dev/null @@ -1,146 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - fellows_proposal_index: &fellows_proposal_index 0 - chains: - accounts: - alice_signer: &alice_signer //Alice - 
decodedCalls: - remark_rc: - chain: *relay_chain - encode: false - pallet: system - call: remark - args: [ - "0x10" - ] - whitelist_remark_rc: - chain: *relay_chain - encode: true - pallet: whitelist - call: whitelistCall - args: [ - $remark_rc.hash - ] - send_whitelist_remark_cp: - chain: *collectives_parachain - encode: false - pallet: polkadotXcm - call: send - args: [ - { v3: { parents: 1, interior: { here: true }}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 500000000, # 500_000_000 - proofSize: 20000, # 20_000 - }, - call: $whitelist_remark_rc - } - } - ] - } - ] - -tests: - - name: Fellowship - describes: - - name: The Fellowship white list the call - its: - - name: Note preimage from the whitelist call on the Relay Chain - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $remark_rc - ] - events: - - name: preimage.Noted - result: { hash_: $remark_rc.hash } - - name: Note preimage from the xcm send call to white list the call above - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $send_whitelist_remark_cp, - ] - events: - - name: preimage.Noted - result: { hash_: $send_whitelist_remark_cp.hash } - - name: Submit a proposal to while list the call - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipReferenda - call: submit - args: [ - { - "FellowshipOrigins": "Fellows", - }, - { - "Lookup": { - "hash_": $send_whitelist_remark_cp.hash, - "len": $send_whitelist_remark_cp.len, - }, - }, - { - "After": 1, - }, - ] - events: - - name: fellowshipReferenda.Submitted - result: { - index: *fellows_proposal_index, - proposal: { Lookup: { hash_: $send_whitelist_remark_cp.hash, len: $send_whitelist_remark_cp.len}} - } - - name: Vote Aye - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipCollective - call: vote - args: [ - *fellows_proposal_index, - true, - ] - events: - - name: fellowshipCollective.Voted - result: { poll: *fellows_proposal_index, vote: { Aye: 1 } } - - name: Submit the decision deposit - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipReferenda - call: placeDecisionDeposit - args: [ - *fellows_proposal_index, - ] - events: - - name: fellowshipReferenda.DecisionDepositPlaced - result: {index: *fellows_proposal_index} - - name: whitelist.CallWhitelisted - chain: *relay_chain - result: { callHash: $remark_rc.hash } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/2_assethub.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/2_assethub.yml deleted file mode 100644 index c0805594808cfe6cc900080953122f12174ad528..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/2_assethub.yml +++ /dev/null @@ -1,126 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - assethub_parachain: &assethub_parachain - wsPort: 9810 - paraId: &ap_id 1000 - variables: - fellows_proposal_index: &fellows_proposal_index 1 - chains: - accounts: - alice_signer: &alice_signer //Alice - - 
decodedCalls: - xcmp_resume_execution_ap: - chain: *assethub_parachain - encode: true - pallet: xcmpQueue - call: resumeXcmExecution - args: [] - send_xcmp_resume_execution_cp: - chain: *collectives_parachain - encode: false - pallet: polkadotXcm - call: send - args: [ - { v3: { parents: 1, interior: { x1: { parachain: *ap_id }}}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 300000000, # 300_000_000 - proofSize: 10000, # 10_000 - }, - call: $xcmp_resume_execution_ap - } - } - ] - } - ] - -tests: - - name: Fellowship - describes: - - name: The Fellowship resume xcm execution for the xcmp queue on AssetHub - its: - - name: Note preimage from the xcm send call to suspend_xcm_execution - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $send_xcmp_resume_execution_cp - ] - events: - - name: preimage.Noted - result: {hash_: $send_xcmp_resume_execution_cp.hash } - - name: Submit a proposal to resume xcm execution on AssetHub - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipReferenda - call: submit - args: [ - { - "FellowshipOrigins": "Fellows", - }, - { - "Lookup": { - "hash_": $send_xcmp_resume_execution_cp.hash, - "len": $send_xcmp_resume_execution_cp.len, - }, - }, - { - "After": 1, - }, - ] - events: - - name: fellowshipReferenda.Submitted - result: { - index: 1, - proposal: {Lookup: {hash_: $send_xcmp_resume_execution_cp.hash, len: $send_xcmp_resume_execution_cp.len}} - } - - name: Vote Aye - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipCollective - call: vote - args: [ - *fellows_proposal_index, - true, - ] - events: - - name: fellowshipCollective.Voted - result: { poll: *fellows_proposal_index, vote: { Aye: 1 } } - - name: Submit the decision deposit - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipReferenda - call: placeDecisionDeposit - args: [ - *fellows_proposal_index, - ] - events: - - name: fellowshipReferenda.DecisionDepositPlaced - result: {index: *fellows_proposal_index} - - name: xcmpQueue.Success - chain: *assethub_parachain diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/config.toml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/config.toml deleted file mode 100644 index 20fda92bd08f5dd067d40e197ee7ea7dd151522b..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/config.toml +++ /dev/null @@ -1,42 +0,0 @@ -[relaychain] -default_command = "./bin/polkadot" -default_args = [ "-lparachain=trace", "-lxcm=trace" ] -chain = "polkadot-local" - - [[relaychain.nodes]] - name = "alice" - ws_port = 9700 - validator = true - args = ["--state-cache-size=0"] - - [[relaychain.nodes]] - name = "bob" - ws_port = 9701 - validator = true - - [[relaychain.nodes]] - name = "charlie" - ws_port = 9702 - validator = true - - [[relaychain.nodes]] - name = "dave" - ws_port = 9703 - validator = true - -[[parachains]] -id = 1001 -chain = "collectives-polkadot-local" -cumulus_based = true - - [[parachains.collators]] - name = "collator1" - ws_port = 9710 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator2" - ws_port 
= 9711 - command = "./bin/polkadot-parachain" - args = ["-lxcm=trace"] diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index dbf7e9c9a700556f4e5b1da69c7373a0b84f9e9e..1596169efbeeeded0d40a380c1c3c9654636c233 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" description = "Asset Hub Rococo emulated chain" publish = false +[lints] +workspace = true + [dependencies] serde_json = "1.0.104" diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 0580d61eae9b2a961eedd99c361f3dbe594ee589..00f412564205507f2deb6b516ace435302fcf4c4 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -21,7 +21,8 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, + impl_xcm_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; @@ -37,6 +38,7 @@ decl_test_parachains! { XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, + MessageOrigin: cumulus_primitives_core::AggregateMessageOrigin, }, pallets = { PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, @@ -52,4 +54,6 @@ decl_test_parachains! 
{ // AssetHubRococo implementation impl_accounts_helpers_for_parachain!(AssetHubRococo); impl_assert_events_helpers_for_parachain!(AssetHubRococo); -impl_assets_helpers_for_system_parachain!(AssetHubRococo, Rococo); +impl_assets_helpers_for_parachain!(AssetHubRococo, Rococo); +impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, Rococo); +impl_xcm_helpers_for_parachain!(AssetHubRococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index 0ff817b6b96109e7f57508bbac16d672c76886e0..ff5a70628db485566e95844a92662955b665285f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" description = "Asset Hub Westend emulated chain" publish = false +[lints] +workspace = true + [dependencies] serde_json = "1.0.104" diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 804b727c33f8399557342bac1c5b200bb16aebee..25d7c1079b4dd3cfa08a27ac3e10cbc498279e34 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -21,7 +21,8 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, + impl_xcm_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, }; use westend_emulated_chain::Westend; @@ -37,6 +38,7 @@ decl_test_parachains! { XcmpMessageHandler: asset_hub_westend_runtime::XcmpQueue, LocationToAccountId: asset_hub_westend_runtime::xcm_config::LocationToAccountId, ParachainInfo: asset_hub_westend_runtime::ParachainInfo, + MessageOrigin: cumulus_primitives_core::AggregateMessageOrigin, }, pallets = { PolkadotXcm: asset_hub_westend_runtime::PolkadotXcm, @@ -52,4 +54,6 @@ decl_test_parachains! 
{ // AssetHubWestend implementation impl_accounts_helpers_for_parachain!(AssetHubWestend); impl_assert_events_helpers_for_parachain!(AssetHubWestend); -impl_assets_helpers_for_system_parachain!(AssetHubWestend, Westend); +impl_assets_helpers_for_parachain!(AssetHubWestend, Westend); +impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, Westend); +impl_xcm_helpers_for_parachain!(AssetHubWestend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml deleted file mode 100644 index 0f212c1599963f8b70fd6815aac6d8c35db8dd33..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "asset-hub-wococo-emulated-chain" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Asset Hub Wococo emulated chain" -publish = false - -[dependencies] -serde_json = "1.0.104" - -# Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } - -# Polakadot -parachains-common = { path = "../../../../../../../parachains/common" } - -# Cumulus -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } -wococo-emulated-chain = { path = "../../../relays/wococo" } -asset-hub-rococo-emulated-chain = { path = "../asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs deleted file mode 100644 index 677ca1763cfd68b5f7ec33da977d01c69ac09bb5..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Substrate -use frame_support::traits::OnInitialize; - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, -}; -use wococo_emulated_chain::Wococo; - -// AssetHubWococo Parachain declaration -decl_test_parachains! 
{ - pub struct AssetHubWococo { - genesis = asset_hub_rococo_emulated_chain::genesis::genesis(), - on_init = { - asset_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = asset_hub_rococo_runtime, - core = { - XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, - Assets: asset_hub_rococo_runtime::Assets, - ForeignAssets: asset_hub_rococo_runtime::ForeignAssets, - PoolAssets: asset_hub_rococo_runtime::PoolAssets, - AssetConversion: asset_hub_rococo_runtime::AssetConversion, - Balances: asset_hub_rococo_runtime::Balances, - } - }, -} - -// AssetHubWococo implementation -impl_accounts_helpers_for_parachain!(AssetHubWococo); -impl_assert_events_helpers_for_parachain!(AssetHubWococo); -impl_assets_helpers_for_system_parachain!(AssetHubWococo, Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml index 43c0f5fd14c9b2ba8d0b493e0e84ce93cb6113ca..d0c498b54b4e0d2727864db15d6415052e939231 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" description = "Bridge Hub Rococo emulated chain" publish = false +[lints] +workspace = true + [dependencies] serde_json = "1.0.104" @@ -22,3 +25,11 @@ parachains-common = { path = "../../../../../../../parachains/common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } +bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } + +# Snowbridge +snowbridge-core = { path = "../../../../../../../../bridges/snowbridge/parachain/primitives/core", default-features = false } +snowbridge-router-primitives = { path = "../../../../../../../../bridges/snowbridge/parachain/primitives/router", default-features = false } +snowbridge-system = { path = "../../../../../../../../bridges/snowbridge/parachain/pallets/system", default-features = false } +snowbridge-inbound-queue = { path = "../../../../../../../../bridges/snowbridge/parachain/pallets/inbound-queue", default-features = false } +snowbridge-outbound-queue = { path = "../../../../../../../../bridges/snowbridge/parachain/pallets/outbound-queue", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs index 4af84c82e98df7f42e7f883ea4d54dfb1d5e1f8e..3dd0cb10ab697af86efd71b9aa1a2df0f35fa3c8 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs @@ -22,6 +22,7 @@ use emulated_integration_tests_common::{ }; use parachains_common::Balance; +pub const ASSETHUB_PARA_ID: u32 = 1000; 
pub const PARA_ID: u32 = 1013; pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; @@ -56,20 +57,17 @@ pub fn genesis() -> Storage { safe_xcm_version: Some(SAFE_XCM_VERSION), ..Default::default() }, - bridge_wococo_grandpa: bridge_hub_rococo_runtime::BridgeWococoGrandpaConfig { + bridge_westend_grandpa: bridge_hub_rococo_runtime::BridgeWestendGrandpaConfig { owner: Some(get_account_id_from_seed::(accounts::BOB)), ..Default::default() }, - bridge_rococo_grandpa: bridge_hub_rococo_runtime::BridgeRococoGrandpaConfig { + bridge_westend_messages: bridge_hub_rococo_runtime::BridgeWestendMessagesConfig { owner: Some(get_account_id_from_seed::(accounts::BOB)), ..Default::default() }, - bridge_rococo_messages: bridge_hub_rococo_runtime::BridgeRococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_wococo_messages: bridge_hub_rococo_runtime::BridgeWococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), + ethereum_system: bridge_hub_rococo_runtime::EthereumSystemConfig { + para_id: PARA_ID.into(), + asset_hub_para_id: ASSETHUB_PARA_ID.into(), ..Default::default() }, ..Default::default() diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index d7630954c868e478e4900aab1ecdbee34c74a039..8c18d112bc12fb4883d313106fa66841dcad8d2e 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, + impl_xcm_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, }; // BridgeHubRococo Parachain declaration @@ -36,10 +36,14 @@ decl_test_parachains! { XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, + MessageOrigin: bridge_hub_common::AggregateMessageOrigin, }, pallets = { PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, Balances: bridge_hub_rococo_runtime::Balances, + EthereumSystem: bridge_hub_rococo_runtime::EthereumSystem, + EthereumInboundQueue: bridge_hub_rococo_runtime::EthereumInboundQueue, + EthereumOutboundQueue: bridge_hub_rococo_runtime::EthereumOutboundQueue, } }, } @@ -47,3 +51,4 @@ decl_test_parachains! 
{ // BridgeHubRococo implementation impl_accounts_helpers_for_parachain!(BridgeHubRococo); impl_assert_events_helpers_for_parachain!(BridgeHubRococo); +impl_xcm_helpers_for_parachain!(BridgeHubRococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml index e5e6fd7073933bca21683faef3459afdf1ac8e8b..3d5a7e1071d5f780506b42820bb729e4fad33da7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" description = "Bridge Hub Westend emulated chain" publish = false +[lints] +workspace = true + [dependencies] serde_json = "1.0.104" @@ -22,3 +25,4 @@ parachains-common = { path = "../../../../../../../parachains/common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } bridge-hub-westend-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-westend" } +bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs index cd578d6862f2e16c39d9c14faf5c76acd928f3c0..2eb7e0ddbd29a92f21169cb1b70fb1c803ea2623 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs @@ -22,7 +22,7 @@ use emulated_integration_tests_common::{ }; use parachains_common::Balance; -pub const PARA_ID: u32 = 1013; +pub const PARA_ID: u32 = 1002; pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index 436b65cb916b6be5b64aa4890c952873210e49ee..b0dddc9dbf9a5b71a776e3ae48b97bbb9f29adf2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, + impl_xcm_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, }; // BridgeHubWestend Parachain declaration @@ -36,6 +36,7 @@ decl_test_parachains! { XcmpMessageHandler: bridge_hub_westend_runtime::XcmpQueue, LocationToAccountId: bridge_hub_westend_runtime::xcm_config::LocationToAccountId, ParachainInfo: bridge_hub_westend_runtime::ParachainInfo, + MessageOrigin: bridge_hub_common::AggregateMessageOrigin, }, pallets = { PolkadotXcm: bridge_hub_westend_runtime::PolkadotXcm, @@ -47,3 +48,4 @@ decl_test_parachains! 
{ // BridgeHubWestend implementation impl_accounts_helpers_for_parachain!(BridgeHubWestend); impl_assert_events_helpers_for_parachain!(BridgeHubWestend); +impl_xcm_helpers_for_parachain!(BridgeHubWestend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs deleted file mode 100644 index 6807a2ab8c8093ef43a02a7ea06ef18ae9399586..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Substrate -use frame_support::traits::OnInitialize; - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, -}; - -// BridgeHubWococo Parachain declaration -decl_test_parachains! { - pub struct BridgeHubWococo { - genesis = bridge_hub_rococo_emulated_chain::genesis::genesis(), - on_init = { - bridge_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = bridge_hub_rococo_runtime, - core = { - XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, - Balances: bridge_hub_rococo_runtime::Balances, - } - }, -} - -// BridgeHubWococo implementation -impl_accounts_helpers_for_parachain!(BridgeHubWococo); -impl_assert_events_helpers_for_parachain!(BridgeHubWococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml similarity index 72% rename from cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml rename to cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml index 0b02730a50c694019d56c3733c576b65986b9f1d..54d2d9b6b9823ac4718ffe34e18c106ff34fdb16 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml @@ -1,12 +1,15 @@ [package] -name = "bridge-hub-wococo-emulated-chain" +name = "collectives-westend-emulated-chain" version = "0.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" -description = "Bridge Hub Wococo emulated chain" +description = "Collectives Westend emulated chain" publish = false +[lints] +workspace = true + [dependencies] serde_json = "1.0.104" @@ -21,5 +24,5 @@ 
parachains-common = { path = "../../../../../../../parachains/common" } # Cumulus cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } -bridge-hub-rococo-emulated-chain = { path = "../bridge-hub-rococo" } +collectives-westend-runtime = { path = "../../../../../../runtimes/collectives/collectives-westend" } +westend-emulated-chain = { path = "../../../relays/westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..d79ef55072ae481b4bba37ef7a97e2d3783668f2 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/genesis.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::storage::Storage; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1001; +pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = collectives_westend_runtime::RuntimeGenesisConfig { + system: collectives_westend_runtime::SystemConfig::default(), + balances: collectives_westend_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, + parachain_info: collectives_westend_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: collectives_westend_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: collectives_westend_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + collectives_westend_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: collectives_westend_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage( + &genesis_config, + collectives_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs 
b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..a32e865dd9ce8497755a261c6922273aea8b49f6 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs @@ -0,0 +1,52 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impls::Parachain, xcm_emulator::decl_test_parachains, +}; + +// CollectivesWestend Parachain declaration +decl_test_parachains! { + pub struct CollectivesWestend { + genesis = genesis::genesis(), + on_init = { + collectives_westend_runtime::AuraExt::on_initialize(1); + }, + runtime = collectives_westend_runtime, + core = { + XcmpMessageHandler: collectives_westend_runtime::XcmpQueue, + LocationToAccountId: collectives_westend_runtime::xcm_config::LocationToAccountId, + ParachainInfo: collectives_westend_runtime::ParachainInfo, + MessageOrigin: cumulus_primitives_core::AggregateMessageOrigin, + }, + pallets = { + PolkadotXcm: collectives_westend_runtime::PolkadotXcm, + Balances: collectives_westend_runtime::Balances, + FellowshipTreasury: collectives_westend_runtime::FellowshipTreasury, + AssetRate: collectives_westend_runtime::AssetRate, + } + }, +} + +// CollectivesWestend implementation +impl_accounts_helpers_for_parachain!(CollectivesWestend); +impl_assert_events_helpers_for_parachain!(CollectivesWestend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index 42aaee3f1020f5808d2b62e754d6c209067e3e8b..d325b78fa664ac46105a56876a346c14972e1617 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" description = "Penpal emulated chain" publish = false +[lints] +workspace = true + [dependencies] serde_json = "1.0.104" @@ -22,3 +25,5 @@ parachains-common = { path = "../../../../../../../parachains/common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } +rococo-emulated-chain = { path = "../../../relays/rococo" } +westend-emulated-chain = { path = "../../../relays/westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs
b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 8709d4e91969adad0bc857433539c5e199d811cd..244a846bbc2f53c9f65e4144484783f70a71879d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -15,14 +15,18 @@ mod genesis; pub use genesis::{genesis, ED, PARA_ID_A, PARA_ID_B}; +pub use penpal_runtime::xcm_config::{LocalTeleportableToAssetHub, XcmConfig}; // Substrate use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ - impl_assert_events_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, }; +use rococo_emulated_chain::Rococo; +use westend_emulated_chain::Westend; // Penpal Parachain declaration decl_test_parachains! { @@ -36,10 +40,13 @@ decl_test_parachains! { XcmpMessageHandler: penpal_runtime::XcmpQueue, LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, ParachainInfo: penpal_runtime::ParachainInfo, + MessageOrigin: cumulus_primitives_core::AggregateMessageOrigin, }, pallets = { PolkadotXcm: penpal_runtime::PolkadotXcm, Assets: penpal_runtime::Assets, + ForeignAssets: penpal_runtime::ForeignAssets, + Balances: penpal_runtime::Balances, } }, pub struct PenpalB { @@ -52,14 +59,21 @@ decl_test_parachains! { XcmpMessageHandler: penpal_runtime::XcmpQueue, LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, ParachainInfo: penpal_runtime::ParachainInfo, + MessageOrigin: cumulus_primitives_core::AggregateMessageOrigin, }, pallets = { PolkadotXcm: penpal_runtime::PolkadotXcm, Assets: penpal_runtime::Assets, + ForeignAssets: penpal_runtime::ForeignAssets, + Balances: penpal_runtime::Balances, } }, } // Penpal implementation +impl_accounts_helpers_for_parachain!(PenpalA); +impl_accounts_helpers_for_parachain!(PenpalB); +impl_assets_helpers_for_parachain!(PenpalA, Rococo); +impl_assets_helpers_for_parachain!(PenpalB, Westend); impl_assert_events_helpers_for_parachain!(PenpalA); impl_assert_events_helpers_for_parachain!(PenpalB); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index 325c722951739a917815835fabdcdb3f2df6c4b5..d2e54367de2e70ce2171682693ec186a0f739aa8 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" description = "Rococo emulated chain" publish = false +[lints] +workspace = true + [dependencies] serde_json = "1.0.104" diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs index 6f5f3923ead9d251fd3f6d0c9903fe29cbc2d7f6..45e1e94de0100bf8a765b22163f519d0a54ced1c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -16,7 +16,6 @@ // Substrate use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use 
sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{sr25519, storage::Storage}; @@ -38,7 +37,6 @@ const ENDOWMENT: u128 = 1_000_000 * ROC; fn session_keys( babe: BabeId, grandpa: GrandpaId, - im_online: ImOnlineId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, @@ -47,7 +45,6 @@ fn session_keys( rococo_runtime::SessionKeys { babe, grandpa, - im_online, para_validator, para_assignment, authority_discovery, @@ -74,7 +71,6 @@ pub fn genesis() -> Storage { x.4.clone(), x.5.clone(), x.6.clone(), - x.7.clone(), get_from_seed::("Alice"), ), ) diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs index f806f4a5d9eb394eb0bba324bd32ee2249301ae1..0791f63235fb35a7e8c0a1b8d380d6cc137a8754 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -24,7 +24,7 @@ use emulated_integration_tests_common::{ // Rococo declaration decl_test_relay_chains! { - #[api_version(8)] + #[api_version(10)] pub struct Rococo { genesis = genesis::genesis(), on_init = (), diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index 20b9737735fd4713196f930e2f44a8a2526e32f9..b073bbb94f9ee832e71f9cc01b30906a26ef8440 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" description = "Westend emulated chain" publish = false +[lints] +workspace = true + [dependencies] serde_json = "1.0.104" diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs index e87b85881d3cbfcbe61e01c9529c1fb1d48e559f..e2297100a4525e6d26cb469f6f1458023a12b82c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -16,7 +16,6 @@ // Substrate use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::storage::Storage; @@ -39,7 +38,6 @@ const STASH: u128 = 100 * WND; fn session_keys( babe: BabeId, grandpa: GrandpaId, - im_online: ImOnlineId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, @@ -48,7 +46,6 @@ fn session_keys( westend_runtime::SessionKeys { babe, grandpa, - im_online, para_validator, para_assignment, authority_discovery, @@ -75,7 +72,6 @@ pub fn genesis() -> Storage { x.4.clone(), x.5.clone(), x.6.clone(), - x.7.clone(), get_from_seed::("Alice"), ), ) diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs index af45d8db4e622d6f6df468e63fc3137a777492e6..8a5d4bbf80854f0e3dfec25a058352e0eb9cd31f 100644 --- 
a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -24,13 +24,13 @@ use emulated_integration_tests_common::{ // Westend declaration decl_test_relay_chains! { - #[api_version(8)] + #[api_version(10)] pub struct Westend { genesis = genesis::genesis(), on_init = (), runtime = westend_runtime, core = { - SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, + SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, }, pallets = { XcmPallet: westend_runtime::XcmPallet, diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml deleted file mode 100644 index 51a87954b8c083ffdeec90b890a942c3e5fe00aa..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "wococo-emulated-chain" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Wococo emulated chain" -publish = false - -[dependencies] -serde_json = "1.0.104" - -# Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } -grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } -pallet-im-online = { path = "../../../../../../../substrate/frame/im-online", default-features = false } - -# Polkadot -polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants", default-features = false } -rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } - -# Cumulus -parachains-common = { path = "../../../../../../parachains/common" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-emulated-chain = { path = "../rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs deleted file mode 100644 index a04deee330f7ddf2ec1b5458a9716fc7fafddb82..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_relay_chain, impl_assert_events_helpers_for_relay_chain, - impl_hrmp_channels_helpers_for_relay_chain, impl_send_transact_helpers_for_relay_chain, - xcm_emulator::decl_test_relay_chains, -}; - -// Wococo declaration -decl_test_relay_chains! { - #[api_version(8)] - pub struct Wococo { - genesis = rococo_emulated_chain::genesis::genesis(), - on_init = (), - runtime = rococo_runtime, - core = { - SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, - }, - pallets = { - XcmPallet: rococo_runtime::XcmPallet, - Sudo: rococo_runtime::Sudo, - Balances: rococo_runtime::Balances, - Hrmp: rococo_runtime::Hrmp, - } - }, -} - -// Wococo implementation -impl_accounts_helpers_for_relay_chain!(Wococo); -impl_assert_events_helpers_for_relay_chain!(Wococo); -impl_hrmp_channels_helpers_for_relay_chain!(Wococo); -impl_send_transact_helpers_for_relay_chain!(Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 08bb284cded2c7114b4348cb996d621c1c18c1d6..f2e799df8100fb801de5a10fb3b0b6c231b4efea 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license = "Apache-2.0" description = "Common resources for integration testing with xcm-emulator" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } paste = "1.0.14" @@ -13,29 +16,29 @@ serde_json = "1.0.108" # Substrate grandpa = { package = "sc-consensus-grandpa", path = "../../../../../substrate/client/consensus/grandpa" } -sp-authority-discovery = { path = "../../../../../substrate/primitives/authority-discovery", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-consensus-babe = { path = "../../../../../substrate/primitives/consensus/babe", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} -pallet-im-online = { path = "../../../../../substrate/frame/im-online", default-features = false} +sp-authority-discovery = { path = "../../../../../substrate/primitives/authority-discovery", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-consensus-babe = { path = 
"../../../../../substrate/primitives/consensus/babe", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +pallet-im-online = { path = "../../../../../substrate/frame/im-online", default-features = false } beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../substrate/primitives/consensus/beefy" } # Polkadot polkadot-service = { path = "../../../../../polkadot/node/service", default-features = false, features = ["full-node"] } -polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false} +polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false } polkadot-runtime-parachains = { path = "../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } # Cumulus parachains-common = { path = "../../../common" } cumulus-primitives-core = { path = "../../../../primitives/core" } -xcm-emulator = { path = "../../../../xcm/xcm-emulator", default-features = false} -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false} +xcm-emulator = { path = "../../../../xcm/xcm-emulator", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system" } asset-test-utils = { path = "../../../runtimes/assets/test-utils" } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 6c99c1614db9269c846fb8e40e5666d1e3cb7afa..42b5847d17c4696d6085989f2e28ec8292970e5c 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -38,7 +38,7 @@ pub use polkadot_runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, }; pub use xcm::{ - prelude::{MultiLocation, OriginKind, Outcome, VersionedXcm}, + prelude::{MultiLocation, OriginKind, Outcome, VersionedXcm, XcmVersion}, v3::Error, DoubleEncoded, }; @@ -47,7 +47,8 @@ pub use xcm::{ pub use cumulus_pallet_parachain_system; pub use cumulus_pallet_xcmp_queue; pub use cumulus_primitives_core::{ - relay_chain::HrmpChannelId, DmpMessageHandler, ParaId, XcmpMessageHandler, + relay_chain::HrmpChannelId, DmpMessageHandler, Junction, Junctions, NetworkId, ParaId, + XcmpMessageHandler, }; pub use parachains_common::{AccountId, Balance}; pub use xcm_emulator::{ @@ -62,11 +63,14 @@ use bp_messages::{ LaneId, MessageKey, OutboundLaneData, }; use bridge_runtime_common::messages_xcm_extension::XcmBlobMessageDispatchResult; -pub use pallet_bridge_messages::Instance2 as BridgeMessagesInstance2; -use pallet_bridge_messages::{Config, Instance1, OutboundLanes, Pallet}; +use pallet_bridge_messages::{Config, OutboundLanes, Pallet}; +pub use pallet_bridge_messages::{ + Instance1 as 
BridgeMessagesInstance1, Instance2 as BridgeMessagesInstance2, + Instance3 as BridgeMessagesInstance3, +}; -pub struct BridgeHubMessageHandler { - _marker: std::marker::PhantomData<(S, T, I)>, +pub struct BridgeHubMessageHandler { + _marker: std::marker::PhantomData<(S, SI, T, TI)>, } struct LaneIdWrapper(LaneId); @@ -83,13 +87,14 @@ impl From for LaneIdWrapper { } } -impl BridgeMessageHandler for BridgeHubMessageHandler +impl BridgeMessageHandler for BridgeHubMessageHandler where - S: Config, - T: Config, - I: 'static, - >::InboundPayload: From>, - >::MessageDispatch: + S: Config, + SI: 'static, + T: Config, + TI: 'static, + >::InboundPayload: From>, + >::MessageDispatch: MessageDispatch, { fn get_source_outbound_messages() -> Vec { @@ -100,16 +105,13 @@ where // collect messages from `OutboundMessages` for each active outbound lane in the source for lane in active_lanes { - let latest_generated_nonce = - OutboundLanes::::get(lane).latest_generated_nonce; - let latest_received_nonce = - OutboundLanes::::get(lane).latest_received_nonce; + let latest_generated_nonce = OutboundLanes::::get(lane).latest_generated_nonce; + let latest_received_nonce = OutboundLanes::::get(lane).latest_received_nonce; (latest_received_nonce + 1..=latest_generated_nonce).for_each(|nonce| { - let encoded_payload: Vec = - Pallet::::outbound_message_data(*lane, nonce) - .expect("Bridge message does not exist") - .into(); + let encoded_payload: Vec = Pallet::::outbound_message_data(*lane, nonce) + .expect("Bridge message does not exist") + .into(); let payload = Vec::::decode(&mut &encoded_payload[..]) .expect("Decodign XCM message failed"); let id: u32 = LaneIdWrapper(*lane).into(); @@ -133,9 +135,9 @@ where // Directly dispatch outbound messages assuming everything is correct // and bypassing the `Relayers` and `InboundLane` logic - let dispatch_result = TargetMessageDispatch::::dispatch(DispatchMessage { + let dispatch_result = TargetMessageDispatch::::dispatch(DispatchMessage { key: MessageKey { lane_id, nonce }, - data: DispatchMessageData::> { payload }, + data: DispatchMessageData::> { payload }, }); let result = match dispatch_result.dispatch_level_result { @@ -151,14 +153,14 @@ where } fn notify_source_message_delivery(lane_id: u32) { - let data = OutboundLanes::::get(LaneIdWrapper::from(lane_id).0); + let data = OutboundLanes::::get(LaneIdWrapper::from(lane_id).0); let new_data = OutboundLaneData { oldest_unpruned_nonce: data.oldest_unpruned_nonce + 1, latest_received_nonce: data.latest_received_nonce + 1, ..data }; - OutboundLanes::::insert(LaneIdWrapper::from(lane_id).0, new_data); + OutboundLanes::::insert(LaneIdWrapper::from(lane_id).0, new_data); } } @@ -171,10 +173,14 @@ macro_rules! impl_accounts_helpers_for_relay_chain { pub fn fund_accounts(accounts: Vec<($crate::impls::AccountId, $crate::impls::Balance)>) { ::execute_with(|| { for account in accounts { + let who = account.0; + let actual = ]>::Balances::free_balance(&who); + let actual = actual.saturating_add(]>::Balances::reserved_balance(&who)); + $crate::impls::assert_ok!(]>::Balances::force_set_balance( ::RuntimeOrigin::root(), - account.0.into(), - account.1, + who.into(), + actual.saturating_add(account.1), )); } }); @@ -384,14 +390,42 @@ macro_rules! 
impl_accounts_helpers_for_parachain { pub fn fund_accounts(accounts: Vec<($crate::impls::AccountId, $crate::impls::Balance)>) { ::execute_with(|| { for account in accounts { + let who = account.0; + let actual = ]>::Balances::free_balance(&who); + let actual = actual.saturating_add(]>::Balances::reserved_balance(&who)); + $crate::impls::assert_ok!(]>::Balances::force_set_balance( ::RuntimeOrigin::root(), - account.0.into(), - account.1, + who.into(), + actual.saturating_add(account.1), )); } }); } + + /// Fund a sovereign account of sibling para. + pub fn fund_para_sovereign(sibling_para_id: $crate::impls::ParaId, balance: $crate::impls::Balance) { + let sibling_location = Self::sibling_location_of(sibling_para_id); + let sovereign_account = Self::sovereign_account_id_of(sibling_location); + Self::fund_accounts(vec![(sovereign_account.into(), balance)]) + } + + /// Return local sovereign account of `para_id` on other `network_id` + pub fn sovereign_account_of_parachain_on_other_global_consensus( + network_id: $crate::impls::NetworkId, + para_id: $crate::impls::ParaId, + ) -> $crate::impls::AccountId { + let remote_location = $crate::impls::MultiLocation { + parents: 2, + interior: $crate::impls::Junctions::X2( + $crate::impls::Junction::GlobalConsensus(network_id), + $crate::impls::Junction::Parachain(para_id.into()), + ), + }; + ::execute_with(|| { + Self::sovereign_account_id_of(remote_location) + }) + } } } }; @@ -556,7 +590,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { } #[macro_export] -macro_rules! impl_assets_helpers_for_system_parachain { +macro_rules! impl_assets_helpers_for_parachain { ( $chain:ident, $relay_chain:ident ) => { $crate::impls::paste::paste! { impl $chain { @@ -614,7 +648,9 @@ macro_rules! impl_assets_helpers_for_system_parachain { $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::::Assets($crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount }) => { + RuntimeEvent::::Assets( + $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } + ) => { asset_id: *asset_id == id, owner: *owner == beneficiary.clone().into(), amount: *amount == amount_to_mint, @@ -687,3 +723,115 @@ macro_rules! impl_assets_helpers_for_system_parachain { } }; } + +#[macro_export] +macro_rules! impl_foreign_assets_helpers_for_parachain { + ( $chain:ident, $relay_chain:ident ) => { + $crate::impls::paste::paste! { + impl $chain { + /// Create foreign assets using sudo `ForeignAssets::force_create()` + pub fn force_create_foreign_asset( + id: $crate::impls::MultiLocation, + owner: $crate::impls::AccountId, + is_sufficient: bool, + min_balance: u128, + prefund_accounts: Vec<($crate::impls::AccountId, u128)>, + ) { + use $crate::impls::Inspect; + let sudo_origin = <$chain as $crate::impls::Chain>::RuntimeOrigin::root(); + ::execute_with(|| { + $crate::impls::assert_ok!( + ]>::ForeignAssets::force_create( + sudo_origin, + id, + owner.clone().into(), + is_sufficient, + min_balance, + ) + ); + assert!(]>::ForeignAssets::asset_exists(id)); + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + $crate::impls::assert_expected_events!( + Self, + vec![ + RuntimeEvent::::ForeignAssets( + $crate::impls::pallet_assets::Event::ForceCreated { + asset_id, + .. 
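The new `sovereign_account_of_parachain_on_other_global_consensus` helper above resolves the local sovereign account of a parachain that lives under a different global consensus. A minimal sketch of the location shape it builds, using XCM v3 prelude types; the concrete network and para id in the example are illustrative only.

use xcm::prelude::*;

// From the calling parachain's point of view, a parachain under another consensus is
// addressed two hops up (its own relay, then the bridged consensus) and back down:
// `{ parents: 2, interior: X2(GlobalConsensus(network), Parachain(id)) }`.
fn remote_parachain_location(network: NetworkId, para_id: u32) -> MultiLocation {
    MultiLocation {
        parents: 2,
        interior: X2(GlobalConsensus(network), Parachain(para_id)),
    }
}

fn example() {
    // e.g. Westend Asset Hub (para 1000) as addressed from a Rococo-side parachain; the
    // helper then feeds such a location to `sovereign_account_id_of` inside `execute_with`.
    let _ahw_as_seen_from_rococo = remote_parachain_location(NetworkId::Westend, 1000);
}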
+ } + ) => { asset_id: *asset_id == id, }, + ] + ); + }); + for (beneficiary, amount) in prefund_accounts.into_iter() { + let signed_origin = + <$chain as $crate::impls::Chain>::RuntimeOrigin::signed(owner.clone()); + Self::mint_foreign_asset(signed_origin, id, beneficiary, amount); + } + } + + /// Mint assets making use of the ForeignAssets pallet-assets instance + pub fn mint_foreign_asset( + signed_origin: ::RuntimeOrigin, + id: $crate::impls::MultiLocation, + beneficiary: $crate::impls::AccountId, + amount_to_mint: u128, + ) { + ::execute_with(|| { + $crate::impls::assert_ok!(]>::ForeignAssets::mint( + signed_origin, + id.into(), + beneficiary.clone().into(), + amount_to_mint + )); + + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + + $crate::impls::assert_expected_events!( + Self, + vec![ + RuntimeEvent::::ForeignAssets( + $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } + ) => { + asset_id: *asset_id == id, + owner: *owner == beneficiary.clone().into(), + amount: *amount == amount_to_mint, + }, + ] + ); + }); + } + } + } + }; +} + +#[macro_export] +macro_rules! impl_xcm_helpers_for_parachain { + ( $chain:ident ) => { + $crate::impls::paste::paste! { + impl $chain { + /// Set XCM version for destination. + pub fn force_xcm_version(dest: $crate::impls::MultiLocation, version: $crate::impls::XcmVersion) { + ::execute_with(|| { + $crate::impls::assert_ok!(]>::PolkadotXcm::force_xcm_version( + ::RuntimeOrigin::root(), + $crate::impls::bx!(dest), + version, + )); + }); + } + + /// Set default/safe XCM version for runtime. + pub fn force_default_xcm_version(version: Option<$crate::impls::XcmVersion>) { + ::execute_with(|| { + $crate::impls::assert_ok!(]>::PolkadotXcm::force_default_xcm_version( + ::RuntimeOrigin::root(), + version, + )); + }); + } + } + } + } +} diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 952b053f2aa241bcf4a92812a51afa8e65a6855a..58222f622c2a63f9ebbc856f2d0ab7afb040eee4 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -21,7 +21,6 @@ pub use xcm_emulator; // Substrate use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{sr25519, storage::Storage, Pair, Public}; @@ -163,7 +162,6 @@ pub mod validators { AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index 6ea3524ed4a39d5da293c3f69f9c8a35917aab39..8718f1e83a003386fa40a99d4090906908ee717c 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -120,3 +120,102 @@ macro_rules! test_parachain_is_trusted_teleporter { } }; } + +#[macro_export] +macro_rules! include_penpal_create_foreign_asset_on_asset_hub { + ( $penpal:ident, $asset_hub:ident, $relay_ed:expr, $weight_to_fee:expr) => { + $crate::impls::paste::paste! 
{ + pub fn penpal_create_foreign_asset_on_asset_hub( + asset_id_on_penpal: u32, + foreign_asset_at_asset_hub: MultiLocation, + ah_as_seen_by_penpal: MultiLocation, + is_sufficient: bool, + asset_owner: AccountId, + prefund_amount: u128, + ) { + use frame_support::weights::WeightToFee; + let ah_check_account = $asset_hub::execute_with(|| { + <$asset_hub as [<$asset_hub Pallet>]>::PolkadotXcm::check_account() + }); + let penpal_check_account = + $penpal::execute_with(|| <$penpal as [<$penpal Pallet>]>::PolkadotXcm::check_account()); + let penpal_as_seen_by_ah = $asset_hub::sibling_location_of($penpal::para_id()); + + // prefund SA of Penpal on AssetHub with enough native tokens to pay for creating + // new foreign asset, also prefund CheckingAccount with ED, because teleported asset + // itself might not be sufficient and CheckingAccount cannot be created otherwise + let sov_penpal_on_ah = $asset_hub::sovereign_account_id_of(penpal_as_seen_by_ah); + $asset_hub::fund_accounts(vec![ + (sov_penpal_on_ah.clone().into(), $relay_ed * 100_000_000_000), + (ah_check_account.clone().into(), $relay_ed * 1000), + ]); + + // prefund SA of AssetHub on Penpal with native asset + let sov_ah_on_penpal = $penpal::sovereign_account_id_of(ah_as_seen_by_penpal); + $penpal::fund_accounts(vec![ + (sov_ah_on_penpal.into(), $relay_ed * 1_000_000_000), + (penpal_check_account.clone().into(), $relay_ed * 1000), + ]); + + // Force create asset on $penpal and prefund [<$penpal Sender>] + $penpal::force_create_and_mint_asset( + asset_id_on_penpal, + ASSET_MIN_BALANCE, + is_sufficient, + asset_owner, + None, + prefund_amount, + ); + + let require_weight_at_most = Weight::from_parts(1_100_000_000_000, 30_000); + // `OriginKind::Xcm` required by ForeignCreators pallet-assets origin filter + let origin_kind = OriginKind::Xcm; + let call_create_foreign_assets = + <$asset_hub as Chain>::RuntimeCall::ForeignAssets(pallet_assets::Call::< + <$asset_hub as Chain>::Runtime, + pallet_assets::Instance2, + >::create { + id: foreign_asset_at_asset_hub, + min_balance: ASSET_MIN_BALANCE, + admin: sov_penpal_on_ah.into(), + }) + .encode(); + let buy_execution_fee_amount = $weight_to_fee::weight_to_fee( + &Weight::from_parts(10_100_000_000_000, 300_000), + ); + let buy_execution_fee = MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(buy_execution_fee_amount), + }; + let xcm = VersionedXcm::from(Xcm(vec![ + WithdrawAsset { 0: vec![buy_execution_fee.clone()].into() }, + BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, + Transact { require_weight_at_most, origin_kind, call: call_create_foreign_assets.into() }, + ExpectTransactStatus(MaybeErrorCode::Success), + RefundSurplus, + DepositAsset { assets: All.into(), beneficiary: penpal_as_seen_by_ah }, + ])); + // Send XCM message from penpal => asset_hub + let sudo_penpal_origin = <$penpal as Chain>::RuntimeOrigin::root(); + $penpal::execute_with(|| { + assert_ok!(<$penpal as [<$penpal Pallet>]>::PolkadotXcm::send( + sudo_penpal_origin.clone(), + bx!(ah_as_seen_by_penpal.into()), + bx!(xcm), + )); + type RuntimeEvent = <$penpal as Chain>::RuntimeEvent; + assert_expected_events!( + $penpal, + vec![ + RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + $asset_hub::execute_with(|| { + type ForeignAssets = <$asset_hub as [<$asset_hub Pallet>]>::ForeignAssets; + assert!(ForeignAssets::asset_exists(foreign_asset_at_asset_hub)); + }); + } + } + }; +} diff --git a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs index 47e92ed075fa0ce00cdf7a012ae07e13bc9bed6f..70a9408c309741cb9b9fa01bc2f54cc1a77453ed 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs @@ -59,3 +59,17 @@ pub fn xcm_transact_unpaid_execution( Transact { require_weight_at_most, origin_kind, call }, ])) } + +/// Helper method to get the non-fee asset used in multiple assets transfer +pub fn non_fee_asset(assets: &MultiAssets, fee_idx: usize) -> Option<(MultiLocation, u128)> { + let asset = assets.inner().into_iter().enumerate().find(|a| a.0 != fee_idx)?.1.clone(); + let asset_id = match asset.id { + Concrete(id) => id, + _ => return None, + }; + let asset_amount = match asset.fun { + Fungible(amount) => amount, + _ => return None, + }; + Some((asset_id, asset_amount)) +} diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml index 713cc2ecdbb253044cbf4063317a2cd8fae2e823..bb31f8e467d50b202182be519938c4be0eb5bda5 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" description = "Rococo System emulated network" publish = false +[lints] +workspace = true + [dependencies] # Cumulus emulated-integration-tests-common = { path = "../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml similarity index 50% rename from cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml rename to cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml index 53a6f0840a5b2b28e4beec5082d39bd99f67cf11..744cbe4f8c1e31ed5a9b122a5b5939509234236a 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml @@ -1,18 +1,22 @@ [package] -name = "rococo-wococo-system-emulated-network" +name = "rococo-westend-system-emulated-network" version = "0.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" -description = "Rococo<>Wococo emulated bridged network" +description = "Rococo<>Westend emulated bridged network" publish = false +[lints] +workspace = true + [dependencies] # Cumulus emulated-integration-tests-common = { path = "../../common", default-features = false } rococo-emulated-chain = { path = "../../chains/relays/rococo" } -wococo-emulated-chain = { path = "../../chains/relays/wococo" } +westend-emulated-chain = { path = "../../chains/relays/westend" } asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } -asset-hub-wococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-wococo" } +asset-hub-westend-emulated-chain = { path = 
"../../chains/parachains/assets/asset-hub-westend" } bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } -bridge-hub-wococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-wococo" } +bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } +penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs similarity index 55% rename from cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs rename to cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs index e20dcfa6b32c3eebf899a9889d3c43c4b6abeea8..ee8b038a364d73301732f278786b30b18d534643 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs @@ -14,23 +14,25 @@ // limitations under the License. pub use asset_hub_rococo_emulated_chain; -pub use asset_hub_wococo_emulated_chain; +pub use asset_hub_westend_emulated_chain; pub use bridge_hub_rococo_emulated_chain; -pub use bridge_hub_wococo_emulated_chain; +pub use bridge_hub_westend_emulated_chain; +pub use penpal_emulated_chain; pub use rococo_emulated_chain; -pub use wococo_emulated_chain; +pub use westend_emulated_chain; use asset_hub_rococo_emulated_chain::AssetHubRococo; -use asset_hub_wococo_emulated_chain::AssetHubWococo; +use asset_hub_westend_emulated_chain::AssetHubWestend; use bridge_hub_rococo_emulated_chain::BridgeHubRococo; -use bridge_hub_wococo_emulated_chain::BridgeHubWococo; +use bridge_hub_westend_emulated_chain::BridgeHubWestend; +use penpal_emulated_chain::PenpalA; use rococo_emulated_chain::Rococo; -use wococo_emulated_chain::Wococo; +use westend_emulated_chain::Westend; // Cumulus use emulated_integration_tests_common::{ accounts::{ALICE, BOB}, - impls::{BridgeHubMessageHandler, BridgeMessagesInstance2}, + impls::{BridgeHubMessageHandler, BridgeMessagesInstance1, BridgeMessagesInstance3}, xcm_emulator::{ decl_test_bridges, decl_test_networks, decl_test_sender_receiver_accounts_parameter_types, Chain, @@ -43,52 +45,56 @@ decl_test_networks! { parachains = vec![ AssetHubRococo, BridgeHubRococo, + PenpalA, ], - bridge = RococoWococoMockBridge + bridge = RococoWestendMockBridge }, - pub struct WococoMockNet { - relay_chain = Wococo, + pub struct WestendMockNet { + relay_chain = Westend, parachains = vec![ - AssetHubWococo, - BridgeHubWococo, + AssetHubWestend, + BridgeHubWestend, ], - bridge = WococoRococoMockBridge + bridge = WestendRococoMockBridge }, } decl_test_bridges! 
{ - pub struct RococoWococoMockBridge { + pub struct RococoWestendMockBridge { source = BridgeHubRococoPara, - target = BridgeHubWococoPara, - handler = RococoWococoMessageHandler + target = BridgeHubWestendPara, + handler = RococoWestendMessageHandler }, - pub struct WococoRococoMockBridge { - source = BridgeHubWococoPara, + pub struct WestendRococoMockBridge { + source = BridgeHubWestendPara, target = BridgeHubRococoPara, - handler = WococoRococoMessageHandler + handler = WestendRococoMessageHandler } } type BridgeHubRococoRuntime = ::Runtime; -type BridgeHubWococoRuntime = ::Runtime; +type BridgeHubWestendRuntime = ::Runtime; -pub type RococoWococoMessageHandler = BridgeHubMessageHandler< +pub type RococoWestendMessageHandler = BridgeHubMessageHandler< BridgeHubRococoRuntime, - BridgeHubWococoRuntime, - BridgeMessagesInstance2, + BridgeMessagesInstance3, + BridgeHubWestendRuntime, + BridgeMessagesInstance1, >; -pub type WococoRococoMessageHandler = BridgeHubMessageHandler< - BridgeHubWococoRuntime, +pub type WestendRococoMessageHandler = BridgeHubMessageHandler< + BridgeHubWestendRuntime, + BridgeMessagesInstance1, BridgeHubRococoRuntime, - BridgeMessagesInstance2, + BridgeMessagesInstance3, >; decl_test_sender_receiver_accounts_parameter_types! { RococoRelay { sender: ALICE, receiver: BOB }, AssetHubRococoPara { sender: ALICE, receiver: BOB }, BridgeHubRococoPara { sender: ALICE, receiver: BOB }, - WococoRelay { sender: ALICE, receiver: BOB }, - AssetHubWococoPara { sender: ALICE, receiver: BOB }, - BridgeHubWococoPara { sender: ALICE, receiver: BOB } + WestendRelay { sender: ALICE, receiver: BOB }, + AssetHubWestendPara { sender: ALICE, receiver: BOB }, + BridgeHubWestendPara { sender: ALICE, receiver: BOB }, + PenpalAPara { sender: ALICE, receiver: BOB } } diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml index a4360076d6bd4e2a4a78008708e0ae0891b7fe70..80ffb9cfd6cc384fbc262f0ae4efb39b3c7085a7 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml @@ -7,10 +7,14 @@ license = "Apache-2.0" description = "Westend System emulated network" publish = false +[lints] +workspace = true + [dependencies] # Cumulus emulated-integration-tests-common = { path = "../../common", default-features = false } westend-emulated-chain = { path = "../../chains/relays/westend", default-features = false } asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } +collectives-westend-emulated-chain = { path = "../../chains/parachains/collectives/collectives-westend" } penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs index 667b44a69869a1a9e991d13053cccc5f206324c7..26cd5c7e860867a8ea908013aac4af6ae50a2f52 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs @@ -15,11 +15,13 @@ pub use asset_hub_westend_emulated_chain; pub use bridge_hub_westend_emulated_chain; +pub use 
collectives_westend_emulated_chain; pub use penpal_emulated_chain; pub use westend_emulated_chain; use asset_hub_westend_emulated_chain::AssetHubWestend; use bridge_hub_westend_emulated_chain::BridgeHubWestend; +use collectives_westend_emulated_chain::CollectivesWestend; use penpal_emulated_chain::{PenpalA, PenpalB}; use westend_emulated_chain::Westend; @@ -35,6 +37,7 @@ decl_test_networks! { parachains = vec![ AssetHubWestend, BridgeHubWestend, + CollectivesWestend, PenpalA, PenpalB, ], @@ -46,6 +49,7 @@ decl_test_sender_receiver_accounts_parameter_types! { WestendRelay { sender: ALICE, receiver: BOB }, AssetHubWestendPara { sender: ALICE, receiver: BOB }, BridgeHubWestendPara { sender: ALICE, receiver: BOB }, + CollectivesWestendPara { sender: ALICE, receiver: BOB }, PenpalAPara { sender: ALICE, receiver: BOB }, PenpalBPara { sender: ALICE, receiver: BOB } } diff --git a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml deleted file mode 100644 index a596617e82bf2c2f31631486bfffe46451270f29..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "wococo-system-emulated-network" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Wococo System emulated network" -publish = false - -[dependencies] -# Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -wococo-emulated-chain = { path = "../../chains/relays/wococo" } -asset-hub-wococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-wococo" } -bridge-hub-wococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-wococo" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs deleted file mode 100644 index 5369afe7dffece450925f35275170e59717a1d33..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub use asset_hub_wococo_emulated_chain; -pub use bridge_hub_wococo_emulated_chain; -pub use wococo_emulated_chain; - -use asset_hub_wococo_emulated_chain::AssetHubWococo; -use bridge_hub_wococo_emulated_chain::BridgeHubWococo; -use penpal_emulated_chain::{PenpalA, PenpalB}; -use wococo_emulated_chain::Wococo; - -// Cumulus -use emulated_integration_tests_common::{ - accounts::{ALICE, BOB}, - xcm_emulator::{decl_test_networks, decl_test_sender_receiver_accounts_parameter_types}, -}; - -decl_test_networks! 
{ - pub struct WococoMockNet { - relay_chain = Wococo, - parachains = vec![ - AssetHubWococo, - BridgeHubWococo, - PenpalA, - PenpalB, - ], - bridge = () - }, -} - -decl_test_sender_receiver_accounts_parameter_types! { - WococoRelay { sender: ALICE, receiver: BOB }, - AssetHubWococoPara { sender: ALICE, receiver: BOB }, - BridgeHubWococoPara { sender: ALICE, receiver: BOB }, - PenpalAPara { sender: ALICE, receiver: BOB }, - PenpalBPara { sender: ALICE, receiver: BOB } -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 6e592f04ba1e2ce4669bedfbd275dc99854a940d..445395fc783075e0483b6f0319b467ad4be90b14 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -7,26 +7,30 @@ license = "Apache-2.0" description = "Asset Hub Rococo runtime integration tests with xcm-emulator" publish = false +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } assert_matches = "1.5.0" # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } parachains-common = { path = "../../../../../../parachains/common" } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } -emulated-integration-tests-common = { path = "../../../common", default-features = false} -rococo-system-emulated-network ={ path = 
"../../../networks/rococo-system" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } +rococo-system-emulated-network = { path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 11380cd1e2d6bc797f0a9dca145cc3e7ac0b3828..3ff8c37c64651c79d76a33bf10aa83db5ba20754 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -61,18 +61,20 @@ pub const ASSET_MIN_BALANCE: u128 = 1000; pub const ASSETS_PALLET_ID: u8 = 50; pub type RelayToSystemParaTest = Test; +pub type RelayToParaTest = Test; pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; +pub type ParaToSystemParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests -pub fn relay_test_args(amount: Balance) -> TestArgs { +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests +pub fn relay_test_args( + dest: MultiLocation, + beneficiary_id: AccountId32, + amount: Balance, +) -> TestArgs { TestArgs { - dest: Rococo::child_location_of(AssetHubRococo::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubRococoReceiver::get().into(), - } - .into(), + dest, + beneficiary: AccountId32Junction { network: None, id: beneficiary_id.into() }.into(), amount, assets: (Here, amount).into(), asset_id: None, @@ -81,13 +83,14 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain accross integraton tests -pub fn system_para_test_args( +/// Returns a `TestArgs` instance to be used by parachains across integration tests +pub fn para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, amount: Balance, assets: MultiAssets, asset_id: Option, + fee_asset_item: u32, ) -> TestArgs { TestArgs { dest, @@ -95,7 +98,7 @@ pub fn system_para_test_args( amount, assets, asset_id, - fee_asset_item: 0, + fee_asset_item, weight_limit: WeightLimit::Unlimited, } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs index b3841af0e6c38372b8fb621fac468b25bdec63a1..c9270934ddfe87c4c969428facccda71b9401588 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs @@ -18,3 +18,11 @@ mod send; mod set_xcm_versions; mod swap; mod teleport; + +use crate::*; +emulated_integration_tests_common::include_penpal_create_foreign_asset_on_asset_hub!( + PenpalA, + AssetHubRococo, + ROCOCO_ED, + parachains_common::rococo::fee::WeightToFee +); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 76d93b2dbdbb4b96eaf9946510e51053d2be9dfd..e6142e29b7c875ad0227fa979855c498eb00a738 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -16,47 +16,38 @@ use crate::*; use asset_hub_rococo_runtime::xcm_config::XcmConfig as AssetHubRococoXcmConfig; use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; +use rococo_system_emulated_network::penpal_emulated_chain::XcmConfig as PenpalRococoXcmConfig; -fn relay_origin_assertions(t: RelayToSystemParaTest) { +fn relay_to_para_sender_assertions(t: RelayToParaTest) { type RuntimeEvent = ::RuntimeEvent; - - Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(630_092_000, 6_196))); - + Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); assert_expected_events!( Rococo, vec![ - // Amount to reserve transfer is transferred to System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { from: *from == t.sender.account_id, to: *to == Rococo::sovereign_account_id_of( t.args.dest ), - amount: *amount == t.args.amount, + amount: *amount == t.args.amount, }, ] ); } -fn system_para_dest_assertions_incomplete(_t: RelayToSystemParaTest) { - AssetHubRococo::assert_dmp_queue_incomplete(Some(Weight::from_parts(57_185_000, 3504))); -} - -fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) { - AssetHubRococo::assert_xcm_pallet_attempted_error(Some(XcmError::Barrier)) -} - -fn system_para_to_para_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; - AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( 864_610_000, 8_799, ))); - assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Balances( pallet_balances::Event::Transfer { from, to, amount } ) => { @@ -70,18 +61,69 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { ); } -fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { +fn para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + +fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + assert_expected_events!( + PenpalA, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is withdrawn from Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + amount: *amount == t.args.amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} +fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( 864_610_000, 8799, ))); - assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Assets( pallet_assets::Event::Transferred { asset_id, from, to, amount } ) => { @@ -96,29 +138,22 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { ); } -fn relay_limited_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn relay_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) +fn system_para_to_para_assets_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::Assets(pallet_assets::Event::Issued { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); } -fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( +fn relay_to_para_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { + ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), bx!(t.args.beneficiary.into()), @@ -128,17 +163,7 @@ fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> Disp ) } -fn system_para_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { +fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -149,101 +174,108 @@ fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) ) } -fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( +fn para_to_system_para_reserve_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), t.args.fee_asset_item, + t.args.weight_limit, ) } -/// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't -/// work +/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work #[test] -fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain +fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { + let signed_origin = ::RuntimeOrigin::signed(RococoSender::get().into()); + let destination = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); let amount_to_send: Balance = ROCOCO_ED * 1000; - let test_args = TestContext { - sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_limited_reserve_transfer_assets); - test.assert(); - - let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + let assets: MultiAssets = (Here, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + Rococo::execute_with(|| { + let result = ::XcmPallet::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 99, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); }); - - let sender_balance_after = test.sender.balance; 
- let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); } -/// Limited Reserve Transfers of native asset from System Parachain to Relay Chain shoudln't work +/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work #[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { +fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { // Init values for System Parachain + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); let destination = AssetHubRococo::parent_location(); let beneficiary_id = RococoReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); + let assets: MultiAssets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + AssetHubRococo::execute_with(|| { + let result = + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); } -/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work +/// Reserve Transfers of native asset from Relay to Parachain should work #[test] -fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain +fn reserve_transfer_native_asset_from_relay_to_para() { + // Init values for Relay + let destination = Rococo::child_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; + let test_args = TestContext { sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + receiver: PenpalAReceiver::get(), + args: relay_test_args(destination, beneficiary_id, amount_to_send), }; - let mut test = RelayToSystemParaTest::new(test_args); + let mut test = RelayToParaTest::new(test_args); let sender_balance_before = test.sender.balance; let receiver_balance_before = test.receiver.balance; - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_reserve_transfer_assets); + test.set_assertion::(relay_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); + 
test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); let delivery_fees = Rococo::execute_with(|| { @@ -255,44 +287,19 @@ fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } -/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work -#[test] -fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from System Parachain to Parachain should work #[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_para() { +fn reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let beneficiary_id = PenpalAReceiver::get(); @@ -302,20 +309,21 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); + test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::< @@ -323,117 +331,165 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } -/// Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from Parachain to System Parachain should work #[test] -fn reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); +fn reserve_transfer_native_asset_from_para_to_system_para() { + // Init values for Penpal Parachain + let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + sender: PenpalASender::get(), + receiver: AssetHubRococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; - let mut test = SystemParaToParaTest::new(test_args); + let mut test = ParaToSystemParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); + let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); + + // fund the Penpal's SA on AHR with the native tokens held in reserve + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount_to_send * 2)]); + + test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; - let delivery_fees = AssetHubRococo::execute_with(|| { + let delivery_fees = PenpalA::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, + ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } -/// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work +/// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should +/// work #[test] -fn limited_reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account +fn reserve_transfer_assets_from_system_para_to_para() { + // Force create asset on AssetHubRococo and PenpalA from Relay Chain AssetHubRococo::force_create_and_mint_asset( ASSET_ID, ASSET_MIN_BALANCE, - true, + false, AssetHubRococoSender::get(), Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, + ASSET_MIN_BALANCE * 1_000_000, + ); + PenpalA::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + false, + PenpalASender::get(), + None, + 0, ); // Init values for System Parachain let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { + let fee_amount_to_send = ASSET_HUB_ROCOCO_ED * 1000; + let asset_amount_to_send = ASSET_MIN_BALANCE * 1000; + let assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), asset_amount_to_send) + .into(), + ] + .into(); + let fee_asset_index = assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + let para_test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, 
assets, None), + args: para_test_args( + destination, + beneficiary_id, + asset_amount_to_send, + assets, + None, + fee_asset_index, + ), }; - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let mut test = SystemParaToParaTest::new(para_test_args); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); -} + // Create SA-of-Penpal-on-AHR with ED. + let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), ROCOCO_ED)]); -/// Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubRococo::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubRococoSender::get(), - Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, - ); + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); + let sender_assets_before = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubRococoSender::get()) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalAReceiver::get()) + }); - let system_para_test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; + test.set_assertion::(system_para_to_para_assets_sender_assertions); + test.set_assertion::(system_para_to_para_assets_receiver_assertions); + test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + // Sender's balance is reduced + assert!(sender_balance_after < sender_balance_before); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + fee_amount_to_send); + + let sender_assets_after = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubRococoSender::get()) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalAReceiver::get()) + }); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - 
.set_dispatchable::(system_para_to_para_reserve_transfer_assets); - system_para_test.assert(); + // Sender's balance is reduced by exact amount + assert_eq!(sender_assets_before - asset_amount_to_send, sender_assets_after); + // Receiver's balance is increased by exact amount + assert_eq!(receiver_assets_after, receiver_assets_before + asset_amount_to_send); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index e08af50c14eeda732c4fad1b1c2f296ce6fb590d..3dcc51b75ccc3112779e3340c1479e1b4d31bfb1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -14,17 +14,17 @@ // limitations under the License. use crate::*; -use frame_support::{instances::Instance2, BoundedVec}; use parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; -use sp_runtime::{DispatchError, ModuleError}; +use rococo_system_emulated_network::penpal_emulated_chain::LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub; +use sp_runtime::ModuleError; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()); - let asset_one = Box::new(MultiLocation { + let asset_native = asset_hub_rococo_runtime::xcm_config::TokenLocation::get(); + let asset_one = MultiLocation { parents: 0, interior: X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), - }); + }; AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; @@ -44,16 +44,10 @@ fn swap_locally_on_chain_using_local_assets() { 100_000_000_000_000, )); - assert_ok!(::Balances::force_set_balance( - ::RuntimeOrigin::root(), - AssetHubRococoSender::get().into(), - 100_000_000_000_000, - )); - assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - asset_native.clone(), - asset_one.clone(), + Box::new(asset_native), + Box::new(asset_one), )); assert_expected_events!( @@ -65,8 +59,8 @@ fn swap_locally_on_chain_using_local_assets() { assert_ok!(::AssetConversion::add_liquidity( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - asset_native.clone(), - asset_one.clone(), + Box::new(asset_native), + Box::new(asset_one), 1_000_000_000_000, 2_000_000_000_000, 0, @@ -81,7 +75,7 @@ fn swap_locally_on_chain_using_local_assets() { ] ); - let path = BoundedVec::<_, _>::truncate_from(vec![asset_native.clone(), asset_one.clone()]); + let path = vec![Box::new(asset_native), Box::new(asset_one)]; assert_ok!( ::AssetConversion::swap_exact_tokens_for_tokens( @@ -106,8 +100,8 @@ fn swap_locally_on_chain_using_local_assets() { assert_ok!(::AssetConversion::remove_liquidity( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - asset_native, - asset_one, + Box::new(asset_native), + Box::new(asset_one), 1414213562273 - EXISTENTIAL_DEPOSIT * 2, // all but the 2 EDs can't be retrieved. 
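// Where the `1414213562273` liquidity figure passed to `remove_liquidity` above
// comes from (illustrative sketch only, assuming the pool's locked minimum
// liquidity is 100, which is consistent with the number used in this test):
// the LP tokens minted by the first `add_liquidity` are the integer square
// root of the product of the two deposited amounts, minus that locked minimum.
fn expected_lp_tokens(amount_native: u128, amount_asset: u128, min_liquidity: u128) -> u128 {
    // Plain integer square root via Newton's method; good enough for a sketch.
    let product = amount_native * amount_asset;
    let mut x = product;
    let mut y = (x + 1) / 2;
    while y < x {
        x = y;
        y = (x + product / x) / 2;
    }
    x - min_liquidity
}
// expected_lp_tokens(1_000_000_000_000, 2_000_000_000_000, 100) == 1_414_213_562_273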
0, 0, @@ -118,114 +112,37 @@ fn swap_locally_on_chain_using_local_assets() { #[test] fn swap_locally_on_chain_using_foreign_assets() { - use frame_support::weights::WeightToFee; - - let asset_native = Box::new(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()); - - let foreign_asset1_at_asset_hub_rococo = Box::new(MultiLocation { - parents: 1, - interior: X3( - Parachain(PenpalA::para_id().into()), - PalletInstance(ASSETS_PALLET_ID), - GeneralIndex(ASSET_ID.into()), - ), - }); - - let assets_para_destination: VersionedMultiLocation = - MultiLocation { parents: 1, interior: X1(Parachain(AssetHubRococo::para_id().into())) } - .into(); - - let penpal_location = - MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) }; - - // 1. Create asset on penpal: - PenpalA::execute_with(|| { - assert_ok!(::Assets::create( - ::RuntimeOrigin::signed(PenpalASender::get()), - ASSET_ID.into(), - PenpalASender::get().into(), - 1000, - )); - - assert!(::Assets::asset_exists(ASSET_ID)); - }); - - // 2. Create foreign asset on asset_hub_rococo: - - let require_weight_at_most = Weight::from_parts(1_100_000_000_000, 30_000); - let origin_kind = OriginKind::Xcm; - let sov_penpal_on_asset_hub_rococo = AssetHubRococo::sovereign_account_id_of(penpal_location); + let asset_native = asset_hub_rococo_runtime::xcm_config::TokenLocation::get(); + let ah_as_seen_by_penpal = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let asset_location_on_penpal = PenpalLocalTeleportableToAssetHub::get(); + let asset_id_on_penpal = match asset_location_on_penpal.last() { + Some(GeneralIndex(id)) => *id as u32, + _ => unreachable!(), + }; + let asset_owner_on_penpal = PenpalASender::get(); + let foreign_asset_at_asset_hub_rococo = + MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) } + .appended_with(asset_location_on_penpal) + .unwrap(); + + // 1. Create asset on penpal and, 2. Create foreign asset on asset_hub_rococo + super::penpal_create_foreign_asset_on_asset_hub( + asset_id_on_penpal, + foreign_asset_at_asset_hub_rococo, + ah_as_seen_by_penpal, + true, + asset_owner_on_penpal, + ASSET_MIN_BALANCE * 1_000_000, + ); + let penpal_as_seen_by_ah = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_as_seen_by_ah); AssetHubRococo::fund_accounts(vec![ (AssetHubRococoSender::get().into(), 5_000_000 * ROCOCO_ED), /* An account to swap dot * for something else. 
*/ - (sov_penpal_on_asset_hub_rococo.clone().into(), 1000_000_000_000_000_000 * ROCOCO_ED), ]); - let sov_penpal_on_asset_hub_rococo_as_location: MultiLocation = MultiLocation { - parents: 0, - interior: X1(AccountId32Junction { - network: None, - id: sov_penpal_on_asset_hub_rococo.clone().into(), - }), - }; - - let call_foreign_assets_create = - ::RuntimeCall::ForeignAssets(pallet_assets::Call::< - ::Runtime, - Instance2, - >::create { - id: *foreign_asset1_at_asset_hub_rococo, - min_balance: 1000, - admin: sov_penpal_on_asset_hub_rococo.clone().into(), - }) - .encode() - .into(); - - let buy_execution_fee_amount = parachains_common::rococo::fee::WeightToFee::weight_to_fee( - &Weight::from_parts(10_100_000_000_000, 300_000), - ); - let buy_execution_fee = MultiAsset { - id: Concrete(MultiLocation { parents: 1, interior: Here }), - fun: Fungible(buy_execution_fee_amount), - }; - - let xcm = VersionedXcm::from(Xcm(vec![ - WithdrawAsset { 0: vec![buy_execution_fee.clone()].into() }, - BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { require_weight_at_most, origin_kind, call: call_foreign_assets_create }, - RefundSurplus, - DepositAsset { - assets: All.into(), - beneficiary: sov_penpal_on_asset_hub_rococo_as_location, - }, - ])); - - // Send XCM message from penpal => asset_hub_rococo - let sudo_penpal_origin = ::RuntimeOrigin::root(); - PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( - sudo_penpal_origin.clone(), - bx!(assets_para_destination.clone()), - bx!(xcm), - )); - - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - PenpalA, - vec![ - RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); - - // Receive XCM message in Assets Parachain AssetHubRococo::execute_with(|| { - assert!(::ForeignAssets::asset_exists( - *foreign_asset1_at_asset_hub_rococo - )); - // 3: Mint foreign asset on asset_hub_rococo: // // (While it might be nice to use batch, @@ -234,11 +151,9 @@ fn swap_locally_on_chain_using_foreign_assets() { type RuntimeEvent = ::RuntimeEvent; // 3. Mint foreign asset (in reality this should be a teleport or some such) assert_ok!(::ForeignAssets::mint( - ::RuntimeOrigin::signed( - sov_penpal_on_asset_hub_rococo.clone().into() - ), - *foreign_asset1_at_asset_hub_rococo, - sov_penpal_on_asset_hub_rococo.clone().into(), + ::RuntimeOrigin::signed(sov_penpal_on_ahr.clone().into()), + foreign_asset_at_asset_hub_rococo, + sov_penpal_on_ahr.clone().into(), 3_000_000_000_000, )); @@ -252,8 +167,8 @@ fn swap_locally_on_chain_using_foreign_assets() { // 4. Create pool: assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - asset_native.clone(), - foreign_asset1_at_asset_hub_rococo.clone(), + Box::new(asset_native), + Box::new(foreign_asset_at_asset_hub_rococo), )); assert_expected_events!( @@ -265,16 +180,14 @@ fn swap_locally_on_chain_using_foreign_assets() { // 5. Add liquidity: assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed( - sov_penpal_on_asset_hub_rococo.clone() - ), - asset_native.clone(), - foreign_asset1_at_asset_hub_rococo.clone(), + ::RuntimeOrigin::signed(sov_penpal_on_ahr.clone()), + Box::new(asset_native), + Box::new(foreign_asset_at_asset_hub_rococo), 1_000_000_000_000, 2_000_000_000_000, 0, 0, - sov_penpal_on_asset_hub_rococo.clone().into() + sov_penpal_on_ahr.clone().into() )); assert_expected_events!( @@ -287,10 +200,7 @@ fn swap_locally_on_chain_using_foreign_assets() { ); // 6. Swap! 
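// Illustrative sketch (with example pallet/asset indices rather than the
// runtime's real constants) of how the boxed-`MultiLocation` swap `path`
// built just below is assembled for the `AssetConversion` calls, e.g. for a
// pool between the native token and a local `pallet-assets` asset:
fn example_swap_path() -> Vec<Box<xcm::v3::MultiLocation>> {
    use xcm::v3::prelude::*;
    // The chain's native token as seen from a system parachain: one hop up, `Here`.
    let native = MultiLocation { parents: 1, interior: Here };
    // A local `pallet-assets` asset, addressed by pallet instance and asset index.
    let local_asset =
        MultiLocation { parents: 0, interior: X2(PalletInstance(50), GeneralIndex(1984)) };
    // One boxed location per asset along the conversion route.
    vec![Box::new(native), Box::new(local_asset)]
}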
- let path = BoundedVec::<_, _>::truncate_from(vec![ - asset_native.clone(), - foreign_asset1_at_asset_hub_rococo.clone(), - ]); + let path = vec![Box::new(asset_native), Box::new(foreign_asset_at_asset_hub_rococo)]; assert_ok!( ::AssetConversion::swap_exact_tokens_for_tokens( @@ -315,22 +225,20 @@ fn swap_locally_on_chain_using_foreign_assets() { // 7. Remove liquidity assert_ok!(::AssetConversion::remove_liquidity( - ::RuntimeOrigin::signed( - sov_penpal_on_asset_hub_rococo.clone() - ), - asset_native, - foreign_asset1_at_asset_hub_rococo, + ::RuntimeOrigin::signed(sov_penpal_on_ahr.clone()), + Box::new(asset_native), + Box::new(foreign_asset_at_asset_hub_rococo), 1414213562273 - 2_000_000_000, // all but the 2 EDs can't be retrieved. 0, 0, - sov_penpal_on_asset_hub_rococo.clone().into(), + sov_penpal_on_ahr.clone().into(), )); }); } #[test] fn cannot_create_pool_from_pool_assets() { - let asset_native = Box::new(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()); + let asset_native = asset_hub_rococo_runtime::xcm_config::TokenLocation::get(); let mut asset_one = asset_hub_rococo_runtime::xcm_config::PoolAssetsPalletLocation::get(); asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); @@ -355,10 +263,10 @@ fn cannot_create_pool_from_pool_assets() { assert_matches::assert_matches!( ::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - asset_native.clone(), + Box::new(asset_native), Box::new(asset_one), ), - Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("UnsupportedAsset")) + Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("Unknown")) ); }); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 0d2ca68524707f9ae814aaffea74373919b5d5b0..e64c02f52583e9055f2fd550f7205b8cc34f2a01 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -15,7 +15,9 @@ use crate::*; use asset_hub_rococo_runtime::xcm_config::XcmConfig as AssetHubRococoXcmConfig; +use emulated_integration_tests_common::xcm_helpers::non_fee_asset; use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; +use rococo_system_emulated_network::penpal_emulated_chain::LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -51,7 +53,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { assert_expected_events!( Rococo, vec![ - // Amount is witdrawn from Relay Chain's `CheckAccount` + // Amount is withdrawn from Relay Chain's `CheckAccount` RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { who: *who == ::XcmPallet::check_account(), amount: *amount == t.args.amount, @@ -110,6 +112,123 @@ fn para_dest_assertions(t: RelayToSystemParaTest) { ); } +fn penpal_to_ah_foreign_assets_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + PenpalA::assert_xcm_pallet_attempted_complete(None); + let expected_asset_id = t.args.asset_id.unwrap(); + let (_, expected_asset_amount) = + non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); + assert_expected_events!( + PenpalA, + vec![ + 
RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + RuntimeEvent::Assets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { + asset_id: *asset_id == expected_asset_id, + owner: *owner == t.sender.account_id, + balance: *balance == expected_asset_amount, + }, + ] + ); +} + +fn penpal_to_ah_foreign_assets_receiver_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + let (expected_foreign_asset_id, expected_foreign_asset_amount) = + non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); + assert_expected_events!( + AssetHubRococo, + vec![ + // native asset reserve transfer for paying fees, withdrawn from Penpal's sov account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + amount: *amount == t.args.amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == t.receiver.account_id, + }, + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, amount }) => { + asset_id: *asset_id == expected_foreign_asset_id, + owner: *owner == t.receiver.account_id, + amount: *amount == expected_foreign_asset_amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn ah_to_penpal_foreign_assets_sender_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + AssetHubRococo::assert_xcm_pallet_attempted_complete(None); + let (expected_foreign_asset_id, expected_foreign_asset_amount) = + non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); + assert_expected_events!( + AssetHubRococo, + vec![ + // native asset used for fees is transferred to Parachain's Sovereign account as reserve + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == AssetHubRococo::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + // foreign asset is burned locally as part of teleportation + RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { + asset_id: *asset_id == expected_foreign_asset_id, + owner: *owner == t.sender.account_id, + balance: *balance == expected_foreign_asset_amount, + }, + ] + ); +} + +fn ah_to_penpal_foreign_assets_receiver_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + let expected_asset_id = t.args.asset_id.unwrap(); + let (_, expected_asset_amount) = + non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); + let checking_account = ::PolkadotXcm::check_account(); + assert_expected_events!( + PenpalA, + vec![ + // checking account burns local asset as part of incoming teleport + RuntimeEvent::Assets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { + asset_id: *asset_id == expected_asset_id, + owner: *owner == checking_account, + balance: *balance == expected_asset_amount, + }, + // local asset is teleported into account of receiver + RuntimeEvent::Assets(pallet_assets::Event::Issued { asset_id, owner, amount }) => { + asset_id: *asset_id == expected_asset_id, + owner: *owner == 
t.receiver.account_id, + amount: *amount == expected_asset_amount, + }, + // native asset for fee is deposited to receiver + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == t.receiver.account_id, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { ::XcmPallet::limited_teleport_assets( t.signed_origin, @@ -152,15 +271,39 @@ fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ) } +fn para_to_system_para_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + +fn system_para_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { + ::PolkadotXcm::transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + /// Limited Teleport of native asset from Relay Chain to the System Parachain should work #[test] fn limited_teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = ROCOCO_ED * 1000; + let dest = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let test_args = TestContext { sender: RococoSender::get(), receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + args: relay_test_args(dest, beneficiary_id, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -204,7 +347,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -245,7 +388,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -278,10 +421,12 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { fn teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = ROCOCO_ED * 1000; + let dest = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let test_args = TestContext { sender: RococoSender::get(), receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + args: relay_test_args(dest, beneficiary_id, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -325,7 +470,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: 
para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -366,7 +511,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -406,3 +551,199 @@ fn teleport_to_other_system_parachains_works() { (native_asset, amount) ); } + +/// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets should work +/// (using native reserve-based transfer for fees) +#[test] +fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { + let ah_as_seen_by_penpal = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let asset_location_on_penpal = PenpalLocalTeleportableToAssetHub::get(); + let asset_id_on_penpal = match asset_location_on_penpal.last() { + Some(GeneralIndex(id)) => *id as u32, + _ => unreachable!(), + }; + let asset_owner_on_penpal = PenpalASender::get(); + let foreign_asset_at_asset_hub_rococo = + MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) } + .appended_with(asset_location_on_penpal) + .unwrap(); + super::penpal_create_foreign_asset_on_asset_hub( + asset_id_on_penpal, + foreign_asset_at_asset_hub_rococo, + ah_as_seen_by_penpal, + false, + asset_owner_on_penpal, + ASSET_MIN_BALANCE * 1_000_000, + ); + let penpal_to_ah_beneficiary_id = AssetHubRococoReceiver::get(); + + let fee_amount_to_send = ASSET_HUB_ROCOCO_ED * 10_000; + let asset_amount_to_send = ASSET_MIN_BALANCE * 1000; + + let penpal_assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (asset_location_on_penpal, asset_amount_to_send).into(), + ] + .into(); + let fee_asset_index = penpal_assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + // Penpal to AH test args + let penpal_to_ah_test_args = TestContext { + sender: PenpalASender::get(), + receiver: AssetHubRococoReceiver::get(), + args: para_test_args( + ah_as_seen_by_penpal, + penpal_to_ah_beneficiary_id, + asset_amount_to_send, + penpal_assets, + Some(asset_id_on_penpal), + fee_asset_index, + ), + }; + let mut penpal_to_ah = ParaToSystemParaTest::new(penpal_to_ah_test_args); + + let penpal_sender_balance_before = penpal_to_ah.sender.balance; + let ah_receiver_balance_before = penpal_to_ah.receiver.balance; + + let penpal_sender_assets_before = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(asset_id_on_penpal, &PenpalASender::get()) + }); + let ah_receiver_assets_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + foreign_asset_at_asset_hub_rococo, + &AssetHubRococoReceiver::get(), + ) + }); + + penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_sender_assertions); + penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_receiver_assertions); + penpal_to_ah.set_dispatchable::(para_to_system_para_transfer_assets); + penpal_to_ah.assert(); + + let penpal_sender_balance_after = penpal_to_ah.sender.balance; + let ah_receiver_balance_after = penpal_to_ah.receiver.balance; + + let penpal_sender_assets_after = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(asset_id_on_penpal, &PenpalASender::get()) + }); + let ah_receiver_assets_after = 
AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + foreign_asset_at_asset_hub_rococo, + &AssetHubRococoReceiver::get(), + ) + }); + + // Sender's balance is reduced + assert!(penpal_sender_balance_after < penpal_sender_balance_before); + // Receiver's balance is increased + assert!(ah_receiver_balance_after > ah_receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(ah_receiver_balance_after < ah_receiver_balance_before + fee_amount_to_send); + + // Sender's balance is reduced by exact amount + assert_eq!(penpal_sender_assets_before - asset_amount_to_send, penpal_sender_assets_after); + // Receiver's balance is increased by exact amount + assert_eq!(ah_receiver_assets_after, ah_receiver_assets_before + asset_amount_to_send); + + /////////////////////////////////////////////////////////////////////// + // Now test transferring foreign assets back from AssetHub to Penpal // + /////////////////////////////////////////////////////////////////////// + + // Move funds on AH from AHReceiver to AHSender + AssetHubRococo::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + assert_ok!(ForeignAssets::transfer( + ::RuntimeOrigin::signed(AssetHubRococoReceiver::get()), + foreign_asset_at_asset_hub_rococo, + AssetHubRococoSender::get().into(), + asset_amount_to_send, + )); + }); + + let ah_to_penpal_beneficiary_id = PenpalAReceiver::get(); + let penpal_as_seen_by_ah = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let ah_assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (foreign_asset_at_asset_hub_rococo, asset_amount_to_send).into(), + ] + .into(); + let fee_asset_index = ah_assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + // AH to Penpal test args + let ah_to_penpal_test_args = TestContext { + sender: AssetHubRococoSender::get(), + receiver: PenpalAReceiver::get(), + args: para_test_args( + penpal_as_seen_by_ah, + ah_to_penpal_beneficiary_id, + asset_amount_to_send, + ah_assets, + Some(asset_id_on_penpal), + fee_asset_index, + ), + }; + let mut ah_to_penpal = SystemParaToParaTest::new(ah_to_penpal_test_args); + + let ah_sender_balance_before = ah_to_penpal.sender.balance; + let penpal_receiver_balance_before = ah_to_penpal.receiver.balance; + + let ah_sender_assets_before = AssetHubRococo::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + foreign_asset_at_asset_hub_rococo, + &AssetHubRococoSender::get(), + ) + }); + let penpal_receiver_assets_before = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(asset_id_on_penpal, &PenpalAReceiver::get()) + }); + + ah_to_penpal.set_assertion::(ah_to_penpal_foreign_assets_sender_assertions); + ah_to_penpal.set_assertion::(ah_to_penpal_foreign_assets_receiver_assertions); + ah_to_penpal.set_dispatchable::(system_para_to_para_transfer_assets); + ah_to_penpal.assert(); + + let ah_sender_balance_after = ah_to_penpal.sender.balance; + let penpal_receiver_balance_after = ah_to_penpal.receiver.balance; + + let ah_sender_assets_after = AssetHubRococo::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + foreign_asset_at_asset_hub_rococo, + &AssetHubRococoSender::get(), + ) + }); + let penpal_receiver_assets_after = PenpalA::execute_with(|| { + type Assets = ::Assets; + 
>::balance(asset_id_on_penpal, &PenpalAReceiver::get()) + }); + + // Sender's balance is reduced + assert!(ah_sender_balance_after < ah_sender_balance_before); + // Receiver's balance is increased + assert!(penpal_receiver_balance_after > penpal_receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(penpal_receiver_balance_after < penpal_receiver_balance_before + fee_amount_to_send); + + // Sender's balance is reduced by exact amount + assert_eq!(ah_sender_assets_before - asset_amount_to_send, ah_sender_assets_after); + // Receiver's balance is increased by exact amount + assert_eq!(penpal_receiver_assets_after, penpal_receiver_assets_before + asset_amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 7080abc0a4403681e099cd61fddcc6b0b72dbed0..3b2d3367d40d1984ee91fac96c37b524a5a97c27 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -7,27 +7,30 @@ license = "Apache-2.0" description = "Asset Hub Westend runtime integration tests with xcm-emulator" publish = false +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } assert_matches = "1.5.0" # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false} -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false} -pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false} +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } +pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } +pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } # Polkadot polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } -xcm = { package = 
"staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants", default-features = false } @@ -36,6 +39,7 @@ parachains-common = { path = "../../../../../../parachains/common" } asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../../pallets/dmp-queue" } +cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../../../pallets/xcmp-queue" } cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } -emulated-integration-tests-common = { path = "../../../common", default-features = false} -westend-system-emulated-network ={ path = "../../../networks/westend-system" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } +westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index e52ad448c0b685f82d15c89cd6e4f2f2f16d68ca..e9c7a59faaf65c1808a73479211b54e03d58a47e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -47,11 +47,17 @@ pub use westend_system_emulated_network::{ asset_hub_westend_emulated_chain::{ genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, }, - penpal_emulated_chain::PenpalAParaPallet as PenpalAPallet, + collectives_westend_emulated_chain::{ + genesis::ED as COLLECTIVES_WESTEND_ED, + CollectivesWestendParaPallet as CollectivesWestendPallet, + }, + penpal_emulated_chain::PenpalBParaPallet as PenpalBPallet, westend_emulated_chain::{genesis::ED as WESTEND_ED, WestendRelayPallet as WestendPallet}, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, - AssetHubWestendParaSender as AssetHubWestendSender, PenpalAPara as PenpalA, - PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender, + AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubWestendPara as BridgeHubWestend, + BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, + CollectivesWestendPara as CollectivesWestend, PenpalBPara as PenpalB, + 
PenpalBParaReceiver as PenpalBReceiver, PenpalBParaSender as PenpalBSender, WestendRelay as Westend, WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, }; @@ -62,18 +68,20 @@ pub const ASSET_MIN_BALANCE: u128 = 1000; pub const ASSETS_PALLET_ID: u8 = 50; pub type RelayToSystemParaTest = Test; +pub type RelayToParaTest = Test; pub type SystemParaToRelayTest = Test; -pub type SystemParaToParaTest = Test; +pub type SystemParaToParaTest = Test; +pub type ParaToSystemParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests -pub fn relay_test_args(amount: Balance) -> TestArgs { +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests +pub fn relay_test_args( + dest: MultiLocation, + beneficiary_id: AccountId32, + amount: Balance, +) -> TestArgs { TestArgs { - dest: Westend::child_location_of(AssetHubWestend::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubWestendReceiver::get().into(), - } - .into(), + dest, + beneficiary: AccountId32Junction { network: None, id: beneficiary_id.into() }.into(), amount, assets: (Here, amount).into(), asset_id: None, @@ -82,13 +90,14 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain accross integraton tests -pub fn system_para_test_args( +/// Returns a `TestArgs` instance to be used by parachains across integration tests +pub fn para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, amount: Balance, assets: MultiAssets, asset_id: Option, + fee_asset_item: u32, ) -> TestArgs { TestArgs { dest, @@ -96,7 +105,7 @@ pub fn system_para_test_args( amount, assets, asset_id, - fee_asset_item: 0, + fee_asset_item, weight_limit: WeightLimit::Unlimited, } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs new file mode 100644 index 0000000000000000000000000000000000000000..d7de0a451f202217cdde1c32b8a4dd1b853ef861 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use emulated_integration_tests_common::accounts::{ALICE, BOB}; +use frame_support::traits::fungibles::{Create, Inspect, Mutate}; +use polkadot_runtime_common::impls::VersionedLocatableAsset; +use xcm_executor::traits::ConvertLocation; + +#[test] +fn create_and_claim_treasury_spend() { + const ASSET_ID: u32 = 1984; + const SPEND_AMOUNT: u128 = 1_000_000; + // treasury location from a sibling parachain. 
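// (For orientation: seen from Asset Hub, the Fellowship treasury used below is
// the `PalletInstance(65)` location on the Collectives sibling parachain, and
// `LocationToAccountId::convert_location` derives the local account that the
// test then funds with the example asset. A minimal sketch with a placeholder
// para id, since the real id comes from `CollectivesWestend::para_id()`:)
fn example_sibling_treasury_location(collectives_para_id: u32) -> xcm::v3::MultiLocation {
    use xcm::v3::prelude::*;
    // One hop up to the relay, then down into the sibling parachain's pallet 65.
    MultiLocation::new(1, X2(Parachain(collectives_para_id), PalletInstance(65)))
}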
+ let treasury_location: MultiLocation = MultiLocation::new( + 1, + X2(Parachain(CollectivesWestend::para_id().into()), PalletInstance(65)), + ); + // treasury account on a sibling parachain. + let treasury_account = + asset_hub_westend_runtime::xcm_config::LocationToAccountId::convert_location( + &treasury_location, + ) + .unwrap(); + let asset_hub_location = MultiLocation::new(1, Parachain(AssetHubWestend::para_id().into())); + let root = ::RuntimeOrigin::root(); + // asset kind to be spent from the treasury. + let asset_kind = VersionedLocatableAsset::V3 { + location: asset_hub_location, + asset_id: AssetId::Concrete((PalletInstance(50), GeneralIndex(ASSET_ID.into())).into()), + }; + // treasury spend beneficiary. + let alice: AccountId = Westend::account_id_of(ALICE); + let bob: AccountId = CollectivesWestend::account_id_of(BOB); + let bob_signed = ::RuntimeOrigin::signed(bob.clone()); + + AssetHubWestend::execute_with(|| { + type Assets = ::Assets; + + // create an asset class and mint some assets to the treasury account. + assert_ok!(>::create( + ASSET_ID, + treasury_account.clone(), + true, + SPEND_AMOUNT / 2 + )); + assert_ok!(>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4)); + // beneficiary has zero balance. + assert_eq!(>::balance(ASSET_ID, &alice,), 0u128,); + }); + + CollectivesWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type FellowshipTreasury = + ::FellowshipTreasury; + type AssetRate = ::AssetRate; + + // create a conversion rate from `asset_kind` to the native currency. + assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into())); + + // create and approve a treasury spend. + assert_ok!(FellowshipTreasury::spend( + root, + Box::new(asset_kind), + SPEND_AMOUNT, + Box::new(MultiLocation::new(0, Into::<[u8; 32]>::into(alice.clone())).into()), + None, + )); + // claim the spend. + assert_ok!(FellowshipTreasury::payout(bob_signed.clone(), 0)); + + assert_expected_events!( + CollectivesWestend, + vec![ + RuntimeEvent::FellowshipTreasury(pallet_treasury::Event::Paid { .. }) => {}, + ] + ); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Assets = ::Assets; + + // assert events triggered by xcm pay program + // 1. treasury asset transferred to spend beneficiary + // 2. response to the Fellowship treasury pallet instance sent back + // 3. XCM program completed + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::Assets(pallet_assets::Event::Transferred { asset_id: id, from, to, amount }) => { + id: id == &ASSET_ID, + from: from == &treasury_account, + to: to == &alice, + amount: amount == &SPEND_AMOUNT, + }, + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, + ] + ); + // beneficiary received the assets from the treasury. + assert_eq!(>::balance(ASSET_ID, &alice,), SPEND_AMOUNT,); + }); + + CollectivesWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type FellowshipTreasury = + ::FellowshipTreasury; + + // check the payment status to ensure the response from the AssetHub was received. + assert_ok!(FellowshipTreasury::check_status(bob_signed, 0)); + assert_expected_events!( + CollectivesWestend, + vec![ + RuntimeEvent::FellowshipTreasury(pallet_treasury::Event::SpendProcessed { .. 
}) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs index 0c9de89c5f98f04a8a8f79048faf260c7bae27d5..ee720c2448041c085ee53c014c8d8125e7ee8caa 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs @@ -13,9 +13,18 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod fellowship_treasury; mod reserve_transfer; mod send; mod set_xcm_versions; mod swap; mod teleport; mod treasury; + +use crate::*; +emulated_integration_tests_common::include_penpal_create_foreign_asset_on_asset_hub!( + PenpalB, + AssetHubWestend, + WESTEND_ED, + parachains_common::westend::fee::WeightToFee +); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 19a203897ad855bb5021469efa20f60f52ca6348..7472445c4ba77df93d9935d1f416854432297fe2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -14,38 +14,33 @@ // limitations under the License. use crate::*; -use asset_hub_westend_runtime::xcm_config::XcmConfig; +use asset_hub_westend_runtime::xcm_config::XcmConfig as AssetHubWestendXcmConfig; use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; +use westend_system_emulated_network::penpal_emulated_chain::XcmConfig as PenpalWestendXcmConfig; -fn relay_origin_assertions(t: RelayToSystemParaTest) { +fn relay_to_para_sender_assertions(t: RelayToParaTest) { type RuntimeEvent = ::RuntimeEvent; - Westend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(629_384_000, 6_196))); + Westend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); assert_expected_events!( Westend, vec![ - // Amount to reserve transfer is transferred to System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { from: *from == t.sender.account_id, to: *to == Westend::sovereign_account_id_of( t.args.dest ), - amount: *amount == t.args.amount, + amount: *amount == t.args.amount, }, ] ); } -fn system_para_dest_assertions(_t: RelayToSystemParaTest) { - AssetHubWestend::assert_dmp_queue_incomplete(Some(Weight::from_parts(31_352_000, 1489))); -} - -fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) { - AssetHubWestend::assert_xcm_pallet_attempted_error(Some(XcmError::Barrier)) -} - -fn system_para_to_para_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -56,7 +51,7 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // 
Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Balances( pallet_balances::Event::Transfer { from, to, amount } ) => { @@ -70,7 +65,64 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { ); } -fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { +fn para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + PenpalB::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + + assert_expected_events!( + PenpalB, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahw.clone().into(), + amount: *amount == t.args.amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -81,7 +133,7 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Assets( pallet_assets::Event::Transferred { asset_id, from, to, amount } ) => { @@ -96,29 +148,22 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { ); } -fn relay_limited_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn relay_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) +fn system_para_to_para_assets_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::Assets(pallet_assets::Event::Issued { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); } -fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( +fn relay_to_para_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { + ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), bx!(t.args.beneficiary.into()), @@ -128,17 +173,7 @@ fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> Disp ) } -fn system_para_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { +fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -149,101 +184,107 @@ fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) ) } -fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( +fn para_to_system_para_reserve_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), t.args.fee_asset_item, + t.args.weight_limit, ) } -/// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't -/// work +/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work #[test] -fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain +fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { + let signed_origin = ::RuntimeOrigin::signed(WestendSender::get().into()); + let destination = Westend::child_location_of(AssetHubWestend::para_id()); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); let amount_to_send: Balance = WESTEND_ED * 1000; - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions); - test.set_dispatchable::(relay_limited_reserve_transfer_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + let assets: MultiAssets = (Here, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + Westend::execute_with(|| { + let result = ::XcmPallet::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 99, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); }); - - let sender_balance_after = test.sender.balance; - 
let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); } -/// Limited Reserve Transfers of native asset from System Parachain to Relay Chain shoudln't work +/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work #[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { +fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { // Init values for System Parachain + let signed_origin = + ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); let destination = AssetHubWestend::parent_location(); let beneficiary_id = WestendReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); + let assets: MultiAssets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + AssetHubWestend::execute_with(|| { + let result = + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); } -/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work +/// Reserve Transfers of native asset from Relay to Parachain should work #[test] -fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain +fn reserve_transfer_native_asset_from_relay_to_para() { + // Init values for Relay + let destination = Westend::child_location_of(PenpalB::para_id()); + let beneficiary_id = PenpalBReceiver::get(); let amount_to_send: Balance = WESTEND_ED * 1000; + let test_args = TestContext { sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), + receiver: PenpalBReceiver::get(), + args: relay_test_args(destination, beneficiary_id, amount_to_send), }; - let mut test = RelayToSystemParaTest::new(test_args); + let mut test = RelayToParaTest::new(test_args); let sender_balance_before = test.sender.balance; let receiver_balance_before = test.receiver.balance; - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions); - test.set_dispatchable::(relay_reserve_transfer_assets); + test.set_assertion::(relay_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); + 
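// The harness helpers used here are generic over the emulated chain they run
// on; sketched rather than quoted verbatim, the registrations in this test
// have the shape `test.set_assertion::<Westend>(relay_to_para_sender_assertions)`,
// `test.set_assertion::<PenpalB>(para_receiver_assertions)` and
// `test.set_dispatchable::<Westend>(relay_to_para_reserve_transfer_assets)`,
// i.e. each assertion or dispatchable executes on the chain it is registered for.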
test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); let delivery_fees = Westend::execute_with(|| { @@ -255,193 +296,210 @@ fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } -/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work +/// Reserve Transfers of native asset from System Parachain to Parachain should work #[test] -fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { +fn reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); + let destination = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let beneficiary_id = PenpalBReceiver::get(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + receiver: PenpalBReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; - let mut test = SystemParaToRelayTest::new(test_args); + let mut test = SystemParaToParaTest::new(test_args); let sender_balance_before = test.sender.balance; let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_reserve_transfer_assets); + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); + test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( - test.args.assets.clone(), - 0, - test.args.weight_limit, - test.args.beneficiary, - test.args.dest, - ) + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } -/// Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from Parachain to System Parachain should work #[test] -fn reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); +fn reserve_transfer_native_asset_from_para_to_system_para() { + // Init values for Penpal Parachain + let destination = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let beneficiary_id = AssetHubWestendReceiver::get(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + sender: PenpalBSender::get(), + receiver: AssetHubWestendReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; - let mut test = SystemParaToParaTest::new(test_args); + let mut test = ParaToSystemParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); + let penpal_location_as_seen_by_ahw = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahw = + AssetHubWestend::sovereign_account_id_of(penpal_location_as_seen_by_ahw); + + // fund the Penpal's SA on AHW with the native tokens held in reserve + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), amount_to_send * 2)]); + + test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( - test.args.assets.clone(), - 0, - test.args.weight_limit, - test.args.beneficiary, - test.args.dest, - ) + let delivery_fees = PenpalB::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } -/// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work +/// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should +/// work #[test] -fn limited_reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account +fn reserve_transfer_assets_from_system_para_to_para() { + // Force create asset on AssetHubWestend and PenpalB from Relay Chain AssetHubWestend::force_create_and_mint_asset( ASSET_ID, ASSET_MIN_BALANCE, true, AssetHubWestendSender::get(), Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, + ASSET_MIN_BALANCE * 1_000_000, + ); + PenpalB::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + false, + PenpalBSender::get(), + None, + 0, ); // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { + let destination = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let beneficiary_id = PenpalBReceiver::get(); + let fee_amount_to_send = ASSET_HUB_WESTEND_ED * 1000; + let asset_amount_to_send = ASSET_MIN_BALANCE * 1000; + let assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), asset_amount_to_send) + .into(), + ] + .into(); + let fee_asset_index = assets 
+ .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + let para_test_args = TestContext { sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + receiver: PenpalBReceiver::get(), + args: para_test_args( + destination, + beneficiary_id, + asset_amount_to_send, + assets, + None, + fee_asset_index, + ), }; - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let mut test = SystemParaToParaTest::new(para_test_args); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); -} + // Create SA-of-Penpal-on-AHW with ED. + let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location); + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), WESTEND_ED)]); -/// Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubWestend::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubWestendSender::get(), - Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, - ); + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; + let sender_assets_before = AssetHubWestend::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubWestendSender::get()) + }); + let receiver_assets_before = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalBReceiver::get()) + }); + + test.set_assertion::(system_para_to_para_assets_sender_assertions); + test.set_assertion::(system_para_to_para_assets_receiver_assertions); + test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); + test.assert(); - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert!(sender_balance_after < sender_balance_before); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + fee_amount_to_send); + + let sender_assets_after = AssetHubWestend::execute_with(|| { + type Assets = ::Assets; + 
>::balance(ASSET_ID, &AssetHubWestendSender::get()) + }); + let receiver_assets_after = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalBReceiver::get()) + }); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_reserve_transfer_assets); - system_para_test.assert(); + // Sender's balance is reduced by exact amount + assert_eq!(sender_assets_before - asset_amount_to_send, sender_assets_after); + // Receiver's balance is increased by exact amount + assert_eq!(receiver_assets_after, receiver_assets_before + asset_amount_to_send); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index bda9a3e69c4fbbc2fef03f5fe040526dc3a35f1d..4b98eeb0ed33bfccef33f466805170f6fe361f36 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -33,7 +33,7 @@ fn send_transact_as_superuser_from_relay_to_system_para_works() { #[test] fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let para_sovereign_account = AssetHubWestend::sovereign_account_id_of( - AssetHubWestend::sibling_location_of(PenpalA::para_id()), + AssetHubWestend::sibling_location_of(PenpalB::para_id()), ); // Force create and mint assets for Parachain's sovereign account @@ -60,8 +60,8 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let native_asset = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), fee_amount).into(); - let root_origin = ::RuntimeOrigin::root(); - let system_para_destination = PenpalA::sibling_location_of(AssetHubWestend::para_id()).into(); + let root_origin = ::RuntimeOrigin::root(); + let system_para_destination = PenpalB::sibling_location_of(AssetHubWestend::para_id()).into(); let xcm = xcm_transact_paid_execution( call, origin_kind, @@ -69,14 +69,14 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { para_sovereign_account.clone(), ); - PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + PenpalB::execute_with(|| { + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), bx!(xcm), )); - PenpalA::assert_xcm_pallet_sent(); + PenpalB::assert_xcm_pallet_sent(); }); AssetHubWestend::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index a8e19f9ef4b1c66bdd3f918ce9d5f53e76b8b2d2..47b6ab01e8f803758ea57bc8a853df2a6a2632dc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -14,14 +14,15 @@ // limitations under the License. 
use crate::*; +use westend_system_emulated_network::penpal_emulated_chain::LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new(asset_hub_westend_runtime::xcm_config::WestendLocation::get()); - let asset_one = Box::new(MultiLocation { + let asset_native = asset_hub_westend_runtime::xcm_config::WestendLocation::get(); + let asset_one = MultiLocation { parents: 0, interior: X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), - }); + }; AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; @@ -43,8 +44,8 @@ fn swap_locally_on_chain_using_local_assets() { assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - asset_native.clone(), - asset_one.clone(), + Box::new(asset_native), + Box::new(asset_one), )); assert_expected_events!( @@ -56,8 +57,8 @@ fn swap_locally_on_chain_using_local_assets() { assert_ok!(::AssetConversion::add_liquidity( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - asset_native.clone(), - asset_one.clone(), + Box::new(asset_native), + Box::new(asset_one), 1_000_000_000_000, 2_000_000_000_000, 0, @@ -72,7 +73,7 @@ fn swap_locally_on_chain_using_local_assets() { ] ); - let path = BoundedVec::<_, _>::truncate_from(vec![asset_native.clone(), asset_one.clone()]); + let path = vec![Box::new(asset_native), Box::new(asset_one)]; assert_ok!(::AssetConversion::swap_exact_tokens_for_tokens( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), @@ -95,8 +96,8 @@ fn swap_locally_on_chain_using_local_assets() { assert_ok!(::AssetConversion::remove_liquidity( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - asset_native, - asset_one, + Box::new(asset_native), + Box::new(asset_one), 1414213562273 - 2_000_000_000, // all but the 2 EDs can't be retrieved. 0, 0, @@ -107,113 +108,37 @@ fn swap_locally_on_chain_using_local_assets() { #[test] fn swap_locally_on_chain_using_foreign_assets() { - use frame_support::weights::WeightToFee; - - let asset_native = Box::new(asset_hub_westend_runtime::xcm_config::WestendLocation::get()); - - let foreign_asset1_at_asset_hub_westend = Box::new(MultiLocation { - parents: 1, - interior: X3( - Parachain(PenpalA::para_id().into()), - PalletInstance(ASSETS_PALLET_ID), - GeneralIndex(ASSET_ID.into()), - ), - }); - - let assets_para_destination: VersionedMultiLocation = - MultiLocation { parents: 1, interior: X1(Parachain(AssetHubWestend::para_id().into())) } - .into(); - - let penpal_location = - MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) }; - - // 1. Create asset on penpal: - PenpalA::execute_with(|| { - assert_ok!(::Assets::create( - ::RuntimeOrigin::signed(PenpalASender::get()), - ASSET_ID.into(), - PenpalASender::get().into(), - 1000, - )); - - assert!(::Assets::asset_exists(ASSET_ID)); - }); - - // 2. 
Create foreign asset on asset_hub_westend: - - let require_weight_at_most = Weight::from_parts(1_100_000_000_000, 30_000); - let origin_kind = OriginKind::Xcm; - let sov_penpal_on_asset_hub_westend = AssetHubWestend::sovereign_account_id_of(penpal_location); - - AssetHubWestend::fund_accounts(vec![ - (AssetHubWestendSender::get().into(), 5_000_000 * WESTEND_ED), - (sov_penpal_on_asset_hub_westend.clone().into(), 1000_000_000_000_000_000 * WESTEND_ED), - ]); - - let sov_penpal_on_asset_hub_westend_as_location: MultiLocation = MultiLocation { - parents: 0, - interior: X1(AccountId32Junction { - network: None, - id: sov_penpal_on_asset_hub_westend.clone().into(), - }), + let asset_native = asset_hub_westend_runtime::xcm_config::WestendLocation::get(); + let ah_as_seen_by_penpal = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let asset_location_on_penpal = PenpalLocalTeleportableToAssetHub::get(); + let asset_id_on_penpal = match asset_location_on_penpal.last() { + Some(GeneralIndex(id)) => *id as u32, + _ => unreachable!(), }; - - let call_foreign_assets_create = - ::RuntimeCall::ForeignAssets(pallet_assets::Call::< - ::Runtime, - Instance2, - >::create { - id: *foreign_asset1_at_asset_hub_westend, - min_balance: 1000, - admin: sov_penpal_on_asset_hub_westend.clone().into(), - }) - .encode() - .into(); - - let buy_execution_fee_amount = parachains_common::westend::fee::WeightToFee::weight_to_fee( - &Weight::from_parts(10_100_000_000_000, 300_000), + let asset_owner_on_penpal = PenpalBSender::get(); + let foreign_asset_at_asset_hub_westend = + MultiLocation { parents: 1, interior: X1(Parachain(PenpalB::para_id().into())) } + .appended_with(asset_location_on_penpal) + .unwrap(); + + // 1. Create asset on penpal and, 2. Create foreign asset on asset_hub_westend + super::penpal_create_foreign_asset_on_asset_hub( + asset_id_on_penpal, + foreign_asset_at_asset_hub_westend, + ah_as_seen_by_penpal, + true, + asset_owner_on_penpal, + ASSET_MIN_BALANCE * 1_000_000, ); - let buy_execution_fee = MultiAsset { - id: Concrete(MultiLocation { parents: 1, interior: Here }), - fun: Fungible(buy_execution_fee_amount), - }; - - let xcm = VersionedXcm::from(Xcm(vec![ - WithdrawAsset { 0: vec![buy_execution_fee.clone()].into() }, - BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { require_weight_at_most, origin_kind, call: call_foreign_assets_create }, - RefundSurplus, - DepositAsset { - assets: All.into(), - beneficiary: sov_penpal_on_asset_hub_westend_as_location, - }, - ])); - - // Send XCM message from penpal => asset_hub_westend - let sudo_penpal_origin = ::RuntimeOrigin::root(); - PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( - sudo_penpal_origin.clone(), - bx!(assets_para_destination.clone()), - bx!(xcm), - )); - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - PenpalA, - vec![ - RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); + let penpal_as_seen_by_ah = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_as_seen_by_ah); + AssetHubWestend::fund_accounts(vec![ + (AssetHubWestendSender::get().into(), 5_000_000 * WESTEND_ED), /* An account to swap dot + * for something else. */ + ]); - // Receive XCM message in Assets Parachain in the next block. 
AssetHubWestend::execute_with(|| { - assert!(::ForeignAssets::asset_exists( - *foreign_asset1_at_asset_hub_westend - )); - // 3: Mint foreign asset on asset_hub_westend: // // (While it might be nice to use batch, @@ -222,11 +147,9 @@ fn swap_locally_on_chain_using_foreign_assets() { type RuntimeEvent = ::RuntimeEvent; // 3. Mint foreign asset (in reality this should be a teleport or some such) assert_ok!(::ForeignAssets::mint( - ::RuntimeOrigin::signed( - sov_penpal_on_asset_hub_westend.clone().into() - ), - *foreign_asset1_at_asset_hub_westend, - sov_penpal_on_asset_hub_westend.clone().into(), + ::RuntimeOrigin::signed(sov_penpal_on_ahw.clone().into()), + foreign_asset_at_asset_hub_westend, + sov_penpal_on_ahw.clone().into(), 3_000_000_000_000, )); @@ -240,8 +163,8 @@ fn swap_locally_on_chain_using_foreign_assets() { // 4. Create pool: assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - asset_native.clone(), - foreign_asset1_at_asset_hub_westend.clone(), + Box::new(asset_native), + Box::new(foreign_asset_at_asset_hub_westend), )); assert_expected_events!( @@ -253,16 +176,14 @@ fn swap_locally_on_chain_using_foreign_assets() { // 5. Add liquidity: assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed( - sov_penpal_on_asset_hub_westend.clone() - ), - asset_native.clone(), - foreign_asset1_at_asset_hub_westend.clone(), + ::RuntimeOrigin::signed(sov_penpal_on_ahw.clone()), + Box::new(asset_native), + Box::new(foreign_asset_at_asset_hub_westend), 1_000_000_000_000, 2_000_000_000_000, 0, 0, - sov_penpal_on_asset_hub_westend.clone().into() + sov_penpal_on_ahw.clone().into() )); assert_expected_events!( @@ -275,10 +196,7 @@ fn swap_locally_on_chain_using_foreign_assets() { ); // 6. Swap! - let path = BoundedVec::<_, _>::truncate_from(vec![ - asset_native.clone(), - foreign_asset1_at_asset_hub_westend.clone(), - ]); + let path = vec![Box::new(asset_native), Box::new(foreign_asset_at_asset_hub_westend)]; assert_ok!(::AssetConversion::swap_exact_tokens_for_tokens( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), @@ -301,22 +219,20 @@ fn swap_locally_on_chain_using_foreign_assets() { // 7. Remove liquidity assert_ok!(::AssetConversion::remove_liquidity( - ::RuntimeOrigin::signed( - sov_penpal_on_asset_hub_westend.clone() - ), - asset_native, - foreign_asset1_at_asset_hub_westend, + ::RuntimeOrigin::signed(sov_penpal_on_ahw.clone()), + Box::new(asset_native), + Box::new(foreign_asset_at_asset_hub_westend), 1414213562273 - 2_000_000_000, // all but the 2 EDs can't be retrieved. 
0, 0, - sov_penpal_on_asset_hub_westend.clone().into(), + sov_penpal_on_ahw.into(), )); }); } #[test] fn cannot_create_pool_from_pool_assets() { - let asset_native = Box::new(asset_hub_westend_runtime::xcm_config::WestendLocation::get()); + let asset_native = asset_hub_westend_runtime::xcm_config::WestendLocation::get(); let mut asset_one = asset_hub_westend_runtime::xcm_config::PoolAssetsPalletLocation::get(); asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); @@ -341,10 +257,10 @@ fn cannot_create_pool_from_pool_assets() { assert_matches::assert_matches!( ::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - asset_native.clone(), + Box::new(asset_native), Box::new(asset_one), ), - Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("UnsupportedAsset")) + Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("Unknown")) ); }); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 57e1b93f349e9ea674374416e63ac044d613860f..2dd68ae3a83e3faa3d00ebbf904854e0dad263af 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -15,7 +15,9 @@ use crate::*; use asset_hub_westend_runtime::xcm_config::XcmConfig as AssetHubWestendXcmConfig; +use emulated_integration_tests_common::xcm_helpers::non_fee_asset; use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; +use westend_system_emulated_network::penpal_emulated_chain::LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -51,7 +53,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { assert_expected_events!( Westend, vec![ - // Amount is witdrawn from Relay Chain's `CheckAccount` + // Amount is withdrawn from Relay Chain's `CheckAccount` RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { who: *who == ::XcmPallet::check_account(), amount: *amount == t.args.amount, @@ -110,6 +112,123 @@ fn para_dest_assertions(t: RelayToSystemParaTest) { ); } +fn penpal_to_ah_foreign_assets_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + PenpalB::assert_xcm_pallet_attempted_complete(None); + let expected_asset_id = t.args.asset_id.unwrap(); + let (_, expected_asset_amount) = + non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + RuntimeEvent::Assets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { + asset_id: *asset_id == expected_asset_id, + owner: *owner == t.sender.account_id, + balance: *balance == expected_asset_amount, + }, + ] + ); +} + +fn penpal_to_ah_foreign_assets_receiver_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + let (expected_foreign_asset_id, expected_foreign_asset_amount) = + 
non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); + assert_expected_events!( + AssetHubWestend, + vec![ + // native asset reserve transfer for paying fees, withdrawn from Penpal's sov account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + amount: *amount == t.args.amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == t.receiver.account_id, + }, + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, amount }) => { + asset_id: *asset_id == expected_foreign_asset_id, + owner: *owner == t.receiver.account_id, + amount: *amount == expected_foreign_asset_amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn ah_to_penpal_foreign_assets_sender_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + AssetHubWestend::assert_xcm_pallet_attempted_complete(None); + let (expected_foreign_asset_id, expected_foreign_asset_amount) = + non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); + assert_expected_events!( + AssetHubWestend, + vec![ + // native asset used for fees is transferred to Parachain's Sovereign account as reserve + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == AssetHubWestend::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + // foreign asset is burned locally as part of teleportation + RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { + asset_id: *asset_id == expected_foreign_asset_id, + owner: *owner == t.sender.account_id, + balance: *balance == expected_foreign_asset_amount, + }, + ] + ); +} + +fn ah_to_penpal_foreign_assets_receiver_assertions(t: SystemParaToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + let expected_asset_id = t.args.asset_id.unwrap(); + let (_, expected_asset_amount) = + non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); + let checking_account = ::PolkadotXcm::check_account(); + assert_expected_events!( + PenpalB, + vec![ + // checking account burns local asset as part of incoming teleport + RuntimeEvent::Assets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { + asset_id: *asset_id == expected_asset_id, + owner: *owner == checking_account, + balance: *balance == expected_asset_amount, + }, + // local asset is teleported into account of receiver + RuntimeEvent::Assets(pallet_assets::Event::Issued { asset_id, owner, amount }) => { + asset_id: *asset_id == expected_asset_id, + owner: *owner == t.receiver.account_id, + amount: *amount == expected_asset_amount, + }, + // native asset for fee is deposited to receiver + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == t.receiver.account_id, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { ::XcmPallet::limited_teleport_assets( t.signed_origin, @@ -152,15 +271,39 @@ fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ) } +fn system_para_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { + ::PolkadotXcm::transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + +fn para_to_system_para_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + /// Limited Teleport of native asset from Relay Chain to the System Parachain should work #[test] fn limited_teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = WESTEND_ED * 1000; + let dest = Westend::child_location_of(AssetHubWestend::para_id()); + let beneficiary = AssetHubWestendReceiver::get(); let test_args = TestContext { sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), + receiver: beneficiary.clone(), + args: relay_test_args(dest, beneficiary, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -204,7 +347,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubWestendSender::get(), receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -245,7 +388,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubWestendSender::get(), receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -278,10 +421,12 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { fn teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = WESTEND_ED * 1000; + let dest = Westend::child_location_of(AssetHubWestend::para_id()); + let beneficiary = AssetHubWestendReceiver::get(); let test_args = TestContext { sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), + receiver: beneficiary.clone(), + args: relay_test_args(dest, beneficiary, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -325,7 +470,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubWestendSender::get(), receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -366,7 +511,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: 
AssetHubWestendSender::get(), receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -394,16 +539,211 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -// TODO: uncomment when CollectivesWestend and BridgeHubWestend are implemented -// https://github.com/paritytech/polkadot-sdk/pull/1737 (CollectivesWestend) -// #[test] -// fn teleport_to_other_system_parachains_works() { -// let amount = ASSET_HUB_WESTEND_ED * 100; -// let native_asset: VersionedMultiAssets = (Parent, amount).into(); - -// test_parachain_is_trusted_teleporter!( -// AssetHubWestend, // Origin -// vec![CollectivesWestend, BridgeHubWestend], // Destinations -// (native_asset, amount) -// ); -// } +#[test] +fn teleport_to_other_system_parachains_works() { + let amount = ASSET_HUB_WESTEND_ED * 100; + let native_asset: MultiAssets = (Parent, amount).into(); + + test_parachain_is_trusted_teleporter!( + AssetHubWestend, // Origin + AssetHubWestendXcmConfig, // XCM Configuration + vec![BridgeHubWestend], // Destinations + (native_asset, amount) + ); +} + +/// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets should work +/// (using native reserve-based transfer for fees) +#[test] +fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { + let ah_as_seen_by_penpal = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let asset_location_on_penpal = PenpalLocalTeleportableToAssetHub::get(); + let asset_id_on_penpal = match asset_location_on_penpal.last() { + Some(GeneralIndex(id)) => *id as u32, + _ => unreachable!(), + }; + let asset_owner_on_penpal = PenpalBSender::get(); + let foreign_asset_at_asset_hub_westend = + MultiLocation { parents: 1, interior: X1(Parachain(PenpalB::para_id().into())) } + .appended_with(asset_location_on_penpal) + .unwrap(); + super::penpal_create_foreign_asset_on_asset_hub( + asset_id_on_penpal, + foreign_asset_at_asset_hub_westend, + ah_as_seen_by_penpal, + false, + asset_owner_on_penpal, + ASSET_MIN_BALANCE * 1_000_000, + ); + let penpal_to_ah_beneficiary_id = AssetHubWestendReceiver::get(); + + let fee_amount_to_send = ASSET_HUB_WESTEND_ED * 1000; + let asset_amount_to_send = ASSET_MIN_BALANCE * 1000; + + let penpal_assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (asset_location_on_penpal, asset_amount_to_send).into(), + ] + .into(); + let fee_asset_index = penpal_assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + // Penpal to AH test args + let penpal_to_ah_test_args = TestContext { + sender: PenpalBSender::get(), + receiver: AssetHubWestendReceiver::get(), + args: para_test_args( + ah_as_seen_by_penpal, + penpal_to_ah_beneficiary_id, + asset_amount_to_send, + penpal_assets, + Some(asset_id_on_penpal), + fee_asset_index, + ), + }; + let mut penpal_to_ah = ParaToSystemParaTest::new(penpal_to_ah_test_args); + + let penpal_sender_balance_before = penpal_to_ah.sender.balance; + let ah_receiver_balance_before = penpal_to_ah.receiver.balance; + + let penpal_sender_assets_before = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(asset_id_on_penpal, &PenpalBSender::get()) + }); + let ah_receiver_assets_before = AssetHubWestend::execute_with(|| { + type Assets = 
::ForeignAssets; + >::balance( + foreign_asset_at_asset_hub_westend, + &AssetHubWestendReceiver::get(), + ) + }); + + penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_sender_assertions); + penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_receiver_assertions); + penpal_to_ah.set_dispatchable::(para_to_system_para_transfer_assets); + penpal_to_ah.assert(); + + let penpal_sender_balance_after = penpal_to_ah.sender.balance; + let ah_receiver_balance_after = penpal_to_ah.receiver.balance; + + let penpal_sender_assets_after = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(asset_id_on_penpal, &PenpalBSender::get()) + }); + let ah_receiver_assets_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + foreign_asset_at_asset_hub_westend, + &AssetHubWestendReceiver::get(), + ) + }); + + // Sender's balance is reduced + assert!(penpal_sender_balance_after < penpal_sender_balance_before); + // Receiver's balance is increased + assert!(ah_receiver_balance_after > ah_receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(ah_receiver_balance_after < ah_receiver_balance_before + fee_amount_to_send); + + // Sender's balance is reduced by exact amount + assert_eq!(penpal_sender_assets_before - asset_amount_to_send, penpal_sender_assets_after); + // Receiver's balance is increased by exact amount + assert_eq!(ah_receiver_assets_after, ah_receiver_assets_before + asset_amount_to_send); + + /////////////////////////////////////////////////////////////////////// + // Now test transferring foreign assets back from AssetHub to Penpal // + /////////////////////////////////////////////////////////////////////// + + // Move funds on AH from AHReceiver to AHSender + AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + assert_ok!(ForeignAssets::transfer( + ::RuntimeOrigin::signed(AssetHubWestendReceiver::get()), + foreign_asset_at_asset_hub_westend, + AssetHubWestendSender::get().into(), + asset_amount_to_send, + )); + }); + + let ah_to_penpal_beneficiary_id = PenpalBReceiver::get(); + let penpal_as_seen_by_ah = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let ah_assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (foreign_asset_at_asset_hub_westend, asset_amount_to_send).into(), + ] + .into(); + let fee_asset_index = ah_assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + // AH to Penpal test args + let ah_to_penpal_test_args = TestContext { + sender: AssetHubWestendSender::get(), + receiver: PenpalBReceiver::get(), + args: para_test_args( + penpal_as_seen_by_ah, + ah_to_penpal_beneficiary_id, + asset_amount_to_send, + ah_assets, + Some(asset_id_on_penpal), + fee_asset_index, + ), + }; + let mut ah_to_penpal = SystemParaToParaTest::new(ah_to_penpal_test_args); + + let ah_sender_balance_before = ah_to_penpal.sender.balance; + let penpal_receiver_balance_before = ah_to_penpal.receiver.balance; + + let ah_sender_assets_before = AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + foreign_asset_at_asset_hub_westend, + &AssetHubWestendSender::get(), + ) + }); + let penpal_receiver_assets_before = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(asset_id_on_penpal, &PenpalBReceiver::get()) + }); + + 
ah_to_penpal.set_assertion::(ah_to_penpal_foreign_assets_sender_assertions); + ah_to_penpal.set_assertion::(ah_to_penpal_foreign_assets_receiver_assertions); + ah_to_penpal.set_dispatchable::(system_para_to_para_transfer_assets); + ah_to_penpal.assert(); + + let ah_sender_balance_after = ah_to_penpal.sender.balance; + let penpal_receiver_balance_after = ah_to_penpal.receiver.balance; + + let ah_sender_assets_after = AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + foreign_asset_at_asset_hub_westend, + &AssetHubWestendSender::get(), + ) + }); + let penpal_receiver_assets_after = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(asset_id_on_penpal, &PenpalBReceiver::get()) + }); + + // Sender's balance is reduced + assert!(ah_sender_balance_after < ah_sender_balance_before); + // Receiver's balance is increased + assert!(penpal_receiver_balance_after > penpal_receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(penpal_receiver_balance_after < penpal_receiver_balance_before + fee_amount_to_send); + + // Sender's balance is reduced by exact amount + assert_eq!(ah_sender_assets_before - asset_amount_to_send, ah_sender_assets_after); + // Receiver's balance is increased by exact amount + assert_eq!(penpal_receiver_assets_after, penpal_receiver_assets_before + asset_amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 035d9c107933ccdcf31431f39d6ebd905f4113a5..e75187bea95eb7c64d84b37a7dadd3b4758b6e87 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -7,27 +7,48 @@ license = "Apache-2.0" description = "Bridge Hub Rococo runtime integration tests with xcm-emulator" publish = false +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +hex = "0.4.3" +hex-literal = "0.4.1" # Substrate -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", 
default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } # Bridges -pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false} -bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false} +pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false } +bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus asset-test-utils = { path = "../../../../../../parachains/runtimes/assets/test-utils" } parachains-common = { path = "../../../../../../parachains/common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false} -cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false } bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false} -rococo-wococo-system-emulated-network ={ path = "../../../networks/rococo-wococo-system" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } +rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } +penpal-runtime = { path = "../../../../../runtimes/testing/penpal", default-features = false } +rococo-system-emulated-network = { path = "../../../networks/rococo-system" } +asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo", default-features = false } + +# Snowbridge +snowbridge-core = { path = "../../../../../../../bridges/snowbridge/parachain/primitives/core", default-features = false } +snowbridge-router-primitives = { path = "../../../../../../../bridges/snowbridge/parachain/primitives/router", default-features = false } +snowbridge-system = { path = "../../../../../../../bridges/snowbridge/parachain/pallets/system", default-features = false } +snowbridge-inbound-queue = { path = "../../../../../../../bridges/snowbridge/parachain/pallets/inbound-queue", default-features = false } +snowbridge-outbound-queue = { path = "../../../../../../../bridges/snowbridge/parachain/pallets/outbound-queue", default-features = false } +snowbridge-rococo-common = { path = "../../../../../../../bridges/snowbridge/parachain/runtime/rococo-common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 19e10d23bbba270e3615499a07b2e1ace1310217..5127bd759dc63e6797e6adbe379ced5fa1e96bcc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -14,14 +14,16 @@ // limitations under the License. 
// Substrate -pub use frame_support::assert_ok; +pub use frame_support::{assert_err, assert_ok, pallet_prelude::DispatchResult}; +pub use sp_runtime::DispatchError; // Polkadot pub use xcm::{ + latest::ParentThen, prelude::{AccountId32 as AccountId32Junction, *}, v3::{ Error, - NetworkId::{Rococo as RococoId, Wococo as WococoId}, + NetworkId::{Rococo as RococoId, Westend as WestendId}, }, }; @@ -30,6 +32,8 @@ pub use bp_messages::LaneId; // Cumulus pub use emulated_integration_tests_common::{ + accounts::ALICE, + impls::Inspect, test_parachain_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, @@ -39,15 +43,26 @@ pub use emulated_integration_tests_common::{ PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, }; pub use parachains_common::{AccountId, Balance}; -pub use rococo_wococo_system_emulated_network::{ +pub use rococo_system_emulated_network::{ + penpal_emulated_chain::PenpalAParaPallet as PenpalAPallet, + BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, PenpalAPara as PenpalA, + PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender, +}; +pub use rococo_westend_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + asset_hub_westend_emulated_chain::{ + genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, bridge_hub_rococo_emulated_chain::{ genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoParaPallet as BridgeHubRococoPallet, }, rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, - AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWococoPara as AssetHubWococo, - BridgeHubRococoPara as BridgeHubRococo, BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, - BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWococoPara as BridgeHubWococo, + AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, + AssetHubWestendParaReceiver as AssetHubWestendReceiver, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWestendPara as BridgeHubWestend, RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, RococoRelaySender as RococoSender, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs new file mode 100644 index 0000000000000000000000000000000000000000..5a2111a9be940f70a168e0fcf76a97cd7f839e3b --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -0,0 +1,171 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::tests::*;
+
+fn send_asset_from_asset_hub_rococo_to_asset_hub_westend(id: MultiLocation, amount: u128) {
+    let destination = asset_hub_westend_location();
+
+    // fund the AHR's SA on BHR for paying bridge transport fees
+    BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128);
+
+    // set XCM versions
+    AssetHubRococo::force_xcm_version(destination, XCM_VERSION);
+    BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), XCM_VERSION);
+
+    // send message over bridge
+    assert_ok!(send_asset_from_asset_hub_rococo(destination, (id, amount)));
+    assert_bridge_hub_rococo_message_accepted(true);
+    assert_bridge_hub_westend_message_received();
+}
+
+#[test]
+fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() {
+    let roc_at_asset_hub_rococo: MultiLocation = Parent.into();
+    let roc_at_asset_hub_westend =
+        MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Rococo)) };
+    let owner: AccountId = AssetHubWestend::account_id_of(ALICE);
+    AssetHubWestend::force_create_foreign_asset(
+        roc_at_asset_hub_westend,
+        owner,
+        true,
+        ASSET_MIN_BALANCE,
+        vec![],
+    );
+    let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus(
+        NetworkId::Westend,
+        AssetHubWestend::para_id(),
+    );
+
+    let rocs_in_reserve_on_ahr_before =
+        ::account_data_of(sov_ahw_on_ahr.clone()).free;
+    let sender_rocs_before =
+        ::account_data_of(AssetHubRococoSender::get()).free;
+    let receiver_rocs_before = AssetHubWestend::execute_with(|| {
+        type Assets = ::ForeignAssets;
+        >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get())
+    });
+
+    let amount = ASSET_HUB_ROCOCO_ED * 1_000;
+    send_asset_from_asset_hub_rococo_to_asset_hub_westend(roc_at_asset_hub_rococo, amount);
+    AssetHubWestend::execute_with(|| {
+        type RuntimeEvent = ::RuntimeEvent;
+        assert_expected_events!(
+            AssetHubWestend,
+            vec![
+                // issue ROCs on AHW
+                RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => {
+                    asset_id: *asset_id == roc_at_asset_hub_rococo,
+                    owner: *owner == AssetHubWestendReceiver::get(),
+                },
+                // message processed successfully
+                RuntimeEvent::MessageQueue(
+                    pallet_message_queue::Event::Processed { success: true, .. }
+                ) => {},
+            ]
+        );
+    });
+
+    let sender_rocs_after =
+        ::account_data_of(AssetHubRococoSender::get()).free;
+    let receiver_rocs_after = AssetHubWestend::execute_with(|| {
+        type Assets = ::ForeignAssets;
+        >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get())
+    });
+    let rocs_in_reserve_on_ahr_after =
+        ::account_data_of(sov_ahw_on_ahr.clone()).free;
+
+    // Sender's balance is reduced
+    assert!(sender_rocs_before > sender_rocs_after);
+    // Receiver's balance is increased
+    assert!(receiver_rocs_after > receiver_rocs_before);
+    // Reserve balance is increased by sent amount
+    assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before + amount);
+}
+
+#[test]
+fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() {
+    let prefund_amount = 10_000_000_000_000u128;
+    let wnd_at_asset_hub_rococo =
+        MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Westend)) };
+    let owner: AccountId = AssetHubWestend::account_id_of(ALICE);
+    AssetHubRococo::force_create_foreign_asset(
+        wnd_at_asset_hub_rococo,
+        owner,
+        true,
+        ASSET_MIN_BALANCE,
+        vec![(AssetHubRococoSender::get(), prefund_amount)],
+    );
+
+    // fund the AHR's SA on AHW with the WND tokens held in reserve
+    let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus(
+        NetworkId::Rococo,
+        AssetHubRococo::para_id(),
+    );
+    AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), prefund_amount)]);
+
+    let wnds_in_reserve_on_ahw_before =
+        ::account_data_of(sov_ahr_on_ahw.clone()).free;
+    assert_eq!(wnds_in_reserve_on_ahw_before, prefund_amount);
+    let sender_wnds_before = AssetHubRococo::execute_with(|| {
+        type Assets = ::ForeignAssets;
+        >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get())
+    });
+    assert_eq!(sender_wnds_before, prefund_amount);
+    let receiver_wnds_before =
+        ::account_data_of(AssetHubWestendReceiver::get()).free;
+
+    let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000;
+    send_asset_from_asset_hub_rococo_to_asset_hub_westend(wnd_at_asset_hub_rococo, amount_to_send);
+    AssetHubWestend::execute_with(|| {
+        type RuntimeEvent = ::RuntimeEvent;
+        assert_expected_events!(
+            AssetHubWestend,
+            vec![
+                // WND is withdrawn from AHR's SA on AHW
+                RuntimeEvent::Balances(
+                    pallet_balances::Event::Withdraw { who, amount }
+                ) => {
+                    who: *who == sov_ahr_on_ahw,
+                    amount: *amount == amount_to_send,
+                },
+                // WNDs deposited to beneficiary
+                RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => {
+                    who: *who == AssetHubWestendReceiver::get(),
+                },
+                // message processed successfully
+                RuntimeEvent::MessageQueue(
+                    pallet_message_queue::Event::Processed { success: true, ..
} + ) => {}, + ] + ); + }); + + let sender_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) + }); + let receiver_wnds_after = + ::account_data_of(AssetHubWestendReceiver::get()).free; + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw).free; + + // Sender's balance is reduced + assert!(sender_wnds_before > sender_wnds_after); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is reduced by sent amount + assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before - amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs deleted file mode 100644 index 35cfa394174cd577e45868a2f26fbfae6b963b37..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::*; - -#[test] -fn example() { - // Init tests variables - // XcmPallet send arguments - let sudo_origin = ::RuntimeOrigin::root(); - let destination = Rococo::child_location_of(BridgeHubRococo::para_id()).into(); - let weight_limit = WeightLimit::Unlimited; - let check_origin = None; - - let remote_xcm = Xcm(vec![ClearOrigin]); - - let xcm = VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit, check_origin }, - ExportMessage { - network: WococoId, - destination: X1(Parachain(AssetHubWococo::para_id().into())), - xcm: remote_xcm, - }, - ])); - - //Rococo Global Consensus - // Send XCM message from Relay Chain to Bridge Hub source Parachain - Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( - sudo_origin, - bx!(destination), - bx!(xcm), - )); - - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - Rococo, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); - // Receive XCM message in Bridge Hub source Parachain - BridgeHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - BridgeHubRococo, - vec![ - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { - success: true, - .. - }) => {}, - RuntimeEvent::BridgeWococoMessages(pallet_bridge_messages::Event::MessageAccepted { - lane_id: LaneId([0, 0, 0, 1]), - nonce: 1, - }) => {}, - ] - ); - }); - - // Wococo GLobal Consensus - // Receive XCM message in Bridge Hub target Parachain - BridgeHubWococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - BridgeHubWococo, - vec![ - RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, - ] - ); - }); - // Receive embeded XCM message within `ExportMessage` in Parachain destination - AssetHubWococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - AssetHubWococo, - vec![ - RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { - .. - }) => {}, - ] - ); - }); -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index 1eef05c6b9281e26d3e412f4aac8b9e265d2d5c0..e71a022af4cf4793d59b3e557a70a29ae0aa7121 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -13,5 +13,103 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod example; +use crate::*; + +mod asset_transfers; +mod send_xcm; +mod snowbridge; mod teleport; + +pub(crate) fn asset_hub_westend_location() -> MultiLocation { + MultiLocation { + parents: 2, + interior: X2( + GlobalConsensus(NetworkId::Westend), + Parachain(AssetHubWestend::para_id().into()), + ), + } +} + +pub(crate) fn bridge_hub_westend_location() -> MultiLocation { + MultiLocation { + parents: 2, + interior: X2( + GlobalConsensus(NetworkId::Westend), + Parachain(BridgeHubWestend::para_id().into()), + ), + } +} + +pub(crate) fn send_asset_from_asset_hub_rococo( + destination: MultiLocation, + (id, amount): (MultiLocation, u128), +) -> DispatchResult { + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); + + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); + + let assets: MultiAssets = (id, amount).into(); + let fee_asset_item = 0; + + AssetHubRococo::execute_with(|| { + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ) + }) +} + +pub(crate) fn assert_bridge_hub_rococo_message_accepted(expected_processed: bool) { + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + if expected_processed { + assert_expected_events!( + BridgeHubRococo, + vec![ + // pay for bridge fees + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { .. }) => {}, + // message exported + RuntimeEvent::BridgeWestendMessages( + pallet_bridge_messages::Event::MessageAccepted { .. } + ) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + } else { + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { + success: false, + .. + }) => {}, + ] + ); + } + }); +} + +pub(crate) fn assert_bridge_hub_westend_message_received() { + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWestend, + vec![ + // message sent to destination + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
} + ) => {}, + ] + ); + }) +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs new file mode 100644 index 0000000000000000000000000000000000000000..a3a7d96a14ae2a2a32aee4849cbfda82254a50fe --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -0,0 +1,177 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::tests::*; + +#[test] +fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable() { + // Init tests variables + // XcmPallet send arguments + let sudo_origin = ::RuntimeOrigin::root(); + let destination = Rococo::child_location_of(BridgeHubRococo::para_id()).into(); + let weight_limit = WeightLimit::Unlimited; + let check_origin = None; + + let remote_xcm = Xcm(vec![ClearOrigin]); + + let xcm = VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit, check_origin }, + ExportMessage { + network: WestendId, + destination: X1(Parachain(AssetHubWestend::para_id().into())), + xcm: remote_xcm, + }, + ])); + + // Rococo Global Consensus + // Send XCM message from Relay Chain to Bridge Hub source Parachain + Rococo::execute_with(|| { + assert_ok!(::XcmPallet::send( + sudo_origin, + bx!(destination), + bx!(xcm), + )); + + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + // Receive XCM message in Bridge Hub source Parachain, it should fail, because we don't have + // opened bridge/lane. 
+ assert_bridge_hub_rococo_message_accepted(false); +} + +#[test] +fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { + // Initially set only default version on all runtimes + AssetHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + BridgeHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + BridgeHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + AssetHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + + // prepare data + let destination = asset_hub_westend_location(); + let native_token = MultiLocation::parent(); + let amount = ASSET_HUB_ROCOCO_ED * 1_000; + + // fund the AHR's SA on BHR for paying bridge transport fees + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); + // fund sender + AssetHubRococo::fund_accounts(vec![(AssetHubRococoSender::get().into(), amount * 10)]); + + // send XCM from AssetHubRococo - fails - destination version not known + assert_err!( + send_asset_from_asset_hub_rococo(destination, (native_token, amount)), + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [1, 0, 0, 0], + message: Some("SendFailure") + }) + ); + + // set destination version + AssetHubRococo::force_xcm_version(destination, xcm::v3::prelude::XCM_VERSION); + + // TODO: remove this block, when removing `xcm:v2` + { + // send XCM from AssetHubRococo - fails - AssetHubRococo is set to the default/safe `2` + // version, which does not have the `ExportMessage` instruction. If the default `2` is + // changed to `3`, then this assert can go away" + assert_err!( + send_asset_from_asset_hub_rococo(destination, (native_token, amount)), + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [1, 0, 0, 0], + message: Some("SendFailure") + }) + ); + + // set exact version for BridgeHubWestend to `2` without `ExportMessage` instruction + AssetHubRococo::force_xcm_version( + ParentThen(Parachain(BridgeHubRococo::para_id().into()).into()).into(), + xcm::v2::prelude::XCM_VERSION, + ); + // send XCM from AssetHubRococo - fails - `ExportMessage` is not in `2` + assert_err!( + send_asset_from_asset_hub_rococo(destination, (native_token, amount)), + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [1, 0, 0, 0], + message: Some("SendFailure") + }) + ); + } + + // set version with `ExportMessage` for BridgeHubRococo + AssetHubRococo::force_xcm_version( + ParentThen(Parachain(BridgeHubRococo::para_id().into()).into()).into(), + xcm::v3::prelude::XCM_VERSION, + ); + // send XCM from AssetHubRococo - ok + assert_ok!(send_asset_from_asset_hub_rococo(destination, (native_token, amount))); + + // `ExportMessage` on local BridgeHub - fails - remote BridgeHub version not known + assert_bridge_hub_rococo_message_accepted(false); + + // set version for remote BridgeHub on BridgeHubRococo + BridgeHubRococo::force_xcm_version( + bridge_hub_westend_location(), + xcm::v3::prelude::XCM_VERSION, + ); + // set version for AssetHubWestend on BridgeHubWestend + BridgeHubWestend::force_xcm_version( + ParentThen(Parachain(AssetHubWestend::para_id().into()).into()).into(), + xcm::v3::prelude::XCM_VERSION, + ); + + // send XCM from AssetHubRococo - ok + assert_ok!(send_asset_from_asset_hub_rococo(destination, (native_token, amount))); + assert_bridge_hub_rococo_message_accepted(true); + assert_bridge_hub_westend_message_received(); + // message delivered and processed at destination + AssetHubWestend::execute_with(|| { 
+ type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // message processed with failure, but for this scenario it is ok, important is that was delivered + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: false, .. } + ) => {}, + ] + ); + }); + + // TODO: remove this block, when removing `xcm:v2` + { + // set `2` version for remote BridgeHub on BridgeHubRococo, which does not have + // `UniversalOrigin` and `DescendOrigin` + BridgeHubRococo::force_xcm_version( + bridge_hub_westend_location(), + xcm::v2::prelude::XCM_VERSION, + ); + + // send XCM from AssetHubRococo - ok + assert_ok!(send_asset_from_asset_hub_rococo(destination, (native_token, amount))); + // message is not accepted on the local BridgeHub (`DestinationUnsupported`) because we + // cannot add `UniversalOrigin` and `DescendOrigin` + assert_bridge_hub_rococo_message_accepted(false); + } +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs new file mode 100644 index 0000000000000000000000000000000000000000..e62a73caff589081c1899eb48b44afcecb5f23f6 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -0,0 +1,505 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
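// ---------------------------------------------------------------------------
// Editor's note — illustrative sketch, not part of the patch above or below.
// The new `snowbridge.rs` tests that follow SCALE-encode a locally declared
// `SnowbridgeControl`/`ControlCall` enum pair (whose `#[codec(index = ..)]`
// attributes mirror the indices declared in that file) into the `call` bytes
// of a `Transact` instruction, and they hard-code the treasury account as raw
// bytes. The standalone program below shows where those literals come from;
// it assumes only the `parity-scale-codec` crate (with the `derive` feature)
// and `hex-literal`, and the names are copied from the test file purely for
// illustration.
// ---------------------------------------------------------------------------
use codec::{Decode, Encode};
use hex_literal::hex;

#[derive(Encode, Decode, PartialEq, Debug)]
enum ControlCall {
	#[codec(index = 3)]
	CreateAgent,
}

#[derive(Encode, Decode, PartialEq, Debug)]
enum SnowbridgeControl {
	#[codec(index = 83)]
	Control(ControlCall),
}

fn main() {
	// SCALE encodes an enum variant as its index byte followed by the encoded
	// fields, so this call body is exactly two bytes: [pallet 83, call 3].
	let call = SnowbridgeControl::Control(ControlCall::CreateAgent);
	let encoded = call.encode();
	assert_eq!(encoded, vec![83u8, 3u8]);
	// Decoding the same bytes round-trips back to the original value.
	assert_eq!(SnowbridgeControl::decode(&mut &encoded[..]).unwrap(), call);

	// The TREASURY_ACCOUNT constant used below is the conventional pallet-id
	// derived account: b"modl" ++ b"py/trsry", zero-padded to 32 bytes.
	let mut treasury = [0u8; 32];
	treasury[..4].copy_from_slice(b"modl");
	treasury[4..12].copy_from_slice(b"py/trsry");
	assert_eq!(
		treasury,
		hex!("6d6f646c70792f74727372790000000000000000000000000000000000000000")
	);
}
// ---------------------------------------------------------------------------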
+use crate::*; +use codec::{Decode, Encode}; +use emulated_integration_tests_common::xcm_emulator::ConvertLocation; +use frame_support::pallet_prelude::TypeInfo; +use hex_literal::hex; +use snowbridge_core::outbound::OperatingMode; +use snowbridge_rococo_common::EthereumNetwork; +use snowbridge_router_primitives::inbound::{ + Command, Destination, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, +}; +use snowbridge_system; +use sp_core::H256; + +const INITIAL_FUND: u128 = 5_000_000_000 * ROCOCO_ED; +const CHAIN_ID: u64 = 11155111; +const TREASURY_ACCOUNT: [u8; 32] = + hex!("6d6f646c70792f74727372790000000000000000000000000000000000000000"); +const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); +const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); +const XCM_FEE: u128 = 4_000_000_000; + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +pub enum ControlCall { + #[codec(index = 3)] + CreateAgent, + #[codec(index = 4)] + CreateChannel { mode: OperatingMode }, +} + +#[allow(clippy::large_enum_variant)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +pub enum SnowbridgeControl { + #[codec(index = 83)] + Control(ControlCall), +} + +#[test] +fn create_agent() { + let origin_para: u32 = 1001; + + BridgeHubRococo::fund_para_sovereign(origin_para.into(), INITIAL_FUND); + + let sudo_origin = ::RuntimeOrigin::root(); + let destination = Rococo::child_location_of(BridgeHubRococo::para_id()).into(); + + let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); + + let remote_xcm = VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + DescendOrigin(X1(Parachain(origin_para))), + Transact { + require_weight_at_most: 3000000000.into(), + origin_kind: OriginKind::Xcm, + call: create_agent_call.encode().into(), + }, + ])); + + //Rococo Global Consensus + // Send XCM message from Relay Chain to Bridge Hub source Parachain + Rococo::execute_with(|| { + assert_ok!(::XcmPallet::send( + sudo_origin, + bx!(destination), + bx!(remote_xcm), + )); + + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::EthereumSystem(snowbridge_system::Event::CreateAgent { + .. 
+ }) => {}, + ] + ); + }); +} + +#[test] +fn create_channel() { + let origin_para: u32 = 1001; + + BridgeHubRococo::fund_para_sovereign(origin_para.into(), INITIAL_FUND); + + let sudo_origin = ::RuntimeOrigin::root(); + let destination: VersionedMultiLocation = + Rococo::child_location_of(BridgeHubRococo::para_id()).into(); + + let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); + + let create_agent_xcm = VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + DescendOrigin(X1(Parachain(origin_para))), + Transact { + require_weight_at_most: 3000000000.into(), + origin_kind: OriginKind::Xcm, + call: create_agent_call.encode().into(), + }, + ])); + + let create_channel_call = + SnowbridgeControl::Control(ControlCall::CreateChannel { mode: OperatingMode::Normal }); + + let create_channel_xcm = VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + DescendOrigin(X1(Parachain(origin_para))), + Transact { + require_weight_at_most: 3000000000.into(), + origin_kind: OriginKind::Xcm, + call: create_channel_call.encode().into(), + }, + ])); + + //Rococo Global Consensus + // Send XCM message from Relay Chain to Bridge Hub source Parachain + Rococo::execute_with(|| { + assert_ok!(::XcmPallet::send( + sudo_origin.clone(), + bx!(destination.clone()), + bx!(create_agent_xcm), + )); + + assert_ok!(::XcmPallet::send( + sudo_origin, + bx!(destination), + bx!(create_channel_xcm), + )); + + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::EthereumSystem(snowbridge_system::Event::CreateChannel { + .. + }) => {}, + ] + ); + }); +} + +#[test] +fn register_weth_token_from_ethereum_to_asset_hub() { + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); + + let message_id_: H256 = [1; 32].into(); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::RegisterToken { token: WETH.into(), fee: XCM_FEE }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id_, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Created { .. 
}) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_penpal() { + let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(MultiLocation { + parents: 1, + interior: X1(Parachain(AssetHubRococo::para_id().into())), + }); + BridgeHubRococo::fund_accounts(vec![(asset_hub_sovereign.clone(), INITIAL_FUND)]); + + PenpalA::fund_accounts(vec![ + (PenpalAReceiver::get(), INITIAL_FUND), + (PenpalASender::get(), INITIAL_FUND), + ]); + + let weth_asset_location: MultiLocation = + (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + let weth_asset_id = weth_asset_location.into(); + + let origin_location = (Parent, Parent, EthereumNetwork::get()).into(); + + // Fund ethereum sovereign in asset hub + let ethereum_sovereign: AccountId = + GlobalConsensusEthereumConvertsFor::::convert_location(&origin_location) + .unwrap(); + AssetHubRococo::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); + + // Create asset on assethub. + AssetHubRococo::execute_with(|| { + assert_ok!(::ForeignAssets::create( + pallet_xcm::Origin::Xcm(origin_location).into(), + weth_asset_id, + asset_hub_sovereign.clone().into(), + 1000, + )); + + assert!(::ForeignAssets::asset_exists( + weth_asset_id + )); + }); + + // Create asset on penpal. + PenpalA::execute_with(|| { + assert_ok!(::ForeignAssets::create( + ::RuntimeOrigin::signed(PenpalASender::get()), + weth_asset_id, + asset_hub_sovereign.into(), + 1000, + )); + + assert!(::ForeignAssets::asset_exists(weth_asset_id)); + }); + + let message_id_: H256 = [1; 32].into(); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH.into(), + destination: Destination::ForeignAccountId32 { + para_id: 2000, + id: PenpalAReceiver::get().into(), + fee: XCM_FEE, + }, + amount: 1_000_000_000, + fee: XCM_FEE, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id_, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + + PenpalA::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. 
}) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_asset_hub() { + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); + + // Fund ethereum sovereign in asset hub + AssetHubRococo::fund_accounts(vec![(AssetHubRococoReceiver::get(), INITIAL_FUND)]); + + let message_id_: H256 = [1; 32].into(); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::RegisterToken { token: WETH.into(), fee: XCM_FEE }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id_, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH.into(), + destination: Destination::AccountId32 { id: AssetHubRococoReceiver::get().into() }, + amount: 1_000_000_000, + fee: XCM_FEE, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id_, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + }); +} + +#[test] +fn send_weth_asset_from_asset_hub_to_ethereum() { + use asset_hub_rococo_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; + let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(MultiLocation { + parents: 1, + interior: X1(Parachain(AssetHubRococo::para_id().into())), + }); + + AssetHubRococo::force_default_xcm_version(Some(XCM_VERSION)); + BridgeHubRococo::force_default_xcm_version(Some(XCM_VERSION)); + AssetHubRococo::force_xcm_version( + MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(Ethereum { chain_id: CHAIN_ID })), + }, + XCM_VERSION, + ); + + BridgeHubRococo::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); + AssetHubRococo::fund_accounts(vec![(AssetHubRococoReceiver::get(), INITIAL_FUND)]); + + const WETH_AMOUNT: u128 = 1_000_000_000; + let message_id_: H256 = [1; 32].into(); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type EthereumInboundQueue = + ::EthereumInboundQueue; + + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::RegisterToken { token: WETH.into(), fee: XCM_FEE }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id_, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH.into(), + destination: Destination::AccountId32 { id: AssetHubRococoReceiver::get().into() }, + amount: WETH_AMOUNT, + fee: XCM_FEE, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id_, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + let assets = vec![MultiAsset { + id: Concrete(MultiLocation { + parents: 2, + interior: X2( + GlobalConsensus(Ethereum { chain_id: CHAIN_ID }), + AccountKey20 { network: None, key: WETH }, + ), + }), + fun: Fungible(WETH_AMOUNT), + }]; + let multi_assets = VersionedMultiAssets::V3(MultiAssets::from(assets)); + + let destination = VersionedMultiLocation::V3(MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(Ethereum { chain_id: CHAIN_ID })), + }); + + let beneficiary = VersionedMultiLocation::V3(MultiLocation { + parents: 0, + interior: X1(AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }), + }); + + let free_balance_before = ::Balances::free_balance( + AssetHubRococoReceiver::get(), + ); + ::PolkadotXcm::reserve_transfer_assets( + RuntimeOrigin::signed(AssetHubRococoReceiver::get()), + Box::new(destination), + Box::new(beneficiary), + Box::new(multi_assets), + 0, + ) + .unwrap(); + let free_balance_after = ::Balances::free_balance( + AssetHubRococoReceiver::get(), + ); + // assert at least DefaultBridgeHubEthereumBaseFee charged from the sender + let free_balance_diff = free_balance_before - free_balance_after; + assert!(free_balance_diff > DefaultBridgeHubEthereumBaseFee::get()); + }); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::EthereumOutboundQueue(snowbridge_outbound_queue::Event::MessageQueued {..}) => {}, + ] + ); + let events = BridgeHubRococo::events(); + assert!( + events.iter().any(|event| matches!( + event, + RuntimeEvent::Balances(pallet_balances::Event::Deposit{ who, amount }) + if *who == TREASURY_ACCOUNT.into() && *amount == 16903333 + )), + "Snowbridge sovereign takes local fee." + ); + assert!( + events.iter().any(|event| matches!( + event, + RuntimeEvent::Balances(pallet_balances::Event::Deposit{ who, amount }) + if *who == assethub_sovereign && *amount == 2680000000000, + )), + "AssetHub sovereign takes remote fee." 
+ ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 62b969b682f4696889a95261810121f3fde63533..6dcb57f416102b9edde4f286bc33b1434bb899fa 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -7,27 +7,33 @@ license = "Apache-2.0" description = "Bridge Hub Westend runtime integration tests with xcm-emulator" publish = false +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } # Substrate -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } # Bridges -pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false} -bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false} +pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false } +bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus asset-test-utils = { path = "../../../../../../parachains/runtimes/assets/test-utils" } parachains-common = { path = "../../../../../../parachains/common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false} -cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false } bridge-hub-westend-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false} -westend-system-emulated-network ={ path = "../../../networks/westend-system" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } +rococo-westend-system-emulated-network = { path = 
"../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index f406a73d18d55e2969da4fdf9b5f495ac9287933..90a11d38f777360805b47224feeff0d41d02eb2e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -14,12 +14,17 @@ // limitations under the License. // Substrate -pub use frame_support::assert_ok; +pub use frame_support::{assert_err, assert_ok, pallet_prelude::DispatchResult}; +pub use sp_runtime::DispatchError; // Polkadot pub use xcm::{ + latest::ParentThen, prelude::{AccountId32 as AccountId32Junction, *}, - v3::{Error, NetworkId::Rococo as RococoId}, + v3::{ + Error, + NetworkId::{Rococo as RococoId, Westend as WestendId}, + }, }; // Bridges @@ -27,6 +32,8 @@ pub use bp_messages::LaneId; // Cumulus pub use emulated_integration_tests_common::{ + accounts::ALICE, + impls::Inspect, test_parachain_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, @@ -36,16 +43,22 @@ pub use emulated_integration_tests_common::{ PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, }; pub use parachains_common::{AccountId, Balance}; -pub use westend_system_emulated_network::{ +pub use rococo_westend_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + asset_hub_westend_emulated_chain::{ + genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, bridge_hub_westend_emulated_chain::{ - genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, + genesis::ED as BRIDGE_HUB_WESTEND_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, }, - westend_emulated_chain::{genesis::ED as ROCOCO_ED, WestendRelayPallet as WestendPallet}, + westend_emulated_chain::WestendRelayPallet as WestendPallet, + AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, - AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubWestendPara as BridgeHubWestend, - BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, - BridgeHubWestendParaSender as BridgeHubWestendSender, WestendRelay as Westend, - WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, + AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubWestendPara as BridgeHubWestend, BridgeHubWestendParaSender as BridgeHubWestendSender, + WestendRelay as Westend, }; pub const ASSET_ID: u32 = 1; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs new file mode 100644 index 0000000000000000000000000000000000000000..21f4b4ee2356160494a2b54f841e7689799d554c --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -0,0 +1,170 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use crate::tests::*; + +fn send_asset_from_asset_hub_westend_to_asset_hub_rococo(id: MultiLocation, amount: u128) { + let destination = asset_hub_rococo_location(); + + // fund the AHW's SA on BHW for paying bridge transport fees + BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); + + // set XCM versions + AssetHubWestend::force_xcm_version(destination, XCM_VERSION); + BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION); + + // send message over bridge + assert_ok!(send_asset_from_asset_hub_westend(destination, (id, amount))); + assert_bridge_hub_westend_message_accepted(true); + assert_bridge_hub_rococo_message_received(); +} + +#[test] +fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { + let wnd_at_asset_hub_westend: MultiLocation = Parent.into(); + let wnd_at_asset_hub_rococo = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Westend)) }; + let owner: AccountId = AssetHubRococo::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset( + wnd_at_asset_hub_rococo, + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + let sender_wnds_before = + ::account_data_of(AssetHubWestendSender::get()).free; + let receiver_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + }); + + let amount = ASSET_HUB_WESTEND_ED * 1_000; + send_asset_from_asset_hub_westend_to_asset_hub_rococo(wnd_at_asset_hub_westend, amount); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // issue WNDs on AHR + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { + asset_id: *asset_id == wnd_at_asset_hub_rococo, + owner: *owner == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_wnds_after = + ::account_data_of(AssetHubWestendSender::get()).free; + let receiver_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + }); + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw).free; + + // Sender's balance is reduced + assert!(sender_wnds_before > sender_wnds_after); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is increased by sent amount + assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before + amount); +} + +#[test] +fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { + let prefund_amount = 10_000_000_000_000u128; + let roc_at_asset_hub_westend = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Rococo)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset( + roc_at_asset_hub_westend, + owner, + true, + ASSET_MIN_BALANCE, + vec![(AssetHubWestendSender::get(), prefund_amount)], + ); + + // fund the AHW's SA on AHR with the ROC tokens held in reserve + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), prefund_amount)]); + + let rocs_in_reserve_on_ahr_before = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + assert_eq!(rocs_in_reserve_on_ahr_before, prefund_amount); + let sender_rocs_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) + }); + assert_eq!(sender_rocs_before, prefund_amount); + let receiver_rocs_before = + ::account_data_of(AssetHubRococoReceiver::get()).free; + + let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; + send_asset_from_asset_hub_westend_to_asset_hub_rococo(roc_at_asset_hub_westend, amount_to_send); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // ROC is withdrawn from AHW's SA on AHR + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_ahw_on_ahr, + amount: *amount == amount_to_send, + }, + // ROCs deposited to beneficiary + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_rocs_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) + }); + let receiver_rocs_after = + ::account_data_of(AssetHubRococoReceiver::get()).free; + let rocs_in_reserve_on_ahr_after = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + + // Sender's balance is reduced + assert!(sender_rocs_before > sender_rocs_after); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + // Reserve balance is reduced by sent amount + assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before - amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs deleted file mode 100644 index 1fdd9441e48316ed1fe8e371601c068c74752d05..0000000000000000000000000000000000000000 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::*; - -#[test] -fn example() { - // Init tests variables - // XcmPallet send arguments - let sudo_origin = ::RuntimeOrigin::root(); - let destination = Westend::child_location_of(BridgeHubWestend::para_id()).into(); - let weight_limit = WeightLimit::Unlimited; - let check_origin = None; - - let remote_xcm = Xcm(vec![ClearOrigin]); - - let xcm = VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit, check_origin }, - ExportMessage { - network: RococoId, - destination: X1(Parachain(AssetHubWestend::para_id().into())), - xcm: remote_xcm, - }, - ])); - - // Westend Global Consensus - // Send XCM message from Relay Chain to Bridge Hub source Parachain - Westend::execute_with(|| { - assert_ok!(::XcmPallet::send( - sudo_origin, - bx!(destination), - bx!(xcm), - )); - - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); - // Receive XCM message in Bridge Hub source Parachain - BridgeHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - BridgeHubWestend, - vec![ - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { - success: true, - .. 
- }) => {}, - RuntimeEvent::BridgeRococoMessages(pallet_bridge_messages::Event::MessageAccepted { - lane_id: LaneId([0, 0, 0, 2]), - nonce: 1, - }) => {}, - ] - ); - }); -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index 1eef05c6b9281e26d3e412f4aac8b9e265d2d5c0..ec2e68fc8894bc676ec9137c0b36463db25761a7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -13,5 +13,102 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod example; +use crate::*; + +mod asset_transfers; +mod send_xcm; mod teleport; + +pub(crate) fn asset_hub_rococo_location() -> MultiLocation { + MultiLocation { + parents: 2, + interior: X2( + GlobalConsensus(NetworkId::Rococo), + Parachain(AssetHubRococo::para_id().into()), + ), + } +} + +pub(crate) fn bridge_hub_rococo_location() -> MultiLocation { + MultiLocation { + parents: 2, + interior: X2( + GlobalConsensus(NetworkId::Rococo), + Parachain(BridgeHubRococo::para_id().into()), + ), + } +} + +pub(crate) fn send_asset_from_asset_hub_westend( + destination: MultiLocation, + (id, amount): (MultiLocation, u128), +) -> DispatchResult { + let signed_origin = + ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); + + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); + + let assets: MultiAssets = (id, amount).into(); + let fee_asset_item = 0; + + AssetHubWestend::execute_with(|| { + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ) + }) +} + +pub(crate) fn assert_bridge_hub_westend_message_accepted(expected_processed: bool) { + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + if expected_processed { + assert_expected_events!( + BridgeHubWestend, + vec![ + // pay for bridge fees + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { .. }) => {}, + // message exported + RuntimeEvent::BridgeRococoMessages( + pallet_bridge_messages::Event::MessageAccepted { .. } + ) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + } else { + assert_expected_events!( + BridgeHubWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { + success: false, + .. + }) => {}, + ] + ); + } + }); +} + +pub(crate) fn assert_bridge_hub_rococo_message_received() { + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubRococo, + vec![ + // message sent to destination + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
} + ) => {}, + ] + ); + }) +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs new file mode 100644 index 0000000000000000000000000000000000000000..0773cbb059929cc360c28cf776986559b43e0ced --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -0,0 +1,177 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::tests::*; + +#[test] +fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable() { + // Init tests variables + // XcmPallet send arguments + let sudo_origin = ::RuntimeOrigin::root(); + let destination = Westend::child_location_of(BridgeHubWestend::para_id()).into(); + let weight_limit = WeightLimit::Unlimited; + let check_origin = None; + + let remote_xcm = Xcm(vec![ClearOrigin]); + + let xcm = VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit, check_origin }, + ExportMessage { + network: RococoId, + destination: X1(Parachain(AssetHubRococo::para_id().into())), + xcm: remote_xcm, + }, + ])); + + // Westend Global Consensus + // Send XCM message from Relay Chain to Bridge Hub source Parachain + Westend::execute_with(|| { + assert_ok!(::XcmPallet::send( + sudo_origin, + bx!(destination), + bx!(xcm), + )); + + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + // Receive XCM message in Bridge Hub source Parachain, it should fail, because we don't have + // opened bridge/lane. 
+ assert_bridge_hub_westend_message_accepted(false); +} + +#[test] +fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { + // Initially set only default version on all runtimes + AssetHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + BridgeHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + BridgeHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + AssetHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + + // prepare data + let destination = asset_hub_rococo_location(); + let native_token = MultiLocation::parent(); + let amount = ASSET_HUB_WESTEND_ED * 1_000; + + // fund the AHR's SA on BHR for paying bridge transport fees + BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); + // fund sender + AssetHubWestend::fund_accounts(vec![(AssetHubWestendSender::get().into(), amount * 10)]); + + // send XCM from AssetHubWestend - fails - destination version not known + assert_err!( + send_asset_from_asset_hub_westend(destination, (native_token, amount)), + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [1, 0, 0, 0], + message: Some("SendFailure") + }) + ); + + // set destination version + AssetHubWestend::force_xcm_version(destination, xcm::v3::prelude::XCM_VERSION); + + // TODO: remove this block, when removing `xcm:v2` + { + // send XCM from AssetHubRococo - fails - AssetHubRococo is set to the default/safe `2` + // version, which does not have the `ExportMessage` instruction. If the default `2` is + // changed to `3`, then this assert can go away" + assert_err!( + send_asset_from_asset_hub_westend(destination, (native_token, amount)), + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [1, 0, 0, 0], + message: Some("SendFailure") + }) + ); + + // set exact version for BridgeHubWestend to `2` without `ExportMessage` instruction + AssetHubWestend::force_xcm_version( + ParentThen(Parachain(BridgeHubWestend::para_id().into()).into()).into(), + xcm::v2::prelude::XCM_VERSION, + ); + // send XCM from AssetHubWestend - fails - `ExportMessage` is not in `2` + assert_err!( + send_asset_from_asset_hub_westend(destination, (native_token, amount)), + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [1, 0, 0, 0], + message: Some("SendFailure") + }) + ); + } + + // set version with `ExportMessage` for BridgeHubWestend + AssetHubWestend::force_xcm_version( + ParentThen(Parachain(BridgeHubWestend::para_id().into()).into()).into(), + xcm::v3::prelude::XCM_VERSION, + ); + // send XCM from AssetHubWestend - ok + assert_ok!(send_asset_from_asset_hub_westend(destination, (native_token, amount))); + + // `ExportMessage` on local BridgeHub - fails - remote BridgeHub version not known + assert_bridge_hub_westend_message_accepted(false); + + // set version for remote BridgeHub on BridgeHubWestend + BridgeHubWestend::force_xcm_version( + bridge_hub_rococo_location(), + xcm::v3::prelude::XCM_VERSION, + ); + // set version for AssetHubRococo on BridgeHubRococo + BridgeHubRococo::force_xcm_version( + ParentThen(Parachain(AssetHubRococo::para_id().into()).into()).into(), + xcm::v3::prelude::XCM_VERSION, + ); + + // send XCM from AssetHubWestend - ok + assert_ok!(send_asset_from_asset_hub_westend(destination, (native_token, amount))); + assert_bridge_hub_westend_message_accepted(true); + assert_bridge_hub_rococo_message_received(); + // message delivered and processed at destination + 
AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // message processed with failure, but for this scenario it is ok, important is that was delivered + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: false, .. } + ) => {}, + ] + ); + }); + + // TODO: remove this block, when removing `xcm:v2` + { + // set `2` version for remote BridgeHub on BridgeHubRococo, which does not have + // `UniversalOrigin` and `DescendOrigin` + BridgeHubWestend::force_xcm_version( + bridge_hub_rococo_location(), + xcm::v2::prelude::XCM_VERSION, + ); + + // send XCM from AssetHubWestend - ok + assert_ok!(send_asset_from_asset_hub_westend(destination, (native_token, amount))); + // message is not accepted on the local BridgeHub (`DestinationUnsupported`) because we + // cannot add `UniversalOrigin` and `DescendOrigin` + assert_bridge_hub_westend_message_accepted(false); + } +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs index 32639b8614be0e72dd2766c42d5d34f1ce9c8812..8dff6c292955f96d3e5bd83c424fcf9cdb85e8a2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -18,7 +18,7 @@ use bridge_hub_westend_runtime::xcm_config::XcmConfig; #[test] fn teleport_to_other_system_parachains_works() { - let amount = BRIDGE_HUB_ROCOCO_ED * 100; + let amount = BRIDGE_HUB_WESTEND_ED * 100; let native_asset: MultiAssets = (Parent, amount).into(); test_parachain_is_trusted_teleporter!( diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index e3f8023f4199fed318e2e5efcd7cd33687e90958..9ed2822fa3009e2ec014a8065e00824c49ede83e 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -6,6 +6,9 @@ edition = "2021" description = "Managed content" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } @@ -22,7 +25,7 @@ sp-std = { path = "../../../../substrate/primitives/std", default-features = fal sp-io = { path = "../../../../substrate/primitives/io", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/cumulus/parachains/pallets/collective-content/src/benchmarking.rs b/cumulus/parachains/pallets/collective-content/src/benchmarking.rs index 1f145f725b139b4e93481a9c69b0715471f7ec0b..943386a842766129c2b5d429dc3a9648249ea73f 100644 --- a/cumulus/parachains/pallets/collective-content/src/benchmarking.rs +++ b/cumulus/parachains/pallets/collective-content/src/benchmarking.rs @@ -56,7 +56,7 @@ mod benchmarks { .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] - _(origin as T::RuntimeOrigin, cid.clone(), Some(expire_at.clone())); + _(origin as T::RuntimeOrigin, cid.clone(), Some(expire_at)); assert_eq!(>::count(), 1); assert_last_event::( diff --git 
a/cumulus/parachains/pallets/collective-content/src/mock.rs b/cumulus/parachains/pallets/collective-content/src/mock.rs index 2ae5943f332acf61b60792563b7d3f9b1e808c70..7a752da71fceff211679e4b87428198c52f87e5d 100644 --- a/cumulus/parachains/pallets/collective-content/src/mock.rs +++ b/cumulus/parachains/pallets/collective-content/src/mock.rs @@ -18,7 +18,7 @@ pub use crate as pallet_collective_content; use crate::WeightInfo; use frame_support::{ - ord_parameter_types, parameter_types, + derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64}, weights::Weight, }; @@ -55,6 +55,7 @@ impl pallet_collective_content::Config for Test { type WeightInfo = CCWeightInfo; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = (); type BlockWeights = (); diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 727182dfb8e8639b6fb889a1f6a898b14495697e..31f7b8aef392f02d46d8eb23cb58f780b738a143 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -6,20 +6,23 @@ version = "0.1.0" license = "Apache-2.0" description = "Pallet to store the parachain ID" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../substrate/frame/system", default-features = false} +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../../substrate/primitives/std", default-features = false} +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../substrate/primitives/std", default-features = false } cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 0133befa855b030e622b44a7c54e9b49d5eedde7..5c1099a110a4bb54c39fef27df8ecaa01596efd9 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -6,22 +6,25 @@ version = "0.1.0" license = "Apache-2.0" description = "Ping Pallet for Cumulus XCM/UMP testing." 
+[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false} -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../substrate/frame/system", default-features = false} +sp-std = { path = "../../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } cumulus-pallet-xcm = { path = "../../../pallets/xcm", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-xcm/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs deleted file mode 100644 index 52ad3241e519594423953eb24e6586b8035bb818..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ /dev/null @@ -1,1515 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Asset Hub Kusama Runtime -//! -//! Asset Hub Kusama, formerly known as "Statemine", is the canary network for its Polkadot cousin. - -#![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit = "256"] - -// Make the WASM binary available. 
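The comment above closes the preamble of the deleted Asset Hub Kusama runtime; the next lines pull the compiled Wasm blob in via `include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"))`. That generated file comes from the runtime's build script. A sketch of the conventional `build.rs` for such runtimes, assuming `substrate-wasm-builder` with its usual options (the options shown are the customary ones, not quoted from this diff):

```rust
// Illustrative build.rs for a runtime whose lib.rs includes the generated
// `wasm_binary.rs` under `cfg(feature = "std")`.
#[cfg(feature = "std")]
fn main() {
    substrate_wasm_builder::WasmBuilder::new()
        .with_current_project()
        .export_heap_base()
        .import_memory()
        .build();
}

// For no_std builds of the runtime crate, the Wasm build step is skipped.
#[cfg(not(feature = "std"))]
fn main() {}
```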
-#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -mod weights; -pub mod xcm_config; - -use assets_common::{ - foreign_creators::ForeignCreators, - local_and_foreign_assets::{LocalAndForeignAssets, MultiLocationConverter}, - matching::FromSiblingParachain, - AssetIdForTrustBackedAssetsConvert, MultiLocationForAssetId, -}; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, Perbill, Permill, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -use codec::{Decode, Encode, MaxEncodedLen}; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; -use frame_support::{ - construct_runtime, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - ord_parameter_types, parameter_types, - traits::{ - AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, - InstanceFilter, TransformOrigin, - }, - weights::{ConstantMultiplier, Weight}, - BoundedVec, PalletId, -}; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, EnsureSigned, EnsureSignedBy, -}; -use pallet_asset_conversion_tx_payment::AssetConversionAdapter; -use pallet_nfts::PalletFeatures; -pub use parachains_common as common; -use parachains_common::{ - impls::DealWithFees, - kusama::{consensus::*, currency::*, fee::WeightToFee}, - message_queue::{NarrowOriginToSibling, ParaIdToSibling}, - AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, Hash, Header, Nonce, - Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, - NORMAL_DISPATCH_RATIO, SLOT_DURATION, -}; -use sp_runtime::RuntimeDebug; -use xcm::opaque::v3::MultiLocation; -use xcm_config::{ - FellowshipLocation, ForeignAssetsConvertedConcreteId, GovernanceLocation, KsmLocation, - PoolAssetsConvertedConcreteId, TrustBackedAssetsConvertedConcreteId, -}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -// Polkadot imports -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::latest::BodyId; - -use crate::xcm_config::{ - ForeignCreatorsSovereignAccountOf, LocalAndForeignAssetsMultiLocationMatcher, - TrustBackedAssetsPalletLocation, -}; -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - } -} - -#[cfg(feature = "state-trie-version-1")] -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - // Note: "statemine" is the legacy name for this chain. It has been renamed to - // "asset-hub-kusama". Many wallets/tools depend on the `spec_name`, so it remains "statemine" - // for the time being. Wallets/tools should update to treat "asset-hub-kusama" equally. 
- spec_name: create_runtime_str!("statemine"), - impl_name: create_runtime_str!("statemine"), - authoring_version: 1, - spec_version: 10000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 13, - state_version: 1, -}; - -#[cfg(not(feature = "state-trie-version-1"))] -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - // Note: "statemine" is the legacy name for this change. It has been renamed to - // "asset-hub-kusama". Many wallets/tools depend on the `spec_name`, so it remains "statemine" - // for the time being. Wallets/tools should update to treat "asset-hub-kusama" equally. - spec_name: create_runtime_str!("statemine"), - impl_name: create_runtime_str!("statemine"), - authoring_version: 1, - spec_version: 10000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 13, - state_version: 0, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! { - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 2; -} - -// Configure FRAME pallets to include in runtime. -impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = RuntimeBlockWeights; - type BlockLength = RuntimeBlockLength; - type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; - type Nonce = Nonce; - type Hash = Hash; - type Hashing = BlakeTwo256; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type BlockHashCount = BlockHashCount; - type DbWeight = RocksDbWeight; - type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); - type AccountData = pallet_balances::AccountData; - type SystemWeightInfo = weights::frame_system::WeightInfo; - type SS58Prefix = SS58Prefix; - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = weights::pallet_timestamp::WeightInfo; -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! 
{ - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<50>; - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = weights::pallet_balances::WeightInfo; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - // We allow each account to have holds on it from: - // - `NftFractionalization`: 1 - type MaxHolds = ConstU32<1>; - type MaxFreezes = ConstU32<0>; -} - -parameter_types! { - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = MILLICENTS; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = - pallet_transaction_payment::CurrencyAdapter>; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; - type OperationalFeeMultiplier = ConstU8<5>; -} - -parameter_types! { - pub const AssetDeposit: Balance = UNITS / 10; // 1 / 10 UNITS deposit to create asset - pub const AssetAccountDeposit: Balance = deposit(1, 16); - pub const ApprovalDeposit: Balance = EXISTENTIAL_DEPOSIT; - pub const AssetsStringLimit: u32 = 50; - /// Key = 32 bytes, Value = 36 bytes (32+1+1+1+1) - // https://github.com/paritytech/substrate/blob/069917b/frame/assets/src/lib.rs#L257L271 - pub const MetadataDepositBase: Balance = deposit(1, 68); - pub const MetadataDepositPerByte: Balance = deposit(0, 1); -} - -/// We allow root to execute privileged asset operations. -pub type AssetsForceOrigin = EnsureRoot; - -// Called "Trust Backed" assets because these are generally registered by some account, and users of -// the asset assume it has some claimed backing. The pallet is called `Assets` in -// `construct_runtime` to avoid breaking changes on storage reads. -pub type TrustBackedAssetsInstance = pallet_assets::Instance1; -type TrustBackedAssetsCall = pallet_assets::Call; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = AssetIdForTrustBackedAssets; - type AssetIdParameter = codec::Compact; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - type AssetDeposit = AssetDeposit; - type MetadataDepositBase = MetadataDepositBase; - type MetadataDepositPerByte = MetadataDepositPerByte; - type ApprovalDeposit = ApprovalDeposit; - type StringLimit = AssetsStringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_local::WeightInfo; - type CallbackHandle = (); - type AssetAccountDeposit = AssetAccountDeposit; - type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); -} - -parameter_types! { - pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); - pub const AllowMultiAssetPools: bool = false; - // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero - pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); -} - -ord_parameter_types! 
{ - pub const AssetConversionOrigin: sp_runtime::AccountId32 = - AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); -} - -pub type PoolAssetsInstance = pallet_assets::Instance3; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type RemoveItemsLimit = ConstU32<1000>; - type AssetId = u32; - type AssetIdParameter = u32; - type Currency = Balances; - type CreateOrigin = - AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - // Deposits are zero because creation/admin is limited to Asset Conversion pallet. - type AssetDeposit = ConstU128<0>; - type AssetAccountDeposit = ConstU128<0>; - type MetadataDepositBase = ConstU128<0>; - type MetadataDepositPerByte = ConstU128<0>; - type ApprovalDeposit = ApprovalDeposit; - type StringLimit = ConstU32<50>; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_pool::WeightInfo; - type CallbackHandle = (); - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); -} - -impl pallet_asset_conversion::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type HigherPrecisionBalance = sp_core::U256; - type Currency = Balances; - type AssetBalance = Balance; - type AssetId = MultiLocation; - type Assets = LocalAndForeignAssets< - Assets, - AssetIdForTrustBackedAssetsConvert, - ForeignAssets, - >; - type PoolAssets = PoolAssets; - type PoolAssetId = u32; - type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam - type PoolSetupFeeReceiver = AssetConversionOrigin; - // should be non-zero if `AllowMultiAssetPools` is true, otherwise can be zero. - type LiquidityWithdrawalFee = LiquidityWithdrawalFee; - type LPFee = ConstU32<3>; - type PalletId = AssetConversionPalletId; - type AllowMultiAssetPools = AllowMultiAssetPools; - type MaxSwapPathLength = ConstU32<4>; - type MultiAssetId = Box; - type MultiAssetIdConverter = - MultiLocationConverter; - type MintMinLiquidity = ConstU128<100>; - type WeightInfo = weights::pallet_asset_conversion::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = - crate::xcm_config::BenchmarkMultiLocationConverter>; -} - -parameter_types! { - // we just reuse the same deposits - pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); - pub const ForeignAssetsAssetAccountDeposit: Balance = AssetAccountDeposit::get(); - pub const ForeignAssetsApprovalDeposit: Balance = ApprovalDeposit::get(); - pub const ForeignAssetsAssetsStringLimit: u32 = AssetsStringLimit::get(); - pub const ForeignAssetsMetadataDepositBase: Balance = MetadataDepositBase::get(); - pub const ForeignAssetsMetadataDepositPerByte: Balance = MetadataDepositPerByte::get(); -} - -/// Assets managed by some foreign location. Note: we do not declare a `ForeignAssetsCall` type, as -/// this type is used in proxy definitions. We assume that a foreign location would not want to set -/// an individual, local account as a proxy for the issuance of their assets. This issuance should -/// be managed by the foreign location's governance. 
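The `AssetConversionOrigin` in the `ord_parameter_types!` block above is the account derived from the asset-conversion `PalletId` via `AccountIdConversion::into_account_truncating`. A self-contained sketch of that derivation (the `PalletId` bytes mirror the deleted runtime; using `AccountId32` directly is an illustrative simplification):

```rust
use frame_support::PalletId;
use sp_runtime::{traits::AccountIdConversion, AccountId32};

/// The account owned by the asset-conversion pallet.
fn asset_conversion_account() -> AccountId32 {
    const ASSET_CONVERSION_PALLET_ID: PalletId = PalletId(*b"py/ascon");
    // "Truncating" because the encoded `PalletId` is padded or cut to fit
    // the 32-byte account id.
    ASSET_CONVERSION_PALLET_ID.into_account_truncating()
}
```

This is also the `PoolSetupFeeReceiver`, and because pool-asset creation and administration are restricted to the asset-conversion pallet, the pool-assets instance above can keep all of its deposits at zero.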
-pub type ForeignAssetsInstance = pallet_assets::Instance2; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = MultiLocationForAssetId; - type AssetIdParameter = MultiLocationForAssetId; - type Currency = Balances; - type CreateOrigin = ForeignCreators< - (FromSiblingParachain>,), - ForeignCreatorsSovereignAccountOf, - AccountId, - >; - type ForceOrigin = AssetsForceOrigin; - type AssetDeposit = ForeignAssetsAssetDeposit; - type MetadataDepositBase = ForeignAssetsMetadataDepositBase; - type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; - type ApprovalDeposit = ForeignAssetsApprovalDeposit; - type StringLimit = ForeignAssetsAssetsStringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_foreign::WeightInfo; - type CallbackHandle = (); - type AssetAccountDeposit = ForeignAssetsAssetAccountDeposit; - type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; -} - -parameter_types! { - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. - pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. - pub const DepositFactor: Balance = deposit(0, 32); - pub const MaxSignatories: u32 = 100; -} - -impl pallet_multisig::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type DepositBase = DepositBase; - type DepositFactor = DepositFactor; - type MaxSignatories = MaxSignatories; - type WeightInfo = weights::pallet_multisig::WeightInfo; -} - -impl pallet_utility::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type PalletsOrigin = OriginCaller; - type WeightInfo = weights::pallet_utility::WeightInfo; -} - -parameter_types! { - // One storage item; key size 32, value size 8; . - pub const ProxyDepositBase: Balance = deposit(1, 40); - // Additional storage item size of 33 bytes. - pub const ProxyDepositFactor: Balance = deposit(0, 33); - pub const MaxProxies: u16 = 32; - // One storage item; key size 32, value size 16 - pub const AnnouncementDepositBase: Balance = deposit(1, 48); - pub const AnnouncementDepositFactor: Balance = deposit(0, 66); - pub const MaxPending: u16 = 32; -} - -/// The type used to represent the kinds of proxying allowed. -#[derive( - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - RuntimeDebug, - MaxEncodedLen, - scale_info::TypeInfo, -)] -pub enum ProxyType { - /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. - Any, - /// Can execute any call that does not transfer funds or assets. - NonTransfer, - /// Proxy with the ability to reject time-delay proxy announcements. - CancelProxy, - /// Assets proxy. Can execute any call from `assets`, **including asset transfers**. - Assets, - /// Owner proxy. Can execute calls related to asset ownership. - AssetOwner, - /// Asset manager. Can execute calls related to asset management. - AssetManager, - /// Collator selection proxy. Can execute calls related to collator selection mechanism. - Collator, -} -impl Default for ProxyType { - fn default() -> Self { - Self::Any - } -} - -impl InstanceFilter for ProxyType { - fn filter(&self, c: &RuntimeCall) -> bool { - match self { - ProxyType::Any => true, - ProxyType::NonTransfer => !matches!( - c, - RuntimeCall::Balances { .. 
} | - RuntimeCall::Assets { .. } | - RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | - RuntimeCall::Uniques { .. } - ), - ProxyType::CancelProxy => matches!( - c, - RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Assets => { - matches!( - c, - RuntimeCall::Assets { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } | - RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } - ) - }, - ProxyType::AssetOwner => matches!( - c, - RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::AssetManager => matches!( - c, - RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. 
}) | - RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Collator => matches!( - c, - RuntimeCall::CollatorSelection { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - } - } - - fn is_superset(&self, o: &Self) -> bool { - match (self, o) { - (x, y) if x == y => true, - (ProxyType::Any, _) => true, - (_, ProxyType::Any) => false, - (ProxyType::Assets, ProxyType::AssetOwner) => true, - (ProxyType::Assets, ProxyType::AssetManager) => true, - (ProxyType::NonTransfer, ProxyType::Collator) => true, - _ => false, - } - } -} - -impl pallet_proxy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type ProxyType = ProxyType; - type ProxyDepositBase = ProxyDepositBase; - type ProxyDepositFactor = ProxyDepositFactor; - type MaxProxies = MaxProxies; - type WeightInfo = weights::pallet_proxy::WeightInfo; - type MaxPending = MaxPending; - type CallHasher = BlakeTwo256; - type AnnouncementDepositBase = AnnouncementDepositBase; - type AnnouncementDepositFactor = AnnouncementDepositFactor; -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type OutboundXcmpMessageSource = XcmpQueue; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -impl parachain_info::Config for Runtime {} - -parameter_types! 
{ - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -parameter_types! { - // Fellows pluralistic body. - pub const FellowsBodyId: BodyId = BodyId::Technical; -} - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, - >; - type ControllerOriginConverter = xcm_config::XcmOriginToTransactDispatchOrigin; - type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - -parameter_types! { - pub const Period: u32 = 6 * HOURS; - pub const Offset: u32 = 0; -} - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = weights::pallet_session::WeightInfo; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // StakingAdmin pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow root and the `StakingAdmin` to execute privileged collator selection operations. 
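`MessageQueueServiceWeight` above caps how much of each block the message queue may consume: 35% of the maximum block weight. A small sketch of that budget (the 35% ratio comes from the deleted runtime; the function wrapper is illustrative):

```rust
use frame_support::weights::Weight;
use sp_runtime::Perbill;

/// Weight budget the message queue is allowed to use per block.
fn message_queue_service_weight(max_block: Weight) -> Weight {
    // Scales both the ref-time and the proof-size component by 35%.
    Perbill::from_percent(35) * max_block
}
```

The reserved XCMP and DMP weights above use the same budgeting style, each taking a quarter of `MAXIMUM_BLOCK_WEIGHT`.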
-pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = Period; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = weights::pallet_collator_selection::WeightInfo; -} - -impl pallet_asset_conversion_tx_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Fungibles = LocalAndForeignAssets< - Assets, - AssetIdForTrustBackedAssetsConvert, - ForeignAssets, - >; - type OnChargeAssetTransaction = AssetConversionAdapter; -} - -parameter_types! { - pub const UniquesCollectionDeposit: Balance = UNITS / 10; // 1 / 10 UNIT deposit to create a collection - pub const UniquesItemDeposit: Balance = UNITS / 1_000; // 1 / 1000 UNIT deposit to mint an item - pub const UniquesMetadataDepositBase: Balance = deposit(1, 129); - pub const UniquesAttributeDepositBase: Balance = deposit(1, 0); - pub const UniquesDepositPerByte: Balance = deposit(0, 1); -} - -impl pallet_uniques::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type ForceOrigin = AssetsForceOrigin; - type CollectionDeposit = UniquesCollectionDeposit; - type ItemDeposit = UniquesItemDeposit; - type MetadataDepositBase = UniquesMetadataDepositBase; - type AttributeDepositBase = UniquesAttributeDepositBase; - type DepositPerByte = UniquesDepositPerByte; - type StringLimit = ConstU32<128>; - type KeyLimit = ConstU32<32>; - type ValueLimit = ConstU32<64>; - type WeightInfo = weights::pallet_uniques::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); - type CreateOrigin = AsEnsureOriginWithArg>; - type Locker = (); -} - -parameter_types! { - pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); - pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); - pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); -} - -impl pallet_nft_fractionalization::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Deposit = AssetDeposit; - type Currency = Balances; - type NewAssetSymbol = NewAssetSymbol; - type NewAssetName = NewAssetName; - type StringLimit = AssetsStringLimit; - type NftCollectionId = ::CollectionId; - type NftId = ::ItemId; - type AssetBalance = ::Balance; - type AssetId = >::AssetId; - type Assets = Assets; - type Nfts = Nfts; - type PalletId = NftFractionalizationPalletId; - type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; - type RuntimeHoldReason = RuntimeHoldReason; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); -} - -parameter_types! 
{ - pub NftsPalletFeatures: PalletFeatures = PalletFeatures::all_enabled(); - pub const NftsMaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS; - // re-use the Uniques deposits - pub const NftsCollectionDeposit: Balance = UniquesCollectionDeposit::get(); - pub const NftsItemDeposit: Balance = UniquesItemDeposit::get(); - pub const NftsMetadataDepositBase: Balance = UniquesMetadataDepositBase::get(); - pub const NftsAttributeDepositBase: Balance = UniquesAttributeDepositBase::get(); - pub const NftsDepositPerByte: Balance = UniquesDepositPerByte::get(); -} - -impl pallet_nfts::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - type Locker = (); - type CollectionDeposit = NftsCollectionDeposit; - type ItemDeposit = NftsItemDeposit; - type MetadataDepositBase = NftsMetadataDepositBase; - type AttributeDepositBase = NftsAttributeDepositBase; - type DepositPerByte = NftsDepositPerByte; - type StringLimit = ConstU32<256>; - type KeyLimit = ConstU32<64>; - type ValueLimit = ConstU32<256>; - type ApprovalsLimit = ConstU32<20>; - type ItemAttributesApprovalsLimit = ConstU32<30>; - type MaxTips = ConstU32<10>; - type MaxDeadlineDuration = NftsMaxDeadlineDuration; - type MaxAttributesPerCall = ConstU32<10>; - type Features = NftsPalletFeatures; - type OffchainSignature = Signature; - type OffchainPublic = ::Signer; - type WeightInfo = weights::pallet_nfts::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); -} - -// Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime - { - // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - // RandomnessCollectiveFlip = 2 removed - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 4, - - // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - AssetTxPayment: pallet_asset_conversion_tx_payment::{Pallet, Event} = 13, - - // Collator support. the order of these 5 are important and shall not change. - Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - // Temporary to migrate the remaining DMP messages: - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, - - // Handy utilities. - Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, - Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, - - // The main stage. 
- Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50, - Uniques: pallet_uniques::{Pallet, Call, Storage, Event} = 51, - Nfts: pallet_nfts::{Pallet, Call, Storage, Event} = 52, - ForeignAssets: pallet_assets::::{Pallet, Call, Storage, Event} = 53, - NftFractionalization: pallet_nft_fractionalization::{Pallet, Call, Storage, Event, HoldReason} = 54, - - PoolAssets: pallet_assets::::{Pallet, Call, Storage, Event} = 55, - AssetConversion: pallet_asset_conversion::{Pallet, Call, Storage, Event} = 56, - - #[cfg(feature = "state-trie-version-1")] - StateTrieMigration: pallet_state_trie_migration = 70, - } -); - -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, -); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; -/// Migrations to apply on runtime upgrade. -pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1,); - -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, - Migrations, ->; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - frame_benchmarking::define_benchmarks!( - [frame_system, SystemBench::] - [pallet_assets, Local] - [pallet_assets, Foreign] - [pallet_assets, Pool] - [pallet_asset_conversion, AssetConversion] - [pallet_balances, Balances] - [pallet_message_queue, MessageQueue] - [pallet_multisig, Multisig] - [pallet_nft_fractionalization, NftFractionalization] - [pallet_nfts, Nfts] - [pallet_proxy, Proxy] - [pallet_session, SessionBench::] - [pallet_uniques, Uniques] - [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_parachain_system, ParachainSystem] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - // XCM - [pallet_xcm, PolkadotXcm] - // NOTE: Make sure you point to the individual modules below. - [pallet_xcm_benchmarks::fungible, XcmBalances] - [pallet_xcm_benchmarks::generic, XcmGeneric] - ); -} - -impl_runtime_apis! 
{ - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_asset_conversion::AssetConversionApi< - Block, - Balance, - u128, - Box, - > for Runtime - { - fn quote_price_exact_tokens_for_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { - AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) - } - fn quote_price_tokens_for_exact_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { - AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) - } - fn get_reserves(asset1: Box, asset2: Box) -> Option<(Balance, Balance)> { - AssetConversion::get_reserves(&asset1, &asset2).ok() - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime 
- { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl assets_common::runtime_api::FungiblesApi< - Block, - AccountId, - > for Runtime - { - fn query_account_balances(account: AccountId) -> Result { - use assets_common::fungible_conversion::{convert, convert_balance}; - Ok([ - // collect pallet_balance - { - let balance = Balances::free_balance(account.clone()); - if balance > 0 { - vec![convert_balance::(balance)?] - } else { - vec![] - } - }, - // collect pallet_assets (TrustBackedAssets) - convert::<_, _, _, _, TrustBackedAssetsConvertedConcreteId>( - Assets::account_balances(account.clone()) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect pallet_assets (ForeignAssets) - convert::<_, _, _, _, ForeignAssetsConvertedConcreteId>( - ForeignAssets::account_balances(account.clone()) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect pallet_assets (PoolAssets) - convert::<_, _, _, _, PoolAssetsConvertedConcreteId>( - PoolAssets::account_balances(account) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect ... e.g. other tokens - ].concat().into()) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - - // This is defined once again in dispatch_benchmark, because list_benchmarks! - // and add_benchmarks! are macros exported by define_benchmarks! macros and those types - // are referenced in that call. - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - // Benchmark files generated for `Assets/ForeignAssets` instances are by default - // `pallet_assets_assets.rs / pallet_assets_foreign_assets`, which is not really nice, - // so with this redefinition we can change names to nicer: - // `pallet_assets_local.rs / pallet_assets_foreign.rs`. 
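The comment above explains why the pallet-assets instances get re-aliased: the generated benchmark output files are named after these aliases. For reference, a sketch of the three aliases with their instance generics written out; the generic arguments are inferred from the instance definitions earlier in the deleted runtime, so treat them as an assumption rather than a quotation, and note this is a fragment that relies on the surrounding runtime's types:

```rust
// Aliases used only by the benchmarking machinery: `list_benchmarks!` and
// `add_benchmarks!` refer to these names, so the output files become
// `pallet_assets_local.rs`, `pallet_assets_foreign.rs`, `pallet_assets_pool.rs`.
type Local = pallet_assets::Pallet<Runtime, TrustBackedAssetsInstance>;
type Foreign = pallet_assets::Pallet<Runtime, ForeignAssetsInstance>;
type Pool = pallet_assets::Pallet<Runtime, PoolAssetsInstance>;
```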
- type Local = pallet_assets::Pallet::; - type Foreign = pallet_assets::Pallet::; - type Pool = pallet_assets::Pallet::; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; - use sp_storage::TrackedStorageKey; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use xcm::latest::prelude::*; - use xcm_config::{KsmLocation, MaxAssetsIntoHolding}; - use pallet_xcm_benchmarks::asset_instance_from; - - parameter_types! { - pub ExistentialDepositMultiAsset: Option = Some(( - KsmLocation::get(), - ExistentialDeposit::get() - ).into()); - } - - impl pallet_xcm_benchmarks::Config for Runtime { - type XcmConfig = xcm_config::XcmConfig; - type AccountIdConverter = xcm_config::LocationToAccountId; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositMultiAsset, - xcm_config::PriceForParentDelivery, - >; - fn valid_destination() -> Result { - Ok(KsmLocation::get()) - } - fn worst_case_holding(depositable_count: u32) -> MultiAssets { - // A mix of fungible, non-fungible, and concrete assets. - let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; - let holding_fungibles = holding_non_fungibles.saturating_sub(1); - let fungibles_amount: u128 = 100; - let mut assets = (0..holding_fungibles) - .map(|i| { - MultiAsset { - id: Concrete(GeneralIndex(i as u128).into()), - fun: Fungible(fungibles_amount * i as u128), - } - }) - .chain(core::iter::once(MultiAsset { id: Concrete(Here.into()), fun: Fungible(u128::MAX) })) - .chain((0..holding_non_fungibles).map(|i| MultiAsset { - id: Concrete(GeneralIndex(i as u128).into()), - fun: NonFungible(asset_instance_from(i)), - })) - .collect::>(); - - assets.push(MultiAsset { - id: Concrete(KsmLocation::get()), - fun: Fungible(1_000_000 * UNITS), - }); - assets.into() - } - } - - parameter_types! 
{ - pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - KsmLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(KsmLocation::get()) }, - )); - pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; - } - - impl pallet_xcm_benchmarks::fungible::Config for Runtime { - type TransactAsset = Balances; - - type CheckedAccount = CheckedAccount; - type TrustedTeleporter = TrustedTeleporter; - type TrustedReserve = TrustedReserve; - - fn get_multi_asset() -> MultiAsset { - MultiAsset { - id: Concrete(KsmLocation::get()), - fun: Fungible(UNITS), - } - } - } - - impl pallet_xcm_benchmarks::generic::Config for Runtime { - type TransactAsset = Balances; - type RuntimeCall = RuntimeCall; - - fn worst_case_response() -> (u64, Response) { - (0u64, Response::Version(Default::default())) - } - - fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((KsmLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) - } - - fn subscribe_origin() -> Result { - Ok(KsmLocation::get()) - } - - fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = KsmLocation::get(); - let assets: MultiAssets = (Concrete(KsmLocation::get()), 1_000 * UNITS).into(); - let ticket = MultiLocation { parents: 0, interior: Here }; - Ok((origin, ticket, assets)) - } - - fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn export_message_origin_and_destination( - ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - } - - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - type Local = pallet_assets::Pallet::; - type Foreign = pallet_assets::Pallet::; - type Pool = pallet_assets::Pallet::; - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - //TODO: use from relay_well_known_keys::ACTIVE_CONFIG - hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - 
build_config::(config) - } - } -} - -cumulus_pallet_parachain_system::register_validate_block! { - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} - -#[cfg(feature = "state-trie-version-1")] -parameter_types! { - // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) - pub const MigrationSignedDepositPerItem: Balance = CENTS; - pub const MigrationSignedDepositBase: Balance = 2_000 * CENTS; - pub const MigrationMaxKeyLen: u32 = 512; -} - -#[cfg(feature = "state-trie-version-1")] -impl pallet_state_trie_migration::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type SignedDepositPerItem = MigrationSignedDepositPerItem; - type SignedDepositBase = MigrationSignedDepositBase; - // An origin that can control the whole pallet: should be Root, or a part of your council. - type ControlOrigin = frame_system::EnsureSignedBy; - // specific account for the migration, can trigger the signed migrations. - type SignedFilter = frame_system::EnsureSignedBy; - - // Replace this with weight based on your runtime. - type WeightInfo = pallet_state_trie_migration::weights::SubstrateWeight; - - type MaxKeyLen = MigrationMaxKeyLen; -} - -#[cfg(feature = "state-trie-version-1")] -frame_support::ord_parameter_types! { - pub const MigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); - pub const RootMigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); -} - -#[cfg(feature = "state-trie-version-1")] -#[test] -fn ensure_key_ss58() { - use frame_support::traits::SortedMembers; - use sp_core::crypto::Ss58Codec; - let acc = - AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); - //panic!("{:x?}", acc); - assert_eq!(acc, MigController::sorted_members()[0]); - let acc = - AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); - assert_eq!(acc, RootMigController::sorted_members()[0]); - //panic!("{:x?}", acc); -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{CENTS, MILLICENTS}; - use parachains_common::kusama::fee; - use sp_runtime::traits::Zero; - use sp_weights::WeightToFee; - - /// We can fit at least 1000 transfers in a block. - #[test] - fn sane_block_weight() { - use pallet_balances::WeightInfo; - let block = RuntimeBlockWeights::get().max_block; - let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; - let transfer = - base + weights::pallet_balances::WeightInfo::::transfer_allow_death(); - - let fit = block.checked_div_per_component(&transfer).unwrap_or_default(); - assert!(fit >= 1000, "{} should be at least 1000", fit); - } - - /// The fee for one transfer is at most 1 CENT. - #[test] - fn sane_transfer_fee() { - use pallet_balances::WeightInfo; - let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; - let transfer = - base + weights::pallet_balances::WeightInfo::::transfer_allow_death(); - - let fee: Balance = fee::WeightToFee::weight_to_fee(&transfer); - assert!(fee <= CENTS, "{} MILLICENTS should be at most 1000", fee / MILLICENTS); - } - - /// Weight is being charged for both dimensions. 
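The doc comment above introduces a test asserting that fees are charged for both weight dimensions. A `Weight` carries a computation (ref-time) component and a storage-proof-size component, and the test probes each in isolation via `Weight::from_parts`. A tiny runnable sketch of that split (the helper function is illustrative):

```rust
use frame_support::weights::Weight;

/// Returns the two components the fee model charges for separately.
fn components(w: Weight) -> (u64, u64) {
    (w.ref_time(), w.proof_size())
}

fn main() {
    // Ref-time only, as probed by `Weight::from_parts(10_000, 0)` in the test,
    assert_eq!(components(Weight::from_parts(10_000, 0)), (10_000, 0));
    // and proof-size only, as probed by `Weight::from_parts(0, 10_000)`.
    assert_eq!(components(Weight::from_parts(0, 10_000)), (0, 10_000));
}
```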
- #[test] - fn weight_charged_for_both_components() { - let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(10_000, 0)); - assert!(!fee.is_zero(), "Charges for ref time"); - - let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, 10_000)); - assert_eq!(fee, CENTS, "10kb maps to CENT"); - } - - /// Filling up a block by proof size is at most 30 times more expensive than ref time. - /// - /// This is just a sanity check. - #[test] - fn full_block_fee_ratio() { - let block = RuntimeBlockWeights::get().max_block; - let time_fee: Balance = - fee::WeightToFee::weight_to_fee(&Weight::from_parts(block.ref_time(), 0)); - let proof_fee: Balance = - fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, block.proof_size())); - - let proof_o_time = proof_fee.checked_div(time_fee).unwrap_or_default(); - assert!(proof_o_time <= 30, "{} should be at most 30", proof_o_time); - let time_o_proof = time_fee.checked_div(proof_fee).unwrap_or_default(); - assert!(time_o_proof <= 30, "{} should be at most 30", time_o_proof); - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
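The deleted weight file that follows is benchmark-generated; each function returns a base execution weight plus an estimated proof size, with database reads and writes added on top. A sketch of that shape using the figures from `on_idle_good_msg` below, substituting the concrete `RocksDbWeight` for the generic `T::DbWeight` so the example stands alone:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

/// Same structure as the generated `on_idle_good_msg` below:
/// base ref-time, estimated PoV size, then 5 DB reads and 5 DB writes.
fn on_idle_good_msg_like() -> Weight {
    Weight::from_parts(127_857_000, 0)
        .saturating_add(Weight::from_parts(0, 69_161))
        .saturating_add(RocksDbWeight::get().reads(5))
        .saturating_add(RocksDbWeight::get().writes(5))
}
```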
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. 
- Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index f787aa3270118b87202bc78b58dcb8084d5f5a5b..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// statemint-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/statemint/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. 
- fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_660_000 picoseconds. - Weight::from_parts(1_720_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 28_418 - .saturating_add(Weight::from_parts(24_636_963, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs deleted file mode 100644 index e394e8b837a10323bddab2f2fdfff6d6208b290c..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_xcmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// asset-hub-kusama-dev -// --output -// cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_xcmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_u32() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 6_000_000 picoseconds. 
- Weight::from_parts(6_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn enqueue_xcmp_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `118` - // Estimated: `3517` - // Minimum execution time: 15_000_000 picoseconds. - Weight::from_parts(16_000_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn suspend_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(3_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn resume_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `1596` - // Minimum execution time: 4_000_000 picoseconds. - Weight::from_parts(5_000_000, 0) - .saturating_add(Weight::from_parts(0, 1596)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn take_first_concatenated_xcm() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 44_000_000 picoseconds. 
- Weight::from_parts(45_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) - /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65747` - // Estimated: `69212` - // Minimum execution time: 62_000_000 picoseconds. - Weight::from_parts(66_000_000, 0) - .saturating_add(Weight::from_parts(0, 69212)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65710` - // Estimated: `69175` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(43_000_000, 0) - .saturating_add(Weight::from_parts(0, 69175)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs deleted file mode 100644 index 6304051e6cb3b18d04cf266293e97df2ba891a0f..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. -pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_106_000 picoseconds. - Weight::from_parts(1_884_213, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(388, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. - fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_528_000 picoseconds. - Weight::from_parts(27_081_927, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_730, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_882_000 picoseconds. - Weight::from_parts(4_149_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 103_389_161_000 picoseconds. 
- Weight::from_parts(106_870_091_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_236_000 picoseconds. - Weight::from_parts(2_302_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_045 - .saturating_add(Weight::from_parts(763_456, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_175_000 picoseconds. - Weight::from_parts(2_238_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_040 - .saturating_add(Weight::from_parts(571_397, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `84 + p * (69 ±0)` - // Estimated: `80 + p * (70 ±0)` - // Minimum execution time: 3_843_000 picoseconds. - Weight::from_parts(3_947_000, 0) - .saturating_add(Weight::from_parts(0, 80)) - // Standard Error: 2_188 - .saturating_add(Weight::from_parts(1_212_360, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs deleted file mode 100644 index f04081a84fb3e191f1a6e1e26cfaac3dee90cc76..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
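// Every weight module listed below follows the same shape as the generated files above:
// a zero-sized struct, generic over the runtime so it can read the configured
// `DbWeight`, implementing the pallet's `WeightInfo` trait. A minimal self-contained
// sketch of that pattern; the trait name and the numbers are illustrative, not copied
// from any one file.
use core::marker::PhantomData;
use frame_support::{traits::Get, weights::Weight};

pub trait ExampleWeightInfo {
    fn transfer() -> Weight;
}

pub struct ExampleWeights<T>(PhantomData<T>);
impl<T: frame_system::Config> ExampleWeightInfo for ExampleWeights<T> {
    fn transfer() -> Weight {
        // benchmark-measured base cost (ref_time, proof_size) plus the runtime's DB weights
        Weight::from_parts(45_000_000, 7404)
            .saturating_add(T::DbWeight::get().reads(4))
            .saturating_add(T::DbWeight::get().writes(4))
    }
}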
- -pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; -pub mod cumulus_pallet_parachain_system; -pub mod cumulus_pallet_xcmp_queue; -pub mod extrinsic_weights; -pub mod frame_system; -pub mod pallet_asset_conversion; -pub mod pallet_assets_foreign; -pub mod pallet_assets_local; -pub mod pallet_assets_pool; -pub mod pallet_balances; -pub mod pallet_collator_selection; -pub mod pallet_message_queue; -pub mod pallet_multisig; -pub mod pallet_nft_fractionalization; -pub mod pallet_nfts; -pub mod pallet_proxy; -pub mod pallet_session; -pub mod pallet_timestamp; -pub mod pallet_uniques; -pub mod pallet_utility; -pub mod pallet_xcm; -pub mod paritydb_weights; -pub mod rocksdb_weights; -pub mod xcm; - -pub use block_weights::constants::BlockExecutionWeight; -pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; -pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs deleted file mode 100644 index 3fcf2f8f4ec57260b2aaa7e422baa8c8e9be4772..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_asset_conversion` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/cumulus/.git/.artifacts/bench.json -// --pallet=pallet_asset_conversion -// --chain=asset-hub-kusama-dev -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_asset_conversion`. 
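// The `pub use` re-exports at the bottom of the weights `mod.rs` above are how the rest
// of the runtime reaches its benchmarked base costs. A sketch of the usual composition,
// using the generic `frame_support` constants as stand-ins (an assumption) for the
// chain-specific `BlockExecutionWeight` and `ExtrinsicBaseWeight` re-exported above.
use frame_support::weights::{
    constants::{BlockExecutionWeight, ExtrinsicBaseWeight},
    Weight,
};

// Every extrinsic pays a fixed inclusion overhead on top of its own dispatch weight.
fn total_extrinsic_weight(dispatch_weight: Weight) -> Weight {
    dispatch_weight.saturating_add(ExtrinsicBaseWeight::get())
}

// A block additionally pays a one-off execution overhead.
fn total_block_weight(extrinsic_weights: impl Iterator<Item = Weight>) -> Weight {
    extrinsic_weights.fold(BlockExecutionWeight::get(), |acc, w| acc.saturating_add(w))
}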
-pub struct WeightInfo(PhantomData); -impl pallet_asset_conversion::WeightInfo for WeightInfo { - /// Storage: `AssetConversion::Pools` (r:1 w:1) - /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0) - /// Storage: `System::Account` (r:2 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) - /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn create_pool() -> Weight { - // Proof Size summary in bytes: - // Measured: `480` - // Estimated: `6196` - // Minimum execution time: 88_484_000 picoseconds. - Weight::from_parts(92_964_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `AssetConversion::Pools` (r:1 w:0) - /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn add_liquidity() -> Weight { - // Proof Size summary in bytes: - // Measured: `1117` - // Estimated: `7404` - // Minimum execution time: 153_015_000 picoseconds. 
- Weight::from_parts(157_018_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `AssetConversion::Pools` (r:1 w:0) - /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn remove_liquidity() -> Weight { - // Proof Size summary in bytes: - // Measured: `1106` - // Estimated: `7404` - // Minimum execution time: 141_726_000 picoseconds. - Weight::from_parts(147_865_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `ForeignAssets::Asset` (r:2 w:2) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:4 w:4) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn swap_exact_tokens_for_tokens() -> Weight { - // Proof Size summary in bytes: - // Measured: `1148` - // Estimated: `13818` - // Minimum execution time: 168_619_000 picoseconds. - Weight::from_parts(174_283_000, 0) - .saturating_add(Weight::from_parts(0, 13818)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:2 w:2) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:4 w:4) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn swap_tokens_for_exact_tokens() -> Weight { - // Proof Size summary in bytes: - // Measured: `1148` - // Estimated: `13818` - // Minimum execution time: 171_565_000 picoseconds. 
- Weight::from_parts(173_702_000, 0) - .saturating_add(Weight::from_parts(0, 13818)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs deleted file mode 100644 index c2688d97905491adbd91cd8bbc588adc64819c96..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/cumulus/.git/.artifacts/bench.json -// --pallet=pallet_assets -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo(PhantomData); -impl pallet_assets::WeightInfo for WeightInfo { - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `4273` - // Minimum execution time: 30_485_000 picoseconds. 
- Weight::from_parts(31_007_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `4` - // Estimated: `4273` - // Minimum execution time: 12_991_000 picoseconds. - Weight::from_parts(13_304_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 15_689_000 picoseconds. - Weight::from_parts(16_063_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: ForeignAssets Asset (r:1 w:1) - /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) - /// Storage: ForeignAssets Account (r:1001 w:1000) - /// Proof: ForeignAssets Account (max_values: None, max_size: Some(732), added: 3207, mode: MaxEncodedLen) - /// Storage: System Account (r:1000 w:1000) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `4273 + c * (3207 ±0)` - // Minimum execution time: 18_533_000 picoseconds. - Weight::from_parts(18_791_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 5_059 - .saturating_add(Weight::from_parts(12_049_659, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 3207).saturating_mul(c.into())) - } - /// Storage: ForeignAssets Asset (r:1 w:1) - /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) - /// Storage: ForeignAssets Approvals (r:1001 w:1000) - /// Proof: ForeignAssets Approvals (max_values: None, max_size: Some(746), added: 3221, mode: MaxEncodedLen) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `413 + a * (86 ±0)` - // Estimated: `4273 + a * (3221 ±0)` - // Minimum execution time: 20_028_000 picoseconds. 
- Weight::from_parts(20_148_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 3_401 - .saturating_add(Weight::from_parts(13_897_319, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 3221).saturating_mul(a.into())) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:0) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_949_000 picoseconds. - Weight::from_parts(16_241_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 27_156_000 picoseconds. - Weight::from_parts(28_182_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 33_503_000 picoseconds. - Weight::from_parts(33_860_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 45_065_000 picoseconds. 
- Weight::from_parts(45_856_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 39_913_000 picoseconds. - Weight::from_parts(40_791_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 45_337_000 picoseconds. - Weight::from_parts(45_980_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 19_012_000 picoseconds. - Weight::from_parts(19_326_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_656_000 picoseconds. - Weight::from_parts(19_205_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 15_440_000 picoseconds. 
- Weight::from_parts(15_825_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 15_465_000 picoseconds. - Weight::from_parts(15_769_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:0) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 16_579_000 picoseconds. - Weight::from_parts(16_931_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_138_000 picoseconds. - Weight::from_parts(15_435_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: ForeignAssets Asset (r:1 w:0) - /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) - /// Storage: ForeignAssets Metadata (r:1 w:1) - /// Proof: ForeignAssets Metadata (max_values: None, max_size: Some(738), added: 3213, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 29_846_000 picoseconds. - Weight::from_parts(31_607_649, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `4273` - // Minimum execution time: 30_582_000 picoseconds. 
- Weight::from_parts(31_008_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: ForeignAssets Asset (r:1 w:0) - /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) - /// Storage: ForeignAssets Metadata (r:1 w:1) - /// Proof: ForeignAssets Metadata (max_values: None, max_size: Some(738), added: 3213, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `81` - // Estimated: `4273` - // Minimum execution time: 14_186_000 picoseconds. - Weight::from_parts(14_717_332, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 517 - .saturating_add(Weight::from_parts(2_595, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `4273` - // Minimum execution time: 29_499_000 picoseconds. - Weight::from_parts(29_918_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 13_815_000 picoseconds. - Weight::from_parts(14_138_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 33_029_000 picoseconds. 
- Weight::from_parts(33_524_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `520` - // Estimated: `7404` - // Minimum execution time: 63_205_000 picoseconds. - Weight::from_parts(64_078_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `446` - // Estimated: `4273` - // Minimum execution time: 34_948_000 picoseconds. - Weight::from_parts(35_484_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `446` - // Estimated: `4273` - // Minimum execution time: 35_722_000 picoseconds. - Weight::from_parts(36_266_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_855_000 picoseconds. 
- Weight::from_parts(16_182_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `345` - // Estimated: `4273` - // Minimum execution time: 34_984_000 picoseconds. - Weight::from_parts(35_512_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 33_041_000 picoseconds. - Weight::from_parts(34_124_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `471` - // Estimated: `4273` - // Minimum execution time: 31_728_000 picoseconds. - Weight::from_parts(32_012_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `4273` - // Minimum execution time: 29_432_000 picoseconds. 
- Weight::from_parts(29_968_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_827_000 picoseconds. - Weight::from_parts(19_172_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs deleted file mode 100644 index 957e33fcd9ea20ff201ae3635466afcf154be011..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_assets -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. 
-pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> { - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3675` - // Minimum execution time: 26_510_000 picoseconds. - Weight::from_parts(27_332_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3675` - // Minimum execution time: 10_899_000 picoseconds. - Weight::from_parts(11_395_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_593_000 picoseconds. - Weight::from_parts(14_108_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1001 w:1000) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1000 w:1000) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 16_216_000 picoseconds. - Weight::from_parts(16_636_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 9_346 - .saturating_add(Weight::from_parts(15_306_152, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1001 w:1000) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. 
- fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `414 + a * (86 ±0)` - // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 16_745_000 picoseconds. - Weight::from_parts(17_247_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(Weight::from_parts(15_634_963, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:0) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_650_000 picoseconds. - Weight::from_parts(14_721_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 24_121_000 picoseconds. - Weight::from_parts(25_023_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 31_414_000 picoseconds. - Weight::from_parts(32_235_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 43_114_000 picoseconds. 
- Weight::from_parts(44_106_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 37_954_000 picoseconds. - Weight::from_parts(38_772_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 43_051_000 picoseconds. - Weight::from_parts(44_003_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 17_048_000 picoseconds. - Weight::from_parts(17_614_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_705_000 picoseconds. - Weight::from_parts(17_581_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_284_000 picoseconds. 
- Weight::from_parts(13_735_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_030_000 picoseconds. - Weight::from_parts(13_417_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:0) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 14_174_000 picoseconds. - Weight::from_parts(14_660_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 12_737_000 picoseconds. - Weight::from_parts(13_172_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 27_707_000 picoseconds. - Weight::from_parts(29_036_880, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 688 - .saturating_add(Weight::from_parts(2_426, 0).saturating_mul(n.into())) - // Standard Error: 688 - .saturating_add(Weight::from_parts(776, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `407` - // Estimated: `3675` - // Minimum execution time: 28_514_000 picoseconds. 
- Weight::from_parts(29_216_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `3675` - // Minimum execution time: 12_452_000 picoseconds. - Weight::from_parts(13_095_356, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 275 - .saturating_add(Weight::from_parts(826, 0).saturating_mul(n.into())) - // Standard Error: 275 - .saturating_add(Weight::from_parts(808, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `407` - // Estimated: `3675` - // Minimum execution time: 28_181_000 picoseconds. - Weight::from_parts(29_050_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 12_253_000 picoseconds. - Weight::from_parts(12_545_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 31_084_000 picoseconds. 
- Weight::from_parts(32_052_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `521` - // Estimated: `6208` - // Minimum execution time: 61_756_000 picoseconds. - Weight::from_parts(62_740_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3675` - // Minimum execution time: 33_370_000 picoseconds. - Weight::from_parts(34_127_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3675` - // Minimum execution time: 33_753_000 picoseconds. - Weight::from_parts(34_613_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_508_000 picoseconds. 
- Weight::from_parts(13_997_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `346` - // Estimated: `3675` - // Minimum execution time: 32_578_000 picoseconds. - Weight::from_parts(33_675_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 30_768_000 picoseconds. - Weight::from_parts(31_710_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `472` - // Estimated: `3675` - // Minimum execution time: 30_028_000 picoseconds. - Weight::from_parts(30_793_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `402` - // Estimated: `3675` - // Minimum execution time: 28_354_000 picoseconds. - Weight::from_parts(29_097_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_607_000 picoseconds. 
- Weight::from_parts(17_433_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs deleted file mode 100644 index e0b4ff3655217bb2b91c27ea0290b9aeee389ba2..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/cumulus/.git/.artifacts/bench.json -// --pallet=pallet_assets -// --chain=asset-hub-kusama-dev -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> { - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3675` - // Minimum execution time: 11_591_000 picoseconds. - Weight::from_parts(11_901_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3675` - // Minimum execution time: 11_184_000 picoseconds. 
- Weight::from_parts(11_640_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `3675` - // Minimum execution time: 13_809_000 picoseconds. - Weight::from_parts(14_226_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1001 w:1000) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1000 w:1000) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 16_439_000 picoseconds. - Weight::from_parts(16_743_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 4_792 - .saturating_add(Weight::from_parts(14_463_991, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1001 w:1000) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `451 + a * (86 ±0)` - // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 17_218_000 picoseconds. 
- Weight::from_parts(17_585_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 2_056 - .saturating_add(Weight::from_parts(5_323_866, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:0) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 13_848_000 picoseconds. - Weight::from_parts(14_325_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 24_904_000 picoseconds. - Weight::from_parts(25_607_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3675` - // Minimum execution time: 31_477_000 picoseconds. - Weight::from_parts(32_338_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `6208` - // Minimum execution time: 42_994_000 picoseconds. 
- Weight::from_parts(44_041_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `6208` - // Minimum execution time: 37_551_000 picoseconds. - Weight::from_parts(38_648_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `6208` - // Minimum execution time: 42_829_000 picoseconds. - Weight::from_parts(44_029_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3675` - // Minimum execution time: 17_304_000 picoseconds. - Weight::from_parts(17_782_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3675` - // Minimum execution time: 17_040_000 picoseconds. - Weight::from_parts(17_698_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `3675` - // Minimum execution time: 13_238_000 picoseconds. 
- Weight::from_parts(13_810_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `3675` - // Minimum execution time: 13_034_000 picoseconds. - Weight::from_parts(13_603_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:0) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 14_357_000 picoseconds. - Weight::from_parts(14_774_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 13_040_000 picoseconds. - Weight::from_parts(13_616_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:1) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 15_274_000 picoseconds. - Weight::from_parts(16_096_881, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 239 - .saturating_add(Weight::from_parts(1_631, 0).saturating_mul(n.into())) - // Standard Error: 239 - .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:1) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `444` - // Estimated: `3675` - // Minimum execution time: 15_900_000 picoseconds. 
- Weight::from_parts(16_526_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:1) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `3675` - // Minimum execution time: 13_391_000 picoseconds. - Weight::from_parts(14_047_176, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 172 - .saturating_add(Weight::from_parts(2_617, 0).saturating_mul(n.into())) - // Standard Error: 172 - .saturating_add(Weight::from_parts(2_081, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:1) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `444` - // Estimated: `3675` - // Minimum execution time: 15_794_000 picoseconds. - Weight::from_parts(16_279_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 12_538_000 picoseconds. - Weight::from_parts(13_080_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1 w:1) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `3675` - // Minimum execution time: 18_991_000 picoseconds. 
- Weight::from_parts(19_812_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1 w:1) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `558` - // Estimated: `6208` - // Minimum execution time: 50_336_000 picoseconds. - Weight::from_parts(51_441_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1 w:1) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `484` - // Estimated: `3675` - // Minimum execution time: 21_195_000 picoseconds. - Weight::from_parts(21_946_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1 w:1) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `484` - // Estimated: `3675` - // Minimum execution time: 21_568_000 picoseconds. - Weight::from_parts(22_366_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 13_690_000 picoseconds. - Weight::from_parts(14_086_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 18_240_000 picoseconds. 
- Weight::from_parts(19_000_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 18_469_000 picoseconds. - Weight::from_parts(19_040_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `3675` - // Minimum execution time: 14_633_000 picoseconds. - Weight::from_parts(15_296_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `439` - // Estimated: `3675` - // Minimum execution time: 14_751_000 picoseconds. - Weight::from_parts(15_312_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3675` - // Minimum execution time: 16_930_000 picoseconds. - Weight::from_parts(17_653_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs deleted file mode 100644 index 5c5a31eb348628cb38c1f58a2edf9c2460f6e20c..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightInfo<T> { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `163 + b * (79 ±0)` - // Estimated: `1154 + b * (2555 ±0)` - // Minimum execution time: 15_408_000 picoseconds. - Weight::from_parts(13_068_592, 0) - .saturating_add(Weight::from_parts(0, 1154)) - // Standard Error: 7_395 - .saturating_add(Weight::from_parts(3_219_916, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. 
- fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `756 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 49_692_000 picoseconds. - Weight::from_parts(51_768_986, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 18_404 - .saturating_add(Weight::from_parts(55_676, 0).saturating_mul(b.into())) - // Standard Error: 3_488 - .saturating_add(Weight::from_parts(184_343, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `119 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 16_486_000 picoseconds. - Weight::from_parts(16_646_017, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 3_230 - .saturating_add(Weight::from_parts(148_941, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_806_000 picoseconds. - Weight::from_parts(8_002_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_937_000 picoseconds. 
- Weight::from_parts(8_161_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. - fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `736 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 42_805_000 picoseconds. - Weight::from_parts(45_979_502, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_336 - .saturating_add(Weight::from_parts(221_049, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[4, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. 
- Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 46_989_000 picoseconds. - Weight::from_parts(48_151_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. - fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 17_547_000 picoseconds. - Weight::from_parts(17_854_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 370_637 - .saturating_add(Weight::from_parts(15_798_857, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs deleted file mode 100644 index 792b7d18b672dfc1adad680b7c7a10598598645c..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
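All of the weight files deleted in this change share the same shape (the `pallet_message_queue` header continues directly below): a zero-sized `WeightInfo<T>` struct, generic over the runtime only so that each method can reach `T::DbWeight`. A minimal, dependency-free mock of that pattern follows; the trait names and constant values here are made up for illustration, only the 8_002_000 ps base for `set_desired_candidates` is taken from the file above.

```rust
use core::marker::PhantomData;

// Stand-in for frame_support's `RuntimeDbWeight` (same field names, illustrative values).
pub struct DbWeightConsts {
    pub read: u64,
    pub write: u64,
}

// Stand-in for the `frame_system::Config` bound used by the real files.
pub trait MockConfig {
    const DB_WEIGHT: DbWeightConsts;
}

// Stand-in for `pallet_collator_selection::WeightInfo`.
pub trait CollatorSelectionWeights {
    fn set_desired_candidates() -> u64;
}

// Zero-sized carrier: generic only so the impl can read T's DbWeight constants.
pub struct WeightInfo<T>(PhantomData<T>);

impl<T: MockConfig> CollatorSelectionWeights for WeightInfo<T> {
    fn set_desired_candidates() -> u64 {
        // Base ref-time measured above (8_002_000 ps) plus one storage write.
        8_002_000 + T::DB_WEIGHT.write
    }
}

// The runtime supplies the concrete DbWeight; a pallet's Config would then point
// its `type WeightInfo` at `WeightInfo<Runtime>` to pick these numbers up.
pub struct Runtime;
impl MockConfig for Runtime {
    const DB_WEIGHT: DbWeightConsts = DbWeightConsts { read: 25_000_000, write: 100_000_000 };
}

fn main() {
    println!("{} ps", <WeightInfo<Runtime> as CollatorSelectionWeights>::set_desired_candidates());
}
```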
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// statemint-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/statemint/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 18_976_000 picoseconds. - Weight::from_parts(18_976_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 12_686_000 picoseconds. - Weight::from_parts(12_686_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 4_951_000 picoseconds. 
- Weight::from_parts(4_951_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_023_000 picoseconds. - Weight::from_parts(6_023_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_901_000 picoseconds. - Weight::from_parts(6_901_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 58_503_000 picoseconds. - Weight::from_parts(58_503_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 9_318_000 picoseconds. - Weight::from_parts(9_318_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 52_228_000 picoseconds. - Weight::from_parts(52_228_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 59_617_000 picoseconds. 
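Every function here returns a two-part weight, `Weight::from_parts(ref_time, proof_size)`; the `execute_overweight_page_removed` body continues directly below. The sketch below uses a tiny stand-in type to make that bookkeeping visible, contrasting `reap_page` (which walks a near-64 KiB page, proof size 72_567) with `service_page_item` (no storage, proof size 0). The 5 MiB proof budget is an assumed placeholder, not a value taken from this runtime.

```rust
// Minimal stand-in for `sp_weights::Weight`, for illustration only.
#[derive(Clone, Copy)]
struct W { ref_time_ps: u64, proof_size_bytes: u64 }

impl W {
    const fn from_parts(ref_time_ps: u64, proof_size_bytes: u64) -> Self {
        Self { ref_time_ps, proof_size_bytes }
    }
    fn saturating_add(self, other: Self) -> Self {
        Self {
            ref_time_ps: self.ref_time_ps.saturating_add(other.ref_time_ps),
            proof_size_bytes: self.proof_size_bytes.saturating_add(other.proof_size_bytes),
        }
    }
}

// Assumed PoV allowance; the real limit comes from the runtime / relay configuration.
const ASSUMED_POV_BUDGET_BYTES: u64 = 5 * 1024 * 1024;

fn main() {
    // Numbers copied from the deleted file.
    let reap_page = W::from_parts(52_228_000, 72_567);
    let service_item = W::from_parts(58_503_000, 0);

    let both = reap_page.saturating_add(service_item);
    println!("combined: {} ps, {} of {} proof bytes",
        both.ref_time_ps, both.proof_size_bytes, ASSUMED_POV_BUDGET_BYTES);
    assert!(both.proof_size_bytes <= ASSUMED_POV_BUDGET_BYTES);
}
```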
- Weight::from_parts(59_617_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 69_681_000 picoseconds. - Weight::from_parts(69_681_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs deleted file mode 100644 index d2e0f0ec7f0b96aaffa66c0eb3c04f6b907431d8..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_multisig` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_multisig`. -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_714_000 picoseconds. 
- Weight::from_parts(14_440_231, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(598, 0).saturating_mul(z.into())) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `262 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 44_768_000 picoseconds. - Weight::from_parts(33_662_218, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_633 - .saturating_add(Weight::from_parts(128_927, 0).saturating_mul(s.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_543, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 29_745_000 picoseconds. - Weight::from_parts(20_559_891, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 914 - .saturating_add(Weight::from_parts(103_601, 0).saturating_mul(s.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_504, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `385 + s * (33 ±0)` - // Estimated: `6811` - // Minimum execution time: 51_506_000 picoseconds. - Weight::from_parts(36_510_777, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 2_183 - .saturating_add(Weight::from_parts(183_764, 0).saturating_mul(s.into())) - // Standard Error: 21 - .saturating_add(Weight::from_parts(1_653, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 31_072_000 picoseconds. 
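For the multisig calls, `s` is the number of signatories and `z` the encoded call length in bytes, and both enter the fitted formula linearly; the `approve_as_multi_create` body continues directly below. A worked evaluation of `as_multi_create(s, z)` at the top of its benchmarked ranges, with placeholder DB costs (not the runtime's real `DbWeight`):

```rust
// Illustrative re-derivation of the ref-time half of `as_multi_create(s, z)` above.
const ASSUMED_DB_READ_PS: u64 = 25_000_000;
const ASSUMED_DB_WRITE_PS: u64 = 100_000_000;

fn as_multi_create_ref_time_ps(s: u64, z: u64) -> u64 {
    33_662_218                // measured base
        + 128_927 * s         // per-signatory slope
        + 1_543 * z           // per-byte-of-call slope
        + ASSUMED_DB_READ_PS  // `Multisig::Multisigs` read
        + ASSUMED_DB_WRITE_PS // `Multisig::Multisigs` write
}

fn main() {
    // Worst case of the benchmarked ranges: s = 100 signatories, z = 10_000-byte call.
    let (s, z) = (100u64, 10_000u64);
    println!("signatory term: {} ps", 128_927 * s);
    println!("call-size term: {} ps", 1_543 * z);
    println!("total:          {} ps", as_multi_create_ref_time_ps(s, z));
}
```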
- Weight::from_parts(32_408_621, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 913 - .saturating_add(Weight::from_parts(121_410, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 18_301_000 picoseconds. - Weight::from_parts(18_223_547, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 747 - .saturating_add(Weight::from_parts(114_584, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn cancel_as_multi(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + s * (1 ±0)` - // Estimated: `6811` - // Minimum execution time: 32_107_000 picoseconds. - Weight::from_parts(33_674_827, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_220 - .saturating_add(Weight::from_parts(122_011, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs deleted file mode 100644 index 4becc569514c1b571048033264ed3b6673269944..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_nft_fractionalization` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_nft_fractionalization -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_nft_fractionalization`. -pub struct WeightInfo(PhantomData); -impl pallet_nft_fractionalization::WeightInfo for WeightInfo { - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// Storage: `NftFractionalization::NftToAsset` (r:0 w:1) - /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - fn fractionalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `462` - // Estimated: `4326` - // Minimum execution time: 178_501_000 picoseconds. 
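The `Storage:`/`Proof:` annotations above are more than documentation: their read/write counts are exactly what the `T::DbWeight::get().reads(..)`/`writes(..)` calls add up to in the `fractionalize` body that continues directly below. A small, hand-copied cross-check (the tuple list is transcribed from the doc comments above):

```rust
// Sum the (reads, writes) column of the storage annotations for `fractionalize`.
fn totals(annotations: &[(&str, u64, u64)]) -> (u64, u64) {
    let (mut reads, mut writes) = (0, 0);
    for (_, r, w) in annotations {
        reads += r;
        writes += w;
    }
    (reads, writes)
}

fn main() {
    let fractionalize = [
        ("Nfts::Item", 1, 0),
        ("Balances::Holds", 1, 1),
        ("Nfts::Attribute", 1, 1),
        ("Nfts::Collection", 1, 1),
        ("Assets::Asset", 1, 1),
        ("Assets::Account", 1, 1),
        ("System::Account", 1, 1),
        ("Assets::Metadata", 1, 1),
        ("NftFractionalization::NftToAsset", 0, 1),
    ];
    // Matches the `reads(8)` / `writes(8)` in the function body below.
    assert_eq!(totals(&fractionalize), (8, 8));
    println!("fractionalize: 8 reads, 8 writes");
}
```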
- Weight::from_parts(180_912_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) - } - /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) - /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn unify() -> Weight { - // Proof Size summary in bytes: - // Measured: `1275` - // Estimated: `4326` - // Minimum execution time: 125_253_000 picoseconds. - Weight::from_parts(128_238_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(10)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs deleted file mode 100644 index 7a51830799ad60424283ef2f2e2021b857795a08..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs +++ /dev/null @@ -1,772 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_nfts` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_nfts -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_nfts`. -pub struct WeightInfo(PhantomData); -impl pallet_nfts::WeightInfo for WeightInfo { - /// Storage: `Nfts::NextCollectionId` (r:1 w:1) - /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `179` - // Estimated: `3549` - // Minimum execution time: 39_124_000 picoseconds. - Weight::from_parts(39_975_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::NextCollectionId` (r:1 w:1) - /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3549` - // Minimum execution time: 23_444_000 picoseconds. 
- Weight::from_parts(23_857_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1001 w:1000) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// The range of component `m` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32204 + a * (366 ±0)` - // Estimated: `2523990 + a * (2954 ±0)` - // Minimum execution time: 1_224_365_000 picoseconds. - Weight::from_parts(1_281_136_346, 0) - .saturating_add(Weight::from_parts(0, 2523990)) - // Standard Error: 10_484 - .saturating_add(Weight::from_parts(6_910_740, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(1004)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1005)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) - } - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `455` - // Estimated: `4326` - // Minimum execution time: 50_489_000 picoseconds. 
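`destroy` above is the one call in this file where the components really matter: `m` and `c` turned out flat (hence the leading underscores) and only the attribute count `a` drives the cost, both in ref-time and in proof size; the `mint` body continues directly below. A rough evaluation with placeholder DB costs (the per-`a` slope, base, and read/write counts are the values from the file):

```rust
// Illustrative evaluation of `destroy(_m, _c, a)` above.
const ASSUMED_DB_READ_PS: u64 = 25_000_000;
const ASSUMED_DB_WRITE_PS: u64 = 100_000_000;

fn destroy_ref_time_ps(a: u64) -> u64 {
    1_281_136_346                         // measured base (the 1000-entry maps are already in here)
        + 6_910_740 * a                   // per-attribute slope
        + ASSUMED_DB_READ_PS * (1004 + a) // fixed reads plus one `Nfts::Attribute` read per `a`
        + ASSUMED_DB_WRITE_PS * (1005 + a)
}

fn destroy_proof_size_bytes(a: u64) -> u64 {
    2_523_990 + 2_954 * a // `Estimated` base plus one attribute proof entry per `a`
}

fn main() {
    // Worst case of the benchmarked range `a = 1000`.
    println!("ref_time ~ {} ps", destroy_ref_time_ps(1000));
    println!("proof    ~ {} bytes", destroy_proof_size_bytes(1000));
}
```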
- Weight::from_parts(51_045_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn force_mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `455` - // Estimated: `4326` - // Minimum execution time: 49_146_000 picoseconds. - Weight::from_parts(49_756_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `564` - // Estimated: `4326` - // Minimum execution time: 56_059_000 picoseconds. 
- Weight::from_parts(57_162_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:2) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `593` - // Estimated: `4326` - // Minimum execution time: 42_406_000 picoseconds. - Weight::from_parts(43_187_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:5000 w:5000) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn redeposit(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `763 + i * (108 ±0)` - // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 16_960_000 picoseconds. - Weight::from_parts(17_167_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - // Standard Error: 24_110 - .saturating_add(Weight::from_parts(18_046_970, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn lock_item_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `3534` - // Minimum execution time: 21_023_000 picoseconds. 
- Weight::from_parts(21_409_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn unlock_item_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `3534` - // Minimum execution time: 20_706_000 picoseconds. - Weight::from_parts(21_030_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn lock_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `3549` - // Minimum execution time: 17_449_000 picoseconds. - Weight::from_parts(17_804_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:2) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3549` - // Minimum execution time: 22_958_000 picoseconds. - Weight::from_parts(23_499_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `369` - // Estimated: `6078` - // Minimum execution time: 40_105_000 picoseconds. - Weight::from_parts(40_800_000, 0) - .saturating_add(Weight::from_parts(0, 6078)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:2) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_collection_owner() -> Weight { - // Proof Size summary in bytes: - // Measured: `311` - // Estimated: `3549` - // Minimum execution time: 17_832_000 picoseconds. 
- Weight::from_parts(18_297_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn force_collection_config() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `3549` - // Minimum execution time: 15_027_000 picoseconds. - Weight::from_parts(15_370_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn lock_item_properties() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `3534` - // Minimum execution time: 19_912_000 picoseconds. - Weight::from_parts(20_258_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - fn set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `539` - // Estimated: `3944` - // Minimum execution time: 50_138_000 picoseconds. - Weight::from_parts(50_971_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - fn force_set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `344` - // Estimated: `3944` - // Minimum execution time: 26_385_000 picoseconds. 
- Weight::from_parts(27_086_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - fn clear_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `983` - // Estimated: `3944` - // Minimum execution time: 45_687_000 picoseconds. - Weight::from_parts(47_107_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - fn approve_item_attributes() -> Weight { - // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `4466` - // Minimum execution time: 18_065_000 picoseconds. - Weight::from_parts(18_371_000, 0) - .saturating_add(Weight::from_parts(0, 4466)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1001 w:1000) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`. - fn cancel_item_attributes_approval(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `760 + n * (398 ±0)` - // Estimated: `4466 + n * (2954 ±0)` - // Minimum execution time: 26_680_000 picoseconds. 
- Weight::from_parts(27_010_000, 0) - .saturating_add(Weight::from_parts(0, 4466)) - // Standard Error: 6_351 - .saturating_add(Weight::from_parts(6_584_290, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - fn set_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `539` - // Estimated: `3812` - // Minimum execution time: 42_038_000 picoseconds. - Weight::from_parts(42_758_000, 0) - .saturating_add(Weight::from_parts(0, 3812)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `849` - // Estimated: `3812` - // Minimum execution time: 40_220_000 picoseconds. - Weight::from_parts(41_026_000, 0) - .saturating_add(Weight::from_parts(0, 3812)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - fn set_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `3759` - // Minimum execution time: 38_135_000 picoseconds. 
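A quick way to read numbers of this size is against the block budget; the `set_collection_metadata` body continues directly below. The figures in this sketch are assumptions: 0.5 s of ref-time per block and a 75% normal-dispatch ratio are common defaults for these parachain runtimes, but the real limits live in the runtime's `BlockWeights`.

```rust
// Back-of-the-envelope capacity check; both constants are assumed placeholders.
const ASSUMED_BLOCK_REF_TIME_PS: u64 = 500_000_000_000; // 0.5 s of compute
const ASSUMED_NORMAL_DISPATCH_RATIO_PERCENT: u64 = 75;

fn main() {
    // `set_attribute` above costs roughly 50_971_000 ps plus 5 reads / 2 writes;
    // ignoring the DB part, this is how many such calls fit in one block.
    let per_call_ps: u64 = 50_971_000;
    let budget = ASSUMED_BLOCK_REF_TIME_PS * ASSUMED_NORMAL_DISPATCH_RATIO_PERCENT / 100;
    println!("~{} set_attribute calls per block (compute only)", budget / per_call_ps);
}
```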
- Weight::from_parts(38_561_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - fn clear_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `716` - // Estimated: `3759` - // Minimum execution time: 37_583_000 picoseconds. - Weight::from_parts(38_215_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `410` - // Estimated: `4326` - // Minimum execution time: 21_405_000 picoseconds. - Weight::from_parts(21_803_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `418` - // Estimated: `4326` - // Minimum execution time: 18_713_000 picoseconds. - Weight::from_parts(19_185_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn clear_all_transfer_approvals() -> Weight { - // Proof Size summary in bytes: - // Measured: `418` - // Estimated: `4326` - // Minimum execution time: 17_803_000 picoseconds. - Weight::from_parts(18_270_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_accept_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3517` - // Minimum execution time: 15_982_000 picoseconds. 
- Weight::from_parts(16_700_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - fn set_collection_max_supply() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `3549` - // Minimum execution time: 19_501_000 picoseconds. - Weight::from_parts(19_785_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn update_mint_settings() -> Weight { - // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `3538` - // Minimum execution time: 18_914_000 picoseconds. - Weight::from_parts(19_292_000, 0) - .saturating_add(Weight::from_parts(0, 3538)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn set_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `518` - // Estimated: `4326` - // Minimum execution time: 24_625_000 picoseconds. 
- Weight::from_parts(25_257_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:2) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn buy_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `705` - // Estimated: `4326` - // Minimum execution time: 50_833_000 picoseconds. - Weight::from_parts(52_161_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// The range of component `n` is `[0, 10]`. - fn pay_tips(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_220_000 picoseconds. - Weight::from_parts(3_476_001, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7_084 - .saturating_add(Weight::from_parts(3_844_820, 0).saturating_mul(n.into())) - } - /// Storage: `Nfts::Item` (r:2 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn create_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `494` - // Estimated: `7662` - // Minimum execution time: 21_983_000 picoseconds. - Weight::from_parts(22_746_000, 0) - .saturating_add(Weight::from_parts(0, 7662)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn cancel_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `513` - // Estimated: `4326` - // Minimum execution time: 20_875_000 picoseconds. 
- Weight::from_parts(21_465_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:2 w:2) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:2 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:4) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn claim_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `834` - // Estimated: `7662` - // Minimum execution time: 84_771_000 picoseconds. - Weight::from_parts(86_078_000, 0) - .saturating_add(Weight::from_parts(0, 7662)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(10)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:10 w:10) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 10]`. - fn mint_pre_signed(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `558` - // Estimated: `6078 + n * (2954 ±0)` - // Minimum execution time: 143_265_000 picoseconds. 
- Weight::from_parts(150_978_773, 0) - .saturating_add(Weight::from_parts(0, 6078)) - // Standard Error: 49_443 - .saturating_add(Weight::from_parts(31_888_255, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(6)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:10 w:10) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 10]`. - fn set_attributes_pre_signed(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `588` - // Estimated: `4466 + n * (2954 ±0)` - // Minimum execution time: 83_754_000 picoseconds. - Weight::from_parts(96_685_026, 0) - .saturating_add(Weight::from_parts(0, 4466)) - // Standard Error: 72_592 - .saturating_add(Weight::from_parts(30_914_858, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs deleted file mode 100644 index 0cdffc653bcd6c1d04c38cf214ab1aaa39cd6787..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_proxy` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_proxy -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_proxy`. -pub struct WeightInfo(PhantomData); -impl pallet_proxy::WeightInfo for WeightInfo { - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 16_417_000 picoseconds. - Weight::from_parts(17_283_443, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_409 - .saturating_add(Weight::from_parts(32_123, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn proxy_announced(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 37_572_000 picoseconds. - Weight::from_parts(37_045_756, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_896 - .saturating_add(Weight::from_parts(139_561, 0).saturating_mul(a.into())) - // Standard Error: 2_993 - .saturating_add(Weight::from_parts(73_270, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 24_066_000 picoseconds. 
- Weight::from_parts(24_711_403, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_626 - .saturating_add(Weight::from_parts(128_391, 0).saturating_mul(a.into())) - // Standard Error: 1_680 - .saturating_add(Weight::from_parts(23_124, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 24_162_000 picoseconds. - Weight::from_parts(23_928_058, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_072 - .saturating_add(Weight::from_parts(152_299, 0).saturating_mul(a.into())) - // Standard Error: 2_141 - .saturating_add(Weight::from_parts(39_775, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn announce(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `386 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 33_858_000 picoseconds. - Weight::from_parts(33_568_059, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_816 - .saturating_add(Weight::from_parts(134_400, 0).saturating_mul(a.into())) - // Standard Error: 1_876 - .saturating_add(Weight::from_parts(57_028, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn add_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 24_947_000 picoseconds. - Weight::from_parts(26_235_199, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_363 - .saturating_add(Weight::from_parts(41_435, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. 
- fn remove_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 25_186_000 picoseconds. - Weight::from_parts(26_823_133, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_259 - .saturating_add(Weight::from_parts(34_224, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn remove_proxies(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 22_156_000 picoseconds. - Weight::from_parts(23_304_060, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_738 - .saturating_add(Weight::from_parts(39_612, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn create_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `4706` - // Minimum execution time: 26_914_000 picoseconds. - Weight::from_parts(28_009_062, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_978 - .saturating_add(Weight::from_parts(12_255, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 30]`. - fn kill_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 23_281_000 picoseconds. - Weight::from_parts(24_392_989, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_943 - .saturating_add(Weight::from_parts(30_287, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs deleted file mode 100644 index e27289a49e992246c20c9b169acb8cd92c61b5e0..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Autogenerated weights for `pallet_timestamp` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_timestamp`. -pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: `Timestamp::Now` (r:1 w:1) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Aura::CurrentSlot` (r:1 w:0) - /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set() -> Weight { - // Proof Size summary in bytes: - // Measured: `86` - // Estimated: `1493` - // Minimum execution time: 9_313_000 picoseconds. - Weight::from_parts(9_775_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn on_finalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `57` - // Estimated: `0` - // Minimum execution time: 3_322_000 picoseconds. - Weight::from_parts(3_577_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs deleted file mode 100644 index 69d3e773afb347e22e93d1537532fda65f606462..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs +++ /dev/null @@ -1,466 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_uniques` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_uniques -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_uniques`. -pub struct WeightInfo(PhantomData); -impl pallet_uniques::WeightInfo for WeightInfo { - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3643` - // Minimum execution time: 28_845_000 picoseconds. - Weight::from_parts(29_675_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3643` - // Minimum execution time: 13_492_000 picoseconds. - Weight::from_parts(14_049_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1001 w:1000) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1000 w:1000) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1000) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`. 
- /// The range of component `m` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `257 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` - // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` - // Minimum execution time: 2_920_070_000 picoseconds. - Weight::from_parts(2_983_862_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - // Standard Error: 36_415 - .saturating_add(Weight::from_parts(7_589_778, 0).saturating_mul(n.into())) - // Standard Error: 36_415 - .saturating_add(Weight::from_parts(479_496, 0).saturating_mul(m.into())) - // Standard Error: 36_415 - .saturating_add(Weight::from_parts(562_056, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:0) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 35_329_000 picoseconds. - Weight::from_parts(36_019_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 36_474_000 picoseconds. 
- Weight::from_parts(37_190_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:2) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 26_786_000 picoseconds. - Weight::from_parts(27_400_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:5000 w:5000) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn redeposit(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `738 + i * (76 ±0)` - // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 14_546_000 picoseconds. - Weight::from_parts(14_831_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - // Standard Error: 24_362 - .saturating_add(Weight::from_parts(17_972_938, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 2597).saturating_mul(i.into())) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 18_919_000 picoseconds. - Weight::from_parts(19_547_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 18_643_000 picoseconds. 
- Weight::from_parts(19_000_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn freeze_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_530_000 picoseconds. - Weight::from_parts(14_165_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn thaw_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_523_000 picoseconds. - Weight::from_parts(14_055_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:2) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `356` - // Estimated: `3643` - // Minimum execution time: 22_131_000 picoseconds. - Weight::from_parts(22_628_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_841_000 picoseconds. - Weight::from_parts(14_408_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_item_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 16_954_000 picoseconds. 
- Weight::from_parts(17_482_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - fn set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `3652` - // Minimum execution time: 38_493_000 picoseconds. - Weight::from_parts(39_513_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - fn clear_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `756` - // Estimated: `3652` - // Minimum execution time: 37_918_000 picoseconds. - Weight::from_parts(38_666_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - fn set_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `348` - // Estimated: `3652` - // Minimum execution time: 29_810_000 picoseconds. - Weight::from_parts(30_363_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `3652` - // Minimum execution time: 30_877_000 picoseconds. 
- Weight::from_parts(31_430_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - fn set_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 30_478_000 picoseconds. - Weight::from_parts(31_065_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - fn clear_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `473` - // Estimated: `3643` - // Minimum execution time: 29_582_000 picoseconds. - Weight::from_parts(30_160_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 19_328_000 picoseconds. - Weight::from_parts(19_866_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `3643` - // Minimum execution time: 19_131_000 picoseconds. - Weight::from_parts(19_569_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_accept_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3517` - // Minimum execution time: 15_212_000 picoseconds. 
- Weight::from_parts(15_691_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:1) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn set_collection_max_supply() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 16_290_000 picoseconds. - Weight::from_parts(16_654_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:0) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn set_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `259` - // Estimated: `3587` - // Minimum execution time: 16_095_000 picoseconds. - Weight::from_parts(16_555_000, 0) - .saturating_add(Weight::from_parts(0, 3587)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:1 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:2) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn buy_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `540` - // Estimated: `3643` - // Minimum execution time: 35_506_000 picoseconds. - Weight::from_parts(36_305_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs deleted file mode 100644 index e6c3e1295ef2a5ab8a0770028d99647a994665b7..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_103_000 picoseconds. - Weight::from_parts(7_226_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_732 - .saturating_add(Weight::from_parts(6_560_347, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_208_000 picoseconds. - Weight::from_parts(5_480_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_070_000 picoseconds. - Weight::from_parts(1_321_270, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_454 - .saturating_add(Weight::from_parts(6_864_640, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_255_000 picoseconds. - Weight::from_parts(9_683_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_852_000 picoseconds. - Weight::from_parts(7_007_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_745 - .saturating_add(Weight::from_parts(6_562_902, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs deleted file mode 100644 index 1e4a723e10f0bdceb6b16573981bd31bc8554054..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 25_043_000 picoseconds. - Weight::from_parts(25_670_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 18_893_000 picoseconds. 
- Weight::from_parts(19_261_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 14_107_000 picoseconds. - Weight::from_parts(14_500_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_175_000 picoseconds. - Weight::from_parts(7_493_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_default_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_162_000 picoseconds. 
- Weight::from_parts(2_278_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_subscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 29_144_000 picoseconds. - Weight::from_parts(30_134_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_unsubscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `363` - // Estimated: `3828` - // Minimum execution time: 31_522_000 picoseconds. 
- Weight::from_parts(32_679_000, 0) - .saturating_add(Weight::from_parts(0, 3828)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) - /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_suspension() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_338_000 picoseconds. - Weight::from_parts(2_494_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_supported_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 17_315_000 picoseconds. - Weight::from_parts(17_787_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notifiers() -> Weight { - // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 17_273_000 picoseconds. - Weight::from_parts(17_712_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn already_notified_target() -> Weight { - // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_395_000 picoseconds. - Weight::from_parts(19_095_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn notify_current_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `212` - // Estimated: `6152` - // Minimum execution time: 27_343_000 picoseconds. 
- Weight::from_parts(28_068_000, 0) - .saturating_add(Weight::from_parts(0, 6152)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn notify_target_migration_fail() -> Weight { - // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_156_000 picoseconds. - Weight::from_parts(9_552_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notify_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 17_454_000 picoseconds. - Weight::from_parts(17_831_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn migrate_and_notify_old_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_299_000 picoseconds. - Weight::from_parts(35_156_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn new_query() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 4_508_000 picoseconds. 
- Weight::from_parts(4_702_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn take_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `7740` - // Estimated: `11205` - // Minimum execution time: 26_557_000 picoseconds. - Weight::from_parts(26_980_000, 0) - .saturating_add(Weight::from_parts(0, 11205)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs deleted file mode 100644 index 7d49b56e461a0a3919e2130dbb6d84f6888349ad..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs +++ /dev/null @@ -1,634 +0,0 @@ -// This file is part of Cumulus. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Tests for the Statemine (Kusama Assets Hub) chain. 
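Every weight function in the removed `pallet_xcm` weights module above follows the same composition: a benchmarked base `ref_time`, the estimated proof size, and the runtime's `T::DbWeight` charged once per storage read/write listed in the doc comments. The sketch below is illustration only (it is not part of the diff): it uses the `send()` numbers with the stock `RocksDbWeight` constants, whereas the generated code stays generic over `T: frame_system::Config`, and the helper name `send_weight_sketch` is made up.

```rust
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

/// Illustration only: how the generated `send()` weight above is assembled.
/// `from_parts(25_670_000, 0) + from_parts(0, 3610)` collapses into a single
/// `from_parts(25_670_000, 3610)`; the 6 reads and 2 writes match the storage
/// accesses listed in its doc comments.
fn send_weight_sketch() -> Weight {
    Weight::from_parts(25_670_000, 3610)
        .saturating_add(RocksDbWeight::get().reads(6))
        .saturating_add(RocksDbWeight::get().writes(2))
}
```

In the runtime such a module is typically wired in as `type WeightInfo = weights::pallet_xcm::WeightInfo<Runtime>;` inside the `pallet_xcm::Config` implementation.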
- -use asset_hub_kusama_runtime::xcm_config::{ - AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, TrustBackedAssetsPalletLocation, -}; -pub use asset_hub_kusama_runtime::{ - xcm_config::{CheckingAccount, ForeignCreatorsSovereignAccountOf, XcmConfig}, - AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, - ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, -}; -use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; -use codec::{Decode, Encode}; -use cumulus_primitives_utility::ChargeWeightInFungibles; -use frame_support::{ - assert_noop, assert_ok, - traits::fungibles::InspectEnumerable, - weights::{Weight, WeightToFee as WeightToFeeT}, -}; -use parachains_common::{ - kusama::fee::WeightToFee, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, -}; -use sp_runtime::traits::MaybeEquivalence; -use xcm::latest::prelude::*; -use xcm_executor::traits::{Identity, JustTry, WeightTrader}; - -const ALICE: [u8; 32] = [1u8; 32]; -const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; - -type AssetIdForTrustBackedAssetsConvert = - assets_common::AssetIdForTrustBackedAssetsConvert; - -type RuntimeHelper = asset_test_utils::RuntimeHelper; - -fn collator_session_keys() -> CollatorSessionKeys { - CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - ) -} - -#[test] -fn test_asset_xcm_trader() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - let minimum_asset_balance = 3333333_u128; - let local_asset_id = 1; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - local_asset_id.into(), - AccountId::from(ALICE).into(), - true, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - local_asset_id.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - // get asset id as multilocation - let asset_multilocation = - AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(); - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 4e9 weight - let bought = Weight::from_parts(4_000_000_000u64, 0); - - // Lets calculate amount needed - let asset_amount_needed = - AssetFeeAsExistentialDepositMultiplierFeeCharger::charge_weight_in_fungibles( - local_asset_id, - bought, - ) - .expect("failed to compute"); - - // Lets pay with: asset_amount_needed + asset_amount_extra - let asset_amount_extra = 100_u128; - let asset: MultiAsset = - (asset_multilocation, asset_amount_needed + asset_amount_extra).into(); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Lets buy_weight and make sure buy_weight does not return an error - let unused_assets = trader.buy_weight(bought, asset.into(), &ctx).expect("Expected Ok"); - // Check whether a correct amount of unused assets is returned - assert_ok!( - 
unused_assets.ensure_contains(&(asset_multilocation, asset_amount_extra).into()) - ); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has received the amount - assert_eq!( - Assets::balance(local_asset_id, AccountId::from(ALICE)), - minimum_asset_balance + asset_amount_needed - ); - - // We also need to ensure the total supply increased - assert_eq!( - Assets::total_supply(local_asset_id), - minimum_asset_balance + asset_amount_needed - ); - }); -} - -#[test] -fn test_asset_xcm_trader_with_refund() { - ExtBuilder::<Runtime>::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - 1.into(), - AccountId::from(ALICE).into(), - ExistentialDeposit::get() - )); - - let mut trader = <XcmConfig as xcm_executor::Config>::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 4e9 weight - let bought = Weight::from_parts(4_000_000_000u64, 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - // lets calculate amount needed - let amount_bought = WeightToFee::weight_to_fee(&bought); - - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - - // Make sure buy_weight does not return an error - assert_ok!(trader.buy_weight(bought, asset.clone().into(), &ctx)); - - // Make sure again buy_weight does return an error - // This assert relies on the fact, that we use `TakeFirstAssetTrader` in `WeightTrader` - // tuple chain, which cannot be called twice - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // We actually use half of the weight - let weight_used = bought / 2; - - // Make sure refund works.
- let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); - - assert_eq!( - trader.refund_weight(bought - weight_used, &ctx), - Some((asset_multilocation, amount_refunded).into()) - ); - - // Drop trader - drop(trader); - - // We only should have paid for half of the bought weight - let fees_paid = WeightToFee::weight_to_fee(&weight_used); - - assert_eq!( - Assets::balance(1, AccountId::from(ALICE)), - ExistentialDeposit::get() + fees_paid - ); - - // We also need to ensure the total supply increased - assert_eq!(Assets::total_supply(1), ExistentialDeposit::get() + fees_paid); - }); -} - -#[test] -fn test_asset_xcm_trader_refund_not_possible_since_amount_less_than_ed() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy small amount - let bought = Weight::from_parts(500_000_000u64, 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let amount_bought = WeightToFee::weight_to_fee(&bought); - - assert!( - amount_bought < ExistentialDeposit::get(), - "we are testing what happens when the amount does not exceed ED" - ); - - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - - // Buy weight should return an error - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // not credited since the ED is higher than this value - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), 0); - - // We also need to ensure the total supply did not increase - assert_eq!(Assets::total_supply(1), 0); - }); -} - -#[test] -fn test_that_buying_ed_refund_does_not_refund() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are gonna buy ED - let bought = Weight::from_parts(ExistentialDeposit::get().try_into().unwrap(), 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let amount_bought = WeightToFee::weight_to_fee(&bought); - - assert!( - amount_bought < ExistentialDeposit::get(), - "we are testing what happens when the amount does not 
exceed ED" - ); - - // We know we will have to buy at least ED, so lets make sure first it will - // fail with a payment of less than ED - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // Now lets buy ED at least - let asset: MultiAsset = (asset_multilocation, ExistentialDeposit::get()).into(); - - // Buy weight should work - assert_ok!(trader.buy_weight(bought, asset.into(), &ctx)); - - // Should return None. We have a specific check making sure we dont go below ED for - // drop payment - assert_eq!(trader.refund_weight(bought, &ctx), None); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has received the amount - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), ExistentialDeposit::get()); - - // We also need to ensure the total supply increased - assert_eq!(Assets::total_supply(1), ExistentialDeposit::get()); - }); -} - -#[test] -fn test_asset_xcm_trader_not_possible_for_non_sufficient_assets() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // Create a non-sufficient asset with specific existential deposit - let minimum_asset_balance = 1_000_000_u128; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - false, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - 1.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 4e9 weight - let bought = Weight::from_parts(4_000_000_000u64, 0); - - // lets calculate amount needed - let asset_amount_needed = WeightToFee::weight_to_fee(&bought); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let asset: MultiAsset = (asset_multilocation, asset_amount_needed).into(); - - // Make sure again buy_weight does return an error - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has NOT received the amount - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), minimum_asset_balance); - - // We also need to ensure the total supply NOT increased - assert_eq!(Assets::total_supply(1), minimum_asset_balance); - }); -} - -#[test] -fn test_assets_balances_api_works() { - use assets_common::runtime_api::runtime_decl_for_fungibles_api::FungiblesApi; - - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - let local_asset_id = 1; - let foreign_asset_id_multilocation = - MultiLocation { parents: 1, interior: X2(Parachain(1234), GeneralIndex(12345)) }; - - // check before - assert_eq!(Assets::balance(local_asset_id, AccountId::from(ALICE)), 0); - assert_eq!( - 
ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), - 0 - ); - assert_eq!(Balances::free_balance(AccountId::from(ALICE)), 0); - assert!(Runtime::query_account_balances(AccountId::from(ALICE)) - .unwrap() - .try_as::() - .unwrap() - .is_none()); - - // Drip some balance - use frame_support::traits::fungible::Mutate; - let some_currency = ExistentialDeposit::get(); - Balances::mint_into(&AccountId::from(ALICE), some_currency).unwrap(); - - // We need root origin to create a sufficient asset - let minimum_asset_balance = 3333333_u128; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - local_asset_id.into(), - AccountId::from(ALICE).into(), - true, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - local_asset_id.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - // create foreign asset - let foreign_asset_minimum_asset_balance = 3333333_u128; - assert_ok!(ForeignAssets::force_create( - RuntimeHelper::root_origin(), - foreign_asset_id_multilocation, - AccountId::from(SOME_ASSET_ADMIN).into(), - false, - foreign_asset_minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(ForeignAssets::mint( - RuntimeHelper::origin_of(AccountId::from(SOME_ASSET_ADMIN)), - foreign_asset_id_multilocation, - AccountId::from(ALICE).into(), - 6 * foreign_asset_minimum_asset_balance - )); - - // check after - assert_eq!( - Assets::balance(local_asset_id, AccountId::from(ALICE)), - minimum_asset_balance - ); - assert_eq!( - ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), - 6 * minimum_asset_balance - ); - assert_eq!(Balances::free_balance(AccountId::from(ALICE)), some_currency); - - let result: MultiAssets = Runtime::query_account_balances(AccountId::from(ALICE)) - .unwrap() - .try_into() - .unwrap(); - assert_eq!(result.len(), 3); - - // check currency - assert!(result.inner().iter().any(|asset| asset.eq( - &assets_common::fungible_conversion::convert_balance::( - some_currency - ) - .unwrap() - ))); - // check trusted asset - assert!(result.inner().iter().any(|asset| asset.eq(&( - AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(), - minimum_asset_balance - ) - .into()))); - // check foreign asset - assert!(result.inner().iter().any(|asset| asset.eq(&( - Identity::convert_back(&foreign_asset_id_multilocation).unwrap(), - 6 * foreign_asset_minimum_asset_balance - ) - .into()))); - }); -} - -asset_test_utils::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - 1000 -); - -asset_test_utils::include_teleports_for_foreign_assets_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - ForeignCreatorsSovereignAccountOf, - ForeignAssetsInstance, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match 
RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_local_consensus_currency_works!( - Runtime, - XcmConfig, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( - asset_transactor_transfer_with_trust_backed_assets_works, - Runtime, - XcmConfig, - TrustBackedAssetsInstance, - AssetIdForTrustBackedAssets, - AssetIdForTrustBackedAssetsConvert, - collator_session_keys(), - ExistentialDeposit::get(), - 12345, - Box::new(|| { - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( - asset_transactor_transfer_with_foreign_assets_works, - Runtime, - XcmConfig, - ForeignAssetsInstance, - MultiLocation, - JustTry, - collator_session_keys(), - ExistentialDeposit::get(), - MultiLocation { parents: 1, interior: X2(Parachain(1313), GeneralIndex(12345)) }, - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_parachain_assets_works!( - Runtime, - XcmConfig, - WeightToFee, - ForeignCreatorsSovereignAccountOf, - ForeignAssetsInstance, - MultiLocation, - JustTry, - collator_session_keys(), - ExistentialDeposit::get(), - AssetDeposit::get(), - MetadataDepositBase::get(), - MetadataDepositPerByte::get(), - Box::new(|pallet_asset_call| RuntimeCall::ForeignAssets(pallet_asset_call).encode()), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ForeignAssets(pallet_asset_event)) => Some(pallet_asset_event), - _ => None, - } - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); - }) -); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml deleted file mode 100644 index b5eff6b63afbae705927855c867f9cf325e488db..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml +++ /dev/null @@ -1,215 +0,0 @@ -[package] -name = "asset-hub-polkadot-runtime" -version = "0.9.420" -authors.workspace = true -edition.workspace = true -description = "Asset Hub Polkadot parachain runtime" -license = "Apache-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1", optional = true } -log = { version 
= "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -smallvec = "1.11.0" - -# Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = 
"../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} -sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false} - -# Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -assets-common = { path = "../common", default-features = false } - -[dev-dependencies] -hex-literal = "0.4.1" -asset-test-utils = { path = "../test-utils" } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } - -[features] -default = [ "std" ] -runtime-benchmarks = [ - "assets-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "cumulus-primitives-utility/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "hex-literal", - "pallet-asset-tx-payment/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - 
"pallet-balances/runtime-benchmarks", - "pallet-collator-selection/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-multisig/runtime-benchmarks", - "pallet-nfts/runtime-benchmarks", - "pallet-proxy/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-uniques/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "pallet-xcm-benchmarks/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "cumulus-pallet-xcmp-queue/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-asset-tx-payment/try-runtime", - "pallet-assets/try-runtime", - "pallet-aura/try-runtime", - "pallet-authorship/try-runtime", - "pallet-balances/try-runtime", - "pallet-collator-selection/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-multisig/try-runtime", - "pallet-nfts/try-runtime", - "pallet-proxy/try-runtime", - "pallet-session/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-uniques/try-runtime", - "pallet-utility/try-runtime", - "pallet-xcm/try-runtime", - "parachain-info/try-runtime", - "polkadot-runtime-common/try-runtime", - "sp-runtime/try-runtime", -] -std = [ - "assets-common/std", - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-session-benchmarking/std", - "cumulus-pallet-xcm/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-primitives-core/std", - "cumulus-primitives-utility/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "log/std", - "pallet-asset-tx-payment/std", - "pallet-assets/std", - "pallet-aura/std", - "pallet-authorship/std", - "pallet-balances/std", - "pallet-collator-selection/std", - "pallet-message-queue/std", - "pallet-multisig/std", - "pallet-nfts-runtime-api/std", - "pallet-nfts/std", - "pallet-proxy/std", - "pallet-session/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "pallet-uniques/std", - "pallet-utility/std", - "pallet-xcm-benchmarks?/std", - "pallet-xcm/std", - "parachain-info/std", - "parachains-common/std", - "polkadot-core-primitives/std", - "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", - "scale-info/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "sp-weights/std", - "substrate-wasm-builder", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] - -experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs deleted 
file mode 100644 index 57a1150bc88c0bb523be5ef21b3a88bc511eab44..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ /dev/null @@ -1,1349 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Asset Hub Polkadot Runtime -//! -//! Asset Hub Polkadot is a parachain that provides an interface to create, manage, and use assets. -//! Assets may be fungible or non-fungible. -//! -//! ## Renaming -//! -//! This chain was originally known as "Statemint". You may see references to Statemint, Statemine, -//! and Westmint throughout the codebase. These are synonymous with "Asset Hub Polkadot, Kusama, and -//! Westend", respectively. -//! -//! ## Assets -//! -//! - Fungibles: Configuration of `pallet-assets`. -//! - Non-Fungibles (NFTs): Configuration of `pallet-uniques`. -//! -//! ## Other Functionality -//! -//! ### Native Balances -//! -//! Asset Hub Polkadot uses its parent DOT token as its native asset. -//! -//! ### Governance -//! -//! As a system parachain, Asset Hub defers its governance (namely, its `Root` origin), to its -//! Relay Chain parent, Polkadot. -//! -//! ### Collator Selection -//! -//! Asset Hub uses `pallet-collator-selection`, a simple first-come-first-served registration -//! system where collators can reserve a small bond to join the block producer set. There is no -//! slashing. -//! -//! ### XCM -//! -//! Because Asset Hub is fully under the control of the Relay Chain, it is meant to be a -//! `TrustedTeleporter`. It can also serve as a reserve location to other parachains for DOT as well -//! as other local assets. - -#![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit = "256"] - -// Make the WASM binary available. 
-#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -mod weights; -pub mod xcm_config; - -use assets_common::{ - foreign_creators::ForeignCreators, matching::FromSiblingParachain, MultiLocationForAssetId, -}; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, ConvertInto, Verify}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, Perbill, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{ - construct_runtime, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - parameter_types, - traits::{ - AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, - InstanceFilter, TransformOrigin, - }, - weights::{ConstantMultiplier, Weight}, - PalletId, -}; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, EnsureSigned, -}; -use pallet_nfts::PalletFeatures; -pub use parachains_common as common; -use parachains_common::{ - impls::{AssetsToBlockAuthor, DealWithFees}, - message_queue::*, - polkadot::{consensus::*, currency::*, fee::WeightToFee}, - AccountId, AssetHubPolkadotAuraId as AuraId, AssetIdForTrustBackedAssets, Balance, BlockNumber, - Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, - NORMAL_DISPATCH_RATIO, SLOT_DURATION, -}; -use sp_runtime::RuntimeDebug; -use xcm_config::{ - DotLocation, FellowshipLocation, ForeignAssetsConvertedConcreteId, GovernanceLocation, - TrustBackedAssetsConvertedConcreteId, XcmOriginToTransactDispatchOrigin, -}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -// Polkadot imports -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::latest::BodyId; - -use crate::xcm_config::ForeignCreatorsSovereignAccountOf; -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - } -} - -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - // Note: "statemint" is the legacy name for this chain. It has been renamed to - // "asset-hub-polkadot". Many wallets/tools depend on the `spec_name`, so it remains "statemint" - // for the time being. Wallets/tools should update to treat "asset-hub-polkadot" equally. - spec_name: create_runtime_str!("statemint"), - impl_name: create_runtime_str!("statemint"), - authoring_version: 1, - spec_version: 10000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 13, - state_version: 0, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! 
{ - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 0; -} - -// Configure FRAME pallets to include in runtime. -impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = RuntimeBlockWeights; - type BlockLength = RuntimeBlockLength; - type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; - type Nonce = Nonce; - type Hash = Hash; - type Hashing = BlakeTwo256; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type BlockHashCount = BlockHashCount; - type DbWeight = RocksDbWeight; - type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); - type AccountData = pallet_balances::AccountData; - type SystemWeightInfo = weights::frame_system::WeightInfo; - type SS58Prefix = SS58Prefix; - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = weights::pallet_timestamp::WeightInfo; -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! { - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<50>; - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = weights::pallet_balances::WeightInfo; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; -} - -parameter_types! 
{ - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = MILLICENTS; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = - pallet_transaction_payment::CurrencyAdapter>; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; - type OperationalFeeMultiplier = ConstU8<5>; -} - -parameter_types! { - pub const AssetDeposit: Balance = 10 * UNITS; // 10 UNITS deposit to create fungible asset class - pub const AssetAccountDeposit: Balance = deposit(1, 16); - pub const ApprovalDeposit: Balance = EXISTENTIAL_DEPOSIT; - pub const AssetsStringLimit: u32 = 50; - /// Key = 32 bytes, Value = 36 bytes (32+1+1+1+1) - // https://github.com/paritytech/substrate/blob/069917b/frame/assets/src/lib.rs#L257L271 - pub const MetadataDepositBase: Balance = deposit(1, 68); - pub const MetadataDepositPerByte: Balance = deposit(0, 1); -} - -/// We allow root to execute privileged asset operations. -pub type AssetsForceOrigin = EnsureRoot; - -// Called "Trust Backed" assets because these are generally registered by some account, and users of -// the asset assume it has some claimed backing. The pallet is called `Assets` in -// `construct_runtime` to avoid breaking changes on storage reads. -pub type TrustBackedAssetsInstance = pallet_assets::Instance1; -type TrustBackedAssetsCall = pallet_assets::Call; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = AssetIdForTrustBackedAssets; - type AssetIdParameter = codec::Compact; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - type AssetDeposit = AssetDeposit; - type MetadataDepositBase = MetadataDepositBase; - type MetadataDepositPerByte = MetadataDepositPerByte; - type ApprovalDeposit = ApprovalDeposit; - type StringLimit = AssetsStringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_local::WeightInfo; - type CallbackHandle = (); - type AssetAccountDeposit = AssetAccountDeposit; - type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); -} - -parameter_types! { - // we just reuse the same deposits - pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); - pub const ForeignAssetsAssetAccountDeposit: Balance = AssetAccountDeposit::get(); - pub const ForeignAssetsApprovalDeposit: Balance = ApprovalDeposit::get(); - pub const ForeignAssetsAssetsStringLimit: u32 = AssetsStringLimit::get(); - pub const ForeignAssetsMetadataDepositBase: Balance = MetadataDepositBase::get(); - pub const ForeignAssetsMetadataDepositPerByte: Balance = MetadataDepositPerByte::get(); -} - -/// Assets managed by some foreign location. Note: we do not declare a `ForeignAssetsCall` type, as -/// this type is used in proxy definitions. We assume that a foreign location would not want to set -/// an individual, local account as a proxy for the issuance of their assets. This issuance should -/// be managed by the foreign location's governance. 
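As the note above explains, each entry in this pallet instance is keyed by the `MultiLocation` of the asset's origin (`MultiLocationForAssetId`) rather than by a local integer id. A hypothetical id under that scheme, matching the shape used in the asset-hub tests earlier in this diff (the para id 1234, index 12345, and function name are arbitrary and for illustration only):

```rust
use xcm::latest::prelude::*;

/// Illustration only: an asset issued by sibling parachain 1234 under its local
/// `GeneralIndex` 12345, addressed from this chain (hence `parents: 1`).
fn example_foreign_asset_id() -> MultiLocation {
    MultiLocation { parents: 1, interior: X2(Parachain(1234), GeneralIndex(12345)) }
}
```

The `ForeignCreators` origin configured below then only admits the sovereign account of that same location as the asset's creator, which is what keeps issuance under the foreign location's own governance.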
-pub type ForeignAssetsInstance = pallet_assets::Instance2; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = MultiLocationForAssetId; - type AssetIdParameter = MultiLocationForAssetId; - type Currency = Balances; - type CreateOrigin = ForeignCreators< - (FromSiblingParachain>,), - ForeignCreatorsSovereignAccountOf, - AccountId, - >; - type ForceOrigin = AssetsForceOrigin; - type AssetDeposit = ForeignAssetsAssetDeposit; - type MetadataDepositBase = ForeignAssetsMetadataDepositBase; - type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; - type ApprovalDeposit = ForeignAssetsApprovalDeposit; - type StringLimit = ForeignAssetsAssetsStringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_foreign::WeightInfo; - type CallbackHandle = (); - type AssetAccountDeposit = ForeignAssetsAssetAccountDeposit; - type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; -} - -parameter_types! { - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. - pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. - pub const DepositFactor: Balance = deposit(0, 32); - pub const MaxSignatories: u32 = 100; -} - -impl pallet_multisig::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type DepositBase = DepositBase; - type DepositFactor = DepositFactor; - type MaxSignatories = MaxSignatories; - type WeightInfo = weights::pallet_multisig::WeightInfo; -} - -impl pallet_utility::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type PalletsOrigin = OriginCaller; - type WeightInfo = weights::pallet_utility::WeightInfo; -} - -parameter_types! { - // One storage item; key size 32, value size 8; . - pub const ProxyDepositBase: Balance = deposit(1, 40); - // Additional storage item size of 33 bytes. - pub const ProxyDepositFactor: Balance = deposit(0, 33); - pub const MaxProxies: u16 = 32; - // One storage item; key size 32, value size 16 - pub const AnnouncementDepositBase: Balance = deposit(1, 48); - pub const AnnouncementDepositFactor: Balance = deposit(0, 66); - pub const MaxPending: u16 = 32; -} - -/// The type used to represent the kinds of proxying allowed. -#[derive( - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - RuntimeDebug, - MaxEncodedLen, - scale_info::TypeInfo, -)] -pub enum ProxyType { - /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. - Any, - /// Can execute any call that does not transfer funds or assets. - NonTransfer, - /// Proxy with the ability to reject time-delay proxy announcements. - CancelProxy, - /// Assets proxy. Can execute any call from `assets`, **including asset transfers**. - Assets, - /// Owner proxy. Can execute calls related to asset ownership. - AssetOwner, - /// Asset manager. Can execute calls related to asset management. - AssetManager, - /// Collator selection proxy. Can execute calls related to collator selection mechanism. - Collator, -} -impl Default for ProxyType { - fn default() -> Self { - Self::Any - } -} - -impl InstanceFilter for ProxyType { - fn filter(&self, c: &RuntimeCall) -> bool { - match self { - ProxyType::Any => true, - ProxyType::NonTransfer => !matches!( - c, - RuntimeCall::Balances { .. 
} | - RuntimeCall::Assets { .. } | - RuntimeCall::Nfts { .. } | - RuntimeCall::Uniques { .. } - ), - ProxyType::CancelProxy => matches!( - c, - RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Assets => { - matches!( - c, - RuntimeCall::Assets { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } | - RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } - ) - }, - ProxyType::AssetOwner => matches!( - c, - RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::AssetManager => matches!( - c, - RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. 
}) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Collator => matches!( - c, - RuntimeCall::CollatorSelection { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - } - } - - fn is_superset(&self, o: &Self) -> bool { - match (self, o) { - (x, y) if x == y => true, - (ProxyType::Any, _) => true, - (_, ProxyType::Any) => false, - (ProxyType::Assets, ProxyType::AssetOwner) => true, - (ProxyType::Assets, ProxyType::AssetManager) => true, - (ProxyType::NonTransfer, ProxyType::Collator) => true, - _ => false, - } - } -} - -impl pallet_proxy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type ProxyType = ProxyType; - type ProxyDepositBase = ProxyDepositBase; - type ProxyDepositFactor = ProxyDepositFactor; - type MaxProxies = MaxProxies; - type WeightInfo = weights::pallet_proxy::WeightInfo; - type MaxPending = MaxPending; - type CallHasher = BlakeTwo256; - type AnnouncementDepositBase = AnnouncementDepositBase; - type AnnouncementDepositFactor = AnnouncementDepositFactor; -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type OutboundXcmpMessageSource = XcmpQueue; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -parameter_types! 
{ - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl parachain_info::Config for Runtime {} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -parameter_types! { - // Fellows pluralistic body. - pub const FellowsBodyId: BodyId = BodyId::Technical; -} - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, - >; - type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - -parameter_types! { - pub const Period: u32 = 6 * HOURS; - pub const Offset: u32 = 0; -} - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = weights::pallet_session::WeightInfo; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // `StakingAdmin` pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow root and the `StakingAdmin` to execute privileged collator selection operations. 
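// Illustrative note (assuming the conventional `EnsureXcm<IsVoiceOfBody<..>>` filter that system
// parachains pair with `BodyId::Defense`): besides local root, the update origin defined below is
// satisfied by the relay chain's `StakingAdmin` plurality arriving over XCM, i.e. roughly an
// origin of the shape
//   MultiLocation { parents: 1, interior: X1(Plurality { id: BodyId::Defense, part: BodyPart::Voice }) }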
-pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = Period; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = weights::pallet_collator_selection::WeightInfo; -} - -impl pallet_asset_tx_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Fungibles = Assets; - type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< - pallet_assets::BalanceToAssetBalance< - Balances, - Runtime, - ConvertInto, - TrustBackedAssetsInstance, - >, - AssetsToBlockAuthor, - >; -} - -parameter_types! { - pub const UniquesCollectionDeposit: Balance = 10 * UNITS; // 10 UNIT deposit to create uniques class - pub const UniquesItemDeposit: Balance = UNITS / 100; // 1 / 100 UNIT deposit to create uniques instance - pub const UniquesMetadataDepositBase: Balance = deposit(1, 129); - pub const UniquesAttributeDepositBase: Balance = deposit(1, 0); - pub const UniquesDepositPerByte: Balance = deposit(0, 1); -} - -impl pallet_uniques::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type ForceOrigin = AssetsForceOrigin; - type CollectionDeposit = UniquesCollectionDeposit; - type ItemDeposit = UniquesItemDeposit; - type MetadataDepositBase = UniquesMetadataDepositBase; - type AttributeDepositBase = UniquesAttributeDepositBase; - type DepositPerByte = UniquesDepositPerByte; - type StringLimit = ConstU32<128>; - type KeyLimit = ConstU32<32>; // Max 32 bytes per key - type ValueLimit = ConstU32<64>; // Max 64 bytes per value - type WeightInfo = weights::pallet_uniques::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); - type CreateOrigin = AsEnsureOriginWithArg>; - type Locker = (); -} - -parameter_types! 
{ - pub NftsPalletFeatures: PalletFeatures = PalletFeatures::all_enabled(); - pub const NftsMaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS; - // re-use the Uniques deposits - pub const NftsCollectionDeposit: Balance = UniquesCollectionDeposit::get(); - pub const NftsItemDeposit: Balance = UniquesItemDeposit::get(); - pub const NftsMetadataDepositBase: Balance = UniquesMetadataDepositBase::get(); - pub const NftsAttributeDepositBase: Balance = UniquesAttributeDepositBase::get(); - pub const NftsDepositPerByte: Balance = UniquesDepositPerByte::get(); -} - -impl pallet_nfts::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - type Locker = (); - type CollectionDeposit = NftsCollectionDeposit; - type ItemDeposit = NftsItemDeposit; - type MetadataDepositBase = NftsMetadataDepositBase; - type AttributeDepositBase = NftsAttributeDepositBase; - type DepositPerByte = NftsDepositPerByte; - type StringLimit = ConstU32<256>; - type KeyLimit = ConstU32<64>; - type ValueLimit = ConstU32<256>; - type ApprovalsLimit = ConstU32<20>; - type ItemAttributesApprovalsLimit = ConstU32<30>; - type MaxTips = ConstU32<10>; - type MaxDeadlineDuration = NftsMaxDeadlineDuration; - type MaxAttributesPerCall = ConstU32<10>; - type Features = NftsPalletFeatures; - type OffchainSignature = Signature; - type OffchainPublic = ::Signer; - type WeightInfo = weights::pallet_nfts::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); -} - -// Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime - { - // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - // RandomnessCollectiveFlip = 2 removed - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 4, - - // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - AssetTxPayment: pallet_asset_tx_payment::{Pallet, Event} = 12, - - // Collator support. the order of these 5 are important and shall not change. - Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, - - // Handy utilities. - Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, - Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, - - // The main stage. 
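// Two `pallet_assets` instances follow: `Assets` is the trust-backed `Instance1` declared
// above and `ForeignAssets` is `Instance2`. Their indices (50 and 53) are part of the
// on-chain call/event encoding, so they must not be reused or reordered.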
- Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50, - Uniques: pallet_uniques::{Pallet, Call, Storage, Event} = 51, - Nfts: pallet_nfts::{Pallet, Call, Storage, Event} = 52, - ForeignAssets: pallet_assets::::{Pallet, Call, Storage, Event} = 53, - } -); - -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_asset_tx_payment::ChargeAssetTxPayment, -); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; -/// Migrations to apply on runtime upgrade. -pub type Migrations = ( - // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, -); - -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, - Migrations, ->; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - frame_benchmarking::define_benchmarks!( - [frame_system, SystemBench::] - [pallet_assets, Local] - [pallet_assets, Foreign] - [pallet_balances, Balances] - [pallet_message_queue, MessageQueue] - [pallet_multisig, Multisig] - [pallet_nfts, Nfts] - [pallet_proxy, Proxy] - [pallet_session, SessionBench::] - [pallet_uniques, Uniques] - [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_parachain_system, ParachainSystem] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - // XCM - [pallet_xcm, PolkadotXcm] - // NOTE: Make sure you point to the individual modules below. - [pallet_xcm_benchmarks::fungible, XcmBalances] - [pallet_xcm_benchmarks::generic, XcmGeneric] - ); -} - -impl_runtime_apis! 
{ - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl assets_common::runtime_api::FungiblesApi< - Block, - AccountId, - > for Runtime - { - fn 
query_account_balances(account: AccountId) -> Result { - use assets_common::fungible_conversion::{convert, convert_balance}; - Ok([ - // collect pallet_balance - { - let balance = Balances::free_balance(account.clone()); - if balance > 0 { - vec![convert_balance::(balance)?] - } else { - vec![] - } - }, - // collect pallet_assets (TrustBackedAssets) - convert::<_, _, _, _, TrustBackedAssetsConvertedConcreteId>( - Assets::account_balances(account.clone()) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect pallet_assets (ForeignAssets) - convert::<_, _, _, _, ForeignAssetsConvertedConcreteId>( - ForeignAssets::account_balances(account) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect ... e.g. other tokens - ].concat().into()) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - - // This is defined once again in dispatch_benchmark, because list_benchmarks! - // and add_benchmarks! are macros exported by define_benchmarks! macros and those types - // are referenced in that call. - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - // Benchmark files generated for `Assets/ForeignAssets` instances are by default - // `pallet_assets_assets.rs / pallet_assets_foreign_assets`, which is not really nice, - // so with this redefinition we can change names to nicer: - // `pallet_assets_local.rs / pallet_assets_foreign.rs`. 
- type Local = pallet_assets::Pallet::; - type Foreign = pallet_assets::Pallet::; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; - use sp_storage::TrackedStorageKey; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use xcm::latest::prelude::*; - use xcm_config::{DotLocation, MaxAssetsIntoHolding}; - use pallet_xcm_benchmarks::asset_instance_from; - - parameter_types! { - pub ExistentialDepositMultiAsset: Option = Some(( - xcm_config::DotLocation::get(), - ExistentialDeposit::get() - ).into()); - } - - impl pallet_xcm_benchmarks::Config for Runtime { - type XcmConfig = xcm_config::XcmConfig; - type AccountIdConverter = xcm_config::LocationToAccountId; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositMultiAsset, - xcm_config::PriceForParentDelivery, - >; - fn valid_destination() -> Result { - Ok(DotLocation::get()) - } - fn worst_case_holding(depositable_count: u32) -> MultiAssets { - // A mix of fungible, non-fungible, and concrete assets. - let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; - let holding_fungibles = holding_non_fungibles - 1; - let fungibles_amount: u128 = 100; - let mut assets = (0..holding_fungibles) - .map(|i| { - MultiAsset { - id: Concrete(GeneralIndex(i as u128).into()), - fun: Fungible(fungibles_amount * i as u128), - } - }) - .chain(core::iter::once(MultiAsset { id: Concrete(Here.into()), fun: Fungible(u128::MAX) })) - .chain((0..holding_non_fungibles).map(|i| MultiAsset { - id: Concrete(GeneralIndex(i as u128).into()), - fun: NonFungible(asset_instance_from(i)), - })) - .collect::>(); - - assets.push(MultiAsset { - id: Concrete(DotLocation::get()), - fun: Fungible(1_000_000 * UNITS), - }); - assets.into() - } - } - - parameter_types! 
{ - pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - DotLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(DotLocation::get()) }, - )); - pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; - } - - impl pallet_xcm_benchmarks::fungible::Config for Runtime { - type TransactAsset = Balances; - - type CheckedAccount = CheckedAccount; - type TrustedTeleporter = TrustedTeleporter; - type TrustedReserve = TrustedReserve; - - fn get_multi_asset() -> MultiAsset { - MultiAsset { - id: Concrete(DotLocation::get()), - fun: Fungible(UNITS), - } - } - } - - impl pallet_xcm_benchmarks::generic::Config for Runtime { - type TransactAsset = Balances; - type RuntimeCall = RuntimeCall; - - fn worst_case_response() -> (u64, Response) { - (0u64, Response::Version(Default::default())) - } - - fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((DotLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) - } - - fn subscribe_origin() -> Result { - Ok(DotLocation::get()) - } - - fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = DotLocation::get(); - let assets: MultiAssets = (Concrete(DotLocation::get()), 1_000 * UNITS).into(); - let ticket = MultiLocation { parents: 0, interior: Here }; - Ok((origin, ticket, assets)) - } - - fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn export_message_origin_and_destination( - ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - } - - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - type Local = pallet_assets::Pallet::; - type Foreign = pallet_assets::Pallet::; - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - //TODO: use from relay_well_known_keys::ACTIVE_CONFIG - hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - build_config::(config) - } - } -} - 
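// A minimal sketch (illustrative, not part of the diff) of how the whitelisted storage keys above
// are constructed: each one is `twox128(pallet_prefix) ++ twox128(storage_item_name)`.
use sp_core::hashing::twox_128;

// Helper name is ours, for illustration only.
fn well_known_key(pallet: &[u8], item: &[u8]) -> Vec<u8> {
    // Concatenate the two 16-byte prefixes exactly as FRAME does for a plain storage value.
    let mut key = twox_128(pallet).to_vec();
    key.extend_from_slice(&twox_128(item));
    key
}

// For example, `well_known_key(b"System", b"Number")` reproduces the "Block Number" key and
// `well_known_key(b"Balances", b"TotalIssuance")` the "Total Issuance" key listed above.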
-cumulus_pallet_parachain_system::register_validate_block! { - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{CENTS, MILLICENTS}; - use parachains_common::polkadot::fee; - use sp_runtime::traits::Zero; - use sp_weights::WeightToFee; - - /// We can fit at least 1000 transfers in a block. - #[test] - fn sane_block_weight() { - use pallet_balances::WeightInfo; - let block = RuntimeBlockWeights::get().max_block; - let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; - let transfer = - base + weights::pallet_balances::WeightInfo::::transfer_allow_death(); - - let fit = block.checked_div_per_component(&transfer).unwrap_or_default(); - assert!(fit >= 1000, "{} should be at least 1000", fit); - } - - /// The fee for one transfer is at most 1 CENT. - #[test] - fn sane_transfer_fee() { - use pallet_balances::WeightInfo; - let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; - let transfer = - base + weights::pallet_balances::WeightInfo::::transfer_allow_death(); - - let fee: Balance = fee::WeightToFee::weight_to_fee(&transfer); - assert!(fee <= CENTS, "{} MILLICENTS should be at most 1000", fee / MILLICENTS); - } - - /// Weight is being charged for both dimensions. - #[test] - fn weight_charged_for_both_components() { - let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(10_000, 0)); - assert!(!fee.is_zero(), "Charges for ref time"); - - let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, 10_000)); - assert_eq!(fee, CENTS, "10kb maps to CENT"); - } - - /// Filling up a block by proof size is at most 30 times more expensive than ref time. - /// - /// This is just a sanity check. - #[test] - fn full_block_fee_ratio() { - let block = RuntimeBlockWeights::get().max_block; - let time_fee: Balance = - fee::WeightToFee::weight_to_fee(&Weight::from_parts(block.ref_time(), 0)); - let proof_fee: Balance = - fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, block.proof_size())); - - let proof_o_time = proof_fee.checked_div(time_fee).unwrap_or_default(); - assert!(proof_o_time <= 30, "{} should be at most 30", proof_o_time); - let time_o_proof = time_fee.checked_div(proof_fee).unwrap_or_default(); - assert!(time_o_proof <= 30, "{} should be at most 30", time_o_proof); - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/block_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/block_weights.rs deleted file mode 100644 index e7fdb2aae2a01ec06076de83d94817e540e205dd..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/block_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
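// General Substrate convention (assumed here, not stated in this file): the constant defined
// below charges the base cost of executing an empty block in ref-time, i.e.
// 5_000_000 * WEIGHT_REF_TIME_PER_NANOS = 5 ms on the reference hardware, which is what the
// accompanying sanity test bounds between 100 µs and 50 ms.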
- -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Importing a block with 0 Extrinsics. - pub const BlockExecutionWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::BlockExecutionWeight::get(); - - // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 100 µs." - ); - // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 50 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
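// How to read the generated functions below (standard for Substrate benchmark output, not
// specific to this file): `Weight::from_parts(ref_time, proof_size)` carries picoseconds of
// execution in the first component and PoV bytes in the second, and storage traffic is added on
// top via `T::DbWeight::get().reads(n)` / `.writes(n)`. For example,
//   Weight::from_parts(127_857_000, 0)
//       .saturating_add(Weight::from_parts(0, 69161))
//       .saturating_add(T::DbWeight::get().reads(5))
//       .saturating_add(T::DbWeight::get().writes(5))
// charges roughly 128 µs of ref-time, budgets 69161 proof bytes, and adds the benchmarked cost
// of five database reads and five writes.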
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. 
- Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index 970534560c68d619908dc241658493717c480c1c..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// westmint-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/westmint/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. 
- fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_638_000 picoseconds. - Weight::from_parts(1_690_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 22_873 - .saturating_add(Weight::from_parts(24_208_496, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs deleted file mode 100644 index 0823dcad88e97f6cc3a4eb2b60da82de2ed816d6..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; -pub mod cumulus_pallet_parachain_system; -pub mod cumulus_pallet_xcmp_queue; -pub mod extrinsic_weights; -pub mod frame_system; -pub mod pallet_assets_foreign; -pub mod pallet_assets_local; -pub mod pallet_balances; -pub mod pallet_collator_selection; -pub mod pallet_message_queue; -pub mod pallet_multisig; -pub mod pallet_nfts; -pub mod pallet_proxy; -pub mod pallet_session; -pub mod pallet_timestamp; -pub mod pallet_uniques; -pub mod pallet_utility; -pub mod pallet_xcm; -pub mod paritydb_weights; -pub mod rocksdb_weights; -pub mod xcm; - -pub use block_weights::constants::BlockExecutionWeight; -pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; -pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs deleted file mode 100644 index adb686c0afc3d77c16c565fbf2234e17f447b5fd..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_assets -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo(PhantomData); -impl pallet_assets::WeightInfo for WeightInfo { - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `4273` - // Minimum execution time: 29_979_000 picoseconds. - Weight::from_parts(30_763_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `4` - // Estimated: `4273` - // Minimum execution time: 12_255_000 picoseconds. - Weight::from_parts(12_614_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 15_240_000 picoseconds. - Weight::from_parts(15_627_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1001 w:1000) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1000 w:1000) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `c` is `[0, 1000]`. 
- /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `4273 + c * (3207 ±0)` - // Minimum execution time: 17_814_000 picoseconds. - Weight::from_parts(18_006_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 10_358 - .saturating_add(Weight::from_parts(15_409_972, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 3207).saturating_mul(c.into())) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1001 w:1000) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `413 + a * (86 ±0)` - // Estimated: `4273 + a * (3221 ±0)` - // Minimum execution time: 18_957_000 picoseconds. - Weight::from_parts(19_347_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 5_051 - .saturating_add(Weight::from_parts(15_416_931, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 3221).saturating_mul(a.into())) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:0) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_409_000 picoseconds. - Weight::from_parts(15_835_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 26_753_000 picoseconds. 
- Weight::from_parts(27_349_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 33_918_000 picoseconds. - Weight::from_parts(34_624_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 45_863_000 picoseconds. - Weight::from_parts(46_674_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 40_592_000 picoseconds. - Weight::from_parts(41_582_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 46_170_000 picoseconds. 
- Weight::from_parts(46_880_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_421_000 picoseconds. - Weight::from_parts(19_003_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_009_000 picoseconds. - Weight::from_parts(18_683_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 14_702_000 picoseconds. - Weight::from_parts(15_118_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 14_329_000 picoseconds. - Weight::from_parts(14_857_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:0) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_776_000 picoseconds. - Weight::from_parts(16_337_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 14_290_000 picoseconds. 
- Weight::from_parts(14_655_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 29_296_000 picoseconds. - Weight::from_parts(30_512_261, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 474 - .saturating_add(Weight::from_parts(530, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `4273` - // Minimum execution time: 30_342_000 picoseconds. - Weight::from_parts(31_030_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `81` - // Estimated: `4273` - // Minimum execution time: 13_574_000 picoseconds. - Weight::from_parts(14_181_016, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 262 - .saturating_add(Weight::from_parts(420, 0).saturating_mul(n.into())) - // Standard Error: 262 - .saturating_add(Weight::from_parts(1_118, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `4273` - // Minimum execution time: 29_679_000 picoseconds. 
- Weight::from_parts(30_346_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 13_334_000 picoseconds. - Weight::from_parts(13_827_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 32_648_000 picoseconds. - Weight::from_parts(33_555_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `520` - // Estimated: `7404` - // Minimum execution time: 65_431_000 picoseconds. - Weight::from_parts(66_502_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `446` - // Estimated: `4273` - // Minimum execution time: 35_207_000 picoseconds. 
- Weight::from_parts(35_915_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `446` - // Estimated: `4273` - // Minimum execution time: 35_768_000 picoseconds. - Weight::from_parts(36_553_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_108_000 picoseconds. - Weight::from_parts(15_556_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `345` - // Estimated: `4273` - // Minimum execution time: 34_373_000 picoseconds. - Weight::from_parts(35_200_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 32_201_000 picoseconds. - Weight::from_parts(33_591_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `471` - // Estimated: `4273` - // Minimum execution time: 31_148_000 picoseconds. 
- Weight::from_parts(31_751_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `4273` - // Minimum execution time: 29_127_000 picoseconds. - Weight::from_parts(29_922_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_386_000 picoseconds. - Weight::from_parts(18_762_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs deleted file mode 100644 index 810f5b57c45dcc5f33fc76f3417682a0d517d6ed..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs +++ /dev/null @@ -1,528 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_assets -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> { - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3675` - // Minimum execution time: 26_698_000 picoseconds. - Weight::from_parts(27_507_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3675` - // Minimum execution time: 10_833_000 picoseconds. - Weight::from_parts(11_314_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_389_000 picoseconds. - Weight::from_parts(14_231_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1001 w:1000) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1000 w:1000) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 16_027_000 picoseconds.
- Weight::from_parts(16_455_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 10_266 - .saturating_add(Weight::from_parts(15_263_742, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1001 w:1000) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `414 + a * (86 ±0)` - // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 17_167_000 picoseconds. - Weight::from_parts(17_397_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 5_072 - .saturating_add(Weight::from_parts(15_429_203, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:0) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_694_000 picoseconds. - Weight::from_parts(14_239_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 24_406_000 picoseconds. - Weight::from_parts(24_981_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 31_372_000 picoseconds. 
- Weight::from_parts(32_021_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 42_982_000 picoseconds. - Weight::from_parts(43_918_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 37_161_000 picoseconds. - Weight::from_parts(38_756_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 43_141_000 picoseconds. - Weight::from_parts(44_187_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_721_000 picoseconds. 
- Weight::from_parts(17_433_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_623_000 picoseconds. - Weight::from_parts(17_110_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_079_000 picoseconds. - Weight::from_parts(13_700_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_026_000 picoseconds. - Weight::from_parts(13_444_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:0) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_945_000 picoseconds. - Weight::from_parts(14_792_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 12_800_000 picoseconds. - Weight::from_parts(13_183_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. 
- fn set_metadata(_n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 27_637_000 picoseconds. - Weight::from_parts(28_967_060, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 464 - .saturating_add(Weight::from_parts(572, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `407` - // Estimated: `3675` - // Minimum execution time: 28_427_000 picoseconds. - Weight::from_parts(28_961_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `3675` - // Minimum execution time: 12_251_000 picoseconds. - Weight::from_parts(12_928_907, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 244 - .saturating_add(Weight::from_parts(1_800, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `407` - // Estimated: `3675` - // Minimum execution time: 28_263_000 picoseconds. - Weight::from_parts(29_165_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 12_343_000 picoseconds. 
- Weight::from_parts(12_659_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 31_113_000 picoseconds. - Weight::from_parts(31_798_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `521` - // Estimated: `6208` - // Minimum execution time: 61_428_000 picoseconds. - Weight::from_parts(62_707_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3675` - // Minimum execution time: 33_538_000 picoseconds. - Weight::from_parts(34_216_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3675` - // Minimum execution time: 33_870_000 picoseconds. - Weight::from_parts(34_709_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_358_000 picoseconds. 
- Weight::from_parts(13_735_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `346` - // Estimated: `3675` - // Minimum execution time: 32_159_000 picoseconds. - Weight::from_parts(32_998_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 30_709_000 picoseconds. - Weight::from_parts(31_486_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `472` - // Estimated: `3675` - // Minimum execution time: 29_557_000 picoseconds. - Weight::from_parts(30_510_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `402` - // Estimated: `3675` - // Minimum execution time: 28_027_000 picoseconds. - Weight::from_parts(28_865_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_758_000 picoseconds. 
- Weight::from_parts(17_280_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs deleted file mode 100644 index 7c4501e6d882ef61bd3e608b76750287245eb83a..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_balances` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_balances`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_allow_death() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 56_173_000 picoseconds. - Weight::from_parts(57_097_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 41_470_000 picoseconds.
- Weight::from_parts(42_051_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_creating() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 14_771_000 picoseconds. - Weight::from_parts(15_125_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_killing() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 22_210_000 picoseconds. - Weight::from_parts(22_712_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 57_475_000 picoseconds. - Weight::from_parts(58_343_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_all() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 52_139_000 picoseconds. - Weight::from_parts(52_601_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_unreserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 17_372_000 picoseconds. - Weight::from_parts(17_978_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:999 w:999) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 17_143_000 picoseconds. 
- Weight::from_parts(17_475_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 16_909 - .saturating_add(Weight::from_parts(15_474_628, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs deleted file mode 100644 index c33e79970ff4774f1b94b218836e9d22fd6fe729..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightInfo<T> { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `163 + b * (79 ±0)` - // Estimated: `1154 + b * (2555 ±0)` - // Minimum execution time: 14_882_000 picoseconds.
- Weight::from_parts(12_290_529, 0) - .saturating_add(Weight::from_parts(0, 1154)) - // Standard Error: 6_842 - .saturating_add(Weight::from_parts(3_189_571, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. - fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `756 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 48_113_000 picoseconds. - Weight::from_parts(49_767_909, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_725 - .saturating_add(Weight::from_parts(232_655, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `119 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 16_228_000 picoseconds. - Weight::from_parts(16_351_387, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_953 - .saturating_add(Weight::from_parts(140_754, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_541_000 picoseconds. 
- Weight::from_parts(7_720_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_402_000 picoseconds. - Weight::from_parts(7_729_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. - fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `736 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 41_874_000 picoseconds. - Weight::from_parts(45_654_015, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_317 - .saturating_add(Weight::from_parts(221_237, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[4, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 33_693_000 picoseconds. 
- Weight::from_parts(37_321_527, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 3_499 - .saturating_add(Weight::from_parts(182_068, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 44_412_000 picoseconds. - Weight::from_parts(45_196_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. - fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 17_360_000 picoseconds. - Weight::from_parts(17_599_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 350_829 - .saturating_add(Weight::from_parts(15_375_949, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs deleted file mode 100644 index a9f0cb07cfe1385d62df0f1e81cd1b03cc963f7e..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// westmint-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/westmint/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_message_queue::WeightInfo for WeightInfo<T> { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 12_192_000 picoseconds. - Weight::from_parts(12_192_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 10_447_000 picoseconds. - Weight::from_parts(10_447_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 4_851_000 picoseconds.
- Weight::from_parts(4_851_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_342_000 picoseconds. - Weight::from_parts(6_342_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_199_000 picoseconds. - Weight::from_parts(6_199_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 58_612_000 picoseconds. - Weight::from_parts(58_612_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 7_296_000 picoseconds. - Weight::from_parts(7_296_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 48_345_000 picoseconds. - Weight::from_parts(48_345_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 56_441_000 picoseconds. 
- Weight::from_parts(56_441_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 70_858_000 picoseconds. - Weight::from_parts(70_858_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs deleted file mode 100644 index 842daf49f599c0f7c349a2b6c25f797db9a79d19..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs +++ /dev/null @@ -1,772 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_nfts` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_nfts -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_nfts`. 
-pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_nfts::WeightInfo for WeightInfo<T> { - /// Storage: `Nfts::NextCollectionId` (r:1 w:1) - /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3549` - // Minimum execution time: 37_915_000 picoseconds. - Weight::from_parts(39_275_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::NextCollectionId` (r:1 w:1) - /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3549` - // Minimum execution time: 22_722_000 picoseconds.
- Weight::from_parts(23_500_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1001 w:1000) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// The range of component `m` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32170 + a * (366 ±0)` - // Estimated: `2523990 + a * (2954 ±0)` - // Minimum execution time: 1_231_520_000 picoseconds. - Weight::from_parts(1_228_960_098, 0) - .saturating_add(Weight::from_parts(0, 2523990)) - // Standard Error: 8_836 - .saturating_add(Weight::from_parts(6_818_975, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(1004)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1005)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) - } - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `421` - // Estimated: `4326` - // Minimum execution time: 48_581_000 picoseconds. 
- Weight::from_parts(50_020_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn force_mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `421` - // Estimated: `4326` - // Minimum execution time: 47_171_000 picoseconds. - Weight::from_parts(48_084_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `4326` - // Minimum execution time: 53_591_000 picoseconds. 
- Weight::from_parts(55_074_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:2) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `4326` - // Minimum execution time: 40_935_000 picoseconds. - Weight::from_parts(41_835_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:5000 w:5000) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn redeposit(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `729 + i * (108 ±0)` - // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 16_543_000 picoseconds. - Weight::from_parts(16_769_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - // Standard Error: 23_638 - .saturating_add(Weight::from_parts(17_762_895, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn lock_item_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `3534` - // Minimum execution time: 20_446_000 picoseconds. 
- Weight::from_parts(20_740_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn unlock_item_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `3534` - // Minimum execution time: 20_088_000 picoseconds. - Weight::from_parts(20_627_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn lock_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `306` - // Estimated: `3549` - // Minimum execution time: 17_036_000 picoseconds. - Weight::from_parts(17_435_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:2) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `354` - // Estimated: `3549` - // Minimum execution time: 22_528_000 picoseconds. - Weight::from_parts(23_047_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `6078` - // Minimum execution time: 38_473_000 picoseconds. - Weight::from_parts(39_353_000, 0) - .saturating_add(Weight::from_parts(0, 6078)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:2) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_collection_owner() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3549` - // Minimum execution time: 17_708_000 picoseconds. 
- Weight::from_parts(18_022_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn force_collection_config() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `3549` - // Minimum execution time: 14_606_000 picoseconds. - Weight::from_parts(14_891_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn lock_item_properties() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `3534` - // Minimum execution time: 19_492_000 picoseconds. - Weight::from_parts(19_919_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - fn set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `3944` - // Minimum execution time: 50_583_000 picoseconds. - Weight::from_parts(53_846_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - fn force_set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `310` - // Estimated: `3944` - // Minimum execution time: 25_937_000 picoseconds. 
- Weight::from_parts(26_540_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - fn clear_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `949` - // Estimated: `3944` - // Minimum execution time: 45_738_000 picoseconds. - Weight::from_parts(46_468_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - fn approve_item_attributes() -> Weight { - // Proof Size summary in bytes: - // Measured: `347` - // Estimated: `4466` - // Minimum execution time: 17_361_000 picoseconds. - Weight::from_parts(18_191_000, 0) - .saturating_add(Weight::from_parts(0, 4466)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1001 w:1000) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`. - fn cancel_item_attributes_approval(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `726 + n * (398 ±0)` - // Estimated: `4466 + n * (2954 ±0)` - // Minimum execution time: 25_884_000 picoseconds. 
- Weight::from_parts(26_265_000, 0) - .saturating_add(Weight::from_parts(0, 4466)) - // Standard Error: 6_423 - .saturating_add(Weight::from_parts(6_507_369, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - fn set_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `3812` - // Minimum execution time: 40_802_000 picoseconds. - Weight::from_parts(41_742_000, 0) - .saturating_add(Weight::from_parts(0, 3812)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `815` - // Estimated: `3812` - // Minimum execution time: 38_904_000 picoseconds. - Weight::from_parts(39_919_000, 0) - .saturating_add(Weight::from_parts(0, 3812)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - fn set_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `3759` - // Minimum execution time: 37_012_000 picoseconds. 
- Weight::from_parts(37_632_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - fn clear_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `682` - // Estimated: `3759` - // Minimum execution time: 36_243_000 picoseconds. - Weight::from_parts(37_313_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `4326` - // Minimum execution time: 20_919_000 picoseconds. - Weight::from_parts(21_505_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `384` - // Estimated: `4326` - // Minimum execution time: 18_943_000 picoseconds. - Weight::from_parts(19_969_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn clear_all_transfer_approvals() -> Weight { - // Proof Size summary in bytes: - // Measured: `384` - // Estimated: `4326` - // Minimum execution time: 17_320_000 picoseconds. - Weight::from_parts(18_071_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_accept_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3517` - // Minimum execution time: 14_934_000 picoseconds. 
- Weight::from_parts(15_422_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - fn set_collection_max_supply() -> Weight { - // Proof Size summary in bytes: - // Measured: `306` - // Estimated: `3549` - // Minimum execution time: 18_715_000 picoseconds. - Weight::from_parts(19_025_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn update_mint_settings() -> Weight { - // Proof Size summary in bytes: - // Measured: `289` - // Estimated: `3538` - // Minimum execution time: 18_249_000 picoseconds. - Weight::from_parts(18_826_000, 0) - .saturating_add(Weight::from_parts(0, 3538)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn set_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `484` - // Estimated: `4326` - // Minimum execution time: 23_529_000 picoseconds. 
- Weight::from_parts(23_958_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:2) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn buy_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `671` - // Estimated: `4326` - // Minimum execution time: 50_885_000 picoseconds. - Weight::from_parts(52_157_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// The range of component `n` is `[0, 10]`. - fn pay_tips(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_258_000 picoseconds. - Weight::from_parts(3_342_691, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 6_268 - .saturating_add(Weight::from_parts(3_761_373, 0).saturating_mul(n.into())) - } - /// Storage: `Nfts::Item` (r:2 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn create_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `460` - // Estimated: `7662` - // Minimum execution time: 21_220_000 picoseconds. - Weight::from_parts(21_654_000, 0) - .saturating_add(Weight::from_parts(0, 7662)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn cancel_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `479` - // Estimated: `4326` - // Minimum execution time: 20_430_000 picoseconds. 
- Weight::from_parts(21_038_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:2 w:2) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:2 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:4) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn claim_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `800` - // Estimated: `7662` - // Minimum execution time: 83_344_000 picoseconds. - Weight::from_parts(84_898_000, 0) - .saturating_add(Weight::from_parts(0, 7662)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(10)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:10 w:10) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 10]`. - fn mint_pre_signed(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `524` - // Estimated: `6078 + n * (2954 ±0)` - // Minimum execution time: 143_435_000 picoseconds. 
- Weight::from_parts(151_744_537, 0) - .saturating_add(Weight::from_parts(0, 6078)) - // Standard Error: 44_459 - .saturating_add(Weight::from_parts(31_293_503, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(6)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:10 w:10) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 10]`. - fn set_attributes_pre_signed(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `554` - // Estimated: `4466 + n * (2954 ±0)` - // Minimum execution time: 84_627_000 picoseconds. - Weight::from_parts(96_076_065, 0) - .saturating_add(Weight::from_parts(0, 4466)) - // Standard Error: 62_058 - .saturating_add(Weight::from_parts(30_461_383, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs deleted file mode 100644 index b6121f2fca2ecdee22f31a3ac1f67f4dc72ae813..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_proxy` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_proxy -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_proxy`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> { - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 16_130_000 picoseconds. - Weight::from_parts(16_649_312, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 761 - .saturating_add(Weight::from_parts(42_507, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn proxy_announced(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 37_732_000 picoseconds. - Weight::from_parts(36_993_926, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 3_278 - .saturating_add(Weight::from_parts(144_955, 0).saturating_mul(a.into())) - // Standard Error: 3_387 - .saturating_add(Weight::from_parts(64_624, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 24_229_000 picoseconds.
- Weight::from_parts(24_199_507, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_672 - .saturating_add(Weight::from_parts(124_324, 0).saturating_mul(a.into())) - // Standard Error: 1_727 - .saturating_add(Weight::from_parts(28_481, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 23_868_000 picoseconds. - Weight::from_parts(25_293_069, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_728 - .saturating_add(Weight::from_parts(114_080, 0).saturating_mul(a.into())) - // Standard Error: 1_786 - .saturating_add(Weight::from_parts(3_690, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn announce(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `386 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 34_343_000 picoseconds. - Weight::from_parts(34_539_112, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_917 - .saturating_add(Weight::from_parts(117_360, 0).saturating_mul(a.into())) - // Standard Error: 1_981 - .saturating_add(Weight::from_parts(40_908, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn add_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 25_506_000 picoseconds. - Weight::from_parts(26_350_920, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_950 - .saturating_add(Weight::from_parts(48_972, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. 
- fn remove_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 25_234_000 picoseconds. - Weight::from_parts(26_232_489, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_468 - .saturating_add(Weight::from_parts(48_955, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn remove_proxies(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 22_184_000 picoseconds. - Weight::from_parts(22_974_929, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_200 - .saturating_add(Weight::from_parts(45_741, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn create_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `4706` - // Minimum execution time: 27_044_000 picoseconds. - Weight::from_parts(27_978_605, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_206 - .saturating_add(Weight::from_parts(13_736, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 30]`. - fn kill_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 22_770_000 picoseconds. - Weight::from_parts(23_441_470, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_959 - .saturating_add(Weight::from_parts(47_317, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs deleted file mode 100644 index 560322abeb3f6baf573fa6dfd9caba41b1e8cbd6..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Autogenerated weights for `pallet_session` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_session -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_session`. -pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:1 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn set_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `270` - // Estimated: `3735` - // Minimum execution time: 16_684_000 picoseconds. - Weight::from_parts(17_167_000, 0) - .saturating_add(Weight::from_parts(0, 3735)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:0 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn purge_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `3707` - // Minimum execution time: 11_692_000 picoseconds. - Weight::from_parts(12_248_000, 0) - .saturating_add(Weight::from_parts(0, 3707)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs deleted file mode 100644 index 17b050c3e90be8bc506d2f505cebefc227137c48..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Autogenerated weights for `pallet_timestamp` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_timestamp`. -pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: `Timestamp::Now` (r:1 w:1) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Aura::CurrentSlot` (r:1 w:0) - /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set() -> Weight { - // Proof Size summary in bytes: - // Measured: `86` - // Estimated: `1493` - // Minimum execution time: 9_214_000 picoseconds. - Weight::from_parts(9_535_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn on_finalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `57` - // Estimated: `0` - // Minimum execution time: 3_269_000 picoseconds. - Weight::from_parts(3_458_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs deleted file mode 100644 index 5b13d56f5bb2edb491d2e4d76ef64a10d45d4e0c..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs +++ /dev/null @@ -1,466 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_uniques` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_uniques -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_uniques`. -pub struct WeightInfo(PhantomData); -impl pallet_uniques::WeightInfo for WeightInfo { - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3643` - // Minimum execution time: 29_513_000 picoseconds. - Weight::from_parts(30_346_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3643` - // Minimum execution time: 13_600_000 picoseconds. - Weight::from_parts(14_110_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1001 w:1000) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1000 w:1000) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1000) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`. 
- /// The range of component `m` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `257 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` - // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` - // Minimum execution time: 2_945_869_000 picoseconds. - Weight::from_parts(3_037_917_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - // Standard Error: 35_850 - .saturating_add(Weight::from_parts(7_558_563, 0).saturating_mul(n.into())) - // Standard Error: 35_850 - .saturating_add(Weight::from_parts(501_089, 0).saturating_mul(m.into())) - // Standard Error: 35_850 - .saturating_add(Weight::from_parts(538_921, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:0) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 36_225_000 picoseconds. - Weight::from_parts(36_858_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 37_021_000 picoseconds. 
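The `destroy(n, m, a)` entry above is the most complex shape in this file: a large fixed base plus independent linear terms for items (`n`), metadata entries (`m`) and attributes (`a`), with additional per-item database reads/writes and proof-size terms. A sketch of just the ref-time part of that formula, constants copied from the output above:

use frame_support::weights::Weight;

// Ref-time portion of the generated `destroy(n, m, a)` formula; the per-item
// DbWeight and proof-size terms are intentionally left out of this sketch.
fn uniques_destroy_ref_time(n: u64, m: u64, a: u64) -> Weight {
    Weight::from_parts(3_037_917_000, 0)
        .saturating_add(Weight::from_parts(7_558_563, 0).saturating_mul(n))
        .saturating_add(Weight::from_parts(501_089, 0).saturating_mul(m))
        .saturating_add(Weight::from_parts(538_921, 0).saturating_mul(a))
}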
- Weight::from_parts(37_749_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:2) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 26_884_000 picoseconds. - Weight::from_parts(27_414_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:5000 w:5000) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn redeposit(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `738 + i * (76 ±0)` - // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 14_797_000 picoseconds. - Weight::from_parts(14_943_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - // Standard Error: 25_250 - .saturating_add(Weight::from_parts(18_014_600, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 2597).saturating_mul(i.into())) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 18_864_000 picoseconds. - Weight::from_parts(19_299_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 18_530_000 picoseconds. 
- Weight::from_parts(19_230_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn freeze_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_807_000 picoseconds. - Weight::from_parts(14_270_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn thaw_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_657_000 picoseconds. - Weight::from_parts(14_059_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:2) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `356` - // Estimated: `3643` - // Minimum execution time: 22_108_000 picoseconds. - Weight::from_parts(22_520_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 14_128_000 picoseconds. - Weight::from_parts(14_481_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_item_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 17_114_000 picoseconds. 
- Weight::from_parts(17_570_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - fn set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `3652` - // Minimum execution time: 40_412_000 picoseconds. - Weight::from_parts(43_009_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - fn clear_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `756` - // Estimated: `3652` - // Minimum execution time: 38_044_000 picoseconds. - Weight::from_parts(38_871_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - fn set_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `348` - // Estimated: `3652` - // Minimum execution time: 30_016_000 picoseconds. - Weight::from_parts(30_723_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `3652` - // Minimum execution time: 30_942_000 picoseconds. 
- Weight::from_parts(31_527_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - fn set_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 30_727_000 picoseconds. - Weight::from_parts(31_688_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - fn clear_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `473` - // Estimated: `3643` - // Minimum execution time: 29_844_000 picoseconds. - Weight::from_parts(30_403_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 19_155_000 picoseconds. - Weight::from_parts(19_909_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `3643` - // Minimum execution time: 19_163_000 picoseconds. - Weight::from_parts(19_804_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_accept_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3517` - // Minimum execution time: 15_413_000 picoseconds. 
- Weight::from_parts(15_762_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:1) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn set_collection_max_supply() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 16_477_000 picoseconds. - Weight::from_parts(16_811_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:0) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn set_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `259` - // Estimated: `3587` - // Minimum execution time: 16_415_000 picoseconds. - Weight::from_parts(16_906_000, 0) - .saturating_add(Weight::from_parts(0, 3587)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:1 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:2) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn buy_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `540` - // Estimated: `3643` - // Minimum execution time: 35_814_000 picoseconds. - Weight::from_parts(36_569_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs deleted file mode 100644 index d028fb898a4a7cd6147302cbf549ed62095bf43e..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_918_000 picoseconds. - Weight::from_parts(2_421_521, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_252 - .saturating_add(Weight::from_parts(6_625_635, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_304_000 picoseconds. - Weight::from_parts(5_546_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_847_000 picoseconds. - Weight::from_parts(1_224_975, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_818 - .saturating_add(Weight::from_parts(6_891_149, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_269_000 picoseconds. - Weight::from_parts(9_604_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_855_000 picoseconds. - Weight::from_parts(6_965_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_631 - .saturating_add(Weight::from_parts(6_545_496, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs deleted file mode 100644 index 27867e278ed06c21bb488a1841b3d80e491c50d5..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
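The `pallet_utility` weights above carry no storage or proof-size component; `batch(c)` is a fixed dispatch overhead plus a per-inner-call term, and each inner call then brings its own weight. A rough sketch of how those pieces might be combined for a whole batch (constants copied from `batch(c)` above; the inner-call weights are placeholders supplied by the caller, and this is illustrative only, not how the pallet itself computes its weight):

use frame_support::weights::Weight;

// overhead(c) = base + per_call * c, then the inner calls' own weights are
// added on top to give an approximate upper bound for the whole batch.
fn batch_weight_upper_bound(inner: &[Weight]) -> Weight {
    let overhead = Weight::from_parts(2_421_521, 0)
        .saturating_add(Weight::from_parts(6_625_635, 0).saturating_mul(inner.len() as u64));
    inner.iter().fold(overhead, |acc, w| acc.saturating_add(*w))
}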
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=asset-hub-polkadot-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 25_203_000 picoseconds. - Weight::from_parts(25_927_000, 0) - .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 20_113_000 picoseconds. 
- Weight::from_parts(20_439_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 14_959_000 picoseconds. - Weight::from_parts(15_264_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_399_000 picoseconds. - Weight::from_parts(7_674_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_default_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_388_000 picoseconds. 
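Both components of these weights matter on a parachain: the first `from_parts` argument is ref-time, the second is the PoV (proof-size) contribution. A small sketch comparing a call's weight against a block budget on both axes; the `send()` constants are copied from above, while the budget figures are made-up placeholders:

use frame_support::weights::Weight;

// `all_lte` returns true only if both ref-time AND proof size fit the budget.
fn fits_in_budget(call: Weight, budget: Weight) -> bool {
    call.all_lte(budget)
}

fn example() {
    let send = Weight::from_parts(25_927_000, 3540); // pallet_xcm `send()` above
    let budget = Weight::from_parts(500_000_000_000, 5 * 1024 * 1024); // placeholder block budget
    assert!(fits_in_budget(send, budget));
}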
- Weight::from_parts(2_522_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_subscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 28_791_000 picoseconds. - Weight::from_parts(29_443_000, 0) - .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_unsubscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `292` - // Estimated: `3757` - // Minimum execution time: 30_880_000 picoseconds. 
- Weight::from_parts(31_675_000, 0) - .saturating_add(Weight::from_parts(0, 3757)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) - /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_suspension() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_365_000 picoseconds. - Weight::from_parts(2_550_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_supported_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `129` - // Estimated: `11019` - // Minimum execution time: 17_185_000 picoseconds. - Weight::from_parts(17_680_000, 0) - .saturating_add(Weight::from_parts(0, 11019)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notifiers() -> Weight { - // Proof Size summary in bytes: - // Measured: `133` - // Estimated: `11023` - // Minimum execution time: 16_974_000 picoseconds. - Weight::from_parts(17_660_000, 0) - .saturating_add(Weight::from_parts(0, 11023)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn already_notified_target() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `13505` - // Minimum execution time: 18_536_000 picoseconds. - Weight::from_parts(19_292_000, 0) - .saturating_add(Weight::from_parts(0, 13505)) - .saturating_add(T::DbWeight::get().reads(5)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn notify_current_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `6082` - // Minimum execution time: 27_368_000 picoseconds. 
- Weight::from_parts(28_161_000, 0) - .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn notify_target_migration_fail() -> Weight { - // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `8587` - // Minimum execution time: 9_553_000 picoseconds. - Weight::from_parts(9_899_000, 0) - .saturating_add(Weight::from_parts(0, 8587)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notify_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `11030` - // Minimum execution time: 17_445_000 picoseconds. - Weight::from_parts(18_206_000, 0) - .saturating_add(Weight::from_parts(0, 11030)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn migrate_and_notify_old_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `146` - // Estimated: `11036` - // Minimum execution time: 34_200_000 picoseconds. - Weight::from_parts(35_198_000, 0) - .saturating_add(Weight::from_parts(0, 11036)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn new_query() -> Weight { - // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `1554` - // Minimum execution time: 4_679_000 picoseconds. 
-        Weight::from_parts(4_841_000, 0)
-            .saturating_add(Weight::from_parts(0, 1554))
-            .saturating_add(T::DbWeight::get().reads(1))
-            .saturating_add(T::DbWeight::get().writes(2))
-    }
-    /// Storage: `PolkadotXcm::Queries` (r:1 w:1)
-    /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
-    fn take_response() -> Weight {
-        // Proof Size summary in bytes:
-        //  Measured:  `7706`
-        //  Estimated: `11171`
-        // Minimum execution time: 27_281_000 picoseconds.
-        Weight::from_parts(27_694_000, 0)
-            .saturating_add(Weight::from_parts(0, 11171))
-            .saturating_add(T::DbWeight::get().reads(1))
-            .saturating_add(T::DbWeight::get().writes(1))
-    }
-}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/rocksdb_weights.rs
deleted file mode 100644
index 3dd817aa6f137085b0e5fdf2b11b7f50e5c8b002..0000000000000000000000000000000000000000
--- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/rocksdb_weights.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub mod constants {
-    use frame_support::{
-        parameter_types,
-        weights::{constants, RuntimeDbWeight},
-    };
-
-    parameter_types! {
-        /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout
-        /// the runtime.
-        pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight {
-            read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS,
-            write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS,
-        };
-    }
-
-    #[cfg(test)]
-    mod test_db_weights {
-        use super::constants::RocksDbWeight as W;
-        use frame_support::weights::constants;
-
-        /// Checks that all weights exist and have sane values.
-        // NOTE: If this test fails but you are sure that the generated values are fine,
-        // you can delete it.
-        #[test]
-        fn sane() {
-            // At least 1 µs.
-            assert!(
-                W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS,
-                "Read weight should be at least 1 µs."
-            );
-            assert!(
-                W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS,
-                "Write weight should be at least 1 µs."
-            );
-            // At most 1 ms.
-            assert!(
-                W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS,
-                "Read weight should be at most 1 ms."
-            );
-            assert!(
-                W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS,
-                "Write weight should be at most 1 ms."
-            );
-        }
-    }
-}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs
deleted file mode 100644
index 44fcb91d6880cdee33fa9142e7eff5ca01427db4..0000000000000000000000000000000000000000
--- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
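The `RocksDbWeight` constants above (25 µs per read, 100 µs per write) are what the generated weight files multiply by their `reads(..)`/`writes(..)` counts. A minimal sketch of that arithmetic, reusing the same constants; the helper and the chosen counts are illustrative only:

use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_NANOS, RuntimeDbWeight};

// Same values as the RocksDbWeight parameter type above.
const DB: RuntimeDbWeight = RuntimeDbWeight {
    read: 25_000 * WEIGHT_REF_TIME_PER_NANOS,
    write: 100_000 * WEIGHT_REF_TIME_PER_NANOS,
};

fn example() {
    // e.g. a call charged for 2 reads and 1 write.
    let w = DB.reads(2).saturating_add(DB.writes(1));
    assert_eq!(w.ref_time(), (2 * 25_000 + 100_000) * WEIGHT_REF_TIME_PER_NANOS);
}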
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-mod pallet_xcm_benchmarks_fungible;
-mod pallet_xcm_benchmarks_generic;
-
-use crate::{xcm_config::MaxAssetsIntoHolding, Runtime};
-use frame_support::weights::Weight;
-use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
-use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
-use sp_std::prelude::*;
-use xcm::{latest::prelude::*, DoubleEncoded};
-
-trait WeighMultiAssets {
-    fn weigh_multi_assets(&self, weight: Weight) -> Weight;
-}
-
-const MAX_ASSETS: u64 = 100;
-
-impl WeighMultiAssets for MultiAssetFilter {
-    fn weigh_multi_assets(&self, weight: Weight) -> Weight {
-        match self {
-            Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64),
-            Self::Wild(asset) => match asset {
-                All => weight.saturating_mul(MAX_ASSETS),
-                AllOf { fun, .. } => match fun {
-                    WildFungibility::Fungible => weight,
-                    // Magic number 2 has to do with the fact that we could have up to 2 times
-                    // MaxAssetsIntoHolding in the worst-case scenario.
-                    WildFungibility::NonFungible =>
-                        weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64),
-                },
-                AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)),
-                AllOfCounted { count, .. } =>
-                    weight.saturating_mul(MAX_ASSETS.min(*count as u64)),
} => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - }, - } - } -} - -impl WeighMultiAssets for MultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - weight.saturating_mul(self.inner().iter().count() as u64) - } -} - -pub struct AssetHubPolkadotXcmWeight(core::marker::PhantomData); -impl XcmWeightInfo for AssetHubPolkadotXcmWeight { - fn withdraw_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) - } - fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) - } - fn receive_teleported_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) - } - fn query_response( - _query_id: &u64, - _response: &Response, - _max_weight: &Weight, - _querier: &Option, - ) -> Weight { - XcmGeneric::::query_response() - } - fn transfer_asset(assets: &MultiAssets, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::transfer_asset()) - } - fn transfer_reserve_asset( - assets: &MultiAssets, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::transfer_reserve_asset()) - } - fn transact( - _origin_type: &OriginKind, - _require_weight_at_most: &Weight, - _call: &DoubleEncoded, - ) -> Weight { - XcmGeneric::::transact() - } - fn hrmp_new_channel_open_request( - _sender: &u32, - _max_message_size: &u32, - _max_capacity: &u32, - ) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_accepted(_recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn clear_origin() -> Weight { - XcmGeneric::::clear_origin() - } - fn descend_origin(_who: &InteriorMultiLocation) -> Weight { - XcmGeneric::::descend_origin() - } - fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::report_error() - } - - fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) - } - fn deposit_reserve_asset( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::deposit_reserve_asset()) - } - fn exchange_asset(_give: &MultiAssetFilter, _receive: &MultiAssets, _maximal: &bool) -> Weight { - Weight::MAX - } - fn initiate_reserve_withdraw( - assets: &MultiAssetFilter, - _reserve: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::initiate_reserve_withdraw()) - } - fn initiate_teleport( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::initiate_teleport()) - } - fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { - XcmGeneric::::report_holding() - } - fn buy_execution(_fees: &MultiAsset, _weight_limit: &WeightLimit) -> Weight { - XcmGeneric::::buy_execution() - } - fn refund_surplus() -> Weight { - XcmGeneric::::refund_surplus() - } - fn set_error_handler(_xcm: &Xcm) -> Weight { - XcmGeneric::::set_error_handler() - } - fn set_appendix(_xcm: &Xcm) -> Weight { - 
XcmGeneric::::set_appendix() - } - fn clear_error() -> Weight { - XcmGeneric::::clear_error() - } - fn claim_asset(_assets: &MultiAssets, _ticket: &MultiLocation) -> Weight { - XcmGeneric::::claim_asset() - } - fn trap(_code: &u64) -> Weight { - XcmGeneric::::trap() - } - fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { - XcmGeneric::::subscribe_version() - } - fn unsubscribe_version() -> Weight { - XcmGeneric::::unsubscribe_version() - } - fn burn_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::::burn_asset()) - } - fn expect_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::::expect_asset()) - } - fn expect_origin(_origin: &Option) -> Weight { - XcmGeneric::::expect_origin() - } - fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { - XcmGeneric::::expect_error() - } - fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { - XcmGeneric::::expect_transact_status() - } - fn query_pallet(_module_name: &Vec, _response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::query_pallet() - } - fn expect_pallet( - _index: &u32, - _name: &Vec, - _module_name: &Vec, - _crate_major: &u32, - _min_crate_minor: &u32, - ) -> Weight { - XcmGeneric::::expect_pallet() - } - fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::report_transact_status() - } - fn clear_transact_status() -> Weight { - XcmGeneric::::clear_transact_status() - } - fn universal_origin(_: &Junction) -> Weight { - Weight::MAX - } - fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { - Weight::MAX - } - fn lock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn unlock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn note_unlockable(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn request_unlock(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn set_fees_mode(_: &bool) -> Weight { - XcmGeneric::::set_fees_mode() - } - fn set_topic(_topic: &[u8; 32]) -> Weight { - XcmGeneric::::set_topic() - } - fn clear_topic() -> Weight { - XcmGeneric::::clear_topic() - } - fn alias_origin(_: &MultiLocation) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX - } - fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { - XcmGeneric::::unpaid_execution() - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs deleted file mode 100644 index 7200ebc16a2875336fc50e711386cab41df6c9b4..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs +++ /dev/null @@ -1,659 +0,0 @@ -// This file is part of Cumulus. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Tests for the Statemint (Polkadot Assets Hub) chain. - -use asset_hub_polkadot_runtime::xcm_config::{ - AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, DotLocation, - ForeignCreatorsSovereignAccountOf, TrustBackedAssetsPalletLocation, XcmConfig, -}; -pub use asset_hub_polkadot_runtime::{ - AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, - ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, -}; -use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; -use codec::{Decode, Encode}; -use cumulus_primitives_utility::ChargeWeightInFungibles; -use frame_support::{ - assert_noop, assert_ok, - traits::fungibles::InspectEnumerable, - weights::{Weight, WeightToFee as WeightToFeeT}, -}; -use parachains_common::{ - polkadot::fee::WeightToFee, AccountId, AssetHubPolkadotAuraId as AuraId, - AssetIdForTrustBackedAssets, Balance, -}; -use sp_runtime::traits::MaybeEquivalence; -use xcm::latest::prelude::*; -use xcm_executor::traits::{Identity, JustTry, WeightTrader}; - -const ALICE: [u8; 32] = [1u8; 32]; -const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; - -type AssetIdForTrustBackedAssetsConvert = - assets_common::AssetIdForTrustBackedAssetsConvert; - -type RuntimeHelper = asset_test_utils::RuntimeHelper; - -fn collator_session_keys() -> CollatorSessionKeys { - CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - ) -} - -#[test] -fn test_asset_xcm_trader() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - let minimum_asset_balance = 333333333_u128; - let local_asset_id = 1; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - local_asset_id.into(), - AccountId::from(ALICE).into(), - true, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - local_asset_id.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - // get asset id as multilocation - let asset_multilocation = - AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(); - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 400e9 weight - // Because of the ED being higher in kusama's asset hub - // and not to complicate things, we use a little - // bit more of weight - let bought = Weight::from_parts(400_000_000_000u64, 0); - - // Lets calculate amount needed - let asset_amount_needed = - AssetFeeAsExistentialDepositMultiplierFeeCharger::charge_weight_in_fungibles( - local_asset_id, - bought, - ) - .expect("failed to compute"); - - // Lets pay with: asset_amount_needed + asset_amount_extra - let asset_amount_extra = 100_u128; - let asset: MultiAsset = - (asset_multilocation, asset_amount_needed + asset_amount_extra).into(); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Lets buy_weight and make sure buy_weight does not return an error - let 
unused_assets = trader.buy_weight(bought, asset.into(), &ctx).expect("Expected Ok"); - // Check whether a correct amount of unused assets is returned - assert_ok!( - unused_assets.ensure_contains(&(asset_multilocation, asset_amount_extra).into()) - ); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has received the amount - assert_eq!( - Assets::balance(local_asset_id, AccountId::from(ALICE)), - minimum_asset_balance + asset_amount_needed - ); - - // We also need to ensure the total supply increased - assert_eq!( - Assets::total_supply(local_asset_id), - minimum_asset_balance + asset_amount_needed - ); - }); -} - -#[test] -fn test_asset_xcm_trader_with_refund() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - 1.into(), - AccountId::from(ALICE).into(), - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 400e9 weight - // Because of the ED being higher in kusama's asset hub - // and not to complicate things, we use a little - // bit more of weight - let bought = Weight::from_parts(400_000_000_000u64, 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - // lets calculate amount needed - let amount_bought = WeightToFee::weight_to_fee(&bought); - - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - - // Make sure buy_weight does not return an error - assert_ok!(trader.buy_weight(bought, asset.clone().into(), &ctx)); - - // Make sure again buy_weight does return an error - // This assert relies on the fact, that we use `TakeFirstAssetTrader` in `WeightTrader` - // tuple chain, which cannot be called twice - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // We actually use half of the weight - let weight_used = bought / 2; - - // Make sure refurnd works. 
- let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); - - assert_eq!( - trader.refund_weight(bought - weight_used, &ctx), - Some((asset_multilocation, amount_refunded).into()) - ); - - // Drop trader - drop(trader); - - // We only should have paid for half of the bought weight - let fees_paid = WeightToFee::weight_to_fee(&weight_used); - - assert_eq!( - Assets::balance(1, AccountId::from(ALICE)), - ExistentialDeposit::get() + fees_paid - ); - - // We also need to ensure the total supply increased - assert_eq!(Assets::total_supply(1), ExistentialDeposit::get() + fees_paid); - }); -} - -#[test] -fn test_asset_xcm_trader_refund_not_possible_since_amount_less_than_ed() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 50e9 weight - // Because of the ED being higher in kusama's asset hub - // and not to complicate things, we use a little - // bit more of weight - let bought = Weight::from_parts(50_000_000_000u64, 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let amount_bought = WeightToFee::weight_to_fee(&bought); - - assert!( - amount_bought < ExistentialDeposit::get(), - "we are testing what happens when the amount does not exceed ED" - ); - - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - - // Buy weight should return an error - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // not credited since the ED is higher than this value - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), 0); - - // We also need to ensure the total supply did not increase - assert_eq!(Assets::total_supply(1), 0); - }); -} - -#[test] -fn test_that_buying_ed_refund_does_not_refund() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are gonna buy ED - let bought = Weight::from_parts(ExistentialDeposit::get().try_into().unwrap(), 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let amount_bought = 
WeightToFee::weight_to_fee(&bought); - - assert!( - amount_bought < ExistentialDeposit::get(), - "we are testing what happens when the amount does not exceed ED" - ); - - // We know we will have to buy at least ED, so lets make sure first it will - // fail with a payment of less than ED - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // Now lets buy ED at least - let asset: MultiAsset = (asset_multilocation, ExistentialDeposit::get()).into(); - - // Buy weight should work - assert_ok!(trader.buy_weight(bought, asset.into(), &ctx)); - - // Should return None. We have a specific check making sure we dont go below ED for - // drop payment - assert_eq!(trader.refund_weight(bought, &ctx), None); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has received the amount - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), ExistentialDeposit::get()); - - // We also need to ensure the total supply increased - assert_eq!(Assets::total_supply(1), ExistentialDeposit::get()); - }); -} - -#[test] -fn test_asset_xcm_trader_not_possible_for_non_sufficient_assets() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // Create a non-sufficient asset - let minimum_asset_balance = 1_000_000_u128; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - false, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - 1.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 400e9 weight - // Because of the ED being higher in kusama's asset hub - // and not to complicate things, we use a little - // bit more of weight - let bought = Weight::from_parts(400_000_000_000u64, 0); - - // lets calculate amount needed - let asset_amount_needed = WeightToFee::weight_to_fee(&bought); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let asset: MultiAsset = (asset_multilocation, asset_amount_needed).into(); - - // Make sure again buy_weight does return an error - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has NOT received the amount - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), minimum_asset_balance); - - // We also need to ensure the total supply NOT increased - assert_eq!(Assets::total_supply(1), minimum_asset_balance); - }); -} - -#[test] -fn test_assets_balances_api_works() { - use assets_common::runtime_api::runtime_decl_for_fungibles_api::FungiblesApi; - - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - let local_asset_id = 1; - 
let foreign_asset_id_multilocation = - MultiLocation { parents: 1, interior: X2(Parachain(1234), GeneralIndex(12345)) }; - - // check before - assert_eq!(Assets::balance(local_asset_id, AccountId::from(ALICE)), 0); - assert_eq!( - ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), - 0 - ); - assert_eq!(Balances::free_balance(AccountId::from(ALICE)), 0); - assert!(Runtime::query_account_balances(AccountId::from(ALICE)) - .unwrap() - .try_as::() - .unwrap() - .is_none()); - - // Drip some balance - use frame_support::traits::fungible::Mutate; - let some_currency = ExistentialDeposit::get(); - Balances::mint_into(&AccountId::from(ALICE), some_currency).unwrap(); - - // We need root origin to create a sufficient asset - let minimum_asset_balance = 3333333_u128; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - local_asset_id.into(), - AccountId::from(ALICE).into(), - true, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - local_asset_id.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - // create foreign asset - let foreign_asset_minimum_asset_balance = 3333333_u128; - assert_ok!(ForeignAssets::force_create( - RuntimeHelper::root_origin(), - foreign_asset_id_multilocation, - AccountId::from(SOME_ASSET_ADMIN).into(), - false, - foreign_asset_minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(ForeignAssets::mint( - RuntimeHelper::origin_of(AccountId::from(SOME_ASSET_ADMIN)), - foreign_asset_id_multilocation, - AccountId::from(ALICE).into(), - 6 * foreign_asset_minimum_asset_balance - )); - - // check after - assert_eq!( - Assets::balance(local_asset_id, AccountId::from(ALICE)), - minimum_asset_balance - ); - assert_eq!( - ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), - 6 * minimum_asset_balance - ); - assert_eq!(Balances::free_balance(AccountId::from(ALICE)), some_currency); - - let result: MultiAssets = Runtime::query_account_balances(AccountId::from(ALICE)) - .unwrap() - .try_into() - .unwrap(); - assert_eq!(result.len(), 3); - - // check currency - assert!(result.inner().iter().any(|asset| asset.eq( - &assets_common::fungible_conversion::convert_balance::( - some_currency - ) - .unwrap() - ))); - // check trusted asset - assert!(result.inner().iter().any(|asset| asset.eq(&( - AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(), - minimum_asset_balance - ) - .into()))); - // check foreign asset - assert!(result.inner().iter().any(|asset| asset.eq(&( - Identity::convert_back(&foreign_asset_id_multilocation).unwrap(), - 6 * foreign_asset_minimum_asset_balance - ) - .into()))); - }); -} - -asset_test_utils::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - 1000 -); - -asset_test_utils::include_teleports_for_foreign_assets_works!( - Runtime, - AllPalletsWithoutSystem, - 
XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - ForeignCreatorsSovereignAccountOf, - ForeignAssetsInstance, - asset_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_local_consensus_currency_works!( - Runtime, - XcmConfig, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( - asset_transactor_transfer_with_trust_backed_assets_works, - Runtime, - XcmConfig, - TrustBackedAssetsInstance, - AssetIdForTrustBackedAssets, - AssetIdForTrustBackedAssetsConvert, - collator_session_keys(), - ExistentialDeposit::get(), - 12345, - Box::new(|| { - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( - asset_transactor_transfer_with_foreign_assets_works, - Runtime, - XcmConfig, - ForeignAssetsInstance, - MultiLocation, - JustTry, - asset_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - MultiLocation { parents: 1, interior: X2(Parachain(1313), GeneralIndex(12345)) }, - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_parachain_assets_works!( - Runtime, - XcmConfig, - WeightToFee, - ForeignCreatorsSovereignAccountOf, - ForeignAssetsInstance, - MultiLocation, - JustTry, - asset_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - AssetDeposit::get(), - MetadataDepositBase::get(), - MetadataDepositPerByte::get(), - Box::new(|pallet_asset_call| RuntimeCall::ForeignAssets(pallet_asset_call).encode()), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ForeignAssets(pallet_asset_event)) => Some(pallet_asset_event), - _ => None, - } - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); - }) -); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml 
b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index f4f33677d4a23c9fa2cf0b6890a1bfa1b1427613..43579cfe5bb972ddb6f652f8c86d8c16c4fb3347 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Rococo variant of Asset Hub parachain runtime" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } @@ -14,65 +17,64 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false} -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = 
"../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false } +pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false } +pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false, optional = true } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", 
default-features = false} -sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "scale-info", "num-traits"] } +primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } # Polkadot -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = 
"../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -86,10 +88,10 @@ assets-common = { path = "../common", default-features = false } pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-asset-hub-wococo = { path = "../../../../../bridges/primitives/chain-asset-hub-wococo", default-features = false } bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/parachain/primitives/router", default-features = false } +snowbridge-rococo-common = { path = "../../../../../bridges/snowbridge/parachain/runtime/rococo-common", default-features = false } [dev-dependencies] asset-test-utils = { path = "../test-utils" } @@ -98,7 +100,7 @@ asset-test-utils = { path = "../test-utils" } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] # When enabled the `state_version` is set to `1`. # This means that the chain will start using the new state format. The migration is lazy, so # it requires to write a storage value to use the new state format. 
To migrate all the other @@ -106,10 +108,9 @@ default = [ "std" ] # This pallet will migrate the entire state, controlled through some account. # # This feature should be removed when the main-net will be migrated. -state-trie-version-1 = [ "pallet-state-trie-migration" ] +state-trie-version-1 = ["pallet-state-trie-migration"] runtime-benchmarks = [ "assets-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -138,13 +139,14 @@ runtime-benchmarks = [ "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", + "snowbridge-rococo-common/runtime-benchmarks", + "snowbridge-router-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -180,13 +182,10 @@ std = [ "assets-common/std", "bp-asset-hub-rococo/std", "bp-asset-hub-westend/std", - "bp-asset-hub-wococo/std", "bp-bridge-hub-rococo/std", "bp-bridge-hub-westend/std", - "bp-bridge-hub-wococo/std", "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -232,6 +231,8 @@ std = [ "primitive-types/std", "rococo-runtime-constants/std", "scale-info/std", + "snowbridge-rococo-common/std", + "snowbridge-router-primitives/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", @@ -252,9 +253,9 @@ std = [ "xcm/std", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index f649ebedeff9a3b5d3165b9b23913cb1ca834da1..61939a2c80a77ac948c07a279bfc238348699061 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -16,12 +16,6 @@ //! # Asset Hub Rococo Runtime //! //! Asset Hub Rococo, formerly known as "Rockmine", is the test network for its Kusama cousin. -//! -//! This runtime is also used for Asset Hub Wococo. But we dont want to create another exact copy of -//! Asset Hub Rococo, so we injected some tweaks backed by `RuntimeFlavor` and `pub storage Flavor: -//! RuntimeFlavor`. (For example this is needed for successful asset transfer between Asset Hub -//! Rococo and Asset Hub Wococo, where we need to have correct `xcm_config::UniversalLocation` with -//! correct `GlobalConsensus`. 
#![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit = "256"] @@ -35,17 +29,18 @@ pub mod xcm_config; use assets_common::{ foreign_creators::ForeignCreators, - local_and_foreign_assets::{LocalAndForeignAssets, MultiLocationConverter}, - matching::FromSiblingParachain, + local_and_foreign_assets::{LocalFromLeft, TargetFromLeft}, + matching::{FromNetwork, FromSiblingParachain}, AssetIdForTrustBackedAssetsConvert, MultiLocationForAssetId, }; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use cumulus_primitives_core::AggregateMessageOrigin; +use snowbridge_rococo_common::EthereumNetwork; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify}, + traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Permill, }; @@ -58,13 +53,14 @@ use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_primitives_core::ParaId; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, ord_parameter_types, parameter_types, traits::{ - AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, - Equals, InstanceFilter, TransformOrigin, + fungible, fungibles, tokens::imbalance::ResolveAssetTo, AsEnsureOriginWithArg, ConstBool, + ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Equals, InstanceFilter, + TransformOrigin, }, weights::{ConstantMultiplier, Weight}, BoundedVec, PalletId, @@ -88,8 +84,9 @@ use parachains_common::{ use sp_runtime::{Perbill, RuntimeDebug}; use xcm::opaque::v3::MultiLocation; use xcm_config::{ - ForeignAssetsConvertedConcreteId, GovernanceLocation, PoolAssetsConvertedConcreteId, - TokenLocation, TrustBackedAssetsConvertedConcreteId, + ForeignAssetsConvertedConcreteId, ForeignCreatorsSovereignAccountOf, GovernanceLocation, + PoolAssetsConvertedConcreteId, TokenLocation, TrustBackedAssetsConvertedConcreteId, + TrustBackedAssetsPalletLocation, }; #[cfg(any(feature = "std", test))] @@ -100,21 +97,8 @@ use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use xcm::latest::prelude::*; -use crate::xcm_config::{ - ForeignCreatorsSovereignAccountOf, LocalAndForeignAssetsMultiLocationMatcher, - TrustBackedAssetsPalletLocation, -}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -/// Enum for handling differences in the runtime configuration for `AssetHubRococo` vs. -/// `AssetHubWococo`. -#[derive(Default, Eq, PartialEq, Debug, Clone, Copy, Decode, Encode)] -pub enum RuntimeFlavor { - #[default] - Rococo, - Wococo, -} - impl_opaque_keys! 
{ pub struct SessionKeys { pub aura: Aura, @@ -127,10 +111,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_005_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 13, + transaction_version: 14, state_version: 1, }; @@ -140,10 +124,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 13, + transaction_version: 14, state_version: 0, }; @@ -179,25 +163,17 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = SS58Prefix; @@ -302,8 +278,6 @@ impl pallet_assets::Config for Runtime { parameter_types! { pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); - pub const AllowMultiAssetPools: bool = false; - // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); } @@ -338,36 +312,50 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +/// Union fungibles implementation for `Assets`` and `ForeignAssets`. +pub type LocalAndForeignAssets = fungibles::UnionOf< + Assets, + ForeignAssets, + LocalFromLeft< + AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssets, + >, + MultiLocation, + AccountId, +>; + impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type HigherPrecisionBalance = sp_core::U256; - type Currency = Balances; - type AssetBalance = Balance; - type AssetId = MultiLocation; - type Assets = LocalAndForeignAssets< - Assets, - AssetIdForTrustBackedAssetsConvert, - ForeignAssets, + type AssetKind = MultiLocation; + type Assets = fungible::UnionOf< + Balances, + LocalAndForeignAssets, + TargetFromLeft, + Self::AssetKind, + Self::AccountId, >; - type PoolAssets = PoolAssets; + type PoolId = (Self::AssetKind, Self::AssetKind); + type PoolLocator = + pallet_asset_conversion::WithFirstAsset; type PoolAssetId = u32; + type PoolAssets = PoolAssets; type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam - type PoolSetupFeeReceiver = AssetConversionOrigin; - // should be non-zero if `AllowMultiAssetPools` is true, otherwise can be zero. 
+ type PoolSetupFeeAsset = TokenLocation; + type PoolSetupFeeTarget = ResolveAssetTo; type LiquidityWithdrawalFee = LiquidityWithdrawalFee; type LPFee = ConstU32<3>; type PalletId = AssetConversionPalletId; - type AllowMultiAssetPools = AllowMultiAssetPools; - type MaxSwapPathLength = ConstU32<4>; - type MultiAssetId = Box; - type MultiAssetIdConverter = - MultiLocationConverter; + type MaxSwapPathLength = ConstU32<3>; type MintMinLiquidity = ConstU128<100>; type WeightInfo = weights::pallet_asset_conversion::WeightInfo; #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = - crate::xcm_config::BenchmarkMultiLocationConverter>; + type BenchmarkHelper = assets_common::benchmarks::AssetPairFactory< + TokenLocation, + parachain_info::Pallet, + xcm_config::AssetsPalletIndex, + >; } parameter_types! { @@ -392,7 +380,10 @@ impl pallet_assets::Config for Runtime { type AssetIdParameter = MultiLocationForAssetId; type Currency = Balances; type CreateOrigin = ForeignCreators< - (FromSiblingParachain>,), + ( + FromSiblingParachain>, + FromNetwork, + ), ForeignCreatorsSovereignAccountOf, AccountId, >; @@ -698,12 +689,6 @@ parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - parameter_types! { pub const Period: u32 = 6 * HOURS; pub const Offset: u32 = 0; @@ -763,12 +748,9 @@ impl pallet_collator_selection::Config for Runtime { impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type Fungibles = LocalAndForeignAssets< - Assets, - AssetIdForTrustBackedAssetsConvert, - ForeignAssets, - >; - type OnChargeAssetTransaction = AssetConversionAdapter; + type Fungibles = LocalAndForeignAssets; + type OnChargeAssetTransaction = + AssetConversionAdapter; } parameter_types! { @@ -866,77 +848,16 @@ impl pallet_nfts::Config for Runtime { type Helper = (); } -/// XCM router instance to BridgeHub with bridging capabilities for `Wococo` global -/// consensus with dynamic fees and back-pressure. -pub type ToWococoXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance1; -impl pallet_xcm_bridge_hub_router::Config for Runtime { - type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_wococo::WeightInfo; - - type UniversalLocation = xcm_config::UniversalLocation; - type BridgedNetworkId = xcm_config::bridging::to_wococo::WococoNetwork; - type Bridges = xcm_config::bridging::NetworkExportTable; - - #[cfg(not(feature = "runtime-benchmarks"))] - type BridgeHubOrigin = EnsureXcm>; - #[cfg(feature = "runtime-benchmarks")] - type BridgeHubOrigin = EitherOfDiverse< - // for running benchmarks - EnsureRoot, - // for running tests with `--feature runtime-benchmarks` - EnsureXcm>, - >; - - type ToBridgeHubSender = XcmpQueue; - type WithBridgeHubChannel = - cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider< - xcm_config::bridging::SiblingBridgeHubParaId, - Runtime, - >; - - type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee; - type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; -} - -/// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global -/// consensus with dynamic fees and back-pressure. 
-pub type ToRococoXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance2;
-impl pallet_xcm_bridge_hub_router::Config for Runtime {
-	type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_rococo::WeightInfo;
-
-	type UniversalLocation = xcm_config::UniversalLocation;
-	type BridgedNetworkId = xcm_config::bridging::to_rococo::RococoNetwork;
-	type Bridges = xcm_config::bridging::NetworkExportTable;
-
-	#[cfg(not(feature = "runtime-benchmarks"))]
-	type BridgeHubOrigin = EnsureXcm>;
-	#[cfg(feature = "runtime-benchmarks")]
-	type BridgeHubOrigin = EitherOfDiverse<
-		// for running benchmarks
-		EnsureRoot,
-		// for running tests with `--feature runtime-benchmarks`
-		EnsureXcm>,
-	>;
-
-	type ToBridgeHubSender = XcmpQueue;
-	type WithBridgeHubChannel =
-		cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider<
-			xcm_config::bridging::SiblingBridgeHubParaId,
-			Runtime,
-		>;
-
-	type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee;
-	type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId;
-}
-
 /// XCM router instance to BridgeHub with bridging capabilities for `Westend` global
 /// consensus with dynamic fees and back-pressure.
 pub type ToWestendXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance3;
 impl pallet_xcm_bridge_hub_router::Config for Runtime {
-	type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_westend::WeightInfo;
+	type WeightInfo = weights::pallet_xcm_bridge_hub_router::WeightInfo;
 
 	type UniversalLocation = xcm_config::UniversalLocation;
 	type BridgedNetworkId = xcm_config::bridging::to_westend::WestendNetwork;
 	type Bridges = xcm_config::bridging::NetworkExportTable;
+	type DestinationVersion = PolkadotXcm;
 
 	#[cfg(not(feature = "runtime-benchmarks"))]
 	type BridgeHubOrigin = EnsureXcm>;
@@ -987,7 +908,6 @@ construct_runtime!(
 		XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30,
 		PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31,
 		CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32,
-		DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33,
 		MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34,
 
 		// Handy utilities.
@@ -996,10 +916,7 @@ construct_runtime!(
 		Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42,
 
 		// Bridge utilities.
-		ToWococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 43,
-		ToRococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 44,
 		ToWestendXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 45,
-
 		// The main stage.
 		Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50,
 		Uniques: pallet_uniques::{Pallet, Call, Storage, Event} = 51,
@@ -1038,7 +955,64 @@ pub type SignedExtra = (
 pub type UncheckedExtrinsic = generic::UncheckedExtrinsic;
 
 /// Migrations to apply on runtime upgrade.
-pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1,);
+pub type Migrations = (
+	pallet_collator_selection::migration::v1::MigrateToV1,
+	InitStorageVersions,
+	// unreleased
+	cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4,
+);
+
+/// Migration to initialize storage versions for pallets added after genesis.
+///
+/// This is now done automatically (see ),
+/// but some pallets had made it in and had storage set in them for this parachain before it was
+/// merged.
+pub struct InitStorageVersions; + +impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { + fn on_runtime_upgrade() -> Weight { + use frame_support::traits::{GetStorageVersion, StorageVersion}; + + let mut writes = 0; + + if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { + PolkadotXcm::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Multisig::on_chain_storage_version() == StorageVersion::new(0) { + Multisig::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Assets::on_chain_storage_version() == StorageVersion::new(0) { + Assets::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Uniques::on_chain_storage_version() == StorageVersion::new(0) { + Uniques::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Nfts::on_chain_storage_version() == StorageVersion::new(0) { + Nfts::current_storage_version().put::(); + writes.saturating_inc(); + } + + if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { + ForeignAssets::current_storage_version().put::(); + writes.saturating_inc(); + } + + if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { + PoolAssets::current_storage_version().put::(); + writes.saturating_inc(); + } + + ::DbWeight::get().reads_writes(7, writes) + } +} /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -1073,11 +1047,9 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] - [pallet_xcm_bridge_hub_router, ToWococo] [pallet_xcm_bridge_hub_router, ToWestend] - [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1181,18 +1153,19 @@ impl_runtime_apis! { impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - u128, - Box, + MultiLocation, > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: MultiLocation, asset2: MultiLocation, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { + + fn quote_price_tokens_for_exact_tokens(asset1: MultiLocation, asset2: MultiLocation, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: Box, asset2: Box) -> Option<(Balance, Balance)> { - AssetConversion::get_reserves(&asset1, &asset2).ok() + + fn get_reserves(asset1: MultiLocation, asset2: MultiLocation) -> Option<(Balance, Balance)> { + AssetConversion::get_reserves(asset1, asset2).ok() } } @@ -1315,6 +1288,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! 
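// --- Editor's sketch (not part of this patch) ---------------------------------
// A minimal, self-contained illustration of the storage-version initialization
// pattern that `InitStorageVersions` above applies to several pallets: if a
// pallet's on-chain storage version was never written, copy the in-code version
// into storage once so that later versioned migrations start from the correct
// point. The choice of `pallet_balances` and the names below are illustrative
// assumptions, not code from this runtime.

use core::marker::PhantomData;
use frame_support::{
    traits::{Get, GetStorageVersion, OnRuntimeUpgrade, StorageVersion},
    weights::Weight,
};

pub struct InitSinglePalletVersion<T>(PhantomData<T>);

impl<T: pallet_balances::Config> OnRuntimeUpgrade for InitSinglePalletVersion<T> {
    fn on_runtime_upgrade() -> Weight {
        if pallet_balances::Pallet::<T>::on_chain_storage_version() == StorageVersion::new(0) {
            // Write the current in-code version into storage exactly once.
            pallet_balances::Pallet::<T>::current_storage_version()
                .put::<pallet_balances::Pallet<T>>();
            return T::DbWeight::get().reads_writes(1, 1);
        }
        T::DbWeight::get().reads(1)
    }
}
// -------------------------------------------------------------------------------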
@@ -1331,9 +1305,7 @@ impl_runtime_apis! { type Foreign = pallet_assets::Pallet::; type Pool = pallet_assets::Pallet::; - type ToWococo = XcmBridgeHubRouterBench; type ToWestend = XcmBridgeHubRouterBench; - type ToRococo = XcmBridgeHubRouterBench; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -1368,44 +1340,114 @@ impl_runtime_apis! { Config as XcmBridgeHubRouterConfig, }; - impl XcmBridgeHubRouterConfig for Runtime { - fn make_congested() { - cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) } - fn ensure_bridged_target_destination() -> MultiLocation { + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() + random_para_id.into() ); - xcm_config::bridging::to_wococo::AssetHubWococo::get() + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) } - } - impl XcmBridgeHubRouterConfig for Runtime { - fn make_congested() { - cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + // Transfer to Relay some local AH asset (local-reserve-transfer) while paying + // fees using teleported native token. 
+ // (We don't care that Relay doesn't accept incoming unknown AH local asset) + let dest = Parent.into(); + + let fee_amount = EXISTENTIAL_DEPOSIT; + let fee_asset: MultiAsset = (MultiLocation::parent(), fee_amount).into(); + + let who = frame_benchmarking::whitelisted_caller(); + // Give some multiple of the existential deposit + let balance = fee_amount + EXISTENTIAL_DEPOSIT * 1000; + let _ = >::make_free_balance_be( + &who, balance, ); - } - fn ensure_bridged_target_destination() -> MultiLocation { - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() + // verify initial balance + assert_eq!(Balances::free_balance(&who), balance); + + // set up local asset + let asset_amount = 10u128; + let initial_asset_amount = asset_amount * 10; + let (asset_id, _, _) = pallet_assets::benchmarking::create_default_minted_asset::< + Runtime, + pallet_assets::Instance1 + >(true, initial_asset_amount); + let asset_location = MultiLocation::new( + 0, + X2(PalletInstance(50), GeneralIndex(u32::from(asset_id).into())) ); - xcm_config::bridging::to_westend::AssetHubWestend::get() + let transfer_asset: MultiAsset = (asset_location, asset_amount).into(); + + let assets: MultiAssets = vec![fee_asset.clone(), transfer_asset].into(); + let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; + + // verify transferred successfully + let verify = Box::new(move || { + // verify native balance after transfer, decreased by transferred fee amount + // (plus transport fees) + assert!(Balances::free_balance(&who) <= balance - fee_amount); + // verify asset balance decreased by exactly transferred amount + assert_eq!( + Assets::balance(asset_id.into(), &who), + initial_asset_amount - asset_amount, + ); + }); + Some((assets, fee_index as u32, dest, verify)) } } - impl XcmBridgeHubRouterConfig for Runtime { + + impl XcmBridgeHubRouterConfig for Runtime { fn make_congested() { cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( xcm_config::bridging::SiblingBridgeHubParaId::get().into() ); } - fn ensure_bridged_target_destination() -> MultiLocation { - xcm_config::Flavor::set(&RuntimeFlavor::Wococo); + fn ensure_bridged_target_destination() -> Result { ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( xcm_config::bridging::SiblingBridgeHubParaId::get().into() ); - xcm_config::bridging::to_rococo::AssetHubRococo::get() + let bridged_asset_hub = xcm_config::bridging::to_westend::AssetHubWestend::get(); + let _ = PolkadotXcm::force_xcm_version( + RuntimeOrigin::root(), + Box::new(bridged_asset_hub), + XCM_VERSION, + ).map_err(|e| { + log::error!( + "Failed to dispatch `force_xcm_version({:?}, {:?}, {:?})`, error: {:?}", + RuntimeOrigin::root(), + bridged_asset_hub, + XCM_VERSION, + e + ); + BenchmarkError::Stop("XcmVersion was not stored!") + })?; + Ok(bridged_asset_hub) } } @@ -1424,7 +1466,7 @@ impl_runtime_apis! { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, + xcm_config::XcmConfig, ExistentialDepositMultiAsset, xcm_config::PriceForParentDelivery, >; @@ -1464,11 +1506,11 @@ impl_runtime_apis! 
{ MultiAsset { fun: Fungible(UNITS), id: Concrete(TokenLocation::get()) }, )); pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - // AssetHubRococo trusts AssetHubWococo as reserve for WOCs + // AssetHubRococo trusts AssetHubWestend as reserve for WNDs pub TrustedReserve: Option<(MultiLocation, MultiAsset)> = Some( ( - xcm_config::bridging::to_wococo::AssetHubWococo::get(), - MultiAsset::from((xcm_config::bridging::to_wococo::WocLocation::get(), 1000000000000 as u128)) + xcm_config::bridging::to_westend::AssetHubWestend::get(), + MultiAsset::from((xcm_config::bridging::to_westend::WndLocation::get(), 1000000000000 as u128)) ) ); } @@ -1543,9 +1585,7 @@ impl_runtime_apis! { type Foreign = pallet_assets::Pallet::; type Pool = pallet_assets::Pallet::; - type ToWococo = XcmBridgeHubRouterBench; type ToWestend = XcmBridgeHubRouterBench; - type ToRococo = XcmBridgeHubRouterBench; let whitelist: Vec = vec![ // Block Number diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. 
- Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system.rs index 4f993155c19c7fc521f00f00b4841a15064d15c8..b257c3825a7e756cf5260e3b14e17f78b34d36c8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system.rs @@ -152,4 +152,31 @@ impl frame_system::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index 0fc36d74ff053712c3e3c3395c965e593cd2bcdf..fa9e86102c619c9ff68316cae2a27a7f79fea2e6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -15,7 +15,6 @@ // along with Cumulus. If not, see . 
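// --- Editor's sketch (illustrative, not generated by the benchmark CLI) --------
// The weight functions re-exported from these modules all share one shape: a
// benchmarked execution-time and proof-size component plus database read/write
// costs charged at the runtime's configured `DbWeight`. The struct name and the
// numbers below are assumptions for illustration only.

use core::marker::PhantomData;
use frame_support::{traits::Get, weights::Weight};

pub struct ExampleWeight<T>(PhantomData<T>);

impl<T: frame_system::Config> ExampleWeight<T> {
    fn example_extrinsic() -> Weight {
        // ~25 µs of ref time (25_000_000 picoseconds), a 3610-byte proof-size
        // bound, plus 6 storage reads and 2 writes at the configured DB weights.
        Weight::from_parts(25_000_000, 3610)
            .saturating_add(T::DbWeight::get().reads(6))
            .saturating_add(T::DbWeight::get().writes(2))
    }
}
// -------------------------------------------------------------------------------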
pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; @@ -36,14 +35,11 @@ pub mod pallet_timestamp; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; -pub mod pallet_xcm_bridge_hub_router_to_rococo; -pub mod pallet_xcm_bridge_hub_router_to_westend; -pub mod pallet_xcm_bridge_hub_router_to_wococo; +pub mod pallet_xcm_bridge_hub_router; pub mod paritydb_weights; pub mod rocksdb_weights; pub mod xcm; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs index 4514fbfa8763ad40a65ad604eadde797c6f6bf9b..0486932d1d6e44a7fe4a1c01640d6e3329577a2c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs @@ -17,25 +17,23 @@ //! Autogenerated weights for `pallet_asset_conversion` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot-parachain +// ./target/debug/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 +// --chain=asset-hub-rococo-dev +// --steps=20 +// --repeat=2 +// --pallet=pallet-asset-conversion // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/cumulus/.git/.artifacts/bench.json -// --pallet=pallet_asset_conversion -// --chain=asset-hub-rococo-dev -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,9 +48,7 @@ pub struct WeightInfo(PhantomData); impl pallet_asset_conversion::WeightInfo for WeightInfo { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Account` (r:1 w:1) /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) @@ -66,22 +62,22 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `480` - // Estimated: `6196` - // Minimum execution time: 88_484_000 picoseconds. - Weight::from_parts(92_964_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `408` + // Estimated: `4689` + // Minimum execution time: 906_000_000 picoseconds. + Weight::from_parts(945_000_000, 0) + .saturating_add(Weight::from_parts(0, 4689)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Asset` (r:1 w:1) /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Account` (r:2 w:2) /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Account` (r:2 w:2) @@ -90,34 +86,32 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `1117` // Estimated: `7404` - // Minimum execution time: 153_015_000 picoseconds. 
- Weight::from_parts(157_018_000, 0) + // Minimum execution time: 1_609_000_000 picoseconds. + Weight::from_parts(1_631_000_000, 0) .saturating_add(Weight::from_parts(0, 7404)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Asset` (r:1 w:1) /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Account` (r:2 w:2) /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) /// Storage: `PoolAssets::Account` (r:1 w:1) /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn remove_liquidity() -> Weight { // Proof Size summary in bytes: // Measured: `1106` // Estimated: `7404` - // Minimum execution time: 141_726_000 picoseconds. - Weight::from_parts(147_865_000, 0) + // Minimum execution time: 1_480_000_000 picoseconds. + Weight::from_parts(1_506_000_000, 0) .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `ForeignAssets::Asset` (r:2 w:2) @@ -126,15 +120,19 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn swap_exact_tokens_for_tokens() -> Weight { + /// The range of component `n` is `[2, 3]`. + fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1148` - // Estimated: `13818` - // Minimum execution time: 168_619_000 picoseconds. - Weight::from_parts(174_283_000, 0) - .saturating_add(Weight::from_parts(0, 13818)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) + // Measured: `0 + n * (557 ±0)` + // Estimated: `7404 + n * (393 ±73)` + // Minimum execution time: 933_000_000 picoseconds. 
+ Weight::from_parts(950_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + // Standard Error: 18_792_550 + .saturating_add(Weight::from_parts(46_683_673, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -142,14 +140,18 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Account` (r:4 w:4) /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn swap_tokens_for_exact_tokens() -> Weight { + /// The range of component `n` is `[2, 3]`. + fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1148` - // Estimated: `13818` - // Minimum execution time: 171_565_000 picoseconds. - Weight::from_parts(173_702_000, 0) - .saturating_add(Weight::from_parts(0, 13818)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) + // Measured: `0 + n * (557 ±0)` + // Estimated: `7404 + n * (393 ±180)` + // Minimum execution time: 936_000_000 picoseconds. + Weight::from_parts(954_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + // Standard Error: 15_942_881 + .saturating_add(Weight::from_parts(39_755_102, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs index d98abbbc2d3db26bb757597f9e859752cfa8ed88..aeda7bbbb6a7e47d8d1e1f403c7419439f7f5953 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs @@ -124,7 +124,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +178,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index afe85fdaf28820f52c7a134e66c6884d96dd6aef..f8820bbb58cb24afe1afe034e131414368089444 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,37 +64,95 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 24_498_000 picoseconds. - Weight::from_parts(25_385_000, 0) + // Minimum execution time: 25_003_000 picoseconds. 
+ Weight::from_parts(25_800_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `39` - // Estimated: `3504` - // Minimum execution time: 19_746_000 picoseconds. - Weight::from_parts(20_535_000, 0) - .saturating_add(Weight::from_parts(0, 3504)) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 88_832_000 picoseconds. 
+ Weight::from_parts(90_491_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `39` - // Estimated: `3504` - // Minimum execution time: 15_059_000 picoseconds. - Weight::from_parts(15_386_000, 0) - .saturating_add(Weight::from_parts(0, 3504)) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `400` + // Estimated: `6196` + // Minimum execution time: 138_911_000 picoseconds. 
+ Weight::from_parts(142_483_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `496` + // Estimated: `6208` + // Minimum execution time: 146_932_000 picoseconds. + Weight::from_parts(153_200_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -112,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_108_000 picoseconds. - Weight::from_parts(7_458_000, 0) + // Minimum execution time: 7_081_000 picoseconds. + Weight::from_parts(7_397_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -123,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_205_000 picoseconds. - Weight::from_parts(2_360_000, 0) + // Minimum execution time: 2_007_000 picoseconds. + Weight::from_parts(2_183_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -150,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 29_099_000 picoseconds. - Weight::from_parts(29_580_000, 0) + // Minimum execution time: 28_790_000 picoseconds. 
+ Weight::from_parts(29_767_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -176,8 +234,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 31_161_000 picoseconds. - Weight::from_parts(31_933_000, 0) + // Minimum execution time: 30_951_000 picoseconds. + Weight::from_parts(31_804_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -188,8 +246,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_158_000 picoseconds. - Weight::from_parts(2_316_000, 0) + // Minimum execution time: 2_164_000 picoseconds. + Weight::from_parts(2_311_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -199,8 +257,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 16_934_000 picoseconds. - Weight::from_parts(17_655_000, 0) + // Minimum execution time: 16_906_000 picoseconds. + Weight::from_parts(17_612_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -211,8 +269,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 17_658_000 picoseconds. - Weight::from_parts(17_973_000, 0) + // Minimum execution time: 17_443_000 picoseconds. + Weight::from_parts(18_032_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -223,8 +281,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 18_673_000 picoseconds. - Weight::from_parts(19_027_000, 0) + // Minimum execution time: 18_992_000 picoseconds. + Weight::from_parts(19_464_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } @@ -246,8 +304,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_171_000 picoseconds. - Weight::from_parts(27_802_000, 0) + // Minimum execution time: 28_011_000 picoseconds. + Weight::from_parts(28_716_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -258,8 +316,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_423_000 picoseconds. - Weight::from_parts(9_636_000, 0) + // Minimum execution time: 9_533_000 picoseconds. + Weight::from_parts(9_856_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -269,8 +327,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_442_000 picoseconds. - Weight::from_parts(17_941_000, 0) + // Minimum execution time: 17_628_000 picoseconds. 
+ Weight::from_parts(18_146_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -293,8 +351,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `11105` - // Minimum execution time: 34_340_000 picoseconds. - Weight::from_parts(34_934_000, 0) + // Minimum execution time: 34_877_000 picoseconds. + Weight::from_parts(35_607_000, 0) .saturating_add(Weight::from_parts(0, 11105)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -307,8 +365,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 5_496_000 picoseconds. - Weight::from_parts(5_652_000, 0) + // Minimum execution time: 5_370_000 picoseconds. + Weight::from_parts(5_616_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -319,8 +377,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_140_000 picoseconds. - Weight::from_parts(26_824_000, 0) + // Minimum execution time: 26_820_000 picoseconds. + Weight::from_parts(27_143_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs similarity index 72% rename from cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs rename to cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs index 8c344b44f78084ca2a37167669ba392aaca6c72b..775bc3bdb80f54a8db97d1c1fdbf5a837fdb95b1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,38 +48,34 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm_bridge_hub_router`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `193` - // Estimated: `3658` - // Minimum execution time: 8_528_000 picoseconds. - Weight::from_parts(8_886_000, 0) - .saturating_add(Weight::from_parts(0, 3658)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `154` + // Estimated: `1639` + // Minimum execution time: 7_853_000 picoseconds. + Weight::from_parts(8_443_000, 0) + .saturating_add(Weight::from_parts(0, 1639)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `183` - // Estimated: `3648` - // Minimum execution time: 5_170_000 picoseconds. - Weight::from_parts(5_433_000, 0) - .saturating_add(Weight::from_parts(0, 3648)) - .saturating_add(T::DbWeight::get().reads(3)) + // Measured: `144` + // Estimated: `1629` + // Minimum execution time: 4_333_000 picoseconds. + Weight::from_parts(4_501_000, 0) + .saturating_add(Weight::from_parts(0, 1629)) + .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) @@ -87,24 +83,24 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `150` // Estimated: `1502` - // Minimum execution time: 10_283_000 picoseconds. - Weight::from_parts(10_762_000, 0) + // Minimum execution time: 10_167_000 picoseconds. 
+ Weight::from_parts(10_667_000, 0) .saturating_add(Weight::from_parts(0, 1502)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) @@ -113,18 +109,18 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send_message() -> Weight { // Proof Size summary in bytes: - // Measured: `387` - // Estimated: `3852` - // Minimum execution time: 52_040_000 picoseconds. - Weight::from_parts(53_500_000, 0) - .saturating_add(Weight::from_parts(0, 3852)) - .saturating_add(T::DbWeight::get().reads(11)) + // Measured: `448` + // Estimated: `6388` + // Minimum execution time: 60_584_000 picoseconds. 
+ Weight::from_parts(62_467_000, 0) + .saturating_add(Weight::from_parts(0, 6388)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs deleted file mode 100644 index ff00ace25b8b2131b50cc4bb0b1a66d40c12ebfd..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm_bridge_hub_router` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_bridge_hub_router -// --chain=asset-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm_bridge_hub_router`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn on_initialize_when_non_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `265` - // Estimated: `3730` - // Minimum execution time: 9_084_000 picoseconds. 
- Weight::from_parts(9_441_000, 0) - .saturating_add(Weight::from_parts(0, 3730)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn on_initialize_when_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `202` - // Estimated: `3667` - // Minimum execution time: 5_971_000 picoseconds. - Weight::from_parts(6_260_000, 0) - .saturating_add(Weight::from_parts(0, 3667)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `117` - // Estimated: `1502` - // Minimum execution time: 10_231_000 picoseconds. - Weight::from_parts(10_861_000, 0) - .saturating_add(Weight::from_parts(0, 1502)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn send_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `478` - // Estimated: `3943` 
- // Minimum execution time: 53_966_000 picoseconds. - Weight::from_parts(55_224_000, 0) - .saturating_add(Weight::from_parts(0, 3943)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs deleted file mode 100644 index ca371f1e6ce4158c71f58a10ebe12685c73d8968..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm_bridge_hub_router` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_bridge_hub_router -// --chain=asset-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm_bridge_hub_router`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn on_initialize_when_non_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `231` - // Estimated: `3696` - // Minimum execution time: 9_115_000 picoseconds. 
- Weight::from_parts(9_522_000, 0) - .saturating_add(Weight::from_parts(0, 3696)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn on_initialize_when_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `183` - // Estimated: `3648` - // Minimum execution time: 5_207_000 picoseconds. - Weight::from_parts(5_534_000, 0) - .saturating_add(Weight::from_parts(0, 3648)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `83` - // Estimated: `1502` - // Minimum execution time: 10_437_000 picoseconds. - Weight::from_parts(10_956_000, 0) - .saturating_add(Weight::from_parts(0, 1502)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn send_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `425` - // Estimated: `3890` - 
// Minimum execution time: 52_176_000 picoseconds. - Weight::from_parts(54_067_000, 0) - .saturating_add(Weight::from_parts(0, 3890)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index fe5123a427c71f4fc8aa59b67fb0f3556c537526..7fab35842509deceba14e89e5bbf6bebe2240528 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 20_940_000 picoseconds. - Weight::from_parts(21_453_000, 3593) + // Minimum execution time: 21_643_000 picoseconds. + Weight::from_parts(22_410_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,15 +65,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 44_310_000 picoseconds. - Weight::from_parts(44_948_000, 6196) + // Minimum execution time: 43_758_000 picoseconds. + Weight::from_parts(44_654_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -92,25 +90,21 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `8799` - // Minimum execution time: 87_226_000 picoseconds. - Weight::from_parts(89_399_000, 8799) - .saturating_add(T::DbWeight::get().reads(11)) + // Minimum execution time: 87_978_000 picoseconds. 
+ Weight::from_parts(88_517_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn reserve_asset_deposited() -> Weight { // Proof Size summary in bytes: - // Measured: `39` - // Estimated: `3504` - // Minimum execution time: 7_320_000 picoseconds. - Weight::from_parts(7_453_000, 3504) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 6_883_000 picoseconds. + Weight::from_parts(6_979_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -131,17 +125,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 183_539_000 picoseconds. - Weight::from_parts(190_968_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 198_882_000 picoseconds. + Weight::from_parts(199_930_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_068_000 picoseconds. - Weight::from_parts(3_228_000, 0) + // Minimum execution time: 3_343_000 picoseconds. + Weight::from_parts(3_487_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -149,15 +143,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 18_788_000 picoseconds. - Weight::from_parts(19_240_000, 3593) + // Minimum execution time: 19_399_000 picoseconds. + Weight::from_parts(19_659_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -176,13 +168,11 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6196` - // Minimum execution time: 58_577_000 picoseconds. - Weight::from_parts(59_729_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 59_017_000 picoseconds. 
+ Weight::from_parts(60_543_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -203,9 +193,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 45_804_000 picoseconds. - Weight::from_parts(46_702_000, 3610) - .saturating_add(T::DbWeight::get().reads(9)) + // Minimum execution time: 45_409_000 picoseconds. + Weight::from_parts(47_041_000, 3610) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index e2fe122a12d258fad435a224e12e9bc529556234..4454494badcbfe9b4f429312e24b63786b83ef75 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -48,8 +48,6 @@ use sp_std::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -70,17 +68,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 415_688_000 picoseconds. - Weight::from_parts(433_876_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 440_298_000 picoseconds. + Weight::from_parts(446_508_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_209_000 picoseconds. - Weight::from_parts(3_465_000, 0) + // Minimum execution time: 3_313_000 picoseconds. 
+ Weight::from_parts(3_422_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -88,61 +86,59 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 7_940_000 picoseconds. - Weight::from_parts(8_208_000, 3568) + // Minimum execution time: 9_691_000 picoseconds. + Weight::from_parts(9_948_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_336_000 picoseconds. - Weight::from_parts(9_733_000, 0) + // Minimum execution time: 10_384_000 picoseconds. + Weight::from_parts(11_085_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_368_000 picoseconds. - Weight::from_parts(3_700_000, 0) + // Minimum execution time: 3_438_000 picoseconds. + Weight::from_parts(3_577_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_034_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_243_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_870_000 picoseconds. - Weight::from_parts(1_972_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_207_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_890_000 picoseconds. - Weight::from_parts(1_962_000, 0) + // Minimum execution time: 2_105_000 picoseconds. + Weight::from_parts(2_193_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_660_000 picoseconds. - Weight::from_parts(2_744_000, 0) + // Minimum execution time: 2_999_000 picoseconds. + Weight::from_parts(3_056_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_845_000 picoseconds. - Weight::from_parts(1_945_000, 0) + // Minimum execution time: 2_091_000 picoseconds. + Weight::from_parts(2_176_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -163,9 +159,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 54_283_000 picoseconds. - Weight::from_parts(54_969_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 55_728_000 picoseconds. + Weight::from_parts(56_704_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) @@ -174,8 +170,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 11_850_000 picoseconds. 
- Weight::from_parts(12_328_000, 3625) + // Minimum execution time: 12_839_000 picoseconds. + Weight::from_parts(13_457_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -183,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_891_000 picoseconds. - Weight::from_parts(1_950_000, 0) + // Minimum execution time: 2_116_000 picoseconds. + Weight::from_parts(2_219_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -204,8 +200,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 23_644_000 picoseconds. - Weight::from_parts(24_296_000, 3610) + // Minimum execution time: 24_891_000 picoseconds. + Weight::from_parts(25_583_000, 3610) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -215,47 +211,45 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_719_000 picoseconds. - Weight::from_parts(3_896_000, 0) + // Minimum execution time: 3_968_000 picoseconds. + Weight::from_parts(4_122_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 125_710_000 picoseconds. - Weight::from_parts(132_434_000, 0) + // Minimum execution time: 136_220_000 picoseconds. + Weight::from_parts(137_194_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_650_000 picoseconds. - Weight::from_parts(12_277_000, 0) + // Minimum execution time: 12_343_000 picoseconds. + Weight::from_parts(12_635_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_978_000 picoseconds. - Weight::from_parts(2_070_000, 0) + // Minimum execution time: 2_237_000 picoseconds. + Weight::from_parts(2_315_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_899_000 picoseconds. - Weight::from_parts(2_002_000, 0) + // Minimum execution time: 2_094_000 picoseconds. + Weight::from_parts(2_231_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_133_000 picoseconds. - Weight::from_parts(2_194_000, 0) + // Minimum execution time: 2_379_000 picoseconds. + Weight::from_parts(2_455_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -276,20 +270,18 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 58_644_000 picoseconds. - Weight::from_parts(60_614_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_734_000 picoseconds. 
+ Weight::from_parts(61_964_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_185_000 picoseconds. - Weight::from_parts(5_366_000, 0) + // Minimum execution time: 5_500_000 picoseconds. + Weight::from_parts(5_720_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -310,56 +302,54 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 54_443_000 picoseconds. - Weight::from_parts(55_873_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 55_767_000 picoseconds. + Weight::from_parts(56_790_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_909_000 picoseconds. - Weight::from_parts(2_011_000, 0) + // Minimum execution time: 2_201_000 picoseconds. + Weight::from_parts(2_291_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_814_000 picoseconds. - Weight::from_parts(1_956_000, 0) + // Minimum execution time: 2_164_000 picoseconds. + Weight::from_parts(2_241_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_875_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 2_127_000 picoseconds. + Weight::from_parts(2_236_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn universal_origin() -> Weight { // Proof Size summary in bytes: - // Measured: `39` - // Estimated: `3504` - // Minimum execution time: 7_376_000 picoseconds. - Weight::from_parts(7_620_000, 3504) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 4_275_000 picoseconds. + Weight::from_parts(4_381_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_863_000 picoseconds. - Weight::from_parts(1_964_000, 0) + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_216_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_956_000 picoseconds. - Weight::from_parts(2_057_000, 0) + // Minimum execution time: 2_265_000 picoseconds. 
+ Weight::from_parts(2_332_000, 0) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 9b980e3026e026049daf01d110fc6593bbb1b7d2..f054919683cd5adc69129e45a97c40c761c9abbc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -16,59 +16,58 @@ use super::{ AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, BaseDeliveryFee, FeeAssetId, ForeignAssets, ForeignAssetsInstance, ParachainInfo, ParachainSystem, PolkadotXcm, - PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeFlavor, RuntimeOrigin, - ToRococoXcmRouter, ToWestendXcmRouter, ToWococoXcmRouter, TransactionByteFee, - TrustBackedAssetsInstance, WeightToFee, XcmpQueue, + PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, ToWestendXcmRouter, + TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue, }; use assets_common::{ local_and_foreign_assets::MatchesLocalAndForeignAssetsMultiLocation, - matching::IsSiblingParachainAsset, + matching::{FromNetwork, FromSiblingParachain, IsForeignConcreteAsset}, }; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Contains, Equals, Everything, Get, Nothing, PalletInfoAccess}, + traits::{ConstU32, Contains, Equals, Everything, Nothing, PalletInfoAccess}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; use parachains_common::{ impls::ToStakingPot, xcm_config::{ - AssetFeeAsExistentialDepositMultiplier, ConcreteAssetFromSystem, - RelayOrOtherSystemParachains, + AllSiblingSystemParachains, AssetFeeAsExistentialDepositMultiplier, + ConcreteAssetFromSystem, ParentRelayOrSiblingParachains, RelayOrOtherSystemParachains, }, TREASURY_PALLET_ID, }; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use rococo_runtime_constants::system_parachain; +use snowbridge_rococo_common::EthereumNetwork; +use snowbridge_router_primitives::inbound::GlobalConsensusEthereumConvertsFor; use sp_runtime::traits::{AccountIdConversion, ConvertInto}; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, - EnsureXcmOrigin, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, - IsConcrete, LocalMint, NetworkExportTableItem, NoChecking, ParentAsSuperuser, ParentIsPreset, - RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, + DenyThenTry, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FungiblesAdapter, + GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, LocalMint, + NetworkExportTableItem, NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignPaidRemoteExporter, SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus,
TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; -#[cfg(feature = "runtime-benchmarks")] -use cumulus_primitives_core::ParaId; - parameter_types! { - pub storage Flavor: RuntimeFlavor = RuntimeFlavor::default(); pub const TokenLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: NetworkId = NetworkId::Rococo; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); pub UniversalLocationNetworkId: NetworkId = UniversalLocation::get().global_consensus().unwrap(); - pub TrustBackedAssetsPalletLocation: MultiLocation = - PalletInstance(::index() as u8).into(); + pub AssetsPalletIndex: u32 = ::index() as u32; + pub TrustBackedAssetsPalletLocation: MultiLocation = PalletInstance(AssetsPalletIndex::get() as u8).into(); pub ForeignAssetsPalletLocation: MultiLocation = PalletInstance(::index() as u8).into(); pub PoolAssetsPalletLocation: MultiLocation = @@ -79,22 +82,6 @@ parameter_types! { pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } -/// Adapter for resolving `NetworkId` based on `pub storage Flavor: RuntimeFlavor`. -pub struct RelayNetwork; -impl Get> for RelayNetwork { - fn get() -> Option { - Some(Self::get()) - } -} -impl Get for RelayNetwork { - fn get() -> NetworkId { - match Flavor::get() { - RuntimeFlavor::Rococo => NetworkId::Rococo, - RuntimeFlavor::Wococo => NetworkId::Wococo, - } - } -} - /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used /// when determining ownership of accounts for asset transacting and when attempting to use XCM /// `Transact` in order to determine the dispatch Origin. @@ -110,9 +97,13 @@ pub type LocationToAccountId = ( // Different global consensus parachain sovereign account. // (Used for over-bridge transfers and reserve processing) GlobalConsensusParachainConvertsFor, + // Ethereum contract sovereign account. + // (Used to get convert ethereum contract locations to sovereign account) + GlobalConsensusEthereumConvertsFor, ); /// Means for transacting the native currency on this chain. +#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, @@ -126,7 +117,7 @@ pub type CurrencyTransactor = CurrencyAdapter< (), >; -/// `AssetId`/`Balance` converter for `PoolAssets`. +/// `AssetId`/`Balance` converter for `TrustBackedAssets`. pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -147,7 +138,7 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `ForeignAssets`. pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< ( // Ignore `TrustBackedAssets` explicitly @@ -256,10 +247,6 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; } /// A call filter for the XCM Transact instruction. 
This is a temporary measure until we properly @@ -282,35 +269,32 @@ impl Contains for SafeCallFilter { // Allow to change dedicated storage items (called by governance-like) match call { RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterByteFee::key())) || - items - .iter() - .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) || - items.iter().all(|(k, _)| k.eq(&Flavor::key())) => + if items.iter().all(|(k, _)| { + k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) | + k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) | + k.eq(&bridging::to_ethereum::BridgeHubEthereumBaseFee::key()) + }) => return true, _ => (), }; matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::authorize_upgrade { .. } | + frame_system::Call::authorize_upgrade_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::CollatorSelection(..) | + RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | RuntimeCall::Assets( @@ -475,12 +459,8 @@ impl Contains for SafeCallFilter { pallet_uniques::Call::set_collection_max_supply { .. } | pallet_uniques::Call::set_price { .. } | pallet_uniques::Call::buy_item { .. } - ) | RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } ) | RuntimeCall::ToWestendXcmRouter( pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) | RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } ) ) } @@ -507,7 +487,7 @@ pub type Barrier = TrailingSetTopicAsId< Equals, )>, // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, + AllowSubscriptionsFrom, ), UniversalLocation, ConstU32<8>, @@ -533,25 +513,13 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = ForeignAssetsInstance, >; -match_types! { - pub type SystemParachains: impl Contains = { - MultiLocation { - parents: 1, - interior: X1(Parachain( - system_parachain::ASSET_HUB_ID | - system_parachain::BRIDGE_HUB_ID | - system_parachain::CONTRACTS_ID | - system_parachain::ENCOINTER_ID - )), - } - }; -} - /// Locations that will not be charged fees in the executor, /// either execution or delivery. 
/// We only waive fees for system functions, which these locations represent. -pub type WaivedLocations = - (RelayOrOtherSystemParachains, Equals); +pub type WaivedLocations = ( + RelayOrOtherSystemParachains, + Equals, +); /// Cases where a remote origin is accepted as trusted Teleporter for a given asset: /// @@ -573,9 +541,8 @@ impl xcm_executor::Config for XcmConfig { // held). Asset Hub may _act_ as a reserve location for ROC and assets created // under `pallet-assets`. Users must use teleport where allowed (e.g. ROC with the Relay Chain). type IsReserve = ( - bridging::to_wococo::IsTrustedBridgedReserveLocationForConcreteAsset, bridging::to_westend::IsTrustedBridgedReserveLocationForConcreteAsset, - bridging::to_rococo::IsTrustedBridgedReserveLocationForConcreteAsset, + bridging::to_ethereum::IsTrustedBridgedReserveLocationForForeignAsset, ); type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; @@ -627,11 +594,8 @@ impl xcm_executor::Config for XcmConfig { XcmFeeToAccount, >; type MessageExporter = (); - type UniversalAliases = ( - bridging::to_wococo::UniversalAliases, - bridging::to_rococo::UniversalAliases, - bridging::to_westend::UniversalAliases, - ); + type UniversalAliases = + (bridging::to_westend::UniversalAliases, bridging::to_ethereum::UniversalAliases); type CallDispatcher = WithOriginFilter; type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; @@ -656,22 +620,14 @@ type LocalXcmRouter = ( /// queues. pub type XcmRouter = WithUniqueTopic<( LocalXcmRouter, - // Router which wraps and sends xcm to BridgeHub to be delivered to the Wococo - // GlobalConsensus - ToWococoXcmRouter, // Router which wraps and sends xcm to BridgeHub to be delivered to the Westend // GlobalConsensus ToWestendXcmRouter, - // Router which wraps and sends xcm to BridgeHub to be delivered to the Rococo + // Router which wraps and sends xcm to BridgeHub to be delivered to the Ethereum // GlobalConsensus - ToRococoXcmRouter, + SovereignPaidRemoteExporter, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -701,8 +657,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -717,6 +671,7 @@ pub type ForeignCreatorsSovereignAccountOf = ( SiblingParachainConvertsVia, AccountId32Aliases, ParentIsPreset, + GlobalConsensusEthereumConvertsFor, ); /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
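The `SafeCallFilter` hunk above replaces the old per-key `||`-of-`all` checks with a single `all` whose closure accepts any of the three whitelisted storage keys (`XcmBridgeHubRouterByteFee`, `XcmBridgeHubRouterBaseFee`, `BridgeHubEthereumBaseFee`), so one governance `set_storage` call may now update a mix of those keys rather than repeatedly targeting a single key. A minimal, self-contained sketch of that check follows; the byte vectors are hypothetical stand-ins for the real `parameter_types!` storage keys, which the runtime obtains via `bridging::…::key()`.

    // Standalone sketch, not part of the diff above.
    fn set_storage_items_allowed(items: &[(Vec<u8>, Vec<u8>)], allowed_keys: &[Vec<u8>]) -> bool {
        // Every item must target one of the allowed keys; unlike the previous
        // `||`-of-`all` form, different items may target different allowed keys.
        items.iter().all(|(key, _value)| allowed_keys.iter().any(|allowed| allowed == key))
    }

    fn main() {
        // Hypothetical keys for illustration only.
        let byte_fee = b"XcmBridgeHubRouterByteFee".to_vec();
        let base_fee = b"XcmBridgeHubRouterBaseFee".to_vec();
        let eth_fee = b"BridgeHubEthereumBaseFee".to_vec();
        let allowed = [byte_fee.clone(), base_fee.clone(), eth_fee];

        // A single call updating two different whitelisted keys is accepted.
        assert!(set_storage_items_allowed(
            &[(byte_fee, b"1".to_vec()), (base_fee, b"2".to_vec())],
            &allowed
        ));
        // Any item touching a non-whitelisted key rejects the whole call.
        assert!(!set_storage_items_allowed(
            &[(b"SomeOtherKey".to_vec(), b"3".to_vec())],
            &allowed
        ));
    }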
@@ -728,40 +683,13 @@ impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { } } -#[cfg(feature = "runtime-benchmarks")] -pub struct BenchmarkMultiLocationConverter { - _phantom: sp_std::marker::PhantomData, -} - -#[cfg(feature = "runtime-benchmarks")] -impl - pallet_asset_conversion::BenchmarkHelper> - for BenchmarkMultiLocationConverter -where - SelfParaId: Get, -{ - fn asset_id(asset_id: u32) -> MultiLocation { - MultiLocation { - parents: 1, - interior: X3( - Parachain(SelfParaId::get().into()), - PalletInstance(::index() as u8), - GeneralIndex(asset_id.into()), - ), - } - } - fn multiasset_id(asset_id: u32) -> sp_std::boxed::Box { - sp_std::boxed::Box::new(Self::asset_id(asset_id)) - } -} - /// All configuration related to bridging pub mod bridging { use super::*; use assets_common::matching; use sp_std::collections::btree_set::BTreeSet; - // common/shared parameters for Wococo/Rococo + // common/shared parameters parameter_types! { /// Base price of every byte of the Rococo -> Westend message. Can be adjusted via /// governance `set_storage` call. @@ -782,10 +710,7 @@ pub mod bridging { /// governance `set_storage` call. pub storage XcmBridgeHubRouterByteFee: Balance = TransactionByteFee::get(); - pub SiblingBridgeHubParaId: u32 = match Flavor::get() { - RuntimeFlavor::Rococo => bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - RuntimeFlavor::Wococo => bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - }; + pub SiblingBridgeHubParaId: u32 = bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID; pub SiblingBridgeHub: MultiLocation = MultiLocation::new(1, X1(Parachain(SiblingBridgeHubParaId::get()))); /// Router expects payment with this `AssetId`. /// (`AssetId` has to be aligned with `BridgeTable`) @@ -793,89 +718,18 @@ pub mod bridging { pub BridgeTable: sp_std::vec::Vec = sp_std::vec::Vec::new().into_iter() - .chain(to_wococo::BridgeTable::get()) .chain(to_westend::BridgeTable::get()) - .chain(to_rococo::BridgeTable::get()) + .collect(); + + pub EthereumBridgeTable: sp_std::vec::Vec = + sp_std::vec::Vec::new().into_iter() + .chain(to_ethereum::BridgeTable::get()) .collect(); } pub type NetworkExportTable = xcm_builder::NetworkExportTable; - pub mod to_wococo { - use super::*; - - parameter_types! { - pub SiblingBridgeHubWithBridgeHubWococoInstance: MultiLocation = MultiLocation::new( - 1, - X2( - Parachain(SiblingBridgeHubParaId::get()), - PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX) - ) - ); - - pub const WococoNetwork: NetworkId = NetworkId::Wococo; - pub AssetHubWococo: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(WococoNetwork::get()), Parachain(bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID))); - pub WocLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(WococoNetwork::get()))); - - pub WocFromAssetHubWococo: (MultiAssetFilter, MultiLocation) = ( - Wild(AllOf { fun: WildFungible, id: Concrete(WocLocation::get()) }), - AssetHubWococo::get() - ); - - /// Set up exporters configuration. - /// `Option` represents static "base fee" which is used for total delivery fee calculation. 
- pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ - NetworkExportTableItem::new( - WococoNetwork::get(), - Some(sp_std::vec![ - AssetHubWococo::get().interior.split_global().expect("invalid configuration for AssetHubWococo").1, - ]), - SiblingBridgeHub::get(), - // base delivery fee to local `BridgeHub` - Some(( - XcmBridgeHubRouterFeeAssetId::get(), - XcmBridgeHubRouterBaseFee::get(), - ).into()) - ) - ]; - - /// Universal aliases - pub UniversalAliases: BTreeSet<(MultiLocation, Junction)> = BTreeSet::from_iter( - sp_std::vec![ - (SiblingBridgeHubWithBridgeHubWococoInstance::get(), GlobalConsensus(WococoNetwork::get())) - ] - ); - } - - impl Contains<(MultiLocation, Junction)> for UniversalAliases { - fn contains(alias: &(MultiLocation, Junction)) -> bool { - UniversalAliases::get().contains(alias) - } - } - - /// Trusted reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. - pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive WOC from AssetHubWococo - xcm_builder::Case, - // and nothing else - ), - >; - - impl Contains for ToWococoXcmRouter { - fn contains(call: &RuntimeCall) -> bool { - matches!( - call, - RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) - ) - } - } - } + pub type EthereumNetworkExportTable = xcm_builder::NetworkExportTable; pub mod to_westend { use super::*; @@ -953,80 +807,54 @@ pub mod bridging { } } - pub mod to_rococo { + pub mod to_ethereum { use super::*; parameter_types! { - pub SiblingBridgeHubWithBridgeHubRococoInstance: MultiLocation = MultiLocation::new( + /// User fee for ERC20 token transfer back to Ethereum. + /// (initially was calculated by test `OutboundQueue::calculate_fees` - ETH/ROC 1/400 and fee_per_gas 20 GWEI = 2200698000000 + *25%) + /// Needs to be more than fee calculated from DefaultFeeConfig FeeConfigRecord in snowbridge:parachain/pallets/outbound-queue/src/lib.rs + /// Polkadot uses 10 decimals, Kusama and Rococo 12 decimals. + pub const DefaultBridgeHubEthereumBaseFee: Balance = 2_750_872_500_000; + pub storage BridgeHubEthereumBaseFee: Balance = DefaultBridgeHubEthereumBaseFee::get(); + pub SiblingBridgeHubWithEthereumInboundQueueInstance: MultiLocation = MultiLocation::new( 1, X2( Parachain(SiblingBridgeHubParaId::get()), - PalletInstance(bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX) + PalletInstance(snowbridge_rococo_common::INBOUND_QUEUE_MESSAGES_PALLET_INDEX) ) ); - pub const RococoNetwork: NetworkId = NetworkId::Rococo; - pub AssetHubRococo: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(RococoNetwork::get()), Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID))); - pub RocLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(RococoNetwork::get()))); - - pub RocFromAssetHubRococo: (MultiAssetFilter, MultiLocation) = ( - Wild(AllOf { fun: WildFungible, id: Concrete(RocLocation::get()) }), - AssetHubRococo::get() - ); - /// Set up exporters configuration. /// `Option` represents static "base fee" which is used for total delivery fee calculation. 
pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ NetworkExportTableItem::new( - RococoNetwork::get(), - Some(sp_std::vec![ - AssetHubRococo::get().interior.split_global().expect("invalid configuration for AssetHubRococo").1, - ]), + EthereumNetwork::get(), + Some(sp_std::vec![Junctions::Here]), SiblingBridgeHub::get(), - // base delivery fee to local `BridgeHub` Some(( XcmBridgeHubRouterFeeAssetId::get(), - XcmBridgeHubRouterBaseFee::get(), + BridgeHubEthereumBaseFee::get(), ).into()) - ) + ), ]; /// Universal aliases pub UniversalAliases: BTreeSet<(MultiLocation, Junction)> = BTreeSet::from_iter( sp_std::vec![ - (SiblingBridgeHubWithBridgeHubRococoInstance::get(), GlobalConsensus(RococoNetwork::get())) + (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get())), ] ); } + pub type IsTrustedBridgedReserveLocationForForeignAsset = + matching::IsForeignConcreteAsset>; + impl Contains<(MultiLocation, Junction)> for UniversalAliases { fn contains(alias: &(MultiLocation, Junction)) -> bool { UniversalAliases::get().contains(alias) } } - - /// Reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. - pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive ROC from AssetHubRococo - xcm_builder::Case, - // and nothing else - ), - >; - - impl Contains for ToRococoXcmRouter { - fn contains(call: &RuntimeCall) -> bool { - matches!( - call, - RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) - ) - } - } } /// Benchmarks helper for bridging configuration. diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index c3d3c4abbbb293093e56352e3d559e64fbe72aae..42c91cc8ea69c7c907bf9593200ebbf676c2b6f0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -28,8 +28,8 @@ pub use asset_hub_rococo_runtime::{ }, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, RuntimeFlavor, SessionKeys, System, ToRococoXcmRouterInstance, - ToWestendXcmRouterInstance, ToWococoXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, + RuntimeCall, RuntimeEvent, SessionKeys, System, ToWestendXcmRouterInstance, + TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, ExtBuilder, @@ -529,12 +529,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -679,18 +673,16 @@ fn limited_reserve_transfer_assets_for_native_asset_over_bridge_works( mod asset_hub_rococo_tests { use super::*; - - fn bridging_to_asset_hub_wococo() -> TestBridgingConfig { - asset_test_utils::test_cases_over_bridge::TestBridgingConfig { - bridged_network: bridging::to_wococo::WococoNetwork::get(), - local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), - local_bridge_hub_location: bridging::SiblingBridgeHub::get(), - 
bridged_target_location: bridging::to_wococo::AssetHubWococo::get(), - } - } + use asset_hub_rococo_runtime::{PolkadotXcm, RuntimeOrigin}; fn bridging_to_asset_hub_westend() -> TestBridgingConfig { - asset_test_utils::test_cases_over_bridge::TestBridgingConfig { + let _ = PolkadotXcm::force_xcm_version( + RuntimeOrigin::root(), + Box::new(bridging::to_westend::AssetHubWestend::get()), + XCM_VERSION, + ) + .expect("version saved!"); + TestBridgingConfig { bridged_network: bridging::to_westend::WestendNetwork::get(), local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), local_bridge_hub_location: bridging::SiblingBridgeHub::get(), @@ -698,13 +690,6 @@ mod asset_hub_rococo_tests { } } - #[test] - fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_wococo_works() { - limited_reserve_transfer_assets_for_native_asset_over_bridge_works( - bridging_to_asset_hub_wococo, - ) - } - #[test] fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_westend_works() { limited_reserve_transfer_assets_for_native_asset_over_bridge_works( @@ -712,31 +697,6 @@ mod asset_hub_rococo_tests { ) } - #[test] - fn receive_reserve_asset_deposited_woc_from_asset_hub_wococo_works() { - const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; - asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ForeignAssetsInstance, - >( - collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), - ExistentialDeposit::get(), - AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), - // receiving WOCs - (MultiLocation { parents: 2, interior: X1(GlobalConsensus(Wococo)) }, 1000000000000, 1_000_000_000), - bridging_to_asset_hub_wococo, - ( - X1(PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX)), - GlobalConsensus(Wococo), - X1(Parachain(1000)) - ) - ) - } - #[test] fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_works() { const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; @@ -762,58 +722,6 @@ mod asset_hub_rococo_tests { ) } - #[test] - fn report_bridge_status_from_xcm_bridge_router_for_wococo_works() { - asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ToWococoXcmRouterInstance, - >( - collator_session_keys(), - bridging_to_asset_hub_wococo, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - .into(), - } - ] - .into() - }, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: false, - } - ) - .encode() - .into(), - } - ] - .into() - }, - ) - } - #[test] fn report_bridge_status_from_xcm_bridge_router_for_westend_works() { 
asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< @@ -869,22 +777,6 @@ mod asset_hub_rococo_tests { #[test] fn test_report_bridge_status_call_compatibility() { // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding - assert_eq!( - RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode(), - bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - ); assert_eq!( RuntimeCall::ToWestendXcmRouter( pallet_xcm_bridge_hub_router::Call::report_bridge_status { @@ -903,19 +795,6 @@ mod asset_hub_rococo_tests { ); } - #[test] - fn check_sane_weight_report_bridge_status_for_wococo() { - use pallet_xcm_bridge_hub_router::WeightInfo; - let actual = >::WeightInfo::report_bridge_status(); - let max_weight = bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); - assert!( - actual.all_lte(max_weight), - "max_weight: {:?} should be adjusted to actual {:?}", - max_weight, - actual - ); - } - #[test] fn check_sane_weight_report_bridge_status_for_westend() { use pallet_xcm_bridge_hub_router::WeightInfo; @@ -930,165 +809,33 @@ mod asset_hub_rococo_tests { actual ); } -} - -mod asset_hub_wococo_tests { - use super::*; - - fn bridging_to_asset_hub_rococo() -> TestBridgingConfig { - TestBridgingConfig { - bridged_network: bridging::to_rococo::RococoNetwork::get(), - local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), - local_bridge_hub_location: bridging::SiblingBridgeHub::get(), - bridged_target_location: bridging::to_rococo::AssetHubRococo::get(), - } - } - - pub(crate) fn set_wococo_flavor() { - let flavor_key = xcm_config::Flavor::key().to_vec(); - let flavor = RuntimeFlavor::Wococo; - - // encode `set_storage` call - let set_storage_call = RuntimeCall::System(frame_system::Call::::set_storage { - items: vec![(flavor_key, flavor.encode())], - }) - .encode(); - - // estimate - storing just 1 value - use frame_system::WeightInfo; - let require_weight_at_most = - ::SystemWeightInfo::set_storage(1); - - // execute XCM with Transact to `set_storage` as governance does - assert_ok!(RuntimeHelper::execute_as_governance(set_storage_call, require_weight_at_most) - .ensure_complete()); - - // check if stored - assert_eq!(flavor, xcm_config::Flavor::get()); - } - - fn with_wococo_flavor_bridging_to_asset_hub_rococo() -> TestBridgingConfig { - set_wococo_flavor(); - bridging_to_asset_hub_rococo() - } #[test] - fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_rococo_works() { - limited_reserve_transfer_assets_for_native_asset_over_bridge_works( - with_wococo_flavor_bridging_to_asset_hub_rococo, - ) - } - - #[test] - fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_works() { - const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; - asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ForeignAssetsInstance, - >( - collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), - ExistentialDeposit::get(), - AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), - // receiving ROCs - (MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }, 1000000000000, 1_000_000_000), - 
with_wococo_flavor_bridging_to_asset_hub_rococo, - ( - X1(PalletInstance(bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX)), - GlobalConsensus(Rococo), - X1(Parachain(1000)) - ) - ) - } - - #[test] - fn report_bridge_status_from_xcm_bridge_router_works() { - asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< + fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, + ParachainSystem, + XcmpQueue, LocationToAccountId, - ToRococoXcmRouterInstance, >( collator_session_keys(), - with_wococo_flavor_bridging_to_asset_hub_rococo, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - .into(), - } - ] - .into() - }, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: false, - } - ) - .encode() - .into(), - } - ] - .into() - }, - ) - } - - #[test] - fn test_report_bridge_status_call_compatibility() { - // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding - assert_eq!( - RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, } - ) - .encode(), - bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, } - ) - .encode() - ) - } - - #[test] - fn check_sane_weight_report_bridge_status() { - use pallet_xcm_bridge_hub_router::WeightInfo; - let actual = >::WeightInfo::report_bridge_status(); - let max_weight = bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); - assert!( - actual.all_lte(max_weight), - "max_weight: {:?} should be adjusted to actual {:?}", - max_weight, - actual + }), + WeightLimit::Unlimited, ); } } @@ -1118,3 +865,55 @@ fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { }, ) } + +#[test] +fn change_xcm_bridge_hub_router_base_fee_by_governance_works() { + asset_test_utils::test_cases::change_storage_constant_by_governance_works::< + Runtime, + bridging::XcmBridgeHubRouterBaseFee, + Balance, + >( + collator_session_keys(), + 1000, + Box::new(|call| RuntimeCall::System(call).encode()), + || { + ( + bridging::XcmBridgeHubRouterBaseFee::key().to_vec(), + bridging::XcmBridgeHubRouterBaseFee::get(), + ) + }, + 
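// Side note (sketch, not part of the diff): under the hood `change_storage_constant_by_governance_works`
// drives the same flow real governance would use, i.e. a Root-origin `System::set_storage` writing the
// new encoded value under the constant's key, mirroring the `set_storage` pattern used elsewhere in
// this file:
//
//     let new_fee: Balance = bridging::XcmBridgeHubRouterBaseFee::get() + 1;
//     let set_storage_call = RuntimeCall::System(frame_system::Call::<Runtime>::set_storage {
//         items: vec![(bridging::XcmBridgeHubRouterBaseFee::key().to_vec(), new_fee.encode())],
//     })
//     .encode();
//     // dispatched as governance, e.g. via `RuntimeHelper::execute_as_governance(set_storage_call, weight)`.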
|old_value| { + if let Some(new_value) = old_value.checked_add(1) { + new_value + } else { + old_value.checked_sub(1).unwrap() + } + }, + ) +} + +#[test] +fn change_xcm_bridge_hub_ethereum_base_fee_by_governance_works() { + asset_test_utils::test_cases::change_storage_constant_by_governance_works::< + Runtime, + bridging::to_ethereum::BridgeHubEthereumBaseFee, + Balance, + >( + collator_session_keys(), + 1000, + Box::new(|call| RuntimeCall::System(call).encode()), + || { + ( + bridging::to_ethereum::BridgeHubEthereumBaseFee::key().to_vec(), + bridging::to_ethereum::BridgeHubEthereumBaseFee::get(), + ) + }, + |old_value| { + if let Some(new_value) = old_value.checked_add(1) { + new_value + } else { + old_value.checked_sub(1).unwrap() + } + }, + ) +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 49b80b067cfbf13c30f827434e63713937ddfe95..1a1ed0465a34ee62bba6c2921df08dee27cd56f2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Westend variant of Asset Hub parachain runtime" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1", optional = true } @@ -14,63 +17,62 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false} -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} -pallet-proxy = { path = "../../../../../substrate/frame/proxy", 
default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false } +pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false } +pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-uniques = { path = 
"../../../../../substrate/frame/uniques", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "scale-info", "num-traits"] } +primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = 
"staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -95,10 +97,9 @@ asset-test-utils = { path = "../test-utils" } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "assets-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -133,7 +134,6 @@ runtime-benchmarks = [ ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -172,7 +172,6 @@ std = [ "bp-bridge-hub-westend/std", "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -236,9 +235,9 @@ std = [ "xcm/std", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f1a15265b90d435713adf487e6f011f9153c373d..e0dff0c4516e3fa3fe9dd348a1171e6db74cfba4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -27,24 +27,23 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); mod weights; pub mod xcm_config; -use crate::xcm_config::{ - LocalAndForeignAssetsMultiLocationMatcher, TrustBackedAssetsPalletLocation, -}; use assets_common::{ - local_and_foreign_assets::{LocalAndForeignAssets, MultiLocationConverter}, + local_and_foreign_assets::{LocalFromLeft, TargetFromLeft}, AssetIdForTrustBackedAssetsConvert, }; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, ord_parameter_types, parameter_types, traits::{ - tokens::nonfungibles_v2::Inspect, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, - ConstU64, ConstU8, Equals, InstanceFilter, TransformOrigin, + fungible, fungibles, + tokens::{imbalance::ResolveAssetTo, nonfungibles_v2::Inspect}, + AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, Equals, + InstanceFilter, TransformOrigin, }, weights::{ConstantMultiplier, Weight}, BoundedVec, PalletId, @@ -54,7 +53,7 @@ use frame_system::{ EnsureRoot, EnsureSigned, EnsureSignedBy, }; use pallet_asset_conversion_tx_payment::AssetConversionAdapter; -use pallet_nfts::PalletFeatures; +use pallet_nfts::{DestroyWitness, PalletFeatures}; use pallet_xcm::EnsureXcm; pub use parachains_common as common; use parachains_common::{ @@ -69,7 +68,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify}, + traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug, }; @@ -80,7 +79,8 @@ use sp_version::RuntimeVersion; use xcm::opaque::v3::MultiLocation; use xcm_config::{ ForeignAssetsConvertedConcreteId, PoolAssetsConvertedConcreteId, - TrustBackedAssetsConvertedConcreteId, WestendLocation, XcmOriginToTransactDispatchOrigin, + TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocation, WestendLocation, + XcmOriginToTransactDispatchOrigin, }; #[cfg(any(feature = "std", test))] @@ -109,10 +109,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 13, + transaction_version: 14, state_version: 0, }; @@ -148,25 +148,17 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. 
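// Context for the `derive_impl` attribute just below (sketch, not part of the diff): it fills in
// every `frame_system::Config` associated type that is not written out explicitly from the
// `ParaChainDefaultConfig` prelude, which is why items such as `BaseCallFilter`, `RuntimeCall`,
// `Lookup`, `Hashing`, `RuntimeEvent`, `RuntimeOrigin`, `PalletInfo`, `OnNewAccount` and
// `OnKilledAccount` can be (and are) dropped from the impl. A defaulted type can still be
// overridden by simply naming it again, e.g. (hypothetical filter type):
//
//     #[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)]
//     impl frame_system::Config for Runtime {
//         type BaseCallFilter = SomeCustomFilter; // overrides the prelude default
//         // everything not named here is injected by the macro
//     }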
+#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = SS58Prefix; @@ -270,8 +262,6 @@ impl pallet_assets::Config for Runtime { parameter_types! { pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); - pub const AllowMultiAssetPools: bool = false; - // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); } @@ -305,35 +295,50 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +/// Union fungibles implementation for `Assets`` and `ForeignAssets`. +pub type LocalAndForeignAssets = fungibles::UnionOf< + Assets, + ForeignAssets, + LocalFromLeft< + AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssets, + >, + MultiLocation, + AccountId, +>; + impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type HigherPrecisionBalance = sp_core::U256; - type Currency = Balances; - type AssetBalance = Balance; - type AssetId = MultiLocation; - type Assets = LocalAndForeignAssets< - Assets, - AssetIdForTrustBackedAssetsConvert, - ForeignAssets, + type AssetKind = MultiLocation; + type Assets = fungible::UnionOf< + Balances, + LocalAndForeignAssets, + TargetFromLeft, + Self::AssetKind, + Self::AccountId, >; - type PoolAssets = PoolAssets; + type PoolId = (Self::AssetKind, Self::AssetKind); + type PoolLocator = + pallet_asset_conversion::WithFirstAsset; type PoolAssetId = u32; + type PoolAssets = PoolAssets; type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam - type PoolSetupFeeReceiver = AssetConversionOrigin; - type LiquidityWithdrawalFee = LiquidityWithdrawalFee; // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero. + type PoolSetupFeeAsset = WestendLocation; + type PoolSetupFeeTarget = ResolveAssetTo; + type LiquidityWithdrawalFee = LiquidityWithdrawalFee; type LPFee = ConstU32<3>; type PalletId = AssetConversionPalletId; - type AllowMultiAssetPools = AllowMultiAssetPools; - type MaxSwapPathLength = ConstU32<4>; - type MultiAssetId = Box; - type MultiAssetIdConverter = - MultiLocationConverter; + type MaxSwapPathLength = ConstU32<3>; type MintMinLiquidity = ConstU128<100>; type WeightInfo = weights::pallet_asset_conversion::WeightInfo; #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = - crate::xcm_config::BenchmarkMultiLocationConverter>; + type BenchmarkHelper = assets_common::benchmarks::AssetPairFactory< + WestendLocation, + parachain_info::Pallet, + xcm_config::AssetsPalletIndex, + >; } parameter_types! { @@ -665,12 +670,6 @@ parameter_types! 
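// Sketch (not part of the diff; names other than the configured types are assumptions) of what the
// `fungibles::UnionOf` wiring above buys: `LocalAndForeignAssets` exposes `Assets` and
// `ForeignAssets` behind a single `fungibles` implementation keyed by `MultiLocation`, with
// `LocalFromLeft` deciding which instance serves a given location (the asset-conversion
// `type Assets` then unions that again with `Balances` for the native token).
fn balance_of_any_asset(asset: MultiLocation, who: &AccountId) -> Balance {
    use frame_support::traits::fungibles::Inspect;
    // Locations under the trust-backed `Assets` pallet resolve to that instance,
    // anything else to `ForeignAssets`; callers never need to branch on this themselves.
    <LocalAndForeignAssets as Inspect<AccountId>>::balance(asset, who)
}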
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - parameter_types! { pub const Period: u32 = 6 * HOURS; pub const Offset: u32 = 0; @@ -724,12 +723,9 @@ impl pallet_collator_selection::Config for Runtime { impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type Fungibles = LocalAndForeignAssets< - Assets, - AssetIdForTrustBackedAssetsConvert, - ForeignAssets, - >; - type OnChargeAssetTransaction = AssetConversionAdapter; + type Fungibles = LocalAndForeignAssets; + type OnChargeAssetTransaction = + AssetConversionAdapter; } parameter_types! { @@ -836,6 +832,7 @@ impl pallet_xcm_bridge_hub_router::Config for Runtime type UniversalLocation = xcm_config::UniversalLocation; type BridgedNetworkId = xcm_config::bridging::to_rococo::RococoNetwork; type Bridges = xcm_config::bridging::NetworkExportTable; + type DestinationVersion = PolkadotXcm; #[cfg(not(feature = "runtime-benchmarks"))] type BridgeHubOrigin = EnsureXcm>; @@ -888,7 +885,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, // Bridge utilities. ToRococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 34, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 35, @@ -939,13 +935,84 @@ pub type Migrations = ( // unreleased pallet_collator_selection::migration::v1::MigrateToV1, // unreleased - migrations::NativeAssetParents0ToParents1Migration, - // unreleased pallet_multisig::migrations::v1::MigrateToV1, // unreleased InitStorageVersions, + // unreleased + DeleteUndecodableStorage, + // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, ); +/// Asset Hub Westend has some undecodable storage, delete it. +/// See for more info. +/// +/// First we remove the bad Hold, then the bad NFT collection. +pub struct DeleteUndecodableStorage; + +impl frame_support::traits::OnRuntimeUpgrade for DeleteUndecodableStorage { + fn on_runtime_upgrade() -> Weight { + use sp_core::crypto::Ss58Codec; + + let mut writes = 0; + + // Remove Holds for account with undecodable hold + // Westend doesn't have any HoldReasons implemented yet, so it's safe to just blanket remove + // any for this account. 
+ match AccountId::from_ss58check("5GCCJthVSwNXRpbeg44gysJUx9vzjdGdfWhioeM7gCg6VyXf") { + Ok(a) => { + log::info!("Removing holds for account with bad hold"); + pallet_balances::Holds::::remove(a); + writes.saturating_inc(); + }, + Err(_) => { + log::error!("CleanupUndecodableStorage: Somehow failed to convert valid SS58 address into an AccountId!"); + }, + }; + + // Destroy undecodable NFT item 1 + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_burn(3, 1, |_| Ok(())) { + Ok(_) => { + log::info!("Destroyed undecodable NFT item 1"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT item: {:?}", e); + return ::DbWeight::get().reads_writes(0, writes) + }, + } + + // Destroy undecodable NFT item 2 + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_burn(3, 2, |_| Ok(())) { + Ok(_) => { + log::info!("Destroyed undecodable NFT item 2"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT item: {:?}", e); + return ::DbWeight::get().reads_writes(0, writes) + }, + } + + // Finally, we can destroy the collection + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_destroy_collection( + 3, + DestroyWitness { attributes: 0, item_metadatas: 1, item_configs: 0 }, + None, + ) { + Ok(_) => { + log::info!("Destroyed undecodable NFT collection"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT collection: {:?}", e); + }, + }; + + ::DbWeight::get().reads_writes(0, writes) + } +} + /// Migration to initialize storage versions for pallets added after genesis. /// /// Ideally this would be done automatically (see @@ -957,7 +1024,6 @@ pub struct InitStorageVersions; impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { fn on_runtime_upgrade() -> Weight { use frame_support::traits::{GetStorageVersion, StorageVersion}; - use sp_runtime::traits::Saturating; let mut writes = 0; @@ -1010,10 +1076,9 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1161,20 +1226,19 @@ impl_runtime_apis! { impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - u128, - Box, + MultiLocation, > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: MultiLocation, asset2: MultiLocation, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { + fn quote_price_tokens_for_exact_tokens(asset1: MultiLocation, asset2: MultiLocation, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: Box, asset2: Box) -> Option<(Balance, Balance)> { - AssetConversion::get_reserves(&asset1, &asset2).ok() + fn get_reserves(asset1: MultiLocation, asset2: MultiLocation) -> Option<(Balance, Balance)> { + AssetConversion::get_reserves(asset1, asset2).ok() } } @@ -1297,6 +1361,7 @@ impl_runtime_apis! 
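// Sketch (not part of the diff) of a caller of the reworked `AssetConversionApi` above: asset
// identifiers are now plain `MultiLocation`s and amounts are `Balance`, replacing the old boxed
// multi-asset-id plus `u128`. For example, quoting how much of a local trust-backed asset one
// WND (12 decimals) buys, fees included (the helper itself is hypothetical):
fn quote_one_wnd_in_local_asset(asset_index: u128) -> Option<Balance> {
    use xcm::latest::prelude::*;
    let native: MultiLocation = Parent.into();
    // 50 is the trust-backed `Assets` pallet index on Asset Hub (see the benchmark helper above).
    let asset = MultiLocation::new(0, X2(PalletInstance(50), GeneralIndex(asset_index)));
    AssetConversion::quote_price_exact_tokens_for_tokens(native, asset, 1_000_000_000_000, true)
}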
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1343,6 +1408,88 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + // Transfer to Relay some local AH asset (local-reserve-transfer) while paying + // fees using teleported native token. + // (We don't care that Relay doesn't accept incoming unknown AH local asset) + let dest = Parent.into(); + + let fee_amount = EXISTENTIAL_DEPOSIT; + let fee_asset: MultiAsset = (MultiLocation::parent(), fee_amount).into(); + + let who = frame_benchmarking::whitelisted_caller(); + // Give some multiple of the existential deposit + let balance = fee_amount + EXISTENTIAL_DEPOSIT * 1000; + let _ = >::make_free_balance_be( + &who, balance, + ); + // verify initial balance + assert_eq!(Balances::free_balance(&who), balance); + + // set up local asset + let asset_amount = 10u128; + let initial_asset_amount = asset_amount * 10; + let (asset_id, _, _) = pallet_assets::benchmarking::create_default_minted_asset::< + Runtime, + pallet_assets::Instance1 + >(true, initial_asset_amount); + let asset_location = MultiLocation::new( + 0, + X2(PalletInstance(50), GeneralIndex(u32::from(asset_id).into())) + ); + let transfer_asset: MultiAsset = (asset_location, asset_amount).into(); + + let assets: MultiAssets = vec![fee_asset.clone(), transfer_asset].into(); + let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; + + // verify transferred successfully + let verify = Box::new(move || { + // verify native balance after transfer, decreased by transferred fee amount + // (plus transport fees) + assert!(Balances::free_balance(&who) <= balance - fee_amount); + // verify asset balance decreased by exactly transferred amount + assert_eq!( + Assets::balance(asset_id.into(), &who), + initial_asset_amount - asset_amount, + ); + }); + Some((assets, fee_index as u32, dest, verify)) + } + } + use pallet_xcm_bridge_hub_router::benchmarking::{ Pallet as XcmBridgeHubRouterBench, Config as XcmBridgeHubRouterConfig, @@ -1354,11 +1501,26 @@ impl_runtime_apis! 
{ xcm_config::bridging::SiblingBridgeHubParaId::get().into() ); } - fn ensure_bridged_target_destination() -> MultiLocation { + fn ensure_bridged_target_destination() -> Result { ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( xcm_config::bridging::SiblingBridgeHubParaId::get().into() ); - xcm_config::bridging::to_rococo::AssetHubRococo::get() + let bridged_asset_hub = xcm_config::bridging::to_rococo::AssetHubRococo::get(); + let _ = PolkadotXcm::force_xcm_version( + RuntimeOrigin::root(), + Box::new(bridged_asset_hub), + XCM_VERSION, + ).map_err(|e| { + log::error!( + "Failed to dispatch `force_xcm_version({:?}, {:?}, {:?})`, error: {:?}", + RuntimeOrigin::root(), + bridged_asset_hub, + XCM_VERSION, + e + ); + BenchmarkError::Stop("XcmVersion was not stored!") + })?; + Ok(bridged_asset_hub) } } @@ -1536,120 +1698,3 @@ cumulus_pallet_parachain_system::register_validate_block! { Runtime = Runtime, BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } - -pub mod migrations { - use super::*; - use frame_support::{ - pallet_prelude::Get, - traits::{ - fungibles::{Inspect, Mutate}, - tokens::Preservation, - OnRuntimeUpgrade, OriginTrait, - }, - }; - use parachains_common::impls::AccountIdOf; - use sp_runtime::{traits::StaticLookup, Saturating}; - - /// Temporary migration because of bug with native asset, it can be removed once applied on - /// `AssetHubWestend`. Migrates pools with `MultiLocation { parents: 0, interior: Here }` to - /// `MultiLocation { parents: 1, interior: Here }` - pub struct NativeAssetParents0ToParents1Migration(sp_std::marker::PhantomData); - impl< - T: pallet_asset_conversion::Config< - MultiAssetId = Box, - AssetId = MultiLocation, - >, - > OnRuntimeUpgrade for NativeAssetParents0ToParents1Migration - where - ::PoolAssetId: Into, - AccountIdOf: Into<[u8; 32]>, - ::AccountId: - Into<<::RuntimeOrigin as OriginTrait>::AccountId>, - <::Lookup as StaticLookup>::Source: - From<::AccountId>, - sp_runtime::AccountId32: From<::AccountId>, - { - fn on_runtime_upgrade() -> Weight { - let invalid_native_asset = MultiLocation { parents: 0, interior: Here }; - let valid_native_asset = WestendLocation::get(); - - let mut reads: u64 = 1; - let mut writes: u64 = 0; - - // migrate pools with invalid native asset - let pools = pallet_asset_conversion::Pools::::iter().collect::>(); - reads.saturating_accrue(1); - for (old_pool_id, pool_info) in pools { - let old_pool_account = - pallet_asset_conversion::Pallet::::get_pool_account(&old_pool_id); - reads.saturating_accrue(1); - let pool_asset_id = pool_info.lp_token.clone(); - if old_pool_id.0.as_ref() != &invalid_native_asset { - // skip, if ok - continue - } - - // fix new account - let new_pool_id = pallet_asset_conversion::Pallet::::get_pool_id( - Box::new(valid_native_asset), - old_pool_id.1.clone(), - ); - let new_pool_account = - pallet_asset_conversion::Pallet::::get_pool_account(&new_pool_id); - frame_system::Pallet::::inc_providers(&new_pool_account); - reads.saturating_accrue(2); - writes.saturating_accrue(1); - - // move currency - let _ = Balances::transfer_all( - RuntimeOrigin::signed(sp_runtime::AccountId32::from(old_pool_account.clone())), - sp_runtime::AccountId32::from(new_pool_account.clone()).into(), - false, - ); - reads.saturating_accrue(2); - writes.saturating_accrue(2); - - // move LP token - let _ = T::PoolAssets::transfer( - pool_asset_id.clone(), - &old_pool_account, - &new_pool_account, - T::PoolAssets::balance(pool_asset_id.clone(), &old_pool_account), - Preservation::Expendable, - ); 
- reads.saturating_accrue(1); - writes.saturating_accrue(2); - - // change the ownership of LP token - let _ = pallet_assets::Pallet::::transfer_ownership( - RuntimeOrigin::signed(sp_runtime::AccountId32::from(old_pool_account.clone())), - pool_asset_id.into(), - sp_runtime::AccountId32::from(new_pool_account.clone()).into(), - ); - reads.saturating_accrue(1); - writes.saturating_accrue(2); - - // move LocalOrForeignAssets - let _ = T::Assets::transfer( - *old_pool_id.1.as_ref(), - &old_pool_account, - &new_pool_account, - T::Assets::balance(*old_pool_id.1.as_ref(), &old_pool_account), - Preservation::Expendable, - ); - reads.saturating_accrue(1); - writes.saturating_accrue(2); - - // dec providers for old account - let _ = frame_system::Pallet::::dec_providers(&old_pool_account); - writes.saturating_accrue(1); - - // change pool key - pallet_asset_conversion::Pools::::insert(new_pool_id, pool_info); - pallet_asset_conversion::Pools::::remove(old_pool_id); - } - - T::DbWeight::get().reads_writes(reads, writes) - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. 
- Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system.rs index 6c741af2a13dcca9cc490c13933397f6ab841c28..687b87e43915bbfa26330dd981a462571f5b79f3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system.rs @@ -151,4 +151,31 @@ impl frame_system::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 1646c00989d543bb876bb04ad3c297516e6dd946..2f1fcfb05f39151e018d74e8587faa0e79afd8b6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -14,7 +14,6 @@ // limitations under the License. pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; @@ -42,5 +41,4 @@ pub mod xcm; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs index e48f2e2ef7267cf714b5ea385290c4180365ddb5..7a5aed3d7c69ce54b229d859f56a6a2dd4881460 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs @@ -16,27 +16,23 @@ //! Autogenerated weights for `pallet_asset_conversion` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// ./target/debug/polkadot-parachain // benchmark // pallet // --chain=asset-hub-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_asset_conversion -// --no-storage-info -// --no-median-slopes -// --no-min-squares +// --steps=20 +// --repeat=2 +// --pallet=pallet-asset-conversion // --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -51,9 +47,7 @@ pub struct WeightInfo(PhantomData); impl pallet_asset_conversion::WeightInfo for WeightInfo { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Account` (r:1 w:1) /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) @@ -67,22 +61,22 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `480` - // Estimated: `6196` - // Minimum execution time: 90_011_000 picoseconds. - Weight::from_parts(92_372_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `408` + // Estimated: `4689` + // Minimum execution time: 922_000_000 picoseconds. 
+ Weight::from_parts(1_102_000_000, 0) + .saturating_add(Weight::from_parts(0, 4689)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Asset` (r:1 w:1) /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Account` (r:2 w:2) /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Account` (r:2 w:2) @@ -91,34 +85,32 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `1117` // Estimated: `7404` - // Minimum execution time: 153_484_000 picoseconds. - Weight::from_parts(155_465_000, 0) + // Minimum execution time: 1_597_000_000 picoseconds. + Weight::from_parts(1_655_000_000, 0) .saturating_add(Weight::from_parts(0, 7404)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Asset` (r:1 w:1) /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Account` (r:2 w:2) /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) /// Storage: `PoolAssets::Account` (r:1 w:1) /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn remove_liquidity() -> Weight { // Proof Size summary in bytes: // Measured: `1106` // Estimated: `7404` - // Minimum execution time: 141_326_000 picoseconds. - Weight::from_parts(143_882_000, 0) + // Minimum execution time: 1_500_000_000 picoseconds. 
+ Weight::from_parts(1_633_000_000, 0) .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `ForeignAssets::Asset` (r:2 w:2) @@ -127,15 +119,19 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn swap_exact_tokens_for_tokens() -> Weight { + /// The range of component `n` is `[2, 3]`. + fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1148` - // Estimated: `13818` - // Minimum execution time: 168_556_000 picoseconds. - Weight::from_parts(170_313_000, 0) - .saturating_add(Weight::from_parts(0, 13818)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) + // Measured: `0 + n * (557 ±0)` + // Estimated: `7404 + n * (393 ±92)` + // Minimum execution time: 930_000_000 picoseconds. + Weight::from_parts(960_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + // Standard Error: 17_993_720 + .saturating_add(Weight::from_parts(41_959_183, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -143,14 +139,18 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) /// Storage: `ForeignAssets::Account` (r:4 w:4) /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn swap_tokens_for_exact_tokens() -> Weight { + /// The range of component `n` is `[2, 3]`. + fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1148` - // Estimated: `13818` - // Minimum execution time: 167_704_000 picoseconds. - Weight::from_parts(170_034_000, 0) - .saturating_add(Weight::from_parts(0, 13818)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) + // Measured: `0 + n * (557 ±0)` + // Estimated: `7404 + n * (393 ±92)` + // Minimum execution time: 940_000_000 picoseconds. 
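// Illustrative sketch, not part of the generated file: how the component-
// parameterised swap weights above are put together and consumed. `n` is the
// number of assets in the swap path (benchmarked over 2..=3) and the per-hop
// ref-time/proof-size terms are folded in with `saturating_mul`. The constants
// are copied from `swap_exact_tokens_for_tokens` above purely for
// illustration; `RocksDbWeight` stands in for the runtime's configured
// `T::DbWeight`.
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn example_swap_weight(n: u32) -> Weight {
    Weight::from_parts(960_000_000, 0)               // base ref-time in picoseconds
        .saturating_add(Weight::from_parts(0, 7404)) // fixed proof-size ("Estimated") bound
        // per-hop ref-time term, one for each asset in the path
        .saturating_add(Weight::from_parts(41_959_183, 0).saturating_mul(n.into()))
        .saturating_add(RocksDbWeight::get().reads(4))  // storage reads priced by DbWeight
        .saturating_add(RocksDbWeight::get().writes(4)) // storage writes priced by DbWeight
        // per-hop proof-size term
        .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into()))
}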
+ Weight::from_parts(956_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + // Standard Error: 15_746_647 + .saturating_add(Weight::from_parts(39_193_877, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs index 095e784cf66d41566a7d20d6d5d2f7348fd47999..1fac2d59ab9609a0220a8d39a3a711dd8bd98437 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 340edafb0b0c3ea6319e1c2213bcface8a9399b4..504731f4a9ef743e62090582901a63f7aee78829 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,40 +64,102 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 25_534_000 picoseconds. - Weight::from_parts(26_413_000, 0) + // Minimum execution time: 25_482_000 picoseconds. + Weight::from_parts(26_622_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 20_513_000 picoseconds. - Weight::from_parts(20_837_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 87_319_000 picoseconds. 
+ Weight::from_parts(89_764_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 14_977_000 picoseconds. - Weight::from_parts(15_207_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `367` + // Estimated: `6196` + // Minimum execution time: 139_133_000 picoseconds. 
+ Weight::from_parts(141_507_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `496` + // Estimated: `6208` + // Minimum execution time: 144_241_000 picoseconds. + Weight::from_parts(149_709_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(7)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_440_000 picoseconds. - Weight::from_parts(7_651_000, 0) + // Minimum execution time: 10_392_000 picoseconds. + Weight::from_parts(10_779_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -106,8 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_253_000 picoseconds. - Weight::from_parts(7_584_000, 0) + // Minimum execution time: 7_088_000 picoseconds. + Weight::from_parts(7_257_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +179,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_299_000 picoseconds. - Weight::from_parts(2_435_000, 0) + // Minimum execution time: 2_095_000 picoseconds. + Weight::from_parts(2_136_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -144,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 29_440_000 picoseconds. 
- Weight::from_parts(30_675_000, 0) + // Minimum execution time: 28_728_000 picoseconds. + Weight::from_parts(29_349_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -170,8 +232,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 31_876_000 picoseconds. - Weight::from_parts(32_588_000, 0) + // Minimum execution time: 30_605_000 picoseconds. + Weight::from_parts(31_477_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -182,8 +244,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_385_000 picoseconds. - Weight::from_parts(2_607_000, 0) + // Minimum execution time: 2_137_000 picoseconds. + Weight::from_parts(2_303_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -193,8 +255,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 16_927_000 picoseconds. - Weight::from_parts(17_554_000, 0) + // Minimum execution time: 16_719_000 picoseconds. + Weight::from_parts(17_329_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -205,8 +267,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 16_965_000 picoseconds. - Weight::from_parts(17_807_000, 0) + // Minimum execution time: 16_687_000 picoseconds. + Weight::from_parts(17_405_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -217,8 +279,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 18_763_000 picoseconds. - Weight::from_parts(19_359_000, 0) + // Minimum execution time: 18_751_000 picoseconds. + Weight::from_parts(19_130_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } @@ -240,8 +302,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_371_000 picoseconds. - Weight::from_parts(28_185_000, 0) + // Minimum execution time: 27_189_000 picoseconds. + Weight::from_parts(27_760_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -252,8 +314,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_165_000 picoseconds. - Weight::from_parts(9_539_000, 0) + // Minimum execution time: 9_307_000 picoseconds. + Weight::from_parts(9_691_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -263,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_384_000 picoseconds. 
- Weight::from_parts(17_777_000, 0) + // Minimum execution time: 17_607_000 picoseconds. + Weight::from_parts(18_090_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -287,8 +349,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `11105` - // Minimum execution time: 34_260_000 picoseconds. - Weight::from_parts(35_428_000, 0) + // Minimum execution time: 34_322_000 picoseconds. + Weight::from_parts(35_754_000, 0) .saturating_add(Weight::from_parts(0, 11105)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -301,8 +363,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_710_000 picoseconds. - Weight::from_parts(4_900_000, 0) + // Minimum execution time: 4_513_000 picoseconds. + Weight::from_parts(4_754_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -313,8 +375,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_843_000 picoseconds. - Weight::from_parts(27_404_000, 0) + // Minimum execution time: 27_860_000 picoseconds. + Weight::from_parts(28_279_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs index 9d0d0cbc655586937893e4d017475175b2fca63e..84d717b0283c764cac14cce63ca34f81c9f58e8c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,8 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm_bridge_hub_router`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) @@ -58,22 +58,22 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `193` // Estimated: `1678` - // Minimum execution time: 8_157_000 picoseconds. - Weight::from_parts(8_481_000, 0) + // Minimum execution time: 8_095_000 picoseconds. + Weight::from_parts(8_393_000, 0) .saturating_add(Weight::from_parts(0, 1678)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: // Measured: `111` // Estimated: `1596` - // Minimum execution time: 3_319_000 picoseconds. - Weight::from_parts(3_445_000, 0) + // Minimum execution time: 3_417_000 picoseconds. + Weight::from_parts(3_583_000, 0) .saturating_add(Weight::from_parts(0, 1596)) .saturating_add(T::DbWeight::get().reads(2)) } @@ -83,22 +83,24 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `117` // Estimated: `1502` - // Minimum execution time: 10_396_000 picoseconds. - Weight::from_parts(10_914_000, 0) + // Minimum execution time: 10_280_000 picoseconds. 
+ Weight::from_parts(10_703_000, 0) .saturating_add(Weight::from_parts(0, 1502)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) @@ -107,18 +109,18 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send_message() -> Weight { // Proof Size summary in bytes: - // Measured: `426` - // Estimated: `3891` - // Minimum execution time: 45_902_000 picoseconds. - Weight::from_parts(46_887_000, 0) - .saturating_add(Weight::from_parts(0, 3891)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `487` + // Estimated: `6427` + // Minimum execution time: 63_624_000 picoseconds. 
+ Weight::from_parts(66_071_000, 0) + .saturating_add(Weight::from_parts(0, 6427)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 96045ac3322ec2b20d4271ba5700b2c08b1297be..4c30e8bf51b4887946fec93dd9c63f64450f00c4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -32,33 +32,30 @@ use pallet_xcm::XcmPassthrough; use parachains_common::{ impls::ToStakingPot, xcm_config::{ - AssetFeeAsExistentialDepositMultiplier, ConcreteAssetFromSystem, - RelayOrOtherSystemParachains, + AllSiblingSystemParachains, AssetFeeAsExistentialDepositMultiplier, + ConcreteAssetFromSystem, RelayOrOtherSystemParachains, }, TREASURY_PALLET_ID, }; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::{AccountIdConversion, ConvertInto}; -use westend_runtime_constants::system_parachain; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, DescribeFamily, DescribePalletTerminal, - EnsureXcmOrigin, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, - IsConcrete, LocalMint, NetworkExportTableItem, NoChecking, ParentAsSuperuser, ParentIsPreset, - RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, - StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, - XcmFeeToAccount, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, + DenyThenTry, DescribeFamily, DescribePalletTerminal, EnsureXcmOrigin, FungiblesAdapter, + GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, LocalMint, + NetworkExportTableItem, NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, + TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, + WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; -#[cfg(feature = "runtime-benchmarks")] -use {cumulus_primitives_core::ParaId, sp_core::Get}; - parameter_types! { pub const WestendLocation: MultiLocation = MultiLocation::parent(); pub const RelayNetwork: Option = Some(NetworkId::Westend); @@ -66,8 +63,8 @@ parameter_types! 
{ pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); pub UniversalLocationNetworkId: NetworkId = UniversalLocation::get().global_consensus().unwrap(); - pub TrustBackedAssetsPalletLocation: MultiLocation = - PalletInstance(::index() as u8).into(); + pub AssetsPalletIndex: u32 = ::index() as u32; + pub TrustBackedAssetsPalletLocation: MultiLocation = PalletInstance(AssetsPalletIndex::get() as u8).into(); pub ForeignAssetsPalletLocation: MultiLocation = PalletInstance(::index() as u8).into(); pub PoolAssetsPalletLocation: MultiLocation = @@ -96,6 +93,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. +#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, @@ -109,7 +107,7 @@ pub type CurrencyTransactor = CurrencyAdapter< (), >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `TrustBackedAssets`. pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -130,7 +128,7 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `ForeignAssets`. pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< ( // Ignore `TrustBackedAssets` explicitly @@ -240,6 +238,18 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; + pub type FellowshipEntities: impl Contains = { + // Fellowship Plurality + MultiLocation { parents: 1, interior: X2(Parachain(1001), Plurality { id: BodyId::Technical, ..}) } | + // Fellowship Salary Pallet + MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(64)) } | + // Fellowship Treasury Pallet + MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(65)) } + }; + pub type AmbassadorEntities: impl Contains = { + // Ambassador Salary Pallet + MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(74)) } + }; } /// A call filter for the XCM Transact instruction. This is a temporary measure until we properly @@ -272,24 +282,21 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::authorize_upgrade { .. } | + frame_system::Call::authorize_upgrade_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. 
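// Illustrative sketch, not part of this patch: `CurrencyAdapter` is kept above
// only behind `#[allow(deprecated)]`; `xcm_builder::FungibleAdapter` is its
// `fungible`-trait successor. Assuming the transactor keeps the parameters of
// `CurrencyTransactor` above, a migration would look roughly like this:
pub type ExampleFungibleTransactor = xcm_builder::FungibleAdapter<
    // Use this `fungible` implementation:
    Balances,
    // Match only the native token's concrete location:
    IsConcrete<WestendLocation>,
    // Convert an XCM `MultiLocation` into a local account id:
    LocationToAccountId,
    // This chain's account ID type:
    AccountId,
    // We don't track any teleports of the native asset here:
    (),
>;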
} | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::CollatorSelection(..) | + RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | RuntimeCall::Assets( @@ -486,6 +493,8 @@ pub type Barrier = TrailingSetTopicAsId< ParentOrParentsPlurality, Equals, Equals, + FellowshipEntities, + AmbassadorEntities, )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, @@ -518,24 +527,15 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = ForeignAssetsInstance, >; -match_types! { - pub type SystemParachains: impl Contains = { - MultiLocation { - parents: 1, - interior: X1(Parachain( - system_parachain::ASSET_HUB_ID | - system_parachain::COLLECTIVES_ID | - system_parachain::BRIDGE_HUB_ID - )), - } - }; -} - /// Locations that will not be charged fees in the executor, /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. -pub type WaivedLocations = - (RelayOrOtherSystemParachains, Equals); +pub type WaivedLocations = ( + RelayOrOtherSystemParachains, + Equals, + FellowshipEntities, + AmbassadorEntities, +); /// Cases where a remote origin is accepted as trusted Teleporter for a given asset: /// @@ -636,11 +636,6 @@ pub type XcmRouter = WithUniqueTopic<( ToRococoXcmRouter, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -666,8 +661,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -693,34 +686,6 @@ impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { } } -#[cfg(feature = "runtime-benchmarks")] -pub struct BenchmarkMultiLocationConverter { - _phantom: sp_std::marker::PhantomData, -} - -#[cfg(feature = "runtime-benchmarks")] -impl - pallet_asset_conversion::BenchmarkHelper> - for BenchmarkMultiLocationConverter -where - SelfParaId: Get, -{ - fn asset_id(asset_id: u32) -> MultiLocation { - MultiLocation { - parents: 1, - interior: X3( - Parachain(SelfParaId::get().into()), - PalletInstance(::index() as u8), - GeneralIndex(asset_id.into()), - ), - } - } - - fn multiasset_id(asset_id: u32) -> sp_std::boxed::Box { - sp_std::boxed::Box::new(Self::asset_id(asset_id)) - } -} - /// All configuration related to bridging pub mod bridging { use super::*; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index de87a98fb0b81147b62fb9723a03b0e282c1acd9..0aaf1d91879aecfc78af5ed75c0220c29933f039 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -24,9 +24,9 @@ use asset_hub_westend_runtime::{ WestendLocation, XcmConfig, }, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, - 
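// Illustrative sketch, not part of this patch: `match_types!` expands the new
// `FellowshipEntities` and `AmbassadorEntities` into `Contains<MultiLocation>`
// filters, which is how the barrier and `WaivedLocations` above consume them.
// A test along these lines (assuming it lives where the `xcm_config` items are
// in scope) could pin down the matched locations:
#[test]
fn fellowship_and_ambassador_entities_match_expected_locations() {
    use frame_support::traits::Contains;
    use xcm::latest::prelude::*;

    let fellowship_salary =
        MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(64)) };
    let ambassador_salary =
        MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(74)) };

    assert!(FellowshipEntities::contains(&fellowship_salary));
    assert!(AmbassadorEntities::contains(&ambassador_salary));
    // The two filters are disjoint: each only admits its own locations.
    assert!(!FellowshipEntities::contains(&ambassador_salary));
    assert!(!AmbassadorEntities::contains(&fellowship_salary));
}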
ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, ToRococoXcmRouterInstance, TrustBackedAssetsInstance, - XcmpQueue, + ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, + PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, + ToRococoXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, ExtBuilder, @@ -525,12 +525,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -641,6 +635,12 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p ); fn bridging_to_asset_hub_rococo() -> TestBridgingConfig { + let _ = PolkadotXcm::force_xcm_version( + RuntimeOrigin::root(), + Box::new(bridging::to_rococo::AssetHubRococo::get()), + XCM_VERSION, + ) + .expect("version saved!"); TestBridgingConfig { bridged_network: bridging::to_rococo::RococoNetwork::get(), local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), @@ -815,3 +815,32 @@ fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { }, ) } + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index 49fc2a0fa5eba5cc5897c144347e3da0aa029202..22729df5ed5ca845d7a554758e3f9e3b7db8aaec 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Assets common utilities" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } @@ -13,18 +16,18 @@ log = { version = "0.4.20", default-features = false } impl-trait-for-tuples = "0.2.2" # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-std = { path = 
"../../../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus parachains-common = { path = "../../../common", default-features = false } @@ -34,7 +37,7 @@ cumulus-primitives-core = { path = "../../../../primitives/core", default-featur substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/parachains/runtimes/assets/common/src/benchmarks.rs b/cumulus/parachains/runtimes/assets/common/src/benchmarks.rs new file mode 100644 index 0000000000000000000000000000000000000000..344cb5ca336840046d0de80ae203ba34bede7cc4 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/common/src/benchmarks.rs @@ -0,0 +1,44 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use cumulus_primitives_core::ParaId; +use sp_runtime::traits::Get; +use sp_std::marker::PhantomData; +use xcm::latest::prelude::*; + +/// Creates asset pairs for liquidity pools with `Target` always being the first asset. 
+pub struct AssetPairFactory( + PhantomData<(Target, SelfParaId, PalletId)>, +); +impl, SelfParaId: Get, PalletId: Get> + pallet_asset_conversion::BenchmarkHelper + for AssetPairFactory +{ + fn create_pair(seed1: u32, seed2: u32) -> (MultiLocation, MultiLocation) { + let with_id = MultiLocation::new( + 1, + X3( + Parachain(SelfParaId::get().into()), + PalletInstance(PalletId::get() as u8), + GeneralIndex(seed2.into()), + ), + ); + if seed1 % 2 == 0 { + (with_id, Target::get()) + } else { + (Target::get(), with_id) + } + } +} diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index f45c3289aab49c16e2b435671d3625f78692f07d..15327f51b2a994fb41e12a72c6e848a2d5139178 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -15,6 +15,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarks; pub mod foreign_creators; pub mod fungible_conversion; pub mod local_and_foreign_assets; diff --git a/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs b/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs index 9f429016f5302a28a61753331126a48f77ee7565..7dd497797eaa54e819f4991979c1c126df9baac3 100644 --- a/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs +++ b/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs @@ -13,49 +13,42 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::traits::{ - fungibles::{Balanced, Create, HandleImbalanceDrop, Inspect, Mutate, Unbalanced}, - tokens::{ - DepositConsequence, Fortitude, Precision, Preservation, Provenance, WithdrawConsequence, - }, - AccountTouch, Contains, ContainsPair, Get, PalletInfoAccess, +use frame_support::traits::Get; +use sp_runtime::{ + traits::{Convert, MaybeEquivalence}, + Either, + Either::{Left, Right}, }; -use pallet_asset_conversion::{MultiAssetIdConversionResult, MultiAssetIdConverter}; -use parachains_common::AccountId; -use sp_runtime::{traits::MaybeEquivalence, DispatchError, DispatchResult}; -use sp_std::{boxed::Box, marker::PhantomData}; +use sp_std::marker::PhantomData; use xcm::latest::MultiLocation; -pub struct MultiLocationConverter, MultiLocationMatcher> { - _phantom: PhantomData<(NativeAssetLocation, MultiLocationMatcher)>, +/// Converts a given [`MultiLocation`] to [`Either::Left`] when equal to `Target`, or +/// [`Either::Right`] otherwise. +/// +/// Suitable for use as a `Criterion` with [`frame_support::traits::tokens::fungible::UnionOf`]. +pub struct TargetFromLeft(PhantomData); +impl> Convert> + for TargetFromLeft +{ + fn convert(l: MultiLocation) -> Either<(), MultiLocation> { + Target::get().eq(&l).then(|| Left(())).map_or(Right(l), |n| n) + } } -impl - MultiAssetIdConverter, MultiLocation> - for MultiLocationConverter +/// Converts a given [`MultiLocation`] to [`Either::Left`] based on the `Equivalence` criteria. +/// Returns [`Either::Right`] if not equivalent. +/// +/// Suitable for use as a `Criterion` with [`frame_support::traits::tokens::fungibles::UnionOf`]. 
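// Illustrative sketch, not part of this patch: in the Asset Hub Westend
// runtime the `AssetPairFactory` above takes over from the
// `BenchmarkMultiLocationConverter` removed from `xcm_config.rs`.
// `WestendLocation` and `AssetsPalletIndex` are defined in that runtime's
// `xcm_config`; `SelfParaId` is a hypothetical `Get<ParaId>` stand-in named
// only for this example.
#[cfg(feature = "runtime-benchmarks")]
pub type ExamplePoolAssetPairFactory = assets_common::benchmarks::AssetPairFactory<
    xcm_config::WestendLocation,   // `Target`: the native asset, always first in the pair
    SelfParaId,                    // `Get<ParaId>` identifying this parachain
    xcm_config::AssetsPalletIndex, // `Get<u32>`: index of the local `Assets` pallet
>;
// Such an alias would then be handed to `pallet_asset_conversion` as its
// benchmark-only helper in the runtime's `Config` implementation.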
+pub struct LocalFromLeft(PhantomData<(Equivalence, AssetId)>); +impl Convert> + for LocalFromLeft where - NativeAssetLocation: Get, - MultiLocationMatcher: Contains, + Equivalence: MaybeEquivalence, { - fn get_native() -> Box { - Box::new(NativeAssetLocation::get()) - } - - fn is_native(asset_id: &Box) -> bool { - *asset_id == Self::get_native() - } - - fn try_convert( - asset_id: &Box, - ) -> MultiAssetIdConversionResult, MultiLocation> { - if Self::is_native(&asset_id) { - return MultiAssetIdConversionResult::Native - } - - if MultiLocationMatcher::contains(&asset_id) { - MultiAssetIdConversionResult::Converted(*asset_id.clone()) - } else { - MultiAssetIdConversionResult::Unsupported(asset_id.clone()) + fn convert(l: MultiLocation) -> Either { + match Equivalence::convert(&l) { + Some(id) => Left(id), + None => Right(l), } } } @@ -64,407 +57,3 @@ pub trait MatchesLocalAndForeignAssetsMultiLocation { fn is_local(location: &MultiLocation) -> bool; fn is_foreign(location: &MultiLocation) -> bool; } - -pub struct LocalAndForeignAssets { - _phantom: PhantomData<(Assets, LocalAssetIdConverter, ForeignAssets)>, -} - -impl Unbalanced - for LocalAndForeignAssets -where - Assets: Inspect - + Unbalanced - + Balanced - + PalletInfoAccess, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: Inspect - + Unbalanced - + Balanced, -{ - fn handle_dust(dust: frame_support::traits::fungibles::Dust) { - let credit = dust.into_credit(); - - if let Some(asset) = LocalAssetIdConverter::convert(&credit.asset()) { - Assets::handle_raw_dust(asset, credit.peek()); - } else { - ForeignAssets::handle_raw_dust(credit.asset(), credit.peek()); - } - - // As we have already handled the dust, we must stop credit's drop from happening: - sp_std::mem::forget(credit); - } - - fn write_balance( - asset: >::AssetId, - who: &AccountId, - amount: >::Balance, - ) -> Result>::Balance>, DispatchError> { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::write_balance(asset, who, amount) - } else { - ForeignAssets::write_balance(asset, who, amount) - } - } - - /// Set the total issuance of `asset` to `amount`. - fn set_total_issuance(asset: Self::AssetId, amount: Self::Balance) { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::set_total_issuance(asset, amount) - } else { - ForeignAssets::set_total_issuance(asset, amount) - } - } - - fn decrease_balance( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - precision: Precision, - preservation: Preservation, - force: Fortitude, - ) -> Result { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::decrease_balance(asset, who, amount, precision, preservation, force) - } else { - ForeignAssets::decrease_balance(asset, who, amount, precision, preservation, force) - } - } - - fn increase_balance( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - precision: Precision, - ) -> Result { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::increase_balance(asset, who, amount, precision) - } else { - ForeignAssets::increase_balance(asset, who, amount, precision) - } - } -} - -impl Inspect - for LocalAndForeignAssets -where - Assets: Inspect, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: Inspect, -{ - type AssetId = MultiLocation; - type Balance = u128; - - /// The total amount of issuance in the system. 
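// Illustrative sketch, not part of this patch: the `TargetFromLeft` and
// `LocalFromLeft` criteria above are the glue for the `UnionOf` types that
// replace the `LocalAndForeignAssets` adapter removed here. Apart from the
// criteria and `AssetIdForTrustBackedAssetsConvert` (all from this crate), the
// names below are assumptions borrowed from a typical Asset Hub runtime:
// `Assets`/`ForeignAssets` are `pallet_assets` instances keyed by `u32` and
// `MultiLocation`, `Balances` is the native currency, and `WestendLocation`,
// `TrustBackedAssetsPalletLocation` and `AccountId` come from the runtime's
// configuration. The `UnionOf` parameter order shown here is assumed to be
// `<Left, Right, Criterion, AssetKind, AccountId>`.
pub type ExampleLocalAndForeignAssets = frame_support::traits::tokens::fungibles::UnionOf<
    Assets,
    ForeignAssets,
    LocalFromLeft<AssetIdForTrustBackedAssetsConvert<TrustBackedAssetsPalletLocation>, u32>,
    MultiLocation,
    AccountId,
>;
pub type ExampleNativeAndAssets = frame_support::traits::tokens::fungible::UnionOf<
    Balances,
    ExampleLocalAndForeignAssets,
    TargetFromLeft<WestendLocation>, // the native asset location maps to `Left(())`
    MultiLocation,
    AccountId,
>;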
- fn total_issuance(asset: Self::AssetId) -> Self::Balance { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::total_issuance(asset) - } else { - ForeignAssets::total_issuance(asset) - } - } - - /// The minimum balance any single account may have. - fn minimum_balance(asset: Self::AssetId) -> Self::Balance { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::minimum_balance(asset) - } else { - ForeignAssets::minimum_balance(asset) - } - } - - fn total_balance( - asset: >::AssetId, - account: &AccountId, - ) -> >::Balance { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::total_balance(asset, account) - } else { - ForeignAssets::total_balance(asset, account) - } - } - - /// Get the `asset` balance of `who`. - fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::balance(asset, who) - } else { - ForeignAssets::balance(asset, who) - } - } - - /// Get the maximum amount of `asset` that `who` can withdraw/transfer successfully. - fn reducible_balance( - asset: Self::AssetId, - who: &AccountId, - presevation: Preservation, - fortitude: Fortitude, - ) -> Self::Balance { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::reducible_balance(asset, who, presevation, fortitude) - } else { - ForeignAssets::reducible_balance(asset, who, presevation, fortitude) - } - } - - /// Returns `true` if the `asset` balance of `who` may be increased by `amount`. - /// - /// - `asset`: The asset that should be deposited. - /// - `who`: The account of which the balance should be increased by `amount`. - /// - `amount`: How much should the balance be increased? - /// - `mint`: Will `amount` be minted to deposit it into `account`? - fn can_deposit( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - mint: Provenance, - ) -> DepositConsequence { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::can_deposit(asset, who, amount, mint) - } else { - ForeignAssets::can_deposit(asset, who, amount, mint) - } - } - - /// Returns `Failed` if the `asset` balance of `who` may not be decreased by `amount`, otherwise - /// the consequence. - fn can_withdraw( - asset: Self::AssetId, - who: &AccountId, - amount: Self::Balance, - ) -> WithdrawConsequence { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::can_withdraw(asset, who, amount) - } else { - ForeignAssets::can_withdraw(asset, who, amount) - } - } - - /// Returns `true` if an `asset` exists. - fn asset_exists(asset: Self::AssetId) -> bool { - if let Some(asset) = LocalAssetIdConverter::convert(&asset) { - Assets::asset_exists(asset) - } else { - ForeignAssets::asset_exists(asset) - } - } -} - -impl Mutate - for LocalAndForeignAssets -where - Assets: Mutate - + Inspect - + Balanced - + PalletInfoAccess, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: Mutate - + Inspect - + Balanced, -{ - /// Transfer funds from one account into another. 
- fn transfer( - asset: MultiLocation, - source: &AccountId, - dest: &AccountId, - amount: Self::Balance, - keep_alive: Preservation, - ) -> Result { - if let Some(asset_id) = LocalAssetIdConverter::convert(&asset) { - Assets::transfer(asset_id, source, dest, amount, keep_alive) - } else { - ForeignAssets::transfer(asset, source, dest, amount, keep_alive) - } - } -} - -impl Create - for LocalAndForeignAssets -where - Assets: Create + Inspect, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: Create + Inspect, -{ - /// Create a new fungible asset. - fn create( - asset_id: Self::AssetId, - admin: AccountId, - is_sufficient: bool, - min_balance: Self::Balance, - ) -> DispatchResult { - if let Some(asset_id) = LocalAssetIdConverter::convert(&asset_id) { - Assets::create(asset_id, admin, is_sufficient, min_balance) - } else { - ForeignAssets::create(asset_id, admin, is_sufficient, min_balance) - } - } -} - -impl AccountTouch - for LocalAndForeignAssets -where - Assets: AccountTouch, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: AccountTouch, -{ - type Balance = u128; - - fn deposit_required( - asset_id: MultiLocation, - ) -> >::Balance { - if let Some(asset_id) = LocalAssetIdConverter::convert(&asset_id) { - Assets::deposit_required(asset_id) - } else { - ForeignAssets::deposit_required(asset_id) - } - } - - fn touch( - asset_id: MultiLocation, - who: AccountId, - depositor: AccountId, - ) -> Result<(), DispatchError> { - if let Some(asset_id) = LocalAssetIdConverter::convert(&asset_id) { - Assets::touch(asset_id, who, depositor) - } else { - ForeignAssets::touch(asset_id, who, depositor) - } - } -} - -/// Implements [`ContainsPair`] trait for a pair of asset and account IDs. -impl ContainsPair - for LocalAndForeignAssets -where - Assets: PalletInfoAccess + ContainsPair, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: ContainsPair, -{ - /// Check if an account with the given asset ID and account address exists. 
- fn contains(asset_id: &MultiLocation, who: &AccountId) -> bool { - if let Some(asset_id) = LocalAssetIdConverter::convert(asset_id) { - Assets::contains(&asset_id, &who) - } else { - ForeignAssets::contains(&asset_id, &who) - } - } -} - -impl Balanced - for LocalAndForeignAssets -where - Assets: - Balanced + Inspect + PalletInfoAccess, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: - Balanced + Inspect, -{ - type OnDropDebt = DebtDropIndirection; - type OnDropCredit = CreditDropIndirection; -} - -pub struct DebtDropIndirection { - _phantom: PhantomData>, -} - -impl HandleImbalanceDrop - for DebtDropIndirection -where - Assets: Balanced + Inspect, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: - Balanced + Inspect, -{ - fn handle(asset: MultiLocation, amount: u128) { - if let Some(asset_id) = LocalAssetIdConverter::convert(&asset) { - Assets::OnDropDebt::handle(asset_id, amount); - } else { - ForeignAssets::OnDropDebt::handle(asset, amount); - } - } -} - -pub struct CreditDropIndirection { - _phantom: PhantomData>, -} - -impl HandleImbalanceDrop - for CreditDropIndirection -where - Assets: Balanced + Inspect, - LocalAssetIdConverter: MaybeEquivalence, - ForeignAssets: - Balanced + Inspect, -{ - fn handle(asset: MultiLocation, amount: u128) { - if let Some(asset_id) = LocalAssetIdConverter::convert(&asset) { - Assets::OnDropCredit::handle(asset_id, amount); - } else { - ForeignAssets::OnDropCredit::handle(asset, amount); - } - } -} - -#[cfg(test)] -mod tests { - use crate::{ - local_and_foreign_assets::MultiLocationConverter, AssetIdForPoolAssetsConvert, - AssetIdForTrustBackedAssetsConvert, - }; - use frame_support::traits::EverythingBut; - use pallet_asset_conversion::{MultiAssetIdConversionResult, MultiAssetIdConverter}; - use sp_runtime::traits::MaybeEquivalence; - use xcm::latest::prelude::*; - use xcm_builder::StartsWith; - - #[test] - fn test_multi_location_converter_works() { - frame_support::parameter_types! 
{ - pub const WestendLocation: MultiLocation = MultiLocation::parent(); - pub TrustBackedAssetsPalletLocation: MultiLocation = PalletInstance(50_u8).into(); - pub PoolAssetsPalletLocation: MultiLocation = PalletInstance(55_u8).into(); - } - - type C = MultiLocationConverter< - WestendLocation, - EverythingBut>, - >; - - let native_asset = WestendLocation::get(); - let local_asset = - AssetIdForTrustBackedAssetsConvert::::convert_back( - &123, - ) - .unwrap(); - let pool_asset = - AssetIdForPoolAssetsConvert::::convert_back(&456).unwrap(); - let foreign_asset1 = MultiLocation { parents: 1, interior: X1(Parachain(2222)) }; - let foreign_asset2 = MultiLocation { - parents: 2, - interior: X2(GlobalConsensus(ByGenesis([1; 32])), Parachain(2222)), - }; - - assert!(C::is_native(&Box::new(native_asset))); - assert!(!C::is_native(&Box::new(local_asset))); - assert!(!C::is_native(&Box::new(pool_asset))); - assert!(!C::is_native(&Box::new(foreign_asset1))); - assert!(!C::is_native(&Box::new(foreign_asset2))); - - assert_eq!(C::try_convert(&Box::new(native_asset)), MultiAssetIdConversionResult::Native); - assert_eq!( - C::try_convert(&Box::new(local_asset)), - MultiAssetIdConversionResult::Converted(local_asset) - ); - assert_eq!( - C::try_convert(&Box::new(pool_asset)), - MultiAssetIdConversionResult::Unsupported(Box::new(pool_asset)) - ); - assert_eq!( - C::try_convert(&Box::new(foreign_asset1)), - MultiAssetIdConversionResult::Converted(foreign_asset1) - ); - assert_eq!( - C::try_convert(&Box::new(foreign_asset2)), - MultiAssetIdConversionResult::Converted(foreign_asset2) - ); - } -} diff --git a/cumulus/parachains/runtimes/assets/common/src/matching.rs b/cumulus/parachains/runtimes/assets/common/src/matching.rs index 4014f5f7afa13f6b0cd5ff9c96e5e4f11e73c641..0a43a75a83f18803ee283900905c97531d709cb9 100644 --- a/cumulus/parachains/runtimes/assets/common/src/matching.rs +++ b/cumulus/parachains/runtimes/assets/common/src/matching.rs @@ -58,6 +58,37 @@ impl> ContainsPair } } +/// Checks if `a` is from the expected global consensus network. Checks that `MultiLocation-a` +/// starts with `MultiLocation-b`, and that network is a foreign consensus system. +pub struct FromNetwork( + sp_std::marker::PhantomData<(UniversalLocation, ExpectedNetworkId)>, +); +impl, ExpectedNetworkId: Get> + ContainsPair for FromNetwork +{ + fn contains(&a: &MultiLocation, b: &MultiLocation) -> bool { + // `a` needs to be from `b` at least + if !a.starts_with(b) { + return false + } + + let universal_source = UniversalLocation::get(); + + // ensure that `a`` is remote and from the expected network + match ensure_is_remote(universal_source, a) { + Ok((network_id, _)) => network_id == ExpectedNetworkId::get(), + Err(e) => { + log::trace!( + target: "xcm::contains", + "FromNetwork origin: {:?} is not remote to the universal_source: {:?} {:?}", + a, universal_source, e + ); + false + }, + } + } +} + /// Adapter verifies if it is allowed to receive `MultiAsset` from `MultiLocation`. /// /// Note: `MultiLocation` has to be from a different global consensus. @@ -95,3 +126,92 @@ impl< Reserves::contains(asset, origin) } } + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::parameter_types; + + parameter_types! 
{ + pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(1000)); + pub ExpectedNetworkId: NetworkId = Wococo; + } + + #[test] + fn from_network_contains_works() { + // asset and origin from foreign consensus works + let asset: MultiLocation = ( + Parent, + Parent, + GlobalConsensus(Wococo), + Parachain(1000), + PalletInstance(1), + GeneralIndex(1), + ) + .into(); + let origin: MultiLocation = + (Parent, Parent, GlobalConsensus(Wococo), Parachain(1000)).into(); + assert!(FromNetwork::::contains(&asset, &origin)); + + // asset and origin from local consensus fails + let asset: MultiLocation = ( + Parent, + Parent, + GlobalConsensus(Rococo), + Parachain(1000), + PalletInstance(1), + GeneralIndex(1), + ) + .into(); + let origin: MultiLocation = + (Parent, Parent, GlobalConsensus(Rococo), Parachain(1000)).into(); + assert!(!FromNetwork::::contains(&asset, &origin)); + + // asset and origin from here fails + let asset: MultiLocation = (PalletInstance(1), GeneralIndex(1)).into(); + let origin: MultiLocation = Here.into(); + assert!(!FromNetwork::::contains(&asset, &origin)); + + // asset from different consensus fails + let asset: MultiLocation = ( + Parent, + Parent, + GlobalConsensus(Polkadot), + Parachain(1000), + PalletInstance(1), + GeneralIndex(1), + ) + .into(); + let origin: MultiLocation = + (Parent, Parent, GlobalConsensus(Wococo), Parachain(1000)).into(); + assert!(!FromNetwork::::contains(&asset, &origin)); + + // origin from different consensus fails + let asset: MultiLocation = ( + Parent, + Parent, + GlobalConsensus(Wococo), + Parachain(1000), + PalletInstance(1), + GeneralIndex(1), + ) + .into(); + let origin: MultiLocation = + (Parent, Parent, GlobalConsensus(Polkadot), Parachain(1000)).into(); + assert!(!FromNetwork::::contains(&asset, &origin)); + + // asset and origin from unexpected consensus fails + let asset: MultiLocation = ( + Parent, + Parent, + GlobalConsensus(Polkadot), + Parachain(1000), + PalletInstance(1), + GeneralIndex(1), + ) + .into(); + let origin: MultiLocation = + (Parent, Parent, GlobalConsensus(Polkadot), Parachain(1000)).into(); + assert!(!FromNetwork::::contains(&asset, &origin)); + } +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 1dc7cecbb62eec9724fd626d69fe1c3e0f0554f0..a3ed37596002987d68f7c59310f3abc6e9e0ee63 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -6,23 +6,26 @@ edition.workspace = true description = "Test utils for Asset Hub runtimes." 
license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -50,7 +53,7 @@ hex-literal = "0.4.1" substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "assets-common/std", "codec/std", diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs index 471b1f09b567eba51be2189cab0a11c2959ce099..872ad06ddd5b063d4013b296654541c1a6e02681 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs @@ -27,6 +27,21 @@ use std::fmt::Debug; use xcm::latest::prelude::*; use xcm_builder::{CreateMatcher, MatchXcm}; +/// Given a message, a sender, and a destination, it returns the delivery fees +fn get_fungible_delivery_fees(destination: MultiLocation, message: Xcm<()>) -> u128 { + let 
Ok((_, delivery_fees)) = validate_send::(destination, message) else { + unreachable!("message can be sent; qed") + }; + if let Some(delivery_fee) = delivery_fees.inner().first() { + let Fungible(delivery_fee_amount) = delivery_fee.fun else { + unreachable!("asset is fungible; qed"); + }; + delivery_fee_amount + } else { + 0 + } +} + /// Helper function to verify `xcm` contains all relevant instructions expected on destination /// chain as part of a reserve-asset-transfer. pub(crate) fn assert_matches_reserve_asset_deposited_instructions( diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 5fb34e7a571f5efa60338eddecdb1914c78767c0..915d99470c36510745925da4509c129b7c57533b 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -16,25 +16,28 @@ //! Module contains predefined test-case scenarios for `Runtime` with various assets. use super::xcm_helpers; +use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees}; use codec::Encode; +use cumulus_primitives_core::XcmpMessageSource; use frame_support::{ assert_noop, assert_ok, traits::{ - fungible::Mutate, fungibles::InspectEnumerable, Get, OnFinalize, OnInitialize, OriginTrait, + fungible::Mutate, fungibles::InspectEnumerable, Currency, Get, OnFinalize, OnInitialize, + OriginTrait, }, weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; use parachains_common::{AccountId, Balance}; use parachains_runtimes_test_utils::{ - assert_metadata, assert_total, AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder, - ValidatorIdOf, XcmReceivedFrom, + assert_metadata, assert_total, mock_open_hrmp_channel, AccountIdOf, BalanceOf, + CollatorSessionKeys, ExtBuilder, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{ traits::{MaybeEquivalence, StaticLookup, Zero}, DispatchError, Saturating, }; -use xcm::latest::prelude::*; +use xcm::{latest::prelude::*, VersionedMultiAssets}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; type RuntimeHelper = @@ -43,8 +46,8 @@ type RuntimeHelper = // Re-export test_case from `parachains-runtimes-test-utils` pub use parachains_runtimes_test_utils::test_cases::change_storage_constant_by_governance_works; -/// Test-case makes sure that `Runtime` can receive native asset from relay chain -/// and can teleport it back and to the other parachains +/// Test-case makes sure that `Runtime` can receive native asset from relay chain and can teleport +/// it back pub fn teleports_for_native_asset_works< Runtime, AllPalletsWithoutSystem, @@ -57,9 +60,6 @@ pub fn teleports_for_native_asset_works< existential_deposit: BalanceOf, target_account: AccountIdOf, unwrap_pallet_xcm_event: Box) -> Option>>, - unwrap_xcmp_queue_event: Box< - dyn Fn(Vec) -> Option>, - >, runtime_para_id: u32, ) where Runtime: frame_system::Config @@ -152,7 +152,7 @@ pub fn teleports_for_native_asset_works< hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Parent), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); // check Balances after assert_ne!(>::free_balance(&target_account), 0.into()); @@ -164,12 +164,13 @@ pub fn teleports_for_native_asset_works< // 2. 
try to teleport asset back to the relaychain { let dest = MultiLocation::parent(); - let dest_beneficiary = MultiLocation::parent() + let mut dest_beneficiary = MultiLocation::parent() .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::free_balance(&target_account); @@ -223,65 +224,53 @@ pub fn teleports_for_native_asset_works< ); } - // 3. try to teleport asset away to other parachain (1234) + // 3. try to teleport assets away to other parachain (2345): should not work as we don't + // trust `IsTeleporter` for `(relay-native-asset, para(2345))` pair { - let other_para_id = 1234; + let other_para_id = 2345; let dest = MultiLocation::new(1, X1(Parachain(other_para_id))); - let dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::free_balance(&target_account); + let native_asset_to_teleport_away = native_asset_amount_unit * 3.into(); assert!( native_asset_to_teleport_away < target_account_balance_before_teleport - existential_deposit ); - - assert_ok!(RuntimeHelper::::do_teleport_assets::( - RuntimeHelper::::origin_of(target_account.clone()), - dest, - dest_beneficiary, - (native_asset_id, native_asset_to_teleport_away.into()), - Some((runtime_para_id, other_para_id)), - included_head, - &alice, - )); - - let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( - (native_asset_id, native_asset_to_teleport_away.into()).into(), - 0, - Unlimited, - dest_beneficiary, + assert_eq!( + RuntimeHelper::::do_teleport_assets::( + RuntimeHelper::::origin_of(target_account.clone()), dest, - ); + dest_beneficiary, + (native_asset_id, native_asset_to_teleport_away.into()), + Some((runtime_para_id, other_para_id)), + included_head, + &alice, + ), + Err(DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0,], + message: Some("Filtered",), + },),) + ); // check balances assert_eq!( >::free_balance(&target_account), - target_account_balance_before_teleport - - native_asset_to_teleport_away - - delivery_fees.into() + target_account_balance_before_teleport ); assert_eq!( >::free_balance(&CheckingAccount::get()), 0.into() ); - - // check events - RuntimeHelper::::assert_pallet_xcm_event_outcome( - &unwrap_pallet_xcm_event, - |outcome| { - assert_ok!(outcome.ensure_complete()); - }, - ); - assert!(RuntimeHelper::::xcmp_queue_message_sent(unwrap_xcmp_queue_event) - .is_some()); } }) } @@ -298,7 +287,6 @@ macro_rules! include_teleports_for_native_asset_works( $collator_session_key:expr, $existential_deposit:expr, $unwrap_pallet_xcm_event:expr, - $unwrap_xcmp_queue_event:expr, $runtime_para_id:expr ) => { #[test] @@ -318,15 +306,14 @@ macro_rules! 
include_teleports_for_native_asset_works( $existential_deposit, target_account, $unwrap_pallet_xcm_event, - $unwrap_xcmp_queue_event, $runtime_para_id ) } } ); -/// Test-case makes sure that `Runtime` can receive teleported assets from sibling parachain relay -/// chain +/// Test-case makes sure that `Runtime` can receive teleported assets from sibling parachain, and +/// can teleport it back pub fn teleports_for_foreign_assets_works< Runtime, AllPalletsWithoutSystem, @@ -381,7 +368,7 @@ pub fn teleports_for_foreign_assets_works< ::AccountId: From, ForeignAssetsPalletInstance: 'static, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_para_id = 2222; let foreign_asset_id_multilocation = MultiLocation { parents: 1, @@ -473,7 +460,7 @@ pub fn teleports_for_foreign_assets_works< >(foreign_asset_id_multilocation, 0, 0); assert!(teleported_foreign_asset_amount > asset_minimum_asset_balance); - // 1. process received teleported assets from relaychain + // 1. process received teleported assets from sibling parachain (foreign_para_id) let xcm = Xcm(vec![ // BuyExecution with relaychain native token WithdrawAsset(buy_execution_fee.clone().into()), @@ -512,7 +499,7 @@ pub fn teleports_for_foreign_assets_works< hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); // checks target_account after assert_eq!( @@ -551,12 +538,13 @@ pub fn teleports_for_foreign_assets_works< // 2. try to teleport asset back to source parachain (foreign_para_id) { let dest = MultiLocation::new(1, X1(Parachain(foreign_para_id))); - let dest_beneficiary = MultiLocation::new(1, X1(Parachain(foreign_para_id))) + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(foreign_para_id))) .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::balance( @@ -1108,7 +1096,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor AssetId: Clone + Copy, AssetIdConverter: MaybeEquivalence, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_asset_id_multilocation = MultiLocation { parents: 1, interior: X2(Parachain(2222), GeneralIndex(1234567)) }; let asset_id = AssetIdConverter::convert(&foreign_asset_id_multilocation).unwrap(); @@ -1223,7 +1211,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); // check events let mut events = >::events() @@ -1331,7 +1319,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); additional_checks_after(); }) @@ -1388,3 +1376,199 @@ macro_rules! 
include_create_and_manage_foreign_assets_for_local_consensus_parach } } ); + +/// Test-case makes sure that `Runtime` can reserve-transfer asset to other parachains (where +/// teleport is not trusted) +pub fn reserve_transfer_native_asset_to_non_teleport_para_works< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + HrmpChannelOpener, + HrmpChannelSource, + LocationToAccountId, +>( + collator_session_keys: CollatorSessionKeys, + existential_deposit: BalanceOf, + alice_account: AccountIdOf, + unwrap_pallet_xcm_event: Box) -> Option>>, + unwrap_xcmp_queue_event: Box< + dyn Fn(Vec) -> Option>, + >, + weight_limit: WeightLimit, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + AccountIdOf: Into<[u8; 32]>, + ValidatorIdOf: From>, + BalanceOf: From, + ::Balance: From + Into, + XcmConfig: xcm_executor::Config, + LocationToAccountId: ConvertLocation>, + ::AccountId: + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + ::AccountId: From, + HrmpChannelOpener: frame_support::inherent::ProvideInherent< + Call = cumulus_pallet_parachain_system::Call, + >, + HrmpChannelSource: XcmpMessageSource, +{ + let runtime_para_id = 1000; + ExtBuilder::::default() + .with_collators(collator_session_keys.collators()) + .with_session_keys(collator_session_keys.session_keys()) + .with_tracing() + .with_safe_xcm_version(3) + .with_para_id(runtime_para_id.into()) + .build() + .execute_with(|| { + let mut alice = [0u8; 32]; + alice[0] = 1; + let included_head = RuntimeHelper::::run_to_block( + 2, + AccountId::from(alice).into(), + ); + + // reserve-transfer native asset with local reserve to remote parachain (2345) + + let other_para_id = 2345; + let native_asset = MultiLocation::parent(); + let dest = MultiLocation::new(1, X1(Parachain(other_para_id))); + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) + .appended_with(AccountId32 { + network: None, + id: sp_runtime::AccountId32::new([3; 32]).into(), + }) + .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); + + let reserve_account = LocationToAccountId::convert_location(&dest) + .expect("Sovereign account for reserves"); + let balance_to_transfer = 1_000_000_000_000_u128; + + // open HRMP to other parachain + mock_open_hrmp_channel::( + runtime_para_id.into(), + other_para_id.into(), + included_head, + &alice, + ); + + // we calculate exact delivery fees _after_ sending the message by weighing the sent + // xcm, and this delivery fee varies for different runtimes, so just add enough buffer, + // then verify the arithmetics check out on final balance. 
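+			// The buffer only needs to exceed the fee actually charged: the exact fee is
+			// read back below via `get_fungible_delivery_fees` on the XCM taken from the
+			// XCMP queue, and the final balance assertion checks that
+			// `free_balance(alice) == alice_account_init_balance - balance_to_transfer - delivery_fees`.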
+ let delivery_fees_buffer = 40_000_000_000u128; + // drip 2xED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = existential_deposit.saturating_mul(2.into()) + + balance_to_transfer.into() + + delivery_fees_buffer.into(); + let _ = >::deposit_creating( + &alice_account, + alice_account_init_balance, + ); + // SA of target location needs to have at least ED, otherwise making reserve fails + let _ = >::deposit_creating( + &reserve_account, + existential_deposit, + ); + + // we just check here, that user retains enough balance after withdrawal + // and also we check if `balance_to_transfer` is more than `existential_deposit`, + assert!( + (>::free_balance(&alice_account) - + balance_to_transfer.into()) >= + existential_deposit + ); + // SA has just ED + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + ); + + // local native asset (pallet_balances) + let asset_to_transfer = MultiAsset { + fun: Fungible(balance_to_transfer.into()), + id: Concrete(native_asset), + }; + + // pallet_xcm call reserve transfer + assert_ok!(>::limited_reserve_transfer_assets( + RuntimeHelper::::origin_of(alice_account.clone()), + Box::new(dest.into_versioned()), + Box::new(dest_beneficiary.into_versioned()), + Box::new(VersionedMultiAssets::from(MultiAssets::from(asset_to_transfer))), + 0, + weight_limit, + )); + + // check events + // check pallet_xcm attempted + RuntimeHelper::::assert_pallet_xcm_event_outcome( + &unwrap_pallet_xcm_event, + |outcome| { + assert_ok!(outcome.ensure_complete()); + }, + ); + + // check that xcm was sent + let xcm_sent_message_hash = >::events() + .into_iter() + .filter_map(|e| unwrap_xcmp_queue_event(e.event.encode())) + .find_map(|e| match e { + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { message_hash } => + Some(message_hash), + _ => None, + }); + + // read xcm + let xcm_sent = RuntimeHelper::::take_xcm( + other_para_id.into(), + ) + .unwrap(); + + let delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(dest, Xcm::try_from(xcm_sent.clone()).unwrap()); + + assert_eq!( + xcm_sent_message_hash, + Some(xcm_sent.using_encoded(sp_io::hashing::blake2_256)) + ); + let mut xcm_sent: Xcm<()> = xcm_sent.try_into().expect("versioned xcm"); + + // check sent XCM Program to other parachain + println!("reserve_transfer_native_asset_works sent xcm: {:?}", xcm_sent); + let reserve_assets_deposited = MultiAssets::from(vec![MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(1000000000000), + }]); + + assert_matches_reserve_asset_deposited_instructions( + &mut xcm_sent, + &reserve_assets_deposited, + &dest_beneficiary, + ); + + // check alice account decreased by balance_to_transfer ( + delivery_fees) + assert_eq!( + >::free_balance(&alice_account), + alice_account_init_balance - balance_to_transfer.into() - delivery_fees.into() + ); + + // check reserve account + // check reserve account increased by balance_to_transfer + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + balance_to_transfer.into() + ); + }) +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 6c8ac8c6452b3bb86be5e64ac52b1992fb37cff1..8007b275cb513a8ea974bd807bee2d810b723090 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -16,7 +16,7 @@ //! 
Module contains predefined test-case scenarios for `Runtime` with various assets transferred //! over a bridge. -use crate::assert_matches_reserve_asset_deposited_instructions; +use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees}; use codec::Encode; use cumulus_primitives_core::XcmpMessageSource; use frame_support::{ @@ -32,10 +32,7 @@ use parachains_runtimes_test_utils::{ use sp_runtime::{traits::StaticLookup, Saturating}; use xcm::{latest::prelude::*, VersionedMultiAssets}; use xcm_builder::{CreateMatcher, MatchXcm}; -use xcm_executor::{ - traits::{ConvertLocation, TransactAsset}, - XcmExecutor, -}; +use xcm_executor::{traits::ConvertLocation, XcmExecutor}; pub struct TestBridgingConfig { pub bridged_network: NetworkId, @@ -129,8 +126,13 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< &alice, ); - // drip ED to account - let alice_account_init_balance = existential_deposit + balance_to_transfer.into(); + // we calculate exact delivery fees _after_ sending the message by weighing the sent + // xcm, and this delivery fee varies for different runtimes, so just add enough buffer, + // then verify the arithmetics check out on final balance. + let delivery_fees_buffer = 8_000_000_000_000u128; + // drip ED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = + existential_deposit + balance_to_transfer.into() + delivery_fees_buffer.into(); let _ = >::deposit_creating( &alice_account, alice_account_init_balance, @@ -183,56 +185,6 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< let expected_beneficiary = target_destination_account; - // Make sure sender has enough funds for paying delivery fees - let handling_delivery_fees = { - // Probable XCM with `ReserveAssetDeposited`. - let mut expected_reserve_asset_deposited_message = Xcm(vec![ - ReserveAssetDeposited(MultiAssets::from(expected_assets.clone())), - ClearOrigin, - BuyExecution { - fees: MultiAsset { - id: Concrete(Default::default()), - fun: Fungible(balance_to_transfer), - }, - weight_limit: Unlimited, - }, - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: expected_beneficiary }, - SetTopic([ - 220, 188, 144, 32, 213, 83, 111, 175, 44, 210, 111, 19, 90, 165, 191, 112, - 140, 247, 192, 124, 42, 17, 153, 141, 114, 34, 189, 20, 83, 69, 237, 173, - ]), - ]); - assert_matches_reserve_asset_deposited_instructions( - &mut expected_reserve_asset_deposited_message, - &expected_assets, - &expected_beneficiary, - ); - - // Call `SendXcm::validate` to get delivery fees. - let (_, delivery_fees): (_, MultiAssets) = XcmConfig::XcmSender::validate( - &mut Some(target_location_from_different_consensus), - &mut Some(expected_reserve_asset_deposited_message), - ) - .expect("validate passes"); - // Drip delivery fee to Alice account. - let mut delivery_fees_added = false; - for delivery_fee in delivery_fees.inner() { - assert_ok!(::deposit_asset( - &delivery_fee, - &MultiLocation { - parents: 0, - interior: X1(AccountId32 { - network: None, - id: alice_account.clone().into(), - }), - }, - None, - )); - delivery_fees_added = true; - } - delivery_fees_added - }; - // do pallet_xcm call reserve transfer assert_ok!(>::limited_reserve_transfer_assets( RuntimeHelper::::origin_of(alice_account.clone()), @@ -275,6 +227,7 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< // check sent XCM ExportMessage to BridgeHub + let mut delivery_fees = 0; // 1. 
check paid or unpaid if let Some(expected_fee_asset_id) = maybe_paid_export_message { xcm_sent @@ -315,6 +268,10 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< .split_global() .expect("split works"); assert_eq!(destination, &target_location_junctions_without_global_consensus); + // Call `SendXcm::validate` to get delivery fees. + delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(target_location_from_different_consensus, inner_xcm.clone()); assert_matches_reserve_asset_deposited_instructions( inner_xcm, &expected_assets, @@ -330,8 +287,8 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< assert_eq!( >::free_balance(&alice_account), alice_account_init_balance - .saturating_sub(existential_deposit) .saturating_sub(balance_to_transfer.into()) + .saturating_sub(delivery_fees.into()) ); // check reserve account increased by balance_to_transfer @@ -341,14 +298,13 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< ); // check dedicated account increased by delivery fees (if configured) - if handling_delivery_fees { - if let Some(delivery_fees_account) = delivery_fees_account { - let delivery_fees_account_balance_after = - >::free_balance(&delivery_fees_account); - assert!( - delivery_fees_account_balance_after > delivery_fees_account_balance_before - ); - } + if let Some(delivery_fees_account) = delivery_fees_account { + let delivery_fees_account_balance_after = + >::free_balance(&delivery_fees_account); + assert!( + delivery_fees_account_balance_after - delivery_fees.into() >= + delivery_fees_account_balance_before + ); } }) } @@ -527,7 +483,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< XcmReceivedFrom::Sibling, ), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); // author actual balance after (received fees from Trader for ForeignAssets) let author_received_fees = @@ -632,7 +588,7 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); assert_eq!(is_congested, pallet_xcm_bridge_hub_router::Pallet::::bridge().is_congested); }; diff --git a/cumulus/parachains/runtimes/bridge-hubs/README.md b/cumulus/parachains/runtimes/bridge-hubs/README.md index 9bd6557f350c484fa3d6f48c3127bc51b25b1def..cf617db730dd7faa19d932dbe6a0406b7a0c15f2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/README.md +++ b/cumulus/parachains/runtimes/bridge-hubs/README.md @@ -1,14 +1,5 @@ - [Bridge-hub Parachains](#bridge-hub-parachains) - [Requirements for local run/testing](#requirements-for-local-runtesting) - - [How to test local Rococo <-> Wococo bridge](#how-to-test-local-rococo---wococo-bridge) - - [Run Rococo/Wococo chains with zombienet](#run-rococowococo-chains-with-zombienet) - - [Init bridge and run relayer between BridgeHubRococo and - BridgeHubWococo](#init-bridge-and-run-relayer-between-bridgehubrococo-and-bridgehubwococo) - - [Initialize configuration for transfer asset over bridge - (ROCs/WOCs)](#initialize-configuration-for-transfer-asset-over-bridge-rocswocs) - - [Send messages - transfer asset over bridge (ROCs/WOCs)](#send-messages---transfer-asset-over-bridge-rocswocs) - - [Claim relayer's rewards on BridgeHubRococo and - BridgeHubWococo](#claim-relayers-rewards-on-bridgehubrococo-and-bridgehubwococo) - [How to test local Rococo <-> Westend bridge](#how-to-test-local-rococo---westend-bridge) - [Run 
Rococo/Westend chains with zombienet](#run-rococowestend-chains-with-zombienet) - [Init bridge and run relayer between BridgeHubRococo and @@ -53,17 +44,7 @@ Copy the apropriate binary (zombienet-linux) from the latest release to ~/local_ --- # 2. Build polkadot binary -# If you want to test Kusama/Polkadot bridge, we need "sudo pallet + fast-runtime", -# so we need to use sudofi in polkadot directory. -# -# Install sudofi: (skip if already installed) -# cd -# git clone https://github.com/paritytech/parachain-utils.git -# cd parachain-utils # -> this is -# cargo build --release --bin sudofi -# -# cd /polkadot -# /target/release/sudofi +We need polkadot binary with "fast-runtime" feature: cd cargo build --release --features fast-runtime --bin polkadot @@ -100,112 +81,6 @@ cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-paracha cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-parachain-asset-hub ``` - -## How to test local Rococo <-> Wococo bridge - -### Run Rococo/Wococo chains with zombienet - -``` -cd - -# Rococo + BridgeHubRococo + AssetHub for Rococo (mirroring Kusama) -POLKADOT_BINARY_PATH=~/local_bridge_testing/bin/polkadot \ -POLKADOT_PARACHAIN_BINARY_PATH=~/local_bridge_testing/bin/polkadot-parachain \ -POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ - ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml -``` - -``` -cd - -# Wococo + BridgeHubWococo + AssetHub for Wococo (mirroring Polkadot) -POLKADOT_BINARY_PATH=~/local_bridge_testing/bin/polkadot \ -POLKADOT_PARACHAIN_BINARY_PATH=~/local_bridge_testing/bin/polkadot-parachain \ -POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ - ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml -``` - -### Init bridge and run relayer between BridgeHubRococo and BridgeHubWococo - -**Accounts of BridgeHub parachains:** -- `Bob` is pallet owner of all bridge pallets - -#### Run with script -``` -cd - -./cumulus/scripts/bridges_rococo_wococo.sh run-relay -``` - -**Check relay-chain headers relaying:** -- Rococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8943#/chainstate - Pallet: - **bridgeWococoGrandpa** - Keys: **bestFinalized()** -- Wococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: - **bridgeRococoGrandpa** - Keys: **bestFinalized()** - -**Check parachain headers relaying:** -- Rococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8943#/chainstate - Pallet: - **bridgeWococoParachains** - Keys: **parasInfo(None)** -- Wococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: - **bridgeRococoParachains** - Keys: **parasInfo(None)** - -### Initialize configuration for transfer asset over bridge (ROCs/WOCs) - -This initialization does several things: -- creates `ForeignAssets` for wrappedROCs/wrappedWOCs -- drips SA for AssetHubRococo on AssetHubWococo (and vice versa) which holds reserved assets on source chains -``` -cd - -./cumulus/scripts/bridges_rococo_wococo.sh init-asset-hub-rococo-local -./cumulus/scripts/bridges_rococo_wococo.sh init-bridge-hub-rococo-local -./cumulus/scripts/bridges_rococo_wococo.sh init-asset-hub-wococo-local 
-./cumulus/scripts/bridges_rococo_wococo.sh init-bridge-hub-wococo-local -``` - -### Send messages - transfer asset over bridge (ROCs/WOCs) - -Do (asset) transfers: -``` -cd - -# ROCs from Rococo's Asset Hub to Wococo's. -./cumulus/scripts/bridges_rococo_wococo.sh reserve-transfer-assets-from-asset-hub-rococo-local -``` -``` -cd - -# WOCs from Wococo's Asset Hub to Rococo's. -./cumulus/scripts/bridges_rococo_wococo.sh reserve-transfer-assets-from-asset-hub-wococo-local -``` - -- open explorers: (see zombienets) - - AssetHubRococo (see events `xcmpQueue.XcmpMessageSent`, `polkadotXcm.Attempted`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9910#/explorer - - BridgeHubRococo (see `bridgeWococoMessages.MessageAccepted`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - - BridgeHubWococo (see `bridgeRococoMessages.MessagesReceived`, `xcmpQueue.XcmpMessageSent`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer - - AssetHubWococo (see `foreignAssets.Issued`, `xcmpQueue.Success`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer - - BridgeHubRocococ (see `bridgeWococoMessages.MessagesDelivered`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - -### Claim relayer's rewards on BridgeHubRococo and BridgeHubWococo - -**Accounts of BridgeHub parachains:** -- `//Charlie` is relayer account on BridgeHubRococo -- `//Charlie` is relayer account on BridgeHubWococo - -``` -cd - -# Claim rewards on BridgeHubWococo: -./cumulus/scripts/bridges_rococo_wococo.sh claim-rewards-bridge-hub-rococo-local - -# Claim rewards on BridgeHubWococo: -./cumulus/scripts/bridges_rococo_wococo.sh claim-rewards-bridge-hub-wococo-local -``` - -- open explorers: (see zombienets) - - BridgeHubRococo (see 2x `bridgeRelayers.RewardPaid`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - - BridgeHubWococo (see 2x `bridgeRelayers.RewardPaid`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer - ## How to test local Rococo <-> Westend bridge ### Run Rococo/Westend chains with zombienet @@ -270,7 +145,7 @@ cd ### Send messages - transfer asset over bridge (ROCs/WNDs) -Do (asset) transfers: +Do reserve-backed transfers: ``` cd @@ -291,6 +166,20 @@ cd - AssetHubWestend (see `foreignAssets.Issued`, `xcmpQueue.Success`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer - BridgeHubRocococ (see `bridgeWestendMessages.MessagesDelivered`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer +Do reserve withdraw transfers: (when previous is finished) +``` +cd + +# wrappedWNDs from Rococo's Asset Hub to Westend's. +./cumulus/scripts/bridges_rococo_westend.sh withdraw-reserve-assets-from-asset-hub-rococo-local +``` +``` +cd + +# wrappedROCs from Westend's Asset Hub to Rococo's. +./cumulus/scripts/bridges_rococo_westend.sh withdraw-reserve-assets-from-asset-hub-westend-local +``` + ### Claim relayer's rewards on BridgeHubRococo and BridgeHubWestend **Accounts of BridgeHub parachains:** diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. 
- Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. - Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. 
- Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index a30a2ae8d4d0b50cc5ef9e17e1028e457611f31e..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// bridge-hub-kusama-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_686_000 picoseconds. - Weight::from_parts(1_761_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 28_250 - .saturating_add(Weight::from_parts(24_261_433, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs deleted file mode 100644 index ffd311ceecdce25e25c39eddd13aad73e60e3ae4..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_xcmp_queue` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// bridge-hub-kusama-dev -// --output -// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_xcmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_u32() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 5_000_000 picoseconds. - Weight::from_parts(6_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn enqueue_xcmp_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `3517` - // Minimum execution time: 14_000_000 picoseconds. - Weight::from_parts(14_000_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn suspend_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(4_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn resume_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `1596` - // Minimum execution time: 4_000_000 picoseconds. 
- Weight::from_parts(5_000_000, 0) - .saturating_add(Weight::from_parts(0, 1596)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn take_first_concatenated_xcm() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 44_000_000 picoseconds. - Weight::from_parts(45_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) - /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65711` - // Estimated: `69176` - // Minimum execution time: 60_000_000 picoseconds. - Weight::from_parts(63_000_000, 0) - .saturating_add(Weight::from_parts(0, 69176)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65710` - // Estimated: `69175` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(43_000_000, 0) - .saturating_add(Weight::from_parts(0, 69175)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/extrinsic_weights.rs deleted file mode 100644 index 1a4adb968bb7195428ea00d59cd92dcd3b6eea5f..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/extrinsic_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Executing a NO-OP `System::remarks` Extrinsic. - pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::ExtrinsicBaseWeight::get(); - - // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 10 µs." - ); - // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs deleted file mode 100644 index 6b9313cdababf085c24f22e79b63acc86534bfbb..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. 
-pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_985_000 picoseconds. - Weight::from_parts(2_177_341, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(386, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. - fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_288_000 picoseconds. - Weight::from_parts(23_888_468, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_718, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_700_000 picoseconds. - Weight::from_parts(3_867_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 100_298_586_000 picoseconds. - Weight::from_parts(101_869_369_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_052_000 picoseconds. 
- Weight::from_parts(2_115_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_048 - .saturating_add(Weight::from_parts(755_436, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_044_000 picoseconds. - Weight::from_parts(2_110_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_011 - .saturating_add(Weight::from_parts(569_993, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `68 + p * (69 ±0)` - // Estimated: `66 + p * (70 ±0)` - // Minimum execution time: 3_741_000 picoseconds. - Weight::from_parts(3_838_000, 0) - .saturating_add(Weight::from_parts(0, 66)) - // Standard Error: 2_455 - .saturating_add(Weight::from_parts(1_216_154, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs deleted file mode 100644 index 04ceb5bed756964dd5932cf649f2b016e06326a5..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_balances` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_balances`. -pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_allow_death() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 55_163_000 picoseconds. - Weight::from_parts(56_056_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 40_829_000 picoseconds. - Weight::from_parts(42_182_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_creating() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 15_212_000 picoseconds. - Weight::from_parts(15_782_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_killing() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 22_866_000 picoseconds. - Weight::from_parts(23_452_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 57_047_000 picoseconds. 
- Weight::from_parts(58_536_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_all() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 51_622_000 picoseconds. - Weight::from_parts(52_912_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_unreserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 17_723_000 picoseconds. - Weight::from_parts(18_383_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:999 w:999) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 17_089_000 picoseconds. - Weight::from_parts(17_379_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 17_071 - .saturating_add(Weight::from_parts(15_647_341, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs deleted file mode 100644 index cccb7c60924a03c6ed9e8a6df4561e6578190b14..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo(PhantomData); -impl pallet_collator_selection::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `196 + b * (79 ±0)` - // Estimated: `1187 + b * (2555 ±0)` - // Minimum execution time: 14_329_000 picoseconds. - Weight::from_parts(11_605_842, 0) - .saturating_add(Weight::from_parts(0, 1187)) - // Standard Error: 4_784 - .saturating_add(Weight::from_parts(3_297_183, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. - fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `757 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 47_110_000 picoseconds. 
- Weight::from_parts(45_234_418, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 14_452 - .saturating_add(Weight::from_parts(156_031, 0).saturating_mul(b.into())) - // Standard Error: 2_739 - .saturating_add(Weight::from_parts(216_162, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 15_326_000 picoseconds. - Weight::from_parts(14_914_611, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_274 - .saturating_add(Weight::from_parts(201_234, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_288_000 picoseconds. - Weight::from_parts(7_472_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_137_000 picoseconds. - Weight::from_parts(7_374_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. 
- fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `740 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 40_718_000 picoseconds. - Weight::from_parts(43_911_837, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 3_053 - .saturating_add(Weight::from_parts(229_337, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[3, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `334 + c * (49 ±0)` - // Estimated: `6287` - // Minimum execution time: 32_953_000 picoseconds. - Weight::from_parts(34_817_275, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_476 - .saturating_add(Weight::from_parts(198_023, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `155` - // Estimated: `6196` - // Minimum execution time: 45_130_000 picoseconds. - Weight::from_parts(46_733_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. 
- fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2263 + c * (97 ±0) + r * (115 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 16_690_000 picoseconds. - Weight::from_parts(17_188_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 345_320 - .saturating_add(Weight::from_parts(15_166_422, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs deleted file mode 100644 index c5a4235055d120f85eec6279a963b8e39c009c62..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// bridge-hub-kusama-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 11_692_000 picoseconds. 
- Weight::from_parts(11_692_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 10_614_000 picoseconds. - Weight::from_parts(10_614_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 7_085_000 picoseconds. - Weight::from_parts(7_085_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_813_000 picoseconds. - Weight::from_parts(5_813_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_090_000 picoseconds. - Weight::from_parts(6_090_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 58_905_000 picoseconds. - Weight::from_parts(58_905_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 6_501_000 picoseconds. 
- Weight::from_parts(6_501_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 39_695_000 picoseconds. - Weight::from_parts(39_695_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 50_543_000 picoseconds. - Weight::from_parts(50_543_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 69_294_000 picoseconds. - Weight::from_parts(69_294_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs deleted file mode 100644 index f4135e975fbed00ea5dfd0628138b4cf7bd9c7ca..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_multisig` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_multisig`. -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_602_000 picoseconds. - Weight::from_parts(14_565_036, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 8 - .saturating_add(Weight::from_parts(518, 0).saturating_mul(z.into())) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 46_075_000 picoseconds. - Weight::from_parts(33_730_493, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_049 - .saturating_add(Weight::from_parts(134_211, 0).saturating_mul(s.into())) - // Standard Error: 10 - .saturating_add(Weight::from_parts(1_448, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 29_389_000 picoseconds. - Weight::from_parts(19_639_583, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 976 - .saturating_add(Weight::from_parts(106_598, 0).saturating_mul(s.into())) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. 
- fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `388 + s * (33 ±0)` - // Estimated: `6811` - // Minimum execution time: 50_438_000 picoseconds. - Weight::from_parts(36_195_308, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_689 - .saturating_add(Weight::from_parts(176_067, 0).saturating_mul(s.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_545, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 32_134_000 picoseconds. - Weight::from_parts(32_149_785, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_082 - .saturating_add(Weight::from_parts(145_390, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 17_560_000 picoseconds. - Weight::from_parts(18_144_079, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 763 - .saturating_add(Weight::from_parts(114_298, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn cancel_as_multi(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + s * (1 ±0)` - // Estimated: `6811` - // Minimum execution time: 32_360_000 picoseconds. - Weight::from_parts(33_566_579, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_314 - .saturating_add(Weight::from_parts(126_583, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs deleted file mode 100644 index f508e1daaef02ac7fe2d2e022e0507960fc797ee..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_session` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_session -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_session`. -pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:1 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn set_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3762` - // Minimum execution time: 17_170_000 picoseconds. - Weight::from_parts(17_523_000, 0) - .saturating_add(Weight::from_parts(0, 3762)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:0 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn purge_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `279` - // Estimated: `3744` - // Minimum execution time: 13_273_000 picoseconds. - Weight::from_parts(14_200_000, 0) - .saturating_add(Weight::from_parts(0, 3744)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs deleted file mode 100644 index 93d0ea596e721127fe459d6a59105ebbb5d19a7c..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_641_000 picoseconds. - Weight::from_parts(7_103_558, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_172 - .saturating_add(Weight::from_parts(4_907_384, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_741_000 picoseconds. - Weight::from_parts(4_870_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_561_000 picoseconds. - Weight::from_parts(12_252_064, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_750 - .saturating_add(Weight::from_parts(5_193_404, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_646_000 picoseconds. - Weight::from_parts(8_927_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_726_000 picoseconds. 
- Weight::from_parts(8_025_954, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_746 - .saturating_add(Weight::from_parts(4_936_537, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/rocksdb_weights.rs deleted file mode 100644 index 3dd817aa6f137085b0e5fdf2b11b7f50e5c8b002..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/rocksdb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout - /// the runtime. - pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::RocksDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs deleted file mode 100644 index ff3cb452a8a4fad0ea7996080e159ae1327df538..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::fungible -// --chain=bridge-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::fungible`. -pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn withdraw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 25_447_000 picoseconds. - Weight::from_parts(25_810_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn transfer_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `153` - // Estimated: `6196` - // Minimum execution time: 53_908_000 picoseconds. 
- Weight::from_parts(54_568_000, 6196) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn transfer_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `223` - // Estimated: `6196` - // Minimum execution time: 79_923_000 picoseconds. - Weight::from_parts(80_790_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) - } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn reserve_asset_deposited() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_reserve_withdraw() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 31_923_000 picoseconds. - Weight::from_parts(32_499_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn receive_teleported_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_903_000 picoseconds. 
- Weight::from_parts(4_065_000, 0) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn deposit_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3593` - // Minimum execution time: 26_987_000 picoseconds. - Weight::from_parts(27_486_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn deposit_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `122` - // Estimated: `3593` - // Minimum execution time: 56_012_000 picoseconds. - Weight::from_parts(58_067_000, 3593) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_teleport() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 32_350_000 picoseconds. 
- Weight::from_parts(33_403_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs deleted file mode 100644 index c5c14e6917eb244d91a5f655880199b523046a11..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::generic` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::generic`. 
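// The entries that follow (like the fungible ones above) all share one recipe: a benchmarked
// base cost in picoseconds of reference time, a proof-size allowance in bytes, and a surcharge
// for the storage reads and writes listed in the `Storage:` comments. A minimal, self-contained
// sketch of that arithmetic - using the stock `RocksDbWeight` constants purely for illustration,
// where the generated code charges via the runtime's `T::DbWeight` - could look like this:

use frame_support::weights::{constants::RocksDbWeight, Weight};

/// Roughly how an entry such as `report_holding()` below is put together:
/// the measured base cost plus six priced reads and two priced writes.
fn example_report_holding() -> Weight {
	// First component is ref_time in picoseconds, second is proof_size in bytes.
	Weight::from_parts(34_380_000, 3535)
		.saturating_add(RocksDbWeight::get().reads(6))
		.saturating_add(RocksDbWeight::get().writes(2))
}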
-pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_holding() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 33_141_000 picoseconds. - Weight::from_parts(34_380_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn buy_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_803_000 picoseconds. - Weight::from_parts(2_904_000, 0) - } - // Storage: `PolkadotXcm::Queries` (r:1 w:0) - // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn query_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `3497` - // Minimum execution time: 10_308_000 picoseconds. - Weight::from_parts(10_753_000, 3497) - .saturating_add(T::DbWeight::get().reads(1)) - } - pub fn transact() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 11_499_000 picoseconds. - Weight::from_parts(11_786_000, 0) - } - pub fn refund_surplus() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_102_000 picoseconds. - Weight::from_parts(3_161_000, 0) - } - pub fn set_error_handler() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_678_000 picoseconds. - Weight::from_parts(2_795_000, 0) - } - pub fn set_appendix() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_685_000 picoseconds. - Weight::from_parts(2_758_000, 0) - } - pub fn clear_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_590_000 picoseconds. - Weight::from_parts(2_754_000, 0) - } - pub fn descend_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_297_000 picoseconds. - Weight::from_parts(3_419_000, 0) - } - pub fn clear_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_606_000 picoseconds. 
- Weight::from_parts(2_717_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 26_242_000 picoseconds. - Weight::from_parts(29_220_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) - // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn claim_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `3555` - // Minimum execution time: 14_106_000 picoseconds. - Weight::from_parts(14_535_000, 3555) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn trap() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_634_000 picoseconds. - Weight::from_parts(2_763_000, 0) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn subscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 27_802_000 picoseconds. 
- Weight::from_parts(28_495_000, 3503) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn unsubscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_683_000 picoseconds. - Weight::from_parts(4_907_000, 0) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn burn_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_941_000 picoseconds. - Weight::from_parts(4_080_000, 0) - } - pub fn expect_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_775_000 picoseconds. - Weight::from_parts(2_908_000, 0) - } - pub fn expect_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_743_000 picoseconds. - Weight::from_parts(2_863_000, 0) - } - pub fn expect_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_641_000 picoseconds. - Weight::from_parts(2_771_000, 0) - } - pub fn expect_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_838_000 picoseconds. - Weight::from_parts(2_950_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn query_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 29_284_000 picoseconds. - Weight::from_parts(29_867_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn expect_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_734_000 picoseconds. 
- Weight::from_parts(4_876_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 26_154_000 picoseconds. - Weight::from_parts(26_851_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn clear_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_678_000 picoseconds. - Weight::from_parts(2_748_000, 0) - } - pub fn set_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_585_000 picoseconds. - Weight::from_parts(2_697_000, 0) - } - pub fn clear_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_576_000 picoseconds. - Weight::from_parts(2_701_000, 0) - } - pub fn set_fees_mode() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_597_000 picoseconds. - Weight::from_parts(2_735_000, 0) - } - pub fn unpaid_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_744_000 picoseconds. - Weight::from_parts(2_809_000, 0) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs deleted file mode 100644 index 893524e12f66230f5de2a48966344b8b93f08489..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
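// The two tables above (`pallet_xcm_benchmarks::fungible` and `pallet_xcm_benchmarks::generic`)
// are what the runtime's XCM weigher draws on: each instruction of an incoming message is priced
// from one of these entries and the per-instruction figures are summed. A hypothetical sketch of
// that accounting (the function name is made up, shown only to make the tables' role concrete):

use frame_support::weights::Weight;

/// Sum per-instruction weights into the total cost of one XCM program, saturating on overflow.
fn weigh_program(per_instruction: &[Weight]) -> Weight {
	per_instruction.iter().fold(Weight::zero(), |total, w| total.saturating_add(*w))
}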
- -pub use bridge_hub_kusama_runtime::{ - xcm_config::XcmConfig, AllPalletsWithoutSystem, Balances, ExistentialDeposit, ParachainSystem, - PolkadotXcm, Runtime, RuntimeEvent, SessionKeys, -}; -use codec::Decode; -use frame_support::parameter_types; -use parachains_common::{kusama::fee::WeightToFee, AccountId, AuraId}; - -const ALICE: [u8; 32] = [1u8; 32]; - -parameter_types! { - pub CheckingAccount: AccountId = PolkadotXcm::check_account(); -} - -bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - bridge_hub_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - 1002 -); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. 
- Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index 4b0cface1466a7833e3a57cbbb21efb2714e4591..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// bridge-hub-polkadot-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. 
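// Every one of these generated modules hangs its functions off a zero-sized wrapper: the runtime
// type `T` is only needed to reach compile-time constants such as `T::DbWeight`, so it is carried
// as `PhantomData` rather than stored. A self-contained sketch of the pattern (`ExampleWeightInfo`
// and its figures are illustrative, not taken from any of the generated files):

use core::marker::PhantomData;
use frame_support::{traits::Get, weights::Weight};

pub struct ExampleWeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> ExampleWeightInfo<T> {
	/// A made-up operation: a fixed base plus four reads and four writes priced with the
	/// runtime's configured `DbWeight`.
	pub fn example_operation() -> Weight {
		Weight::from_parts(1_000_000, 3593)
			.saturating_add(T::DbWeight::get().reads(4))
			.saturating_add(T::DbWeight::get().writes(4))
	}
}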
-pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> cumulus_pallet_parachain_system::WeightInfo for WeightInfo<T> { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_686_000 picoseconds. - Weight::from_parts(1_729_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 19_565 - .saturating_add(Weight::from_parts(24_482_828, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs deleted file mode 100644 index 8676be67b2f57529c091cb497b77932d9bb21c04..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. -pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_047_000 picoseconds. - Weight::from_parts(2_087_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(390, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. - fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_335_000 picoseconds. - Weight::from_parts(7_507_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_751, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_673_000 picoseconds. - Weight::from_parts(3_953_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 98_791_992_000 picoseconds. 
- Weight::from_parts(101_799_041_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_144_000 picoseconds. - Weight::from_parts(2_206_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_254 - .saturating_add(Weight::from_parts(740_881, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_117_000 picoseconds. - Weight::from_parts(2_192_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_024 - .saturating_add(Weight::from_parts(558_397, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `68 + p * (69 ±0)` - // Estimated: `66 + p * (70 ±0)` - // Minimum execution time: 3_907_000 picoseconds. - Weight::from_parts(4_050_000, 0) - .saturating_add(Weight::from_parts(0, 66)) - // Standard Error: 2_228 - .saturating_add(Weight::from_parts(1_212_760, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs deleted file mode 100644 index b95ea83585f9963b15d5ba122418f4eca4ba646b..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_balances` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_balances`. -pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_allow_death() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 54_518_000 picoseconds. - Weight::from_parts(55_244_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 40_152_000 picoseconds. - Weight::from_parts(41_084_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_creating() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 15_234_000 picoseconds. - Weight::from_parts(15_576_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_killing() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 22_173_000 picoseconds. - Weight::from_parts(22_964_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 56_636_000 picoseconds. 
- Weight::from_parts(57_316_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_all() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 50_829_000 picoseconds. - Weight::from_parts(51_264_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_unreserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 17_887_000 picoseconds. - Weight::from_parts(18_365_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:999 w:999) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 16_754_000 picoseconds. - Weight::from_parts(17_237_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 15_088 - .saturating_add(Weight::from_parts(15_392_959, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs deleted file mode 100644 index 6ed2c42918692e7cdcf71a9b861ed7cbcbde2502..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo(PhantomData); -impl pallet_collator_selection::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `196 + b * (79 ±0)` - // Estimated: `1187 + b * (2555 ±0)` - // Minimum execution time: 14_735_000 picoseconds. - Weight::from_parts(11_846_916, 0) - .saturating_add(Weight::from_parts(0, 1187)) - // Standard Error: 8_592 - .saturating_add(Weight::from_parts(3_270_517, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. - fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `757 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 48_332_000 picoseconds. 
- Weight::from_parts(46_158_586, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 13_938 - .saturating_add(Weight::from_parts(174_493, 0).saturating_mul(b.into())) - // Standard Error: 2_642 - .saturating_add(Weight::from_parts(196_691, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 15_323_000 picoseconds. - Weight::from_parts(15_016_873, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_970 - .saturating_add(Weight::from_parts(199_160, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_393_000 picoseconds. - Weight::from_parts(7_723_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_426_000 picoseconds. - Weight::from_parts(7_783_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. 
- fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `740 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 41_040_000 picoseconds. - Weight::from_parts(43_902_200, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_360 - .saturating_add(Weight::from_parts(211_897, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[3, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `334 + c * (49 ±0)` - // Estimated: `6287` - // Minimum execution time: 33_429_000 picoseconds. - Weight::from_parts(36_413_045, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_947 - .saturating_add(Weight::from_parts(177_461, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `155` - // Estimated: `6196` - // Minimum execution time: 45_300_000 picoseconds. - Weight::from_parts(46_280_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. 
- fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2263 + c * (97 ±0) + r * (115 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 17_524_000 picoseconds. - Weight::from_parts(17_590_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 354_091 - .saturating_add(Weight::from_parts(15_829_767, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs deleted file mode 100644 index 38cc21cfad950169b062476a23b2dc1ee60bb237..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// bridge-hub-polkadot-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 38_974_000 picoseconds. 
- Weight::from_parts(38_974_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 11_194_000 picoseconds. - Weight::from_parts(11_194_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 5_196_000 picoseconds. - Weight::from_parts(5_196_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_408_000 picoseconds. - Weight::from_parts(6_408_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_354_000 picoseconds. - Weight::from_parts(6_354_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 63_855_000 picoseconds. - Weight::from_parts(63_855_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 6_764_000 picoseconds. 
- Weight::from_parts(6_764_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 40_293_000 picoseconds. - Weight::from_parts(40_293_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 50_903_000 picoseconds. - Weight::from_parts(50_903_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 96_657_000 picoseconds. - Weight::from_parts(96_657_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs deleted file mode 100644 index 44f3da351f65fe43c04e29e45a69cb17221eba6b..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_multisig` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_multisig`. -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_284_000 picoseconds. - Weight::from_parts(14_761_699, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7 - .saturating_add(Weight::from_parts(491, 0).saturating_mul(z.into())) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 44_043_000 picoseconds. - Weight::from_parts(32_303_705, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_280 - .saturating_add(Weight::from_parts(133_233, 0).saturating_mul(s.into())) - // Standard Error: 12 - .saturating_add(Weight::from_parts(1_467, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 28_494_000 picoseconds. - Weight::from_parts(19_053_318, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 791 - .saturating_add(Weight::from_parts(112_935, 0).saturating_mul(s.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_427, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. 
- fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `388 + s * (33 ±0)` - // Estimated: `6811` - // Minimum execution time: 49_505_000 picoseconds. - Weight::from_parts(36_407_515, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_595 - .saturating_add(Weight::from_parts(166_201, 0).saturating_mul(s.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_481, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 30_977_000 picoseconds. - Weight::from_parts(32_222_158, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_872 - .saturating_add(Weight::from_parts(125_197, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 17_351_000 picoseconds. - Weight::from_parts(18_130_793, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 902 - .saturating_add(Weight::from_parts(109_485, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn cancel_as_multi(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + s * (1 ±0)` - // Estimated: `6811` - // Minimum execution time: 31_554_000 picoseconds. - Weight::from_parts(33_116_785, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 882 - .saturating_add(Weight::from_parts(119_357, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs deleted file mode 100644 index 86ecc787e97c1c86d02c9478cf947d1af7a1a1a3..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_session` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_session -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_session`. -pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:1 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn set_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3762` - // Minimum execution time: 16_905_000 picoseconds. - Weight::from_parts(17_310_000, 0) - .saturating_add(Weight::from_parts(0, 3762)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:0 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn purge_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `279` - // Estimated: `3744` - // Minimum execution time: 12_511_000 picoseconds. - Weight::from_parts(13_055_000, 0) - .saturating_add(Weight::from_parts(0, 3744)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs deleted file mode 100644 index a0984d72aaca375e798ee4545b3c261b8596f223..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_timestamp` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_timestamp`. -pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: `Timestamp::Now` (r:1 w:1) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Aura::CurrentSlot` (r:1 w:0) - /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set() -> Weight { - // Proof Size summary in bytes: - // Measured: `49` - // Estimated: `1493` - // Minimum execution time: 7_675_000 picoseconds. - Weight::from_parts(7_947_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn on_finalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `57` - // Estimated: `0` - // Minimum execution time: 3_342_000 picoseconds. - Weight::from_parts(3_443_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs deleted file mode 100644 index 2f04094b34787105b3ea1df10d7ce697198be792..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_810_000 picoseconds. - Weight::from_parts(6_290_871, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_678 - .saturating_add(Weight::from_parts(5_193_419, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_753_000 picoseconds. - Weight::from_parts(4_890_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_873_000 picoseconds. - Weight::from_parts(9_780_422, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_035 - .saturating_add(Weight::from_parts(5_473_943, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_443_000 picoseconds. - Weight::from_parts(8_904_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_820_000 picoseconds. - Weight::from_parts(8_206_355, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_327 - .saturating_add(Weight::from_parts(5_187_839, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs deleted file mode 100644 index b73c009cbda09645e222a339a7f187237f9f7eb3..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=bridge-hub-polkadot-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 22_442_000 picoseconds. - Weight::from_parts(23_346_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 19_655_000 picoseconds. 
- Weight::from_parts(20_086_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_858_000 picoseconds. - Weight::from_parts(7_225_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_default_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_099_000 picoseconds. - Weight::from_parts(2_190_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_subscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 27_073_000 picoseconds. 
- Weight::from_parts(27_584_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_unsubscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `255` - // Estimated: `3720` - // Minimum execution time: 29_949_000 picoseconds. - Weight::from_parts(30_760_000, 0) - .saturating_add(Weight::from_parts(0, 3720)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) - /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_suspension() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_192_000 picoseconds. - Weight::from_parts(2_276_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_supported_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_681_000 picoseconds. - Weight::from_parts(15_131_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notifiers() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_523_000 picoseconds. 
- Weight::from_parts(15_113_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn already_notified_target() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 15_989_000 picoseconds. - Weight::from_parts(16_518_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn notify_current_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `6046` - // Minimum execution time: 25_127_000 picoseconds. - Weight::from_parts(25_773_000, 0) - .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn notify_target_migration_fail() -> Weight { - // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_352_000 picoseconds. - Weight::from_parts(8_592_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notify_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 14_658_000 picoseconds. 
- Weight::from_parts(15_345_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn migrate_and_notify_old_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 31_478_000 picoseconds. - Weight::from_parts(32_669_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn new_query() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 4_066_000 picoseconds. - Weight::from_parts(4_267_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn take_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `7669` - // Estimated: `11134` - // Minimum execution time: 25_260_000 picoseconds. - Weight::from_parts(25_570_000, 0) - .saturating_add(Weight::from_parts(0, 11134)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/paritydb_weights.rs deleted file mode 100644 index 25679703831a13b8d1bb7fb7dd4d92fa84b1f255..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/paritydb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights - /// are available for brave runtime engineers who may want to try this out as default. - pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::ParityDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs deleted file mode 100644 index 33a48f368122167c716155c21a3312d44c5e2dc7..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
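
Aside (not part of this patch): the deleted `paritydb_weights.rs` above boils down to two per-operation constants. Below is a minimal sketch of how such `RuntimeDbWeight` numbers behave, using only the `frame_support` items the deleted file itself imports; the surrounding crate setup is assumed, not taken from the patch.

// Illustrative only -- mirrors the deleted ParityDbWeight constants (8 µs per read,
// 50 µs per write of ref_time) outside of `parameter_types!`.
use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_NANOS, RuntimeDbWeight};

const PARITY_DB_WEIGHT: RuntimeDbWeight = RuntimeDbWeight {
    read: 8_000 * WEIGHT_REF_TIME_PER_NANOS,
    write: 50_000 * WEIGHT_REF_TIME_PER_NANOS,
};

#[test]
fn db_weight_scales_linearly() {
    // 3 reads + 1 write = 3 * 8_000 ns + 50_000 ns = 74_000 ns of ref_time.
    let w = PARITY_DB_WEIGHT
        .reads(3)
        .saturating_add(PARITY_DB_WEIGHT.writes(1));
    assert_eq!(w.ref_time(), 74_000 * WEIGHT_REF_TIME_PER_NANOS);
}
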
- -mod pallet_xcm_benchmarks_fungible; -mod pallet_xcm_benchmarks_generic; - -use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; -use frame_support::weights::Weight; -use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; -use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; -use xcm::{latest::prelude::*, DoubleEncoded}; - -trait WeighMultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight; -} - -const MAX_ASSETS: u64 = 100; - -impl WeighMultiAssets for MultiAssetFilter { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - match self { - Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), - Self::Wild(asset) => match asset { - All => weight.saturating_mul(MAX_ASSETS), - AllOf { fun, .. } => match fun { - WildFungibility::Fungible => weight, - // Magic number 2 has to do with the fact that we could have up to 2 times - // MaxAssetsIntoHolding in the worst-case scenario. - WildFungibility::NonFungible => - weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), - }, - AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - AllOfCounted { count, .. } => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - }, - } - } -} - -impl WeighMultiAssets for MultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - weight.saturating_mul(self.inner().iter().count() as u64) - } -} - -pub struct BridgeHubPolkadotXcmWeight(core::marker::PhantomData); -impl XcmWeightInfo for BridgeHubPolkadotXcmWeight { - fn withdraw_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) - } - fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) - } - fn receive_teleported_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) - } - fn query_response( - _query_id: &u64, - _response: &Response, - _max_weight: &Weight, - _querier: &Option, - ) -> Weight { - XcmGeneric::::query_response() - } - fn transfer_asset(assets: &MultiAssets, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::transfer_asset()) - } - fn transfer_reserve_asset( - assets: &MultiAssets, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::transfer_reserve_asset()) - } - fn transact( - _origin_type: &OriginKind, - _require_weight_at_most: &Weight, - _call: &DoubleEncoded, - ) -> Weight { - XcmGeneric::::transact() - } - fn hrmp_new_channel_open_request( - _sender: &u32, - _max_message_size: &u32, - _max_capacity: &u32, - ) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_accepted(_recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn clear_origin() -> Weight { - XcmGeneric::::clear_origin() - } - fn descend_origin(_who: &InteriorMultiLocation) -> Weight { - XcmGeneric::::descend_origin() - } - fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::report_error() - } - - fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - 
assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) - } - fn deposit_reserve_asset( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::deposit_reserve_asset()) - } - fn exchange_asset(_give: &MultiAssetFilter, _receive: &MultiAssets, _maximal: &bool) -> Weight { - Weight::MAX - } - fn initiate_reserve_withdraw( - assets: &MultiAssetFilter, - _reserve: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::initiate_reserve_withdraw()) - } - fn initiate_teleport( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::initiate_teleport()) - } - fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { - XcmGeneric::::report_holding() - } - fn buy_execution(_fees: &MultiAsset, _weight_limit: &WeightLimit) -> Weight { - XcmGeneric::::buy_execution() - } - fn refund_surplus() -> Weight { - XcmGeneric::::refund_surplus() - } - fn set_error_handler(_xcm: &Xcm) -> Weight { - XcmGeneric::::set_error_handler() - } - fn set_appendix(_xcm: &Xcm) -> Weight { - XcmGeneric::::set_appendix() - } - fn clear_error() -> Weight { - XcmGeneric::::clear_error() - } - fn claim_asset(_assets: &MultiAssets, _ticket: &MultiLocation) -> Weight { - XcmGeneric::::claim_asset() - } - fn trap(_code: &u64) -> Weight { - XcmGeneric::::trap() - } - fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { - XcmGeneric::::subscribe_version() - } - fn unsubscribe_version() -> Weight { - XcmGeneric::::unsubscribe_version() - } - fn burn_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::::burn_asset()) - } - fn expect_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::::expect_asset()) - } - fn expect_origin(_origin: &Option) -> Weight { - XcmGeneric::::expect_origin() - } - fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { - XcmGeneric::::expect_error() - } - fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { - XcmGeneric::::expect_transact_status() - } - fn query_pallet(_module_name: &Vec, _response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::query_pallet() - } - fn expect_pallet( - _index: &u32, - _name: &Vec, - _module_name: &Vec, - _crate_major: &u32, - _min_crate_minor: &u32, - ) -> Weight { - XcmGeneric::::expect_pallet() - } - fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::report_transact_status() - } - fn clear_transact_status() -> Weight { - XcmGeneric::::clear_transact_status() - } - fn universal_origin(_: &Junction) -> Weight { - Weight::MAX - } - fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { - Weight::MAX - } - fn lock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn unlock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn note_unlockable(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn request_unlock(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn set_fees_mode(_: &bool) -> Weight { - XcmGeneric::::set_fees_mode() - } - fn set_topic(_topic: &[u8; 32]) -> Weight { - XcmGeneric::::set_topic() - } - fn clear_topic() -> Weight { - XcmGeneric::::clear_topic() - } - fn alias_origin(_: &MultiLocation) -> Weight { - // XCM Executor does not currently support alias origin 
operations - Weight::MAX - } - fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { - XcmGeneric::::unpaid_execution() - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs deleted file mode 100644 index 814c416bd4c0cbf84e756392650a3a8432470428..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::fungible -// --chain=bridge-hub-polkadot-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::fungible`. -pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn withdraw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 24_237_000 picoseconds. - Weight::from_parts(24_697_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn transfer_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `153` - // Estimated: `6196` - // Minimum execution time: 52_269_000 picoseconds. 
- Weight::from_parts(53_848_000, 6196) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn transfer_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `223` - // Estimated: `6196` - // Minimum execution time: 77_611_000 picoseconds. - Weight::from_parts(82_634_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) - } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn reserve_asset_deposited() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_reserve_withdraw() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 29_506_000 picoseconds. - Weight::from_parts(30_269_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn receive_teleported_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_541_000 picoseconds. 
- Weight::from_parts(3_629_000, 0) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn deposit_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3593` - // Minimum execution time: 25_651_000 picoseconds. - Weight::from_parts(26_078_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn deposit_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `122` - // Estimated: `3593` - // Minimum execution time: 52_050_000 picoseconds. - Weight::from_parts(53_293_000, 3593) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_teleport() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 30_009_000 picoseconds. 
- Weight::from_parts(30_540_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs deleted file mode 100644 index 9a039a6d63b26c5d5d615980b40a0a99a2058834..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::generic` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::generic`. 
-pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_holding() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 30_923_000 picoseconds. - Weight::from_parts(31_653_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn buy_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_837_000 picoseconds. - Weight::from_parts(2_932_000, 0) - } - // Storage: `PolkadotXcm::Queries` (r:1 w:0) - // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn query_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `3497` - // Minimum execution time: 10_319_000 picoseconds. - Weight::from_parts(10_614_000, 3497) - .saturating_add(T::DbWeight::get().reads(1)) - } - pub fn transact() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 11_466_000 picoseconds. - Weight::from_parts(12_005_000, 0) - } - pub fn refund_surplus() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_039_000 picoseconds. - Weight::from_parts(3_125_000, 0) - } - pub fn set_error_handler() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_655_000 picoseconds. - Weight::from_parts(2_717_000, 0) - } - pub fn set_appendix() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_655_000 picoseconds. - Weight::from_parts(2_695_000, 0) - } - pub fn clear_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_612_000 picoseconds. - Weight::from_parts(2_685_000, 0) - } - pub fn descend_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_286_000 picoseconds. - Weight::from_parts(3_425_000, 0) - } - pub fn clear_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_613_000 picoseconds. 
- Weight::from_parts(2_699_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 24_616_000 picoseconds. - Weight::from_parts(25_147_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) - // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn claim_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `3555` - // Minimum execution time: 14_511_000 picoseconds. - Weight::from_parts(14_831_000, 3555) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn trap() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_640_000 picoseconds. - Weight::from_parts(2_702_000, 0) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn subscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 26_044_000 picoseconds. 
- Weight::from_parts(26_561_000, 3503) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn unsubscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_568_000 picoseconds. - Weight::from_parts(4_764_000, 0) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn burn_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_953_000 picoseconds. - Weight::from_parts(4_079_000, 0) - } - pub fn expect_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_793_000 picoseconds. - Weight::from_parts(2_914_000, 0) - } - pub fn expect_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_719_000 picoseconds. - Weight::from_parts(2_829_000, 0) - } - pub fn expect_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_710_000 picoseconds. - Weight::from_parts(2_824_000, 0) - } - pub fn expect_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_941_000 picoseconds. - Weight::from_parts(3_201_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn query_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 28_080_000 picoseconds. - Weight::from_parts(28_920_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn expect_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_752_000 picoseconds. 
- Weight::from_parts(4_982_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 24_810_000 picoseconds. - Weight::from_parts(25_270_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn clear_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_676_000 picoseconds. - Weight::from_parts(2_780_000, 0) - } - pub fn set_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_624_000 picoseconds. - Weight::from_parts(2_710_000, 0) - } - pub fn clear_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_611_000 picoseconds. - Weight::from_parts(2_707_000, 0) - } - pub fn set_fees_mode() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_653_000 picoseconds. - Weight::from_parts(2_740_000, 0) - } - pub fn unpaid_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_821_000 picoseconds. - Weight::from_parts(2_874_000, 0) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs deleted file mode 100644 index 0be87bd46facfc079cda4a1ea4cf6b7a34114d75..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
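The two weight modules removed above are plain Substrate benchmark-CLI output: every benchmarked XCM instruction returns a base `Weight::from_parts(ref_time, proof_size)` and then adds the cost of the storage reads/writes the benchmark observed. Below is a minimal standalone sketch of that shape, reusing the numbers from `transfer_asset()` above and substituting `RocksDbWeight` for the runtime's configured `T::DbWeight`; it is an illustration of the pattern, not the deleted `WeightInfo<T>` itself.

use frame_support::weights::{constants::RocksDbWeight, Weight};

// Sketch of the generated-weight shape: base weight plus observed DB accesses.
// The numbers are copied from `transfer_asset()` above; `RocksDbWeight` stands
// in for the runtime's configured `T::DbWeight`.
fn transfer_asset_weight_sketch() -> Weight {
    Weight::from_parts(53_848_000, 6196)
        .saturating_add(RocksDbWeight::get().reads(2)) // two `System::Account` reads
        .saturating_add(RocksDbWeight::get().writes(2)) // two `System::Account` writes
}

fn main() {
    let w = transfer_asset_weight_sketch();
    println!("ref_time = {} ps, proof_size = {} bytes", w.ref_time(), w.proof_size());
}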
- -pub use bridge_hub_polkadot_runtime::{ - xcm_config::XcmConfig, AllPalletsWithoutSystem, Balances, ExistentialDeposit, ParachainSystem, - PolkadotXcm, Runtime, RuntimeEvent, SessionKeys, -}; -use codec::Decode; -use frame_support::parameter_types; -use parachains_common::{polkadot::fee::WeightToFee, AccountId, AuraId}; - -const ALICE: [u8; 32] = [1u8; 32]; - -parameter_types! { - pub CheckingAccount: AccountId = PolkadotXcm::check_account(); -} - -bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - bridge_hub_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - 1002 -); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 671d38e808fc5ee40ada0e47aa36e53752710c30..d362c5f12a605c0fd75be0bb818dee23fa78ad8f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -6,68 +6,78 @@ edition.workspace = true description = "Rococo's BridgeHub parachain runtime" license = "Apache-2.0" +[lints] +workspace = true + [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ + "derive", +] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } +serde = { version = "1.0.193", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features 
= false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = 
"../../../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = 
"../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = [ + "parameterized-consensus-hook", +] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = [ + "bridging", +] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } @@ -77,53 +87,69 @@ parachains-common = { path = "../../../common", default-features = false } # Bridges bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-asset-hub-wococo = { path = "../../../../../bridges/primitives/chain-asset-hub-wococo", default-features = false } +bp-bridge-hub-polkadot = { path = "../../../../../bridges/primitives/chain-bridge-hub-polkadot", default-features = false } bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } +bp-polkadot-bulletin = { path = "../../../../../bridges/primitives/chain-polkadot-bulletin", default-features = false } bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } -bp-wococo = { path = "../../../../../bridges/primitives/chain-wococo", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = 
{ path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } +pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false } bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } +# Ethereum Bridge (Snowbridge) +snowbridge-beacon-primitives = { path = "../../../../../bridges/snowbridge/parachain/primitives/beacon", default-features = false } +snowbridge-system = { path = "../../../../../bridges/snowbridge/parachain/pallets/system", default-features = false } +snowbridge-system-runtime-api = { path = "../../../../../bridges/snowbridge/parachain/pallets/system/runtime-api", default-features = false } +snowbridge-core = { path = "../../../../../bridges/snowbridge/parachain/primitives/core", default-features = false } +snowbridge-ethereum-beacon-client = { path = "../../../../../bridges/snowbridge/parachain/pallets/ethereum-beacon-client", default-features = false } +snowbridge-inbound-queue = { path = "../../../../../bridges/snowbridge/parachain/pallets/inbound-queue", default-features = false } +snowbridge-outbound-queue = { path = "../../../../../bridges/snowbridge/parachain/pallets/outbound-queue", default-features = false } +snowbridge-outbound-queue-runtime-api = { path = "../../../../../bridges/snowbridge/parachain/pallets/outbound-queue/runtime-api", default-features = false } +snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/parachain/primitives/router", default-features = false } +snowbridge-runtime-common = { path = "../../../../../bridges/snowbridge/parachain/runtime/runtime-common", default-features = false } +snowbridge-rococo-common = { path = "../../../../../bridges/snowbridge/parachain/runtime/rococo-common", default-features = false } + +bridge-hub-common = { path = "../common", default-features = false } + [dev-dependencies] static_assertions = "1.1" bridge-hub-test-utils = { path = "../test-utils" } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = ["integrity-test"] } +bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = [ + "integrity-test", +] } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } [features] -default = [ "std" ] +default = ["beacon-spec-mainnet", "std"] std = [ "bp-asset-hub-rococo/std", "bp-asset-hub-westend/std", - "bp-asset-hub-wococo/std", + "bp-bridge-hub-polkadot/std", "bp-bridge-hub-rococo/std", "bp-bridge-hub-westend/std", - "bp-bridge-hub-wococo/std", "bp-header-chain/std", "bp-messages/std", "bp-parachains/std", + "bp-polkadot-bulletin/std", "bp-polkadot-core/std", "bp-relayers/std", "bp-rococo/std", "bp-runtime/std", "bp-westend/std", - "bp-wococo/std", + "bridge-hub-common/std", "bridge-runtime-common/std", "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -154,6 +180,7 @@ std = [ "pallet-transaction-payment/std", "pallet-utility/std", "pallet-xcm-benchmarks?/std", + "pallet-xcm-bridge-hub/std", "pallet-xcm/std", "parachain-info/std", "parachains-common/std", @@ -163,6 +190,17 @@ std = [ "rococo-runtime-constants/std", "scale-info/std", "serde", + 
"snowbridge-beacon-primitives/std", + "snowbridge-core/std", + "snowbridge-ethereum-beacon-client/std", + "snowbridge-inbound-queue/std", + "snowbridge-outbound-queue-runtime-api/std", + "snowbridge-outbound-queue/std", + "snowbridge-rococo-common/std", + "snowbridge-router-primitives/std", + "snowbridge-runtime-common/std", + "snowbridge-system-runtime-api/std", + "snowbridge-system/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", @@ -178,14 +216,16 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", + "substrate-wasm-builder", "xcm-builder/std", "xcm-executor/std", "xcm/std", ] runtime-benchmarks = [ + "beacon-spec-mainnet", + "bridge-hub-common/runtime-benchmarks", "bridge-runtime-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -206,10 +246,19 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm-bridge-hub/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "snowbridge-ethereum-beacon-client/runtime-benchmarks", + "snowbridge-inbound-queue/runtime-benchmarks", + "snowbridge-outbound-queue/runtime-benchmarks", + "snowbridge-rococo-common/runtime-benchmarks", + "snowbridge-router-primitives/runtime-benchmarks", + "snowbridge-runtime-common/runtime-benchmarks", + "snowbridge-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", @@ -217,7 +266,6 @@ runtime-benchmarks = [ try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -239,15 +287,23 @@ try-runtime = [ "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-utility/try-runtime", + "pallet-xcm-bridge-hub/try-runtime", "pallet-xcm/try-runtime", "parachain-info/try-runtime", "polkadot-runtime-common/try-runtime", + "snowbridge-ethereum-beacon-client/try-runtime", + "snowbridge-inbound-queue/try-runtime", + "snowbridge-outbound-queue/try-runtime", + "snowbridge-system/try-runtime", "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] +beacon-spec-mainnet = [ + "snowbridge-ethereum-beacon-client/beacon-spec-mainnet", +] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs index 296ec88a85699b6818eb46e20cc80131dc8e5f3a..93ef9470363cd3dd41a92fe529226ad3fd7b2e00 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs @@ -21,19 +21,20 @@ //! For example, the messaging pallet needs to know the sending and receiving chains, but the //! GRANDPA tracking pallet only needs to be aware of one chain. -use super::{weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent}; +use super::{ + weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent, RuntimeOrigin, +}; use bp_parachains::SingleParaStoredHeaderDataBuilder; +use bp_runtime::UnderlyingChainProvider; +use bridge_runtime_common::messages::ThisChainWithMessages; use frame_support::{parameter_types, traits::ConstU32}; +use sp_runtime::RuntimeDebug; parameter_types! { pub const RelayChainHeadersToKeep: u32 = 1024; pub const ParachainHeadsToKeep: u32 = 64; - pub const RococoBridgeParachainPalletName: &'static str = "Paras"; - pub const MaxRococoParaHeadDataSize: u32 = bp_rococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; - pub const WococoBridgeParachainPalletName: &'static str = "Paras"; - pub const MaxWococoParaHeadDataSize: u32 = bp_wococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; - pub const WestendBridgeParachainPalletName: &'static str = "Paras"; + pub const WestendBridgeParachainPalletName: &'static str = bp_westend::PARAS_PALLET_NAME; pub const MaxWestendParaHeadDataSize: u32 = bp_westend::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; pub storage RequiredStakeForStakeAndSlash: Balance = 1_000_000; @@ -43,52 +44,6 @@ parameter_types! { pub storage DeliveryRewardInBalance: u64 = 1_000_000; } -/// Add GRANDPA bridge pallet to track Wococo relay chain. -pub type BridgeGrandpaWococoInstance = pallet_bridge_grandpa::Instance1; -impl pallet_bridge_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = bp_wococo::Wococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; - type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_wococo_finality::WeightInfo; -} - -/// Add parachain bridge pallet to track Wococo BridgeHub parachain -pub type BridgeParachainWococoInstance = pallet_bridge_parachains::Instance1; -impl pallet_bridge_parachains::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_wococo::WeightInfo; - type BridgesGrandpaPalletInstance = BridgeGrandpaWococoInstance; - type ParasPalletName = WococoBridgeParachainPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ParachainHeadsToKeep; - type MaxParaHeadDataSize = MaxWococoParaHeadDataSize; -} - -/// Add GRANDPA bridge pallet to track Rococo relay chain. 
-pub type BridgeGrandpaRococoInstance = pallet_bridge_grandpa::Instance2; -impl pallet_bridge_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = bp_rococo::Rococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; - type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_rococo_finality::WeightInfo; -} - -/// Add parachain bridge pallet to track Rococo BridgeHub parachain -pub type BridgeParachainRococoInstance = pallet_bridge_parachains::Instance2; -impl pallet_bridge_parachains::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_rococo::WeightInfo; - type BridgesGrandpaPalletInstance = BridgeGrandpaRococoInstance; - type ParasPalletName = RococoBridgeParachainPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ParachainHeadsToKeep; - type MaxParaHeadDataSize = MaxRococoParaHeadDataSize; -} - /// Add GRANDPA bridge pallet to track Westend relay chain. pub type BridgeGrandpaWestendInstance = pallet_bridge_grandpa::Instance3; impl pallet_bridge_grandpa::Config for Runtime { @@ -96,14 +51,14 @@ impl pallet_bridge_grandpa::Config for Runtime { type BridgedChain = bp_westend::Westend; type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_westend_finality::WeightInfo; + type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; } /// Add parachain bridge pallet to track Westend BridgeHub parachain pub type BridgeParachainWestendInstance = pallet_bridge_parachains::Instance3; impl pallet_bridge_parachains::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_westend::WeightInfo; + type WeightInfo = weights::pallet_bridge_parachains::WeightInfo; type BridgesGrandpaPalletInstance = BridgeGrandpaWestendInstance; type ParasPalletName = WestendBridgeParachainPalletName; type ParaStoredHeaderDataBuilder = @@ -128,3 +83,33 @@ impl pallet_bridge_relayers::Config for Runtime { >; type WeightInfo = weights::pallet_bridge_relayers::WeightInfo; } + +/// Add GRANDPA bridge pallet to track Rococo Bulletin chain. +pub type BridgeGrandpaRococoBulletinInstance = pallet_bridge_grandpa::Instance4; +impl pallet_bridge_grandpa::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type BridgedChain = bp_polkadot_bulletin::PolkadotBulletin; + type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type HeadersToKeep = RelayChainHeadersToKeep; + // Technically this is incorrect - we have two pallet instances and ideally we shall + // benchmark every instance separately. But the benchmarking engine has a flaw - it + // messes with components. E.g. in Kusama maximal validators count is 1024 and in + // Bulletin chain it is 100. But benchmarking engine runs Bulletin benchmarks using + // components range, computed for Kusama => it causes an error. + // + // In practice, however, GRANDPA pallet works the same way for all bridged chains, so + // weights are also the same for both bridges. + type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; +} + +/// BridgeHubRococo chain from message lane point of view. 
+#[derive(RuntimeDebug, Clone, Copy)] +pub struct BridgeHubRococo; + +impl UnderlyingChainProvider for BridgeHubRococo { + type Chain = bp_bridge_hub_rococo::BridgeHubRococo; +} + +impl ThisChainWithMessages for BridgeHubRococo { + type RuntimeOrigin = RuntimeOrigin; +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9d7f60e71a56d0f0ad7e1ce10cbec6d488d99a3 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -0,0 +1,292 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Bridge definitions used on BridgeHubRococo for bridging to Rococo Bulletin. +//! +//! Rococo Bulletin chain will be the 1:1 copy of the Polkadot Bulletin, so we +//! are reusing Polkadot Bulletin chain primitives everywhere here. + +use crate::{ + bridge_common_config::{BridgeGrandpaRococoBulletinInstance, BridgeHubRococo}, + weights, + xcm_config::UniversalLocation, + AccountId, BridgeRococoBulletinGrandpa, BridgeRococoBulletinMessages, PolkadotXcm, Runtime, + RuntimeEvent, XcmOverRococoBulletin, XcmRouter, +}; +use bp_messages::LaneId; +use bridge_runtime_common::{ + messages, + messages::{ + source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, + target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, + MessageBridge, UnderlyingChainProvider, + }, + messages_xcm_extension::{ + SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, + XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, + }, + refund_relayer_extension::{ + ActualFeeRefund, RefundBridgedGrandpaMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, + }, +}; + +use frame_support::{parameter_types, traits::PalletInfoAccess}; +use sp_runtime::RuntimeDebug; +use xcm::{ + latest::prelude::*, + prelude::{InteriorMultiLocation, NetworkId}, +}; +use xcm_builder::BridgeBlobDispatcher; + +parameter_types! { + /// Maximal number of entries in the unrewarded relayers vector at the Rococo Bridge Hub. It matches the + /// maximal number of unrewarded relayers that the single confirmation transaction at Rococo Bulletin Chain + /// may process. + pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = + bp_polkadot_bulletin::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; + /// Maximal number of unconfirmed messages at the Rococo Bridge Hub. It matches the maximal number of + /// unconfirmed messages that the single confirmation transaction at Rococo Bulletin Chain may process. 
+ pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = + bp_polkadot_bulletin::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; + /// Bridge specific chain (network) identifier of the Rococo Bulletin Chain. + pub const RococoBulletinChainId: bp_runtime::ChainId = bp_runtime::POLKADOT_BULLETIN_CHAIN_ID; + /// Interior location (relative to this runtime) of the with-RococoBulletin messages pallet. + pub BridgeRococoToRococoBulletinMessagesPalletInstance: InteriorMultiLocation = X1( + PalletInstance(::index() as u8) + ); + /// Rococo Bulletin Network identifier. + pub RococoBulletinGlobalConsensusNetwork: NetworkId = NetworkId::PolkadotBulletin; + /// Relative location of the Rococo Bulletin chain. + pub RococoBulletinGlobalConsensusNetworkLocation: MultiLocation = MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())) + }; + /// All active lanes that the current bridge supports. + pub ActiveOutboundLanesToRococoBulletin: &'static [bp_messages::LaneId] + = &[XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN]; + /// Lane identifier, used to connect Rococo People and Rococo Bulletin chain. + pub const RococoPeopleToRococoBulletinMessagesLane: bp_messages::LaneId + = XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN; + + /// Priority boost that the registered relayer receives for every additional message in the message + /// delivery transaction. + /// + /// It is determined semi-automatically - see `FEE_BOOST_PER_MESSAGE` constant to get the + /// meaning of this value. + pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; + + /// Identifier of the sibling Rococo People parachain. + pub RococoPeopleParaId: cumulus_primitives_core::ParaId = rococo_runtime_constants::system_parachain::PEOPLE_ID.into(); + /// A route (XCM location and bridge lane) that the Rococo People Chain -> Rococo Bulletin Chain + /// message is following. + pub FromRococoPeopleToRococoBulletinRoute: SenderAndLane = SenderAndLane::new( + ParentThen(X1(Parachain(RococoPeopleParaId::get().into()))).into(), + XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, + ); + /// All active routes and their destinations. + pub ActiveLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorMultiLocation))> = sp_std::vec![ + ( + FromRococoPeopleToRococoBulletinRoute::get(), + (RococoBulletinGlobalConsensusNetwork::get(), Here) + ) + ]; + + /// XCM message that is never sent. + pub NeverSentMessage: Option> = None; +} +pub const XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN: LaneId = LaneId([0, 0, 0, 0]); + +/// Proof of messages, coming from Rococo Bulletin chain. +pub type FromRococoBulletinMessagesProof = + FromBridgedChainMessagesProof; +/// Messages delivery proof for Rococo Bridge Hub -> Rococo Bulletin messages. +pub type ToRococoBulletinMessagesDeliveryProof = + FromBridgedChainMessagesDeliveryProof; + +/// Dispatches received XCM messages from other bridge. 
+type FromRococoBulletinMessageBlobDispatcher = BridgeBlobDispatcher< + XcmRouter, + UniversalLocation, + BridgeRococoToRococoBulletinMessagesPalletInstance, +>; + +/// Export XCM messages to be relayed to the other side +pub type ToRococoBulletinHaulBlobExporter = XcmOverRococoBulletin; + +pub struct ToRococoBulletinXcmBlobHauler; +impl XcmBlobHauler for ToRococoBulletinXcmBlobHauler { + type Runtime = Runtime; + type MessagesInstance = WithRococoBulletinMessagesInstance; + type ToSourceChainSender = XcmRouter; + type CongestedMessage = NeverSentMessage; + type UncongestedMessage = NeverSentMessage; +} + +/// On messages delivered callback. +type OnMessagesDeliveredFromRococoBulletin = + XcmBlobHaulerAdapter; + +/// Messaging Bridge configuration for BridgeHubRococo -> Rococo Bulletin. +pub struct WithRococoBulletinMessageBridge; +impl MessageBridge for WithRococoBulletinMessageBridge { + // Bulletin chain assumes it is bridged with Polkadot Bridge Hub + const BRIDGED_MESSAGES_PALLET_NAME: &'static str = + bp_bridge_hub_polkadot::WITH_BRIDGE_HUB_POLKADOT_MESSAGES_PALLET_NAME; + type ThisChain = BridgeHubRococo; + type BridgedChain = RococoBulletin; + type BridgedHeaderChain = BridgeRococoBulletinGrandpa; +} + +/// Message verifier for RococoBulletin messages sent from BridgeHubRococo. +pub type ToRococoBulletinMessageVerifier = + messages::source::FromThisChainMessageVerifier; + +/// Maximal outbound payload size of BridgeHubRococo -> RococoBulletin messages. +pub type ToRococoBulletinMaximalOutboundPayloadSize = + messages::source::FromThisChainMaximalOutboundPayloadSize; + +/// RococoBulletin chain from message lane point of view. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct RococoBulletin; + +impl UnderlyingChainProvider for RococoBulletin { + type Chain = bp_polkadot_bulletin::PolkadotBulletin; +} + +impl messages::BridgedChainWithMessages for RococoBulletin {} + +/// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin +/// chain. +pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundSignedExtensionAdapter< + RefundBridgedGrandpaMessages< + Runtime, + BridgeGrandpaRococoBulletinInstance, + RefundableMessagesLane< + WithRococoBulletinMessagesInstance, + RococoPeopleToRococoBulletinMessagesLane, + >, + ActualFeeRefund, + PriorityBoostPerMessage, + StrOnBridgeHubRococoRefundRococoBulletinMessages, + >, +>; +bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundRococoBulletinMessages); + +/// Add XCM messages support for BridgeHubRococo to support Rococo->Rococo Bulletin XCM messages. 
+pub type WithRococoBulletinMessagesInstance = pallet_bridge_messages::Instance4; +impl pallet_bridge_messages::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = + weights::pallet_bridge_messages_rococo_to_rococo_bulletin::WeightInfo; + type BridgedChainId = RococoBulletinChainId; + type ActiveOutboundLanes = ActiveOutboundLanesToRococoBulletin; + type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; + type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; + + type MaximalOutboundPayloadSize = ToRococoBulletinMaximalOutboundPayloadSize; + type OutboundPayload = XcmAsPlainPayload; + + type InboundPayload = XcmAsPlainPayload; + type InboundRelayer = AccountId; + type DeliveryPayments = (); + + type TargetHeaderChain = TargetHeaderChainAdapter; + type LaneMessageVerifier = ToRococoBulletinMessageVerifier; + type DeliveryConfirmationPayments = (); + + type SourceHeaderChain = SourceHeaderChainAdapter; + type MessageDispatch = + XcmBlobMessageDispatch; + type OnMessagesDelivered = OnMessagesDeliveredFromRococoBulletin; +} + +/// Add support for the export and dispatch of XCM programs. +pub type XcmOverPolkadotBulletinInstance = pallet_xcm_bridge_hub::Instance2; +impl pallet_xcm_bridge_hub::Config for Runtime { + type UniversalLocation = UniversalLocation; + type BridgedNetwork = RococoBulletinGlobalConsensusNetworkLocation; + type BridgeMessagesPalletInstance = WithRococoBulletinMessagesInstance; + type MessageExportPrice = (); + type DestinationVersion = + XcmVersionOfDestAndRemoteBridge; + type Lanes = ActiveLanes; + type LanesSupport = ToRococoBulletinXcmBlobHauler; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::bridge_common_config::BridgeGrandpaRococoBulletinInstance; + use bridge_runtime_common::{ + assert_complete_bridge_types, integrity::check_message_lane_weights, + }; + use parachains_common::{rococo, Balance}; + + /// Every additional message in the message delivery transaction boosts its priority. + /// So the priority of transaction with `N+1` messages is larger than priority of + /// transaction with `N` messages by the `PriorityBoostPerMessage`. + /// + /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. + /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. + /// + /// We want this tip to be large enough (delivery transactions with more messages = less + /// operational costs and a faster bridge), so this value should be significant. 
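Reading the doc comment above together with the configured constants, the intended relationship is roughly

    priority(tx with N messages) ≈ priority(tx with 1 message) + (N - 1) * PriorityBoostPerMessage

with `PriorityBoostPerMessage` (182_044_444_444_444 in the `parameter_types!` block above) calibrated so that each per-message increment affects transaction priority about as much as tipping `FEE_BOOST_PER_MESSAGE` (2 × `rococo::currency::UNITS`, defined immediately below); the `ensure_priority_boost_is_sane` call in the test further down re-checks that calibration against the current fee configuration.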
+ const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS; + + #[test] + fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { + check_message_lane_weights::< + bp_bridge_hub_rococo::BridgeHubRococo, + Runtime, + WithRococoBulletinMessagesInstance, + >( + bp_polkadot_bulletin::EXTRA_STORAGE_PROOF_SIZE, + bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + true, + ); + } + + #[test] + fn ensure_bridge_integrity() { + assert_complete_bridge_types!( + runtime: Runtime, + with_bridged_chain_grandpa_instance: BridgeGrandpaRococoBulletinInstance, + with_bridged_chain_messages_instance: WithRococoBulletinMessagesInstance, + bridge: WithRococoBulletinMessageBridge, + this_chain: bp_rococo::Rococo, + bridged_chain: bp_polkadot_bulletin::PolkadotBulletin, + ); + + // we can't use `assert_complete_bridge_constants` here, because there's a trick with + // the Bulletin chain - it has (almost) the same runtime for Polkadot Bulletin and Rococo + // Bulletin, so we have to adhere to the Polkadot names here + + bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< + Runtime, + WithRococoBulletinMessagesInstance, + PriorityBoostPerMessage, + >(FEE_BOOST_PER_MESSAGE); + + assert_eq!( + BridgeRococoToRococoBulletinMessagesPalletInstance::get(), + X1(PalletInstance( + bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_BULLETIN_MESSAGES_PALLET_INDEX + )) + ); + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..b0526148fa3a9da11e54ff676c3aeee141c623d4 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use crate::{ + xcm_config::{AgentIdOf, UniversalLocation}, + Runtime, +}; +use snowbridge_rococo_common::EthereumNetwork; +use snowbridge_router_primitives::outbound::EthereumBlobExporter; + +/// Exports message to the Ethereum Gateway contract. +pub type SnowbridgeExporter = EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + snowbridge_outbound_queue::Pallet, + AgentIdOf, +>; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs deleted file mode 100644 index 35497c84068423cc597442a172bb74f74a846b09..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus.
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Bridge definitions used on BridgeHub with the Wococo flavor for bridging to BridgeHubRococo. - -use crate::{ - bridge_common_config::{BridgeParachainRococoInstance, DeliveryRewardInBalance}, - weights, AccountId, BridgeRococoMessages, ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, - XcmRouter, -}; -use bp_messages::LaneId; -use bridge_runtime_common::{ - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, - messages_xcm_extension::{ - SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, - XcmBlobMessageDispatch, - }, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, -}; -use codec::Encode; -use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; -use xcm::{ - latest::prelude::*, - prelude::{InteriorMultiLocation, NetworkId}, -}; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; - -parameter_types! 
{ - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubRococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pub BridgeHubWococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Wococo), Parachain(ParachainInfo::parachain_id().into())); - pub BridgeWococoToRococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; - pub ActiveOutboundLanesToBridgeHubRococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO]; - pub const AssetHubWococoToAssetHubRococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO; - // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value - pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; - - pub AssetHubWococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID.into(); - pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); - - pub FromAssetHubWococoToAssetHubRococoRoute: SenderAndLane = SenderAndLane::new( - ParentThen(X1(Parachain(AssetHubWococoParaId::get().into()))).into(), - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - ); - - pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); - - pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); -} -pub const XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO: LaneId = LaneId([0, 0, 0, 1]); - -fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested, - } - ) - .encode() - .into(), - } - ] -} - -/// Proof of messages, coming from Rococo. -pub type FromRococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; -/// Messages delivery proof for RococoBridge Hub -> Wococo BridgeHub messages. -pub type ToRococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; - -/// Dispatches received XCM messages from other bridge -type FromRococoMessageBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubWococoUniversalLocation, - BridgeWococoToRococoMessagesPalletInstance, ->; - -/// Export XCM messages to be relayed to the other side -pub type ToBridgeHubRococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - RococoGlobalConsensusNetwork, - (), ->; -pub struct ToBridgeHubRococoXcmBlobHauler; -impl XcmBlobHauler for ToBridgeHubRococoXcmBlobHauler { - type Runtime = Runtime; - type MessagesInstance = WithBridgeHubRococoMessagesInstance; - type SenderAndLane = FromAssetHubWococoToAssetHubRococoRoute; - - type ToSourceChainSender = XcmRouter; - type CongestedMessage = CongestedMessage; - type UncongestedMessage = UncongestedMessage; -} - -/// On messages delivered callback. 
-type OnMessagesDelivered = XcmBlobHaulerAdapter; - -/// Messaging Bridge configuration for BridgeHubWococo -> BridgeHubRococo -pub struct WithBridgeHubRococoMessageBridge; -impl MessageBridge for WithBridgeHubRococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubWococo; - type BridgedChain = BridgeHubRococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainRococoInstance, - bp_bridge_hub_rococo::BridgeHubRococo, - >; -} - -/// Message verifier for BridgeHubRococo messages sent from BridgeHubWococo -pub type ToBridgeHubRococoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Maximal outbound payload size of BridgeHubWococo -> BridgeHubRococo messages. -pub type ToBridgeHubRococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubRococo {} - -/// BridgeHubWococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWococo; - -impl UnderlyingChainProvider for BridgeHubWococo { - type Chain = bp_bridge_hub_wococo::BridgeHubWococo; -} - -impl ThisChainWithMessages for BridgeHubWococo { - type RuntimeOrigin = RuntimeOrigin; -} - -/// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. -pub type OnBridgeHubWococoRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< - Runtime, - RefundableParachain, - RefundableMessagesLane< - WithBridgeHubRococoMessagesInstance, - AssetHubWococoToAssetHubRococoMessagesLane, - >, - ActualFeeRefund, - PriorityBoostPerMessage, - StrOnBridgeHubWococoRefundBridgeHubRococoMessages, - >, ->; -bp_runtime::generate_static_str_provider!(OnBridgeHubWococoRefundBridgeHubRococoMessages); - -/// Add XCM messages support for BridgeHubWococo to support Wococo->Rococo XCM messages -pub type WithBridgeHubRococoMessagesInstance = pallet_bridge_messages::Instance2; -impl pallet_bridge_messages::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_wococo_to_rococo::WeightInfo; - type BridgedChainId = BridgeHubRococoChainId; - type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubRococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = ToBridgeHubRococoMaximalOutboundPayloadSize; - type OutboundPayload = XcmAsPlainPayload; - - type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type LaneMessageVerifier = ToBridgeHubRococoMessageVerifier; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - Runtime, - WithBridgeHubRococoMessagesInstance, - DeliveryRewardInBalance, - >; - - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = XcmBlobMessageDispatch< - FromRococoMessageBlobDispatcher, - Self::WeightInfo, - 
cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider< - AssetHubWococoParaId, - Runtime, - >, - >; - type OnMessagesDelivered = OnMessagesDelivered; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::bridge_common_config::BridgeGrandpaRococoInstance; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, - }, - }; - use parachains_common::{wococo, Balance}; - - /// Every additional message in the message delivery transaction boosts its priority. - /// So the priority of transaction with `N+1` messages is larger than priority of - /// transaction with `N` messages by the `PriorityBoostPerMessage`. - /// - /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. - /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. - /// - /// We want this tip to be large enough (delivery transactions with more messages = less - /// operational costs and a faster bridge), so this value should be significant. - const FEE_BOOST_PER_MESSAGE: Balance = 2 * wococo::currency::UNITS; - - #[test] - fn ensure_bridge_hub_wococo_message_lane_weights_are_correct() { - check_message_lane_weights::< - bp_bridge_hub_wococo::BridgeHubWococo, - Runtime, - WithBridgeHubRococoMessagesInstance, - >( - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE, - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - true, - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, - with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, - bridge: WithBridgeHubRococoMessageBridge, - this_chain: bp_wococo::Wococo, - bridged_chain: bp_rococo::Rococo, - ); - - assert_complete_bridge_constants::< - Runtime, - BridgeGrandpaRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_bridge_hub_wococo::BlockLength::get(), - block_weights: bp_bridge_hub_wococo::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_rococo::WITH_ROCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - }, - }); - - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< - Runtime, - WithBridgeHubRococoMessagesInstance, - PriorityBoostPerMessage, - >(FEE_BOOST_PER_MESSAGE); - - assert_eq!( - BridgeWococoToRococoMessagesPalletInstance::get(), - X1(PalletInstance( - bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX - )) - ); - } -} diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 36dcab09dea786a6909991920349fafa0631a9be..961b47e1e13b630a8c152caa5657b756f6b67844 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -14,11 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! Bridge definitions used on BridgeHub with the Rococo flavor for bridging to BridgeHubWestend. +//! Bridge definitions used on BridgeHubRococo for bridging to BridgeHubWestend. use crate::{ - bridge_common_config::{BridgeParachainWestendInstance, DeliveryRewardInBalance}, - weights, AccountId, BridgeWestendMessages, ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, + bridge_common_config::{ + BridgeHubRococo, BridgeParachainWestendInstance, DeliveryRewardInBalance, + }, + weights, + xcm_config::UniversalLocation, + AccountId, BridgeWestendMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverBridgeHubWestend, XcmRouter, }; use bp_messages::LaneId; @@ -27,11 +31,11 @@ use bridge_runtime_common::{ messages::{ source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, + MessageBridge, UnderlyingChainProvider, }, messages_xcm_extension::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, - XcmBlobMessageDispatch, + XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, refund_relayer_extension::{ ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, @@ -46,7 +50,7 @@ use xcm::{ latest::prelude::*, prelude::{InteriorMultiLocation, NetworkId}, }; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; +use xcm_builder::BridgeBlobDispatcher; parameter_types! { pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = @@ -55,24 +59,41 @@ parameter_types! 
{ bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; pub const BridgeHubWestendChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_WESTEND_CHAIN_ID; pub BridgeRococoToWestendMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub BridgeHubRococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(ParachainInfo::parachain_id().into())); pub WestendGlobalConsensusNetwork: NetworkId = NetworkId::Westend; - pub ActiveOutboundLanesToBridgeHubWestend: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND]; - pub const AssetHubRococoToAssetHubWestendMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND; + pub WestendGlobalConsensusNetworkLocation: MultiLocation = MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(WestendGlobalConsensusNetwork::get())) + }; // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); pub AssetHubWestendParaId: cumulus_primitives_core::ParaId = bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID.into(); + // Lanes + pub ActiveOutboundLanesToBridgeHubWestend: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND]; + pub const AssetHubRococoToAssetHubWestendMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND; pub FromAssetHubRococoToAssetHubWestendRoute: SenderAndLane = SenderAndLane::new( ParentThen(X1(Parachain(AssetHubRococoParaId::get().into()))).into(), XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, ); + pub ActiveLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorMultiLocation))> = sp_std::vec![ + ( + FromAssetHubRococoToAssetHubWestendRoute::get(), + (WestendGlobalConsensusNetwork::get(), X1(Parachain(AssetHubWestendParaId::get().into()))) + ) + ]; pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); - pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); + + pub BridgeHubWestendLocation: MultiLocation = MultiLocation { + parents: 2, + interior: X2( + GlobalConsensus(WestendGlobalConsensusNetwork::get()), + Parachain(::PARACHAIN_ID) + ) + }; } pub const XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND: LaneId = LaneId([0, 0, 0, 2]); @@ -103,31 +124,24 @@ pub type ToWestendBridgeHubMessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof; /// Dispatches received XCM messages from other bridge -type FromWestendMessageBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubRococoUniversalLocation, - BridgeRococoToWestendMessagesPalletInstance, ->; +type FromWestendMessageBlobDispatcher = + BridgeBlobDispatcher; /// Export XCM messages to be relayed to the other side -pub type ToBridgeHubWestendHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - WestendGlobalConsensusNetwork, - (), ->; +pub type ToBridgeHubWestendHaulBlobExporter = XcmOverBridgeHubWestend; + pub struct ToBridgeHubWestendXcmBlobHauler; impl XcmBlobHauler for ToBridgeHubWestendXcmBlobHauler { type Runtime = Runtime; type MessagesInstance = WithBridgeHubWestendMessagesInstance; - type SenderAndLane = FromAssetHubRococoToAssetHubWestendRoute; - type ToSourceChainSender = XcmRouter; type CongestedMessage = CongestedMessage; type UncongestedMessage = UncongestedMessage; } /// On messages delivered callback. 
-type OnMessagesDeliveredFromWestend = XcmBlobHaulerAdapter; +type OnMessagesDeliveredFromWestend = + XcmBlobHaulerAdapter; /// Messaging Bridge configuration for BridgeHubRococo -> BridgeHubWestend pub struct WithBridgeHubWestendMessageBridge; @@ -161,18 +175,6 @@ impl UnderlyingChainProvider for BridgeHubWestend { impl messages::BridgedChainWithMessages for BridgeHubWestend {} -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl ThisChainWithMessages for BridgeHubRococo { - type RuntimeOrigin = RuntimeOrigin; -} - /// Signed extension that refunds relayers that are delivering messages from the Westend parachain. pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundSignedExtensionAdapter< RefundBridgedParachainMessages< @@ -229,6 +231,19 @@ impl pallet_bridge_messages::Config for Ru type OnMessagesDelivered = OnMessagesDeliveredFromWestend; } +/// Add support for the export and dispatch of XCM programs. +pub type XcmOverBridgeHubWestendInstance = pallet_xcm_bridge_hub::Instance1; +impl pallet_xcm_bridge_hub::Config for Runtime { + type UniversalLocation = UniversalLocation; + type BridgedNetwork = WestendGlobalConsensusNetworkLocation; + type BridgeMessagesPalletInstance = WithBridgeHubWestendMessagesInstance; + type MessageExportPrice = (); + type DestinationVersion = + XcmVersionOfDestAndRemoteBridge; + type Lanes = ActiveLanes; + type LanesSupport = ToBridgeHubWestendXcmBlobHauler; +} + #[cfg(test)] mod tests { use super::*; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs deleted file mode 100644 index 7780b02632cb7e60ae11a202f5dc62bb547b2295..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Bridge definitions used on BridgeHub with the Rococo flavor for bridging to BridgeHubWococo. 
- -use crate::{ - bridge_common_config::{BridgeParachainWococoInstance, DeliveryRewardInBalance}, - weights, AccountId, BridgeWococoMessages, ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, - XcmRouter, -}; -use bp_messages::LaneId; -use bridge_runtime_common::{ - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, - messages_xcm_extension::{ - SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, - XcmBlobMessageDispatch, - }, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, -}; - -use codec::Encode; -use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; -use xcm::{ - latest::prelude::*, - prelude::{InteriorMultiLocation, NetworkId}, -}; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; - -parameter_types! { - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubWococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID; - pub BridgeRococoToWococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub BridgeHubRococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(ParachainInfo::parachain_id().into())); - pub WococoGlobalConsensusNetwork: NetworkId = NetworkId::Wococo; - pub ActiveOutboundLanesToBridgeHubWococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO]; - pub const AssetHubRococoToAssetHubWococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO; - // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value - pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; - - pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); - pub AssetHubWococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID.into(); - - pub FromAssetHubRococoToAssetHubWococoRoute: SenderAndLane = SenderAndLane::new( - ParentThen(X1(Parachain(AssetHubRococoParaId::get().into()))).into(), - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - ); - - pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); - - pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); -} -pub const XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO: LaneId = LaneId([0, 0, 0, 1]); - -fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested, - } - ) - .encode() - .into(), - } - ] -} - -/// Proof of messages, coming from Wococo. 
-pub type FromWococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; -/// Messages delivery proof for Rococo Bridge Hub -> Wococo Bridge Hub messages. -pub type ToWococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; - -/// Dispatches received XCM messages from other bridge -type FromWococoMessageBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubRococoUniversalLocation, - BridgeRococoToWococoMessagesPalletInstance, ->; - -/// Export XCM messages to be relayed to the other side -pub type ToBridgeHubWococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - WococoGlobalConsensusNetwork, - (), ->; -pub struct ToBridgeHubWococoXcmBlobHauler; -impl XcmBlobHauler for ToBridgeHubWococoXcmBlobHauler { - type Runtime = Runtime; - type MessagesInstance = WithBridgeHubWococoMessagesInstance; - type SenderAndLane = FromAssetHubRococoToAssetHubWococoRoute; - - type ToSourceChainSender = XcmRouter; - type CongestedMessage = CongestedMessage; - type UncongestedMessage = UncongestedMessage; -} - -/// On messages delivered callback. -type OnMessagesDeliveredFromWococo = XcmBlobHaulerAdapter; - -/// Messaging Bridge configuration for BridgeHubRococo -> BridgeHubWococo -pub struct WithBridgeHubWococoMessageBridge; -impl MessageBridge for WithBridgeHubWococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubRococo; - type BridgedChain = BridgeHubWococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainWococoInstance, - bp_bridge_hub_wococo::BridgeHubWococo, - >; -} - -/// Message verifier for BridgeHubWococo messages sent from BridgeHubRococo -pub type ToBridgeHubWococoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Maximal outbound payload size of BridgeHubRococo -> BridgeHubWococo messages. -pub type ToBridgeHubWococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubWococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWococo; - -impl UnderlyingChainProvider for BridgeHubWococo { - type Chain = bp_bridge_hub_wococo::BridgeHubWococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubWococo {} - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl ThisChainWithMessages for BridgeHubRococo { - type RuntimeOrigin = RuntimeOrigin; -} - -/// Signed extension that refunds relayers that are delivering messages from the Wococo parachain. 
-pub type OnBridgeHubRococoRefundBridgeHubWococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< - Runtime, - RefundableParachain, - RefundableMessagesLane< - WithBridgeHubWococoMessagesInstance, - AssetHubRococoToAssetHubWococoMessagesLane, - >, - ActualFeeRefund, - PriorityBoostPerMessage, - StrOnBridgeHubRococoRefundBridgeHubWococoMessages, - >, ->; -bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundBridgeHubWococoMessages); - -/// Add XCM messages support for BridgeHubRococo to support Rococo->Wococo XCM messages -pub type WithBridgeHubWococoMessagesInstance = pallet_bridge_messages::Instance1; -impl pallet_bridge_messages::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_rococo_to_wococo::WeightInfo; - type BridgedChainId = BridgeHubWococoChainId; - type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubWococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = ToBridgeHubWococoMaximalOutboundPayloadSize; - type OutboundPayload = XcmAsPlainPayload; - - type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type LaneMessageVerifier = ToBridgeHubWococoMessageVerifier; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - Runtime, - WithBridgeHubWococoMessagesInstance, - DeliveryRewardInBalance, - >; - - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = XcmBlobMessageDispatch< - FromWococoMessageBlobDispatcher, - Self::WeightInfo, - cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider< - AssetHubRococoParaId, - Runtime, - >, - >; - type OnMessagesDelivered = OnMessagesDeliveredFromWococo; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::bridge_common_config::BridgeGrandpaWococoInstance; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, - }, - }; - use parachains_common::{rococo, Balance}; - - /// Every additional message in the message delivery transaction boosts its priority. - /// So the priority of transaction with `N+1` messages is larger than priority of - /// transaction with `N` messages by the `PriorityBoostPerMessage`. - /// - /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. - /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. - /// - /// We want this tip to be large enough (delivery transactions with more messages = less - /// operational costs and a faster bridge), so this value should be significant. 
- const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS; - - #[test] - fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { - check_message_lane_weights::< - bp_bridge_hub_rococo::BridgeHubRococo, - Runtime, - WithBridgeHubWococoMessagesInstance, - >( - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE, - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - true, - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaWococoInstance, - with_bridged_chain_messages_instance: WithBridgeHubWococoMessagesInstance, - bridge: WithBridgeHubWococoMessageBridge, - this_chain: bp_rococo::Rococo, - bridged_chain: bp_wococo::Wococo, - ); - - assert_complete_bridge_constants::< - Runtime, - BridgeGrandpaWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_bridge_hub_rococo::BlockLength::get(), - block_weights: bp_bridge_hub_rococo::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_wococo::WITH_WOCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME, - }, - }); - - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< - Runtime, - WithBridgeHubWococoMessagesInstance, - PriorityBoostPerMessage, - >(FEE_BOOST_PER_MESSAGE); - - assert_eq!( - BridgeRococoToWococoMessagesPalletInstance::get(), - X1(PalletInstance( - bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX - )) - ); - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index e5d38bcac2325e8305a9fec0f60b0f83bb6a7219..b21cde248e1135e5c39cfdff11162864f7b4b6aa 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -16,15 +16,9 @@ //! # Bridge Hub Rococo Runtime //! -//! This runtime is also used for Bridge Hub Wococo. We dont want to create -//! another exact copy of Bridge Hub Rococo, so we injected some tweaks backed by `RuntimeFlavor` -//! and `pub storage Flavor: RuntimeFlavor`. (For example this is needed for successful asset -//! transfer between Asset Hub Rococo and Asset Hub Wococo, where we need to have correct -//! `xcm_config::UniversalLocation` with correct `GlobalConsensus`. -//! //! This runtime currently supports bridging between: -//! - Rococo <> Wococo //! - Rococo <> Westend +//! - Rococo <> Rococo Bulletin #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
@@ -35,21 +29,25 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod bridge_common_config; -pub mod bridge_to_rococo_config; +pub mod bridge_to_bulletin_config; +pub mod bridge_to_ethereum_config; pub mod bridge_to_westend_config; -pub mod bridge_to_wococo_config; mod weights; pub mod xcm_config; -use codec::{Decode, Encode}; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use snowbridge_beacon_primitives::{Fork, ForkVersions}; +use snowbridge_core::{ + gwei, meth, outbound::Message, AgentId, AllowSiblingsOnly, PricingParameters, Rewards, +}; +use snowbridge_router_primitives::inbound::MessageToXcm; use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H160}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{Block as BlockT, Keccak256}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, + ApplyExtrinsicResult, FixedU128, }; use sp_std::prelude::*; @@ -57,13 +55,13 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::ParaId; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything, TransformOrigin}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -71,17 +69,25 @@ use frame_system::{ limits::{BlockLength, BlockWeights}, EnsureRoot, }; -use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{XcmOriginToTransactDispatchOrigin, XcmRouter}; use bp_runtime::HeaderId; +#[cfg(not(feature = "runtime-benchmarks"))] +use bridge_hub_common::BridgeHubMessageRouter; +use bridge_hub_common::{ + message_queue::{NarrowOriginToSibling, ParaIdToSibling}, + AggregateMessageOrigin, +}; +use pallet_xcm::EnsureXcm; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +pub use sp_runtime::{MultiAddress, Perbill, Permill}; +use xcm::VersionedMultiLocation; +use xcm_config::{TreasuryAccount, XcmOriginToTransactDispatchOrigin, XcmRouter}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use rococo_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; use xcm::latest::prelude::*; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -93,14 +99,18 @@ use parachains_common::{ HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; -/// Enum for handling differences in the runtime configuration for BridgeHubRococo vs -/// BridgeHubWococo. 
-#[derive(Default, Eq, PartialEq, Debug, Clone, Copy, Decode, Encode)] -pub enum RuntimeFlavor { - #[default] - Rococo, - Wococo, -} +#[cfg(feature = "runtime-benchmarks")] +use crate::xcm_config::benchmark_helpers::DoNothingRouter; +#[cfg(feature = "runtime-benchmarks")] +use snowbridge_beacon_primitives::CompactExecutionHeader; +#[cfg(feature = "runtime-benchmarks")] +use snowbridge_core::RingBufferMap; +#[cfg(feature = "runtime-benchmarks")] +pub use snowbridge_ethereum_beacon_client::ExecutionHeaderBuffer; +#[cfg(feature = "runtime-benchmarks")] +use snowbridge_inbound_queue::BenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +use sp_core::H256; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -126,9 +136,8 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, ( - bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages, bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages, + bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, ), ); @@ -141,6 +150,13 @@ pub type Migrations = ( pallet_collator_selection::migration::v1::MigrateToV1, pallet_multisig::migrations::v1::MigrateToV1, InitStorageVersions, + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + // unreleased + snowbridge_system::migration::v0::InitializeOnUpgrade< + Runtime, + ConstU32, + ConstU32, + >, ); /// Migration to initialize storage versions for pallets added after genesis. @@ -193,10 +209,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_005_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 3, + transaction_version: 4, state_version: 1, }; @@ -233,41 +249,24 @@ parameter_types! { // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The weight of database operations that the runtime can invoke. 
type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; /// Block & extrinsics weights: base values and limits. @@ -359,21 +358,27 @@ impl cumulus_pallet_parachain_system::Config for Runtime { impl parachain_info::Config for Runtime {} parameter_types! { - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; + /// Amount of weight that can be spent per block to service messages. This was increased + /// from 35% to 60% of the max block weight to accommodate the Ethereum beacon light client + /// extrinsics. The force_checkpoint and submit extrinsics (optionally, in the case of submit) include + /// the sync committee's pubkeys (512 x 48 bytes). + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(60) * RuntimeBlockWeights::get().max_block; } impl pallet_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_message_queue::WeightInfo; #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; + type MessageProcessor = + pallet_message_queue::mock_helpers::NoopMessageProcessor; #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, + type MessageProcessor = BridgeHubMessageRouter< + xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >, + EthereumOutboundQueue, >; type Size = u32; // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: @@ -417,12 +422,6 @@ parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -496,6 +495,151 @@ impl pallet_utility::Config for Runtime { type WeightInfo = weights::pallet_utility::WeightInfo; } +// Ethereum Bridge + +#[cfg(not(feature = "runtime-benchmarks"))] +parameter_types! { + pub storage EthereumGatewayAddress: H160 = H160::zero(); +} + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub storage EthereumGatewayAddress: H160 = H160(hex_literal::hex!("EDa338E4dC46038493b885327842fD3E301CaB39")); +} + +parameter_types!
{ + pub const CreateAssetCall: [u8;2] = [53, 0]; + pub const CreateAssetDeposit: u128 = (UNITS / 10) + EXISTENTIAL_DEPOSIT; + pub const InboundQueuePalletInstance: u8 = snowbridge_rococo_common::INBOUND_QUEUE_MESSAGES_PALLET_INDEX; + pub Parameters: PricingParameters = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: gwei(20), + rewards: Rewards { local: 1 * UNITS, remote: meth(1) } + }; +} + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelper for Runtime { + fn initialize_storage(block_hash: H256, header: CompactExecutionHeader) { + >::insert(block_hash, header); + } +} + +impl snowbridge_inbound_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Verifier = snowbridge_ethereum_beacon_client::Pallet; + type Token = Balances; + #[cfg(not(feature = "runtime-benchmarks"))] + type XcmSender = XcmRouter; + #[cfg(feature = "runtime-benchmarks")] + type XcmSender = DoNothingRouter; + type ChannelLookup = EthereumSystem; + type GatewayAddress = EthereumGatewayAddress; + #[cfg(feature = "runtime-benchmarks")] + type Helper = Runtime; + type MessageConverter = MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + >; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type MaxMessageSize = ConstU32<2048>; + type WeightInfo = weights::snowbridge_inbound_queue::WeightInfo; + type PricingParameters = EthereumSystem; +} + +impl snowbridge_outbound_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Hashing = Keccak256; + type MessageQueue = MessageQueue; + type Decimals = ConstU8<12>; + type MaxMessagePayloadSize = ConstU32<2048>; + type MaxMessagesPerBlock = ConstU32<32>; + type GasMeter = snowbridge_core::outbound::ConstantGasMeter; + type Balance = Balance; + type WeightToFee = WeightToFee; + type WeightInfo = weights::snowbridge_outbound_queue::WeightInfo; + type PricingParameters = EthereumSystem; + type Channels = EthereumSystem; +} + +#[cfg(not(feature = "beacon-spec-mainnet"))] +parameter_types! { + pub const ChainForkVersions: ForkVersions = ForkVersions { + genesis: Fork { + version: [0, 0, 0, 1], // 0x00000001 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 0, 1], // 0x01000001 + epoch: 0, + }, + bellatrix: Fork { + version: [2, 0, 0, 1], // 0x02000001 + epoch: 0, + }, + capella: Fork { + version: [3, 0, 0, 1], // 0x03000001 + epoch: 0, + }, + }; + pub const MaxExecutionHeadersToKeep:u32 = 1000; +} + +#[cfg(feature = "beacon-spec-mainnet")] +parameter_types! 
{ + pub const ChainForkVersions: ForkVersions = ForkVersions { + genesis: Fork { + version: [0, 0, 16, 32], // 0x00001020 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 16, 32], // 0x01001020 + epoch: 36660, + }, + bellatrix: Fork { + version: [2, 0, 16, 32], // 0x02001020 + epoch: 112260, + }, + capella: Fork { + version: [3, 0, 16, 32], // 0x03001020 + epoch: 162304, + }, + }; + pub const MaxExecutionHeadersToKeep:u32 = 8192 * 2; +} + +impl snowbridge_ethereum_beacon_client::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = MaxExecutionHeadersToKeep; + type WeightInfo = weights::snowbridge_ethereum_beacon_client::WeightInfo; +} + +#[cfg(feature = "runtime-benchmarks")] +impl snowbridge_system::BenchmarkHelper for () { + fn make_xcm_origin(location: xcm::latest::MultiLocation) -> RuntimeOrigin { + RuntimeOrigin::from(pallet_xcm::Origin::Xcm(location)) + } +} + +impl snowbridge_system::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OutboundQueue = EthereumOutboundQueue; + type SiblingOrigin = EnsureXcm; + type AgentIdOf = xcm_config::AgentIdOf; + type TreasuryAccount = TreasuryAccount; + type Token = Balances; + type WeightInfo = weights::snowbridge_system::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); + type DefaultPricingParameters = Parameters; + type InboundDeliveryCost = EthereumInboundQueue; +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -521,48 +665,44 @@ construct_runtime!( // XCM helpers. XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, // Handy utilities. Utility: pallet_utility::{Pallet, Call, Event} = 40, Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 36, - // Rococo, Wococo and Westend BridgeHubs are sharing the runtime, so this runtime has several sets of - // bridge pallets. - // - // BridgeHubRococo uses: - // - BridgeWococoGrandpa - // - BridgeWestendGrandpa - // - BridgeWococoParachains - // - BridgeWestendParachains - // - BridgeWococoMessages - // - BridgeWestendMessages - // - BridgeRelayers - // - // BridgeHubWococo uses: - // - BridgeRococoGrandpa - // - BridgeRococoParachains - // - BridgeRococoMessages - // - BridgeRelayers - - // GRANDPA bridge modules. - BridgeWococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 41, - BridgeRococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 43, - BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 48, + // Bridge relayers pallet, used by several bridges here. + BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event} = 47, - // Parachain bridge modules. - BridgeWococoParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 42, - BridgeRococoParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 44, + // With-Westend GRANDPA bridge module. + BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 48, + // With-Westend parachain bridge module. 
BridgeWestendParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 49, - - // Messaging bridge modules. - BridgeWococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 46, - BridgeRococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 45, + // With-Westend messaging bridge module. BridgeWestendMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 51, + // With-Westend bridge hub pallet. + XcmOverBridgeHubWestend: pallet_xcm_bridge_hub::::{Pallet} = 52, - BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event} = 47, + // With-Rococo Bulletin GRANDPA bridge module. + // + // we can't use `BridgeRococoBulletinGrandpa` name here, because the same Bulletin runtime will be + // used for both Rococo and Polkadot Bulletin chains AND this name affects runtime storage keys, used + // by the relayer process + BridgePolkadotBulletinGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 60, + // With-Rococo Bulletin messaging bridge module. + // + // we can't use `BridgeRococoBulletinMessages` name here, because the same Bulletin runtime will be + // used for both Rococo and Polkadot Bulletin chains AND this name affects runtime storage keys, used + // by this runtime and the relayer process + BridgePolkadotBulletinMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 61, + // With-Rococo Bulletin bridge hub pallet. + XcmOverPolkadotBulletin: pallet_xcm_bridge_hub::::{Pallet} = 62, + + EthereumInboundQueue: snowbridge_inbound_queue::{Pallet, Call, Storage, Event} = 80, + EthereumOutboundQueue: snowbridge_outbound_queue::{Pallet, Call, Storage, Event} = 81, + EthereumBeaconClient: snowbridge_ethereum_beacon_client::{Pallet, Call, Storage, Event} = 82, + EthereumSystem: snowbridge_system::{Pallet, Call, Storage, Config, Event} = 83, // Message Queue. Importantly, is registered last so that messages are processed after // the `on_initialize` hooks of bridging pallets. @@ -570,14 +710,23 @@ construct_runtime!( } ); +/// Proper alias for bridge GRANDPA pallet used to bridge with the bulletin chain. +pub type BridgeRococoBulletinGrandpa = BridgePolkadotBulletinGrandpa; +/// Proper alias for bridge messages pallet used to bridge with the bulletin chain. +pub type BridgeRococoBulletinMessages = BridgePolkadotBulletinMessages; +/// Proper alias for bridge messages pallet used to bridge with the bulletin chain. +pub type XcmOverRococoBulletin = XcmOverPolkadotBulletin; + bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { RuntimeCall, AccountId, // Grandpa - BridgeRococoGrandpa, BridgeWococoGrandpa, BridgeWestendGrandpa, + BridgeWestendGrandpa, + BridgeRococoBulletinGrandpa, // Parachains - BridgeRococoParachains, BridgeWococoParachains, BridgeWestendParachains, + BridgeWestendParachains, // Messages - BridgeRococoMessages, BridgeWococoMessages, BridgeWestendMessages + BridgeWestendMessages, + BridgeRococoBulletinMessages } #[cfg(feature = "runtime-benchmarks")] @@ -593,23 +742,22 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. 
[pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] // Bridge pallets - [pallet_bridge_grandpa, WococoFinality] [pallet_bridge_grandpa, WestendFinality] - [pallet_bridge_grandpa, RococoFinality] - [pallet_bridge_parachains, WithinWococo] [pallet_bridge_parachains, WithinWestend] - [pallet_bridge_parachains, WithinRococo] - [pallet_bridge_messages, RococoToWococo] [pallet_bridge_messages, RococoToWestend] - [pallet_bridge_messages, WococoToRococo] + [pallet_bridge_messages, RococoToRococoBulletin] [pallet_bridge_relayers, BridgeRelayersBench::] + // Ethereum Bridge + [snowbridge_inbound_queue, EthereumInboundQueue] + [snowbridge_outbound_queue, EthereumOutboundQueue] + [snowbridge_system, EthereumSystem] + [snowbridge_ethereum_beacon_client, EthereumBeaconClient] ); } @@ -757,26 +905,6 @@ impl_runtime_apis! { } } - impl bp_rococo::RococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeRococoGrandpa::best_finalized() - } - fn synced_headers_grandpa_info( - ) -> Vec> { - BridgeRococoGrandpa::synced_headers_grandpa_info() - } - } - - impl bp_wococo::WococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeWococoGrandpa::best_finalized() - } - fn synced_headers_grandpa_info( - ) -> Vec> { - BridgeWococoGrandpa::synced_headers_grandpa_info() - } - } - impl bp_westend::WestendFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeWestendGrandpa::best_finalized() @@ -787,22 +915,6 @@ impl_runtime_apis! { } } - impl bp_bridge_hub_rococo::BridgeHubRococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeRococoParachains::best_parachain_head_id::< - bp_bridge_hub_rococo::BridgeHubRococo - >().unwrap_or(None) - } - } - - impl bp_bridge_hub_wococo::BridgeHubWococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeWococoParachains::best_parachain_head_id::< - bp_bridge_hub_wococo::BridgeHubWococo - >().unwrap_or(None) - } - } - impl bp_bridge_hub_westend::BridgeHubWestendFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeWestendParachains::best_parachain_head_id::< @@ -812,20 +924,20 @@ impl_runtime_apis! { } // This is exposed by BridgeHubRococo - impl bp_bridge_hub_wococo::FromBridgeHubWococoInboundLaneApi for Runtime { + impl bp_bridge_hub_westend::FromBridgeHubWestendInboundLaneApi for Runtime { fn message_details( lane: bp_messages::LaneId, messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, ) -> Vec { bridge_runtime_common::messages_api::inbound_message_details::< Runtime, - bridge_to_wococo_config::WithBridgeHubWococoMessagesInstance, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >(lane, messages) } } // This is exposed by BridgeHubRococo - impl bp_bridge_hub_wococo::ToBridgeHubWococoOutboundLaneApi for Runtime { + impl bp_bridge_hub_westend::ToBridgeHubWestendOutboundLaneApi for Runtime { fn message_details( lane: bp_messages::LaneId, begin: bp_messages::MessageNonce, @@ -833,26 +945,35 @@ impl_runtime_apis! 
{ ) -> Vec { bridge_runtime_common::messages_api::outbound_message_details::< Runtime, - bridge_to_wococo_config::WithBridgeHubWococoMessagesInstance, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >(lane, begin, end) } } - // This is exposed by BridgeHubRococo - impl bp_bridge_hub_westend::FromBridgeHubWestendInboundLaneApi for Runtime { + impl bp_polkadot_bulletin::PolkadotBulletinFinalityApi for Runtime { + fn best_finalized() -> Option> { + BridgePolkadotBulletinGrandpa::best_finalized() + } + + fn synced_headers_grandpa_info( + ) -> Vec> { + BridgePolkadotBulletinGrandpa::synced_headers_grandpa_info() + } + } + + impl bp_polkadot_bulletin::FromPolkadotBulletinInboundLaneApi for Runtime { fn message_details( lane: bp_messages::LaneId, messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, ) -> Vec { bridge_runtime_common::messages_api::inbound_message_details::< Runtime, - bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, + bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, >(lane, messages) } } - // This is exposed by BridgeHubRococo - impl bp_bridge_hub_westend::ToBridgeHubWestendOutboundLaneApi for Runtime { + impl bp_polkadot_bulletin::ToPolkadotBulletinOutboundLaneApi for Runtime { fn message_details( lane: bp_messages::LaneId, begin: bp_messages::MessageNonce, @@ -860,47 +981,24 @@ impl_runtime_apis! { ) -> Vec { bridge_runtime_common::messages_api::outbound_message_details::< Runtime, - bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, + bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, >(lane, begin, end) } } - // This is exposed by BridgeHubWococo - impl bp_bridge_hub_rococo::FromBridgeHubRococoInboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, - ) -> Vec { - // use different instance according to flavor - match xcm_config::Flavor::get() { - RuntimeFlavor::Wococo => { - bridge_runtime_common::messages_api::inbound_message_details::< - Runtime, - bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, - >(lane, messages) - }, - flavor @ _ => unimplemented!("Unsupported `FromBridgeHubRococoInboundLaneApi` for flavor: {:?}", flavor) - } + impl snowbridge_outbound_queue_runtime_api::OutboundQueueApi for Runtime { + fn prove_message(leaf_index: u64) -> Option { + snowbridge_outbound_queue::api::prove_message::(leaf_index) + } + + fn calculate_fee(message: Message) -> Option { + snowbridge_outbound_queue::api::calculate_fee::(message) } } - // This is exposed by BridgeHubWococo and BridgeHubWestend - impl bp_bridge_hub_rococo::ToBridgeHubRococoOutboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec { - // use different instance according to flavor - match xcm_config::Flavor::get() { - RuntimeFlavor::Wococo => { - bridge_runtime_common::messages_api::outbound_message_details::< - Runtime, - bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, - >(lane, begin, end) - }, - flavor @ _ => unimplemented!("Unsupported `ToBridgeHubRococoOutboundLaneApi` for flavor: {:?}", flavor) - } + impl snowbridge_system_runtime_api::ControlApi for Runtime { + fn agent_id(location: VersionedMultiLocation) -> Option { + snowbridge_system::api::agent_id::(location) } } @@ -933,6 +1031,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -942,15 +1041,10 @@ impl_runtime_apis! { use pallet_bridge_relayers::benchmarking::Pallet as BridgeRelayersBench; // Change weight file names. - type WococoFinality = BridgeWococoGrandpa; type WestendFinality = BridgeWestendGrandpa; - type RococoFinality = BridgeRococoGrandpa; - type WithinWococo = pallet_bridge_parachains::benchmarking::Pallet::; type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; - type WithinRococo = pallet_bridge_parachains::benchmarking::Pallet::; - type RococoToWococo = pallet_bridge_messages::benchmarking::Pallet ::; type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; - type WococoToRococo = pallet_bridge_messages::benchmarking::Pallet ::; + type RococoToRococoBulletin = pallet_bridge_messages::benchmarking::Pallet ::; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -980,6 +1074,41 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + // BH only supports teleports to system parachain. + // Relay/native token can be teleported between BH and Relay. + let native_location = Parent.into(); + let dest = Parent.into(); + pallet_xcm::benchmarking::helpers::native_teleport_as_asset_transfer::( + native_location, + dest + ) + } + } + use xcm::latest::prelude::*; use xcm_config::TokenLocation; @@ -994,7 +1123,7 @@ impl_runtime_apis! { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, + xcm_config::XcmConfig, ExistentialDepositMultiAsset, xcm_config::PriceForParentDelivery, >; @@ -1074,7 +1203,28 @@ impl_runtime_apis! 
{ fn export_message_origin_and_destination( ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Ok((TokenLocation::get(), NetworkId::Wococo, X1(Parachain(100)))) + // save XCM version for remote bridge hub + let _ = PolkadotXcm::force_xcm_version( + RuntimeOrigin::root(), + Box::new(bridge_to_westend_config::BridgeHubWestendLocation::get()), + XCM_VERSION, + ).map_err(|e| { + log::error!( + "Failed to dispatch `force_xcm_version({:?}, {:?}, {:?})`, error: {:?}", + RuntimeOrigin::root(), + bridge_to_westend_config::BridgeHubWestendLocation::get(), + XCM_VERSION, + e + ); + BenchmarkError::Stop("XcmVersion was not stored!") + })?; + Ok( + ( + bridge_to_westend_config::FromAssetHubRococoToAssetHubWestendRoute::get().location, + NetworkId::Westend, + X1(Parachain(bridge_to_westend_config::AssetHubWestendParaId::get().into())) + ) + ) } fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { @@ -1085,18 +1235,15 @@ impl_runtime_apis! { type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - type WococoFinality = BridgeWococoGrandpa; type WestendFinality = BridgeWestendGrandpa; - type RococoFinality = BridgeRococoGrandpa; - type WithinWococo = pallet_bridge_parachains::benchmarking::Pallet::; type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; - type WithinRococo = pallet_bridge_parachains::benchmarking::Pallet::; - type RococoToWococo = pallet_bridge_messages::benchmarking::Pallet ::; type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; - type WococoToRococo = pallet_bridge_messages::benchmarking::Pallet ::; + type RococoToRococoBulletin = pallet_bridge_messages::benchmarking::Pallet ::; use bridge_runtime_common::messages_benchmarking::{ + prepare_message_delivery_proof_from_grandpa_chain, prepare_message_delivery_proof_from_parachain, + prepare_message_proof_from_grandpa_chain, prepare_message_proof_from_parachain, generate_xcm_builder_bridge_message_sample, }; @@ -1106,49 +1253,6 @@ impl_runtime_apis! 
{ MessageProofParams, }; - impl BridgeMessagesConfig for Runtime { - fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { - let bench_lane_id = >::bench_lane_id(); - let bridged_chain_id = bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID; - pallet_bridge_relayers::Pallet::::relayer_reward( - relayer, - bp_relayers::RewardsAccountParams::new( - bench_lane_id, - bridged_chain_id, - bp_relayers::RewardsAccountOwner::BridgedChain - ) - ).is_some() - } - - fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_to_wococo_config::FromWococoBridgeHubMessagesProof, Weight) { - use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - prepare_message_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaWococoInstance, - bridge_to_wococo_config::WithBridgeHubWococoMessageBridge, - >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Rococo), Parachain(42)))) - } - - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_to_wococo_config::ToWococoBridgeHubMessagesDeliveryProof { - prepare_message_delivery_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaWococoInstance, - bridge_to_wococo_config::WithBridgeHubWococoMessageBridge, - >(params) - } - - fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { - use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() - } - } - impl BridgeMessagesConfig for Runtime { fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { let bench_lane_id = >::bench_lane_id(); @@ -1192,40 +1296,32 @@ impl_runtime_apis! 
{ } } - impl BridgeMessagesConfig for Runtime { - fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { - let bench_lane_id = >::bench_lane_id(); - let bridged_chain_id = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pallet_bridge_relayers::Pallet::::relayer_reward( - relayer, - bp_relayers::RewardsAccountParams::new( - bench_lane_id, - bridged_chain_id, - bp_relayers::RewardsAccountOwner::BridgedChain - ) - ).is_some() + impl BridgeMessagesConfig for Runtime { + fn is_relayer_rewarded(_relayer: &Self::AccountId) -> bool { + // we do not pay any rewards in this bridge + true } fn prepare_message_proof( params: MessageProofParams, - ) -> (bridge_to_rococo_config::FromRococoBridgeHubMessagesProof, Weight) { + ) -> (bridge_to_bulletin_config::FromRococoBulletinMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - prepare_message_proof_from_parachain::< + prepare_message_proof_from_grandpa_chain::< Runtime, - bridge_common_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, - >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Wococo), Parachain(42)))) + bridge_common_config::BridgeGrandpaRococoBulletinInstance, + bridge_to_bulletin_config::WithRococoBulletinMessageBridge, + >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Rococo), Parachain(42)))) } fn prepare_message_delivery_proof( params: MessageDeliveryProofParams, - ) -> bridge_to_rococo_config::ToRococoBridgeHubMessagesDeliveryProof { - prepare_message_delivery_proof_from_parachain::< + ) -> bridge_to_bulletin_config::ToRococoBulletinMessagesDeliveryProof { + prepare_message_delivery_proof_from_grandpa_chain::< Runtime, - bridge_common_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, + bridge_common_config::BridgeGrandpaRococoBulletinInstance, + bridge_to_bulletin_config::WithRococoBulletinMessageBridge, >(params) } @@ -1242,30 +1338,6 @@ impl_runtime_apis! { Config as BridgeRelayersConfig, }; - impl BridgeParachainsConfig for Runtime { - fn parachains() -> Vec { - use bp_runtime::Parachain; - vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_wococo::BridgeHubWococo::PARACHAIN_ID)] - } - - fn prepare_parachain_heads_proof( - parachains: &[bp_polkadot_core::parachains::ParaId], - parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, - ) -> ( - pallet_bridge_parachains::RelayBlockNumber, - pallet_bridge_parachains::RelayBlockHash, - bp_polkadot_core::parachains::ParaHeadsProof, - Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, - ) { - prepare_parachain_heads_proof::( - parachains, - parachain_head_size, - proof_size, - ) - } - } - impl BridgeParachainsConfig for Runtime { fn parachains() -> Vec { use bp_runtime::Parachain; @@ -1290,30 +1362,6 @@ impl_runtime_apis! 
{ } } - impl BridgeParachainsConfig for Runtime { - fn parachains() -> Vec { - use bp_runtime::Parachain; - vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_rococo::BridgeHubRococo::PARACHAIN_ID)] - } - - fn prepare_parachain_heads_proof( - parachains: &[bp_polkadot_core::parachains::ParaId], - parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, - ) -> ( - pallet_bridge_parachains::RelayBlockNumber, - pallet_bridge_parachains::RelayBlockHash, - bp_polkadot_core::parachains::ParaHeadsProof, - Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, - ) { - prepare_parachain_heads_proof::( - parachains, - parachain_head_size, - proof_size, - ) - } - } - impl BridgeRelayersConfig for Runtime { fn prepare_rewards_account( account_params: bp_relayers::RewardsAccountParams, @@ -1395,10 +1443,9 @@ mod tests { pallet_transaction_payment::ChargeTransactionPayment::from(10), BridgeRejectObsoleteHeadersAndMessages, ( - bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages::default(), bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages::default(), - ), + bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), + ) ); // for BridgeHubRococo @@ -1418,24 +1465,6 @@ mod tests { bhr_indirect_payload.additional_signed().unwrap().encode() ) } - - // for BridgeHubWococo - { - let bhw_indirect_payload = bp_bridge_hub_wococo::SignedExtension::from_params( - VERSION.spec_version, - VERSION.transaction_version, - bp_runtime::TransactionEra::Immortal, - System::block_hash(BlockNumber::zero()), - 10, - 10, - (((), ()), ((), ())), - ); - assert_eq!(payload.encode(), bhw_indirect_payload.encode()); - assert_eq!( - payload.additional_signed().unwrap().encode(), - bhw_indirect_payload.additional_signed().unwrap().encode() - ) - } }); } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. 
- Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system.rs index b0f7806be8ee7b3509895652a94f10a272913d09..df440a68a36deefbb8928b3a0e2a9b0aa498b66b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system.rs @@ -151,4 +151,31 @@ impl frame_system::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index 66f8f1edf3c15796111c3d015a1a212b74da018c..b134bb41ed134565fa4669c9b5d2a5414efcc756 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -17,22 +17,19 @@ //! Expose the auto generated weight files. 
+use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; +use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; + pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_balances; -pub mod pallet_bridge_grandpa_rococo_finality; -pub mod pallet_bridge_grandpa_westend_finality; -pub mod pallet_bridge_grandpa_wococo_finality; +pub mod pallet_bridge_grandpa; +pub mod pallet_bridge_messages_rococo_to_rococo_bulletin; pub mod pallet_bridge_messages_rococo_to_westend; -pub mod pallet_bridge_messages_rococo_to_wococo; -pub mod pallet_bridge_messages_wococo_to_rococo; -pub mod pallet_bridge_parachains_within_rococo; -pub mod pallet_bridge_parachains_within_westend; -pub mod pallet_bridge_parachains_within_wococo; +pub mod pallet_bridge_parachains; pub mod pallet_bridge_relayers; pub mod pallet_collator_selection; pub mod pallet_message_queue; @@ -43,11 +40,14 @@ pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; pub mod rocksdb_weights; +pub mod snowbridge_ethereum_beacon_client; +pub mod snowbridge_inbound_queue; +pub mod snowbridge_outbound_queue; +pub mod snowbridge_system; pub mod xcm; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; pub use rocksdb_weights::constants::RocksDbWeight; use crate::Runtime; @@ -56,28 +56,11 @@ use frame_support::weights::Weight; // import trait from dependency module use ::pallet_bridge_relayers::WeightInfoExt as _; -impl pallet_bridge_messages::WeightInfoExt - for pallet_bridge_messages_wococo_to_rococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime( - ) - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_delivery_proof_overhead_from_runtime() - } -} - -impl pallet_bridge_messages::WeightInfoExt - for pallet_bridge_messages_rococo_to_wococo::WeightInfo +impl MessagesWeightInfoExt + for pallet_bridge_messages_rococo_to_rococo_bulletin::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE + bp_polkadot_bulletin::EXTRA_STORAGE_PROOF_SIZE } fn receive_messages_proof_overhead_from_runtime() -> Weight { @@ -90,7 +73,7 @@ impl pallet_bridge_messages::WeightInfoExt } } -impl pallet_bridge_messages::WeightInfoExt +impl MessagesWeightInfoExt for pallet_bridge_messages_rococo_to_westend::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { @@ -107,26 +90,8 @@ impl pallet_bridge_messages::WeightInfoExt } } -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_rococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE - } -} - -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_westend::WeightInfo -{ +impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } } - -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_wococo::WeightInfo -{ - fn 
expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 8ef05f17856f1c470e174490cd01fdfad197d43c..8c2435599f59780be56dcaa5060addee4c5c1d15 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_grandpa -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_grandpa +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,33 +48,31 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_grandpa`. pub struct WeightInfo(PhantomData); impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: BridgeRococoGrandpa PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa BestFinalized (r:1 w:1) - /// Proof: BridgeRococoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: 531, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa CurrentAuthoritySet (r:1 w:0) - /// Proof: BridgeRococoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(50250), added: 50745, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHashesPointer (r:1 w:1) - /// Proof: BridgeRococoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHashes (r:1 w:1) - /// Proof: BridgeRococoGrandpa ImportedHashes (max_values: Some(1024), max_size: Some(36), added: 1521, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHeaders (r:0 w:2) - /// Proof: BridgeRococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. 
+ /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 838]`. /// The range of component `v` is `[50, 100]`. fn submit_finality_proof(p: u32, v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `231 + p * (60 ±0)` + // Measured: `335 + p * (60 ±0)` // Estimated: `51735` - // Minimum execution time: 241_332_000 picoseconds. - Weight::from_parts(69_790_821, 0) + // Minimum execution time: 310_124_000 picoseconds. + Weight::from_parts(18_294_977, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 6_013 - .saturating_add(Weight::from_parts(47_580_554, 0).saturating_mul(p.into())) - // Standard Error: 100_298 - .saturating_add(Weight::from_parts(1_213_475, 0).saturating_mul(v.into())) + // Standard Error: 5_665 + .saturating_add(Weight::from_parts(55_380_719, 0).saturating_mul(p.into())) + // Standard Error: 94_494 + .saturating_add(Weight::from_parts(2_765_959, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs deleted file mode 100644 index 0bb798bd9ecc836c42ef61f2c20d530c7b9fd2bf..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `268 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 304_726_000 picoseconds. 
- Weight::from_parts(16_868_060, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_802 - .saturating_add(Weight::from_parts(55_200_017, 0).saturating_mul(p.into())) - // Standard Error: 46_745 - .saturating_add(Weight::from_parts(2_689_151, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs deleted file mode 100644 index 4ed140b7d17827b2dbc9acf1d799bc6d561fca3e..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `335 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 305_905_000 picoseconds. - Weight::from_parts(2_636_863, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_724 - .saturating_add(Weight::from_parts(55_199_477, 0).saturating_mul(p.into())) - // Standard Error: 45_444 - .saturating_add(Weight::from_parts(2_835_596, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs deleted file mode 100644 index a82854e0c67950823018738c17fff3191bc4f178..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `302 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 305_146_000 picoseconds. 
- Weight::from_parts(308_711_000, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_651 - .saturating_add(Weight::from_parts(55_082_480, 0).saturating_mul(p.into())) - // Standard Error: 20_462 - .saturating_add(Weight::from_parts(298_367, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs deleted file mode 100644 index 319a4de8e969bd7dab88ccba82e4eebd643baf53..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./artifacts/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_messages -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `52645` - // Minimum execution time: 43_187_000 picoseconds. - Weight::from_parts(43_681_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `52645` - // Minimum execution time: 54_131_000 picoseconds. - Weight::from_parts(54_813_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `52645` - // Minimum execution time: 48_120_000 picoseconds. 
- Weight::from_parts(48_733_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `52645` - // Minimum execution time: 41_028_000 picoseconds. - Weight::from_parts(41_635_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `52645` - // Minimum execution time: 68_499_000 picoseconds. - Weight::from_parts(69_263_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 32_277_000 picoseconds. 
- Weight::from_parts(32_880_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 32_504_000 picoseconds. - Weight::from_parts(33_085_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `6086` - // Minimum execution time: 34_963_000 picoseconds. 
- Weight::from_parts(35_473_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem RelevantMessagingState (r:1 w:0) - /// Proof Skipped: ParachainSystem RelevantMessagingState (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmpQueue OutboundXcmpStatus (r:1 w:1) - /// Proof Skipped: XcmpQueue OutboundXcmpStatus (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmpQueue OutboundXcmpMessages (r:0 w:1) - /// Proof Skipped: XcmpQueue OutboundXcmpMessages (max_values: None, max_size: None, mode: Measured) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `635` - // Estimated: `52645` - // Minimum execution time: 129_978_000 picoseconds. - Weight::from_parts(98_246_356, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 2_554 - .saturating_add(Weight::from_parts(544_728, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_rococo_bulletin.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_rococo_bulletin.rs new file mode 100644 index 0000000000000000000000000000000000000000..d3255ab3875dd32e36958757685299800daf67f2 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_rococo_bulletin.rs @@ -0,0 +1,221 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_bridge_messages` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_messages +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_bridge_messages`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<T> { + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn receive_single_message_proof() -> Weight { + // Proof Size summary in bytes: + // Measured: `621` + // Estimated: `52645` + // Minimum execution time: 36_661_000 picoseconds.
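A minimal, standalone Rust sketch (not part of the generated file above) of the pattern every function in these weight modules follows: `Weight` carries two independent, saturating components, a ref_time part in picoseconds and a proof_size (PoV) part in bytes. The function name and the omission of `T::DbWeight` below are illustrative only.

use frame_support::weights::Weight;

// Sketch: compose the two weight components the same way the generated code does.
fn example_weight() -> Weight {
	// Measured execution time (ref_time, picoseconds), no proof bytes yet.
	let execution = Weight::from_parts(36_661_000, 0);
	// Worst-case PoV contribution (proof_size, bytes), no execution time.
	let pov = Weight::from_parts(0, 52_645);
	// The generated functions additionally add `T::DbWeight::get().reads(..)` and
	// `.writes(..)`; this standalone sketch omits them because `T` is only known
	// inside a concrete runtime.
	execution.saturating_add(pov)
}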
+ Weight::from_parts(38_106_000, 0) + .saturating_add(Weight::from_parts(0, 52645)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn receive_two_messages_proof() -> Weight { + // Proof Size summary in bytes: + // Measured: `621` + // Estimated: `52645` + // Minimum execution time: 47_599_000 picoseconds. + Weight::from_parts(49_731_000, 0) + .saturating_add(Weight::from_parts(0, 52645)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn receive_single_message_proof_with_outbound_lane_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `621` + // Estimated: `52645` + // Minimum execution time: 42_211_000 picoseconds. + Weight::from_parts(43_454_000, 0) + .saturating_add(Weight::from_parts(0, 52645)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + fn receive_single_message_proof_1_kb() -> Weight { + // Proof Size summary in bytes: + // Measured: `589` + // Estimated: `52645` + // Minimum execution time: 36_072_000 picoseconds. 
+ Weight::from_parts(37_260_000, 0) + .saturating_add(Weight::from_parts(0, 52645)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + fn receive_single_message_proof_16_kb() -> Weight { + // Proof Size summary in bytes: + // Measured: `589` + // Estimated: `52645` + // Minimum execution time: 66_995_000 picoseconds. + Weight::from_parts(68_661_000, 0) + .saturating_add(Weight::from_parts(0, 52645)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + fn receive_delivery_proof_for_single_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `2543` + // Minimum execution time: 25_553_000 picoseconds. + Weight::from_parts(26_205_000, 0) + .saturating_add(Weight::from_parts(0, 2543)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `2543` + // Minimum execution time: 25_610_000 picoseconds. 
+ Weight::from_parts(26_273_000, 0) + .saturating_add(Weight::from_parts(0, 2543)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `2543` + // Minimum execution time: 25_651_000 picoseconds. + Weight::from_parts(26_172_000, 0) + .saturating_add(Weight::from_parts(0, 2543)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[128, 2048]`. + /// The range of component `i` is `[128, 2048]`. + fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `780` + // Estimated: `52645` + // Minimum execution time: 64_219_000 picoseconds. 
+ Weight::from_parts(65_848_290, 0) + .saturating_add(Weight::from_parts(0, 52645)) + // Standard Error: 43 + .saturating_add(Weight::from_parts(7_577, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs index 6513b63474a61eb30864efb5fd0bf2f18cd053bc..30ea9eed4a5b4f187ea76633400cff8c39991b46 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_bridge_messages` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,170 +48,170 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_messages`. pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<T> { - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `643` + // Measured: `605` // Estimated: `52645` - // Minimum execution time: 41_873_000 picoseconds.
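The `receive_single_message_proof_with_dispatch` weight just above is linear in the benchmarked component `i` (the `[128, 2048]` range in its doc comments): a measured base ref_time plus a per-unit slope, on top of the fixed proof-size bound and the runtime's database read/write costs. A rough, standalone sketch of that formula using the Rococo Bulletin numbers from the diff; the function name and the database costs passed in are illustrative placeholders for whatever `T::DbWeight` resolves to in the runtime.

// Illustrative only: recomputes the shape of the generated formula with plain integers.
fn bulletin_dispatch_ref_time_ps(i: u64, db_read_ps: u64, db_write_ps: u64) -> u64 {
	let base = 65_848_290; // constant part measured by the benchmark
	let per_unit = 7_577; // slope attached to component `i`
	base + per_unit * i + 10 * db_read_ps + 4 * db_write_ps
}

fn main() {
	// Worst case of the benchmarked range, with example RocksDB-style costs
	// (25 µs per read, 100 µs per write, expressed in picoseconds).
	let worst = bulletin_dispatch_ref_time_ps(2048, 25_000_000, 100_000_000);
	println!("worst-case ref_time ~ {worst} ps; proof_size stays capped at 52_645 bytes");
}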
- Weight::from_parts(43_434_000, 0) + // Minimum execution time: 40_349_000 picoseconds. + Weight::from_parts(41_856_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `643` + // Measured: `605` // Estimated: `52645` - // Minimum execution time: 53_328_000 picoseconds. - Weight::from_parts(54_592_000, 0) + // Minimum execution time: 50_514_000 picoseconds. 
+ Weight::from_parts(52_254_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `643` + // Measured: `605` // Estimated: `52645` - // Minimum execution time: 47_486_000 picoseconds. - Weight::from_parts(48_721_000, 0) + // Minimum execution time: 45_761_000 picoseconds. 
+ Weight::from_parts(47_075_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `611` + // Measured: `573` // Estimated: `52645` - // Minimum execution time: 41_093_000 picoseconds. - Weight::from_parts(42_050_000, 0) + // Minimum execution time: 39_098_000 picoseconds. 
+ Weight::from_parts(40_577_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `611` + // Measured: `573` // Estimated: `52645` - // Minimum execution time: 71_947_000 picoseconds. - Weight::from_parts(74_564_000, 0) + // Minimum execution time: 69_120_000 picoseconds. 
+ Weight::from_parts(71_810_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `3947` - // Minimum execution time: 31_235_000 picoseconds. - Weight::from_parts(32_051_000, 0) - .saturating_add(Weight::from_parts(0, 3947)) + // Measured: `447` + // Estimated: `3912` + // Minimum execution time: 32_325_000 picoseconds. 
+ Weight::from_parts(33_070_000, 0) + .saturating_add(Weight::from_parts(0, 3912)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `3947` - // Minimum execution time: 31_320_000 picoseconds. - Weight::from_parts(31_973_000, 0) - .saturating_add(Weight::from_parts(0, 3947)) + // Measured: `447` + // Estimated: `3912` + // Minimum execution time: 32_180_000 picoseconds. 
+ Weight::from_parts(33_202_000, 0) + .saturating_add(Weight::from_parts(0, 3912)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `482` + // Measured: `447` // Estimated: `6086` - // Minimum execution time: 33_656_000 picoseconds. - Weight::from_parts(34_779_000, 0) + // Minimum execution time: 36_774_000 picoseconds. 
+ Weight::from_parts(37_774_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) @@ -228,17 +228,15 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[128, 2048]`. /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `772` + // Measured: `736` // Estimated: `52645` - // Minimum execution time: 61_671_000 picoseconds. - Weight::from_parts(62_656_321, 0) + // Minimum execution time: 65_934_000 picoseconds. + Weight::from_parts(67_915_916, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 25 - .saturating_add(Weight::from_parts(6_641, 0).saturating_mul(i.into())) + // Standard Error: 65 + .saturating_add(Weight::from_parts(7_190, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs deleted file mode 100644 index e2f58cdfad5eb7246dc4ae30877425fc5c35c7e9..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
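As a quick cross-check of the regenerated Rococo-to-Westend `receive_single_message_proof_with_dispatch` numbers above, the linear model should sit at or above the measured minimum at the low end of the component range. A standalone sketch; the three constants are copied from the hunk above, everything else is illustrative.

fn main() {
	// base ref_time, per-unit slope, and the measured minimum execution time (all picoseconds).
	let (base, slope, measured_min) = (67_915_916u64, 7_190u64, 65_934_000u64);
	let modelled_at_low_end = base + slope * 128; // model evaluated at i = 128
	assert!(modelled_at_low_end >= measured_min);
	println!("modelled ref_time at i = 128: {modelled_at_low_end} ps");
}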
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 42_407_000 picoseconds. 
- Weight::from_parts(43_917_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 53_258_000 picoseconds. - Weight::from_parts(55_144_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 47_950_000 picoseconds. 
- Weight::from_parts(49_315_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `577` - // Estimated: `52645` - // Minimum execution time: 41_383_000 picoseconds. - Weight::from_parts(42_898_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `577` - // Estimated: `52645` - // Minimum execution time: 72_118_000 picoseconds. 
- Weight::from_parts(74_643_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `3913` - // Minimum execution time: 30_993_000 picoseconds. - Weight::from_parts(31_793_000, 0) - .saturating_add(Weight::from_parts(0, 3913)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `3913` - // Minimum execution time: 30_894_000 picoseconds. 
- Weight::from_parts(31_925_000, 0) - .saturating_add(Weight::from_parts(0, 3913)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `6086` - // Minimum execution time: 33_804_000 picoseconds. - Weight::from_parts(34_560_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 
2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `811` - // Estimated: `52645` - // Minimum execution time: 62_616_000 picoseconds. - Weight::from_parts(64_073_891, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 43 - .saturating_add(Weight::from_parts(6_525, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs deleted file mode 100644 index d9c0fd15468ea1c8037db02cbf7290ab285bc232..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 42_086_000 picoseconds. - Weight::from_parts(42_833_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 51_927_000 picoseconds. 
- Weight::from_parts(53_847_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 47_218_000 picoseconds. - Weight::from_parts(48_380_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `52645` - // Minimum execution time: 40_585_000 picoseconds. 
- Weight::from_parts(41_714_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `52645` - // Minimum execution time: 71_197_000 picoseconds. - Weight::from_parts(73_983_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `3841` - // Minimum execution time: 30_823_000 picoseconds. 
- Weight::from_parts(31_501_000, 0) - .saturating_add(Weight::from_parts(0, 3841)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `3841` - // Minimum execution time: 30_854_000 picoseconds. - Weight::from_parts(31_663_000, 0) - .saturating_add(Weight::from_parts(0, 3841)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `6086` - // Minimum execution time: 33_463_000 picoseconds. 
- Weight::from_parts(34_290_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `739` - // Estimated: `52645` - // Minimum execution time: 61_523_000 picoseconds. - Weight::from_parts(62_686_055, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 26 - .saturating_add(Weight::from_parts(6_563, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs index bd7384a05fe70b0fe6df9529380201580b96a8ad..ea68852804e3955577bf822d42887bf5bd772657 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.

  //! Autogenerated weights for `pallet_bridge_parachains`
  //!
  //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
- //! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+ //! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
  //! WORST CASE MAP SIZE: `1000000`
- //! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
- //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024
+ //! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+ //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024

  // Executed Command:
- // ./artifacts/polkadot-parachain
+ // target/production/polkadot-parachain
  // benchmark
  // pallet
- // --chain=bridge-hub-rococo-dev
- // --execution=wasm
- // --wasm-execution=compiled
- // --pallet=pallet_bridge_parachains
- // --extrinsic=*
  // --steps=50
  // --repeat=20
- // --json
- // --header=./file_header.txt
- // --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs
+ // --extrinsic=*
+ // --wasm-execution=compiled
+ // --heap-pages=4096
+ // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+ // --pallet=pallet_bridge_parachains
+ // --chain=bridge-hub-rococo-dev
+ // --header=./cumulus/file_header.txt
+ // --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/

  #![cfg_attr(rustfmt, rustfmt_skip)]
  #![allow(unused_parens)]
@@ -47,64 +48,65 @@ use core::marker::PhantomData;

  /// Weight functions for `pallet_bridge_parachains`.
  pub struct WeightInfo<T>(PhantomData<T>);
  impl<T: frame_system::Config> pallet_bridge_parachains::WeightInfo for WeightInfo<T> {
- /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0)
- /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
- /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0)
- /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1)
- /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1)
- /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1)
- /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen)
- /// The range of component `p` is `[1, 2]`.
+ /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0)
+ /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0)
+ /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1)
+ /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1)
+ /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1)
+ /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`)
  /// The range of component `p` is `[1, 2]`.
- fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight {
+ fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight {
  // Proof Size summary in bytes:
- // Measured: `367`
+ // Measured: `434`
  // Estimated: `2543`
- // Minimum execution time: 34_759_000 picoseconds.
- Weight::from_parts(35_709_034, 0)
+ // Minimum execution time: 31_135_000 picoseconds.
+ Weight::from_parts(32_061_351, 0)
  .saturating_add(Weight::from_parts(0, 2543))
+ // Standard Error: 80_309
+ .saturating_add(Weight::from_parts(99_724, 0).saturating_mul(p.into()))
  .saturating_add(T::DbWeight::get().reads(4))
  .saturating_add(T::DbWeight::get().writes(3))
  }
- /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0)
- /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
- /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0)
- /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1)
- /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1)
- /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1)
- /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen)
+ /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0)
+ /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0)
+ /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1)
+ /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1)
+ /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1)
+ /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`)
  fn submit_parachain_heads_with_1kb_proof() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `367`
+ // Measured: `434`
  // Estimated: `2543`
- // Minimum execution time: 36_005_000 picoseconds.
- Weight::from_parts(36_492_000, 0)
+ // Minimum execution time: 32_263_000 picoseconds.
+ Weight::from_parts(33_139_000, 0)
  .saturating_add(Weight::from_parts(0, 2543))
  .saturating_add(T::DbWeight::get().reads(4))
  .saturating_add(T::DbWeight::get().writes(3))
  }
- /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0)
- /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
- /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0)
- /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1)
- /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1)
- /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen)
- /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1)
- /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen)
+ /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0)
+ /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0)
+ /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1)
+ /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1)
+ /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1)
+ /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`)
  fn submit_parachain_heads_with_16kb_proof() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `367`
+ // Measured: `434`
  // Estimated: `2543`
- // Minimum execution time: 62_374_000 picoseconds.
- Weight::from_parts(62_977_000, 0)
+ // Minimum execution time: 61_313_000 picoseconds.
+ Weight::from_parts(62_200_000, 0)
  .saturating_add(Weight::from_parts(0, 2543))
  .saturating_add(T::DbWeight::get().reads(4))
  .saturating_add(T::DbWeight::get().writes(3))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs
deleted file mode 100644
index e36bbcca42ec49ced99b98b9584d7f4662e004dc..0000000000000000000000000000000000000000
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 31_241_000 picoseconds. 
- Weight::from_parts(32_488_584, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 32_962_000 picoseconds. - Weight::from_parts(33_658_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 62_685_000 picoseconds. - Weight::from_parts(64_589_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs deleted file mode 100644 index bfe93b4c36a9cbc82caf611a0fd56225f97436ac..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 31_493_000 picoseconds. 
- Weight::from_parts(32_511_270, 0) - .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 33_650 - .saturating_add(Weight::from_parts(20_764, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 32_976_000 picoseconds. - Weight::from_parts(33_647_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 62_898_000 picoseconds. 
- Weight::from_parts(64_463_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs deleted file mode 100644 index d685daf930f8c8b077673f66595c2e6cf9feeaf0..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 31_573_000 picoseconds. - Weight::from_parts(32_739_400, 0) - .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 49_518 - .saturating_add(Weight::from_parts(5_166, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 32_780_000 picoseconds. 
- Weight::from_parts(33_797_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 62_847_000 picoseconds. - Weight::from_parts(63_991_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs index 48f0c1f949b3243292c2b893eafe7c5c8d38c80d..5ab4cb900d848f37f1a5777b686d294837688495 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_relayers` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+ //! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
  //! WORST CASE MAP SIZE: `1000000`
- //! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+ //! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
  //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024

  // Executed Command:
@@ -53,10 +54,10 @@ impl<T: frame_system::Config> pallet_bridge_relayers::WeightInfo for WeightInfo<
  /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
  fn claim_rewards() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `207`
+ // Measured: `244`
  // Estimated: `3593`
- // Minimum execution time: 45_338_000 picoseconds.
- Weight::from_parts(45_836_000, 0)
+ // Minimum execution time: 45_393_000 picoseconds.
+ Weight::from_parts(46_210_000, 0)
  .saturating_add(Weight::from_parts(0, 3593))
  .saturating_add(T::DbWeight::get().reads(2))
  .saturating_add(T::DbWeight::get().writes(2))
@@ -69,10 +70,10 @@ impl<T: frame_system::Config> pallet_bridge_relayers::WeightInfo for WeightInfo<
  /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`)
  fn register() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `61`
+ // Measured: `97`
  // Estimated: `4714`
- // Minimum execution time: 23_561_000 picoseconds.
- Weight::from_parts(24_012_000, 0)
+ // Minimum execution time: 23_767_000 picoseconds.
+ Weight::from_parts(24_217_000, 0)
  .saturating_add(Weight::from_parts(0, 4714))
  .saturating_add(T::DbWeight::get().reads(3))
  .saturating_add(T::DbWeight::get().writes(2))
@@ -83,10 +84,10 @@ impl<T: frame_system::Config> pallet_bridge_relayers::WeightInfo for WeightInfo<
  /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`)
  fn deregister() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `160`
+ // Measured: `197`
  // Estimated: `4714`
- // Minimum execution time: 25_133_000 picoseconds.
- Weight::from_parts(25_728_000, 0)
+ // Minimum execution time: 25_745_000 picoseconds.
+ Weight::from_parts(26_319_000, 0)
  .saturating_add(Weight::from_parts(0, 4714))
  .saturating_add(T::DbWeight::get().reads(2))
  .saturating_add(T::DbWeight::get().writes(2))
@@ -99,10 +100,10 @@ impl<T: frame_system::Config> pallet_bridge_relayers::WeightInfo for WeightInfo<
  /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
  fn slash_and_deregister() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `263`
+ // Measured: `300`
  // Estimated: `4714`
- // Minimum execution time: 27_356_000 picoseconds.
- Weight::from_parts(27_828_000, 0)
+ // Minimum execution time: 27_497_000 picoseconds.
+ Weight::from_parts(27_939_000, 0)
  .saturating_add(Weight::from_parts(0, 4714))
  .saturating_add(T::DbWeight::get().reads(3))
  .saturating_add(T::DbWeight::get().writes(3))
@@ -111,10 +112,10 @@ impl<T: frame_system::Config> pallet_bridge_relayers::WeightInfo for WeightInfo<
  /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
  fn register_relayer_reward() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `6`
+ // Measured: `42`
  // Estimated: `3538`
- // Minimum execution time: 2_955_000 picoseconds.
- Weight::from_parts(3_084_000, 0)
+ // Minimum execution time: 5_584_000 picoseconds.
+ Weight::from_parts(5_908_000, 0)
  .saturating_add(Weight::from_parts(0, 3538))
  .saturating_add(T::DbWeight::get().reads(1))
  .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs
index 1fb0b765c066a17c7c73a8eff0b3f5590504c5f2..f7e233189abb4443320c7bee6f28ee4856302452 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs
@@ -123,7 +123,7 @@ impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightIn
  }
  /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1)
  /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
- fn set_candidacy_bond() -> Weight {
+ fn set_candidacy_bond(_c: u32, _k: u32) -> Weight {
  // Proof Size summary in bytes:
  // Measured: `0`
  // Estimated: `0`
@@ -177,6 +177,30 @@ impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightIn
  .saturating_add(T::DbWeight::get().reads(2))
  .saturating_add(T::DbWeight::get().writes(2))
  }
+ fn update_bond(c: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `306 + c * (50 ±0)`
+ // Estimated: `6287`
+ // Minimum execution time: 34_814_000 picoseconds.
+ Weight::from_parts(36_371_520, 0)
+ .saturating_add(Weight::from_parts(0, 6287))
+ // Standard Error: 2_391
+ .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into()))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ fn take_candidate_slot(c: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `306 + c * (50 ±0)`
+ // Estimated: `6287`
+ // Minimum execution time: 34_814_000 picoseconds.
+ Weight::from_parts(36_371_520, 0)
+ .saturating_add(Weight::from_parts(0, 6287))
+ // Standard Error: 2_391
+ .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into()))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
  /// Storage: `System::Account` (r:2 w:2)
  /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
  /// Storage: `System::BlockWeight` (r:1 w:1)
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs
index 5aa4999c624cf1b516ef0930d869dac460680396..5faded42aa82df52f403b68de2a470ad4a5a17b7 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
  //! Autogenerated weights for `pallet_xcm`
  //!
  //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
- //! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+ //! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
  //! WORST CASE MAP SIZE: `1000000`
- //! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+ //! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
  //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024

  // Executed Command:
@@ -62,26 +62,39 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
  fn send() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `75`
- // Estimated: `3540`
- // Minimum execution time: 24_179_000 picoseconds.
- Weight::from_parts(24_684_000, 0)
- .saturating_add(Weight::from_parts(0, 3540))
+ // Measured: `38`
+ // Estimated: `3503`
+ // Minimum execution time: 23_683_000 picoseconds.
+ Weight::from_parts(24_199_000, 0)
+ .saturating_add(Weight::from_parts(0, 3503))
  .saturating_add(T::DbWeight::get().reads(6))
  .saturating_add(T::DbWeight::get().writes(2))
  }
- /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0)
- /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0)
  /// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
  /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
  fn teleport_assets() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `38`
- // Estimated: `3503`
- // Minimum execution time: 21_093_000 picoseconds.
- Weight::from_parts(21_523_000, 0)
- .saturating_add(Weight::from_parts(0, 3503))
- .saturating_add(T::DbWeight::get().reads(2))
+ // Measured: `70`
+ // Estimated: `3593`
+ // Minimum execution time: 89_524_000 picoseconds.
+ Weight::from_parts(91_401_000, 0)
+ .saturating_add(Weight::from_parts(0, 3593))
+ .saturating_add(T::DbWeight::get().reads(8))
+ .saturating_add(T::DbWeight::get().writes(3))
  }
  /// Storage: `Benchmark::Override` (r:0 w:0)
  /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -93,6 +106,32 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  Weight::from_parts(18_446_744_073_709_551_000, 0)
  .saturating_add(Weight::from_parts(0, 0))
  }
+ /// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+ /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn transfer_assets() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `70`
+ // Estimated: `3593`
+ // Minimum execution time: 91_890_000 picoseconds.
+ Weight::from_parts(93_460_000, 0)
+ .saturating_add(Weight::from_parts(0, 3593))
+ .saturating_add(T::DbWeight::get().reads(8))
+ .saturating_add(T::DbWeight::get().writes(3))
+ }
  /// Storage: `Benchmark::Override` (r:0 w:0)
  /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn execute() -> Weight {
@@ -109,8 +148,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  // Proof Size summary in bytes:
  // Measured: `0`
  // Estimated: `0`
- // Minimum execution time: 6_938_000 picoseconds.
- Weight::from_parts(7_243_000, 0)
+ // Minimum execution time: 7_152_000 picoseconds.
+ Weight::from_parts(7_355_000, 0)
  .saturating_add(Weight::from_parts(0, 0))
  .saturating_add(T::DbWeight::get().writes(1))
  }
@@ -120,8 +159,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  // Proof Size summary in bytes:
  // Measured: `0`
  // Estimated: `0`
- // Minimum execution time: 2_159_000 picoseconds.
- Weight::from_parts(2_290_000, 0)
+ // Minimum execution time: 2_081_000 picoseconds.
+ Weight::from_parts(2_258_000, 0)
  .saturating_add(Weight::from_parts(0, 0))
  .saturating_add(T::DbWeight::get().writes(1))
  }
@@ -145,11 +184,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn force_subscribe_version_notify() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `75`
- // Estimated: `3540`
- // Minimum execution time: 28_337_000 picoseconds.
- Weight::from_parts(29_265_000, 0)
- .saturating_add(Weight::from_parts(0, 3540))
+ // Measured: `38`
+ // Estimated: `3503`
+ // Minimum execution time: 28_067_000 picoseconds.
+ Weight::from_parts(28_693_000, 0)
+ .saturating_add(Weight::from_parts(0, 3503))
  .saturating_add(T::DbWeight::get().reads(8))
  .saturating_add(T::DbWeight::get().writes(5))
  }
@@ -171,11 +210,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn force_unsubscribe_version_notify() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `292`
- // Estimated: `3757`
- // Minimum execution time: 30_599_000 picoseconds.
- Weight::from_parts(31_272_000, 0)
- .saturating_add(Weight::from_parts(0, 3757))
+ // Measured: `255`
+ // Estimated: `3720`
+ // Minimum execution time: 30_420_000 picoseconds.
+ Weight::from_parts(31_373_000, 0)
+ .saturating_add(Weight::from_parts(0, 3720))
  .saturating_add(T::DbWeight::get().reads(7))
  .saturating_add(T::DbWeight::get().writes(4))
  }
@@ -185,8 +224,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  // Proof Size summary in bytes:
  // Measured: `0`
  // Estimated: `0`
- // Minimum execution time: 2_132_000 picoseconds.
- Weight::from_parts(2_280_000, 0)
+ // Minimum execution time: 2_087_000 picoseconds.
+ Weight::from_parts(2_243_000, 0)
  .saturating_add(Weight::from_parts(0, 0))
  .saturating_add(T::DbWeight::get().writes(1))
  }
@@ -194,11 +233,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn migrate_supported_version() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `187`
- // Estimated: `11077`
- // Minimum execution time: 18_262_000 picoseconds.
- Weight::from_parts(18_640_000, 0)
- .saturating_add(Weight::from_parts(0, 11077))
+ // Measured: `95`
+ // Estimated: `10985`
+ // Minimum execution time: 15_142_000 picoseconds.
+ Weight::from_parts(15_598_000, 0)
+ .saturating_add(Weight::from_parts(0, 10985))
  .saturating_add(T::DbWeight::get().reads(4))
  .saturating_add(T::DbWeight::get().writes(2))
  }
@@ -206,11 +245,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn migrate_version_notifiers() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `191`
- // Estimated: `11081`
- // Minimum execution time: 18_512_000 picoseconds.
- Weight::from_parts(18_888_000, 0)
- .saturating_add(Weight::from_parts(0, 11081))
+ // Measured: `99`
+ // Estimated: `10989`
+ // Minimum execution time: 15_041_000 picoseconds.
+ Weight::from_parts(15_493_000, 0)
+ .saturating_add(Weight::from_parts(0, 10989))
  .saturating_add(T::DbWeight::get().reads(4))
  .saturating_add(T::DbWeight::get().writes(2))
  }
@@ -218,11 +257,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn already_notified_target() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `198`
- // Estimated: `13563`
- // Minimum execution time: 19_362_000 picoseconds.
- Weight::from_parts(20_056_000, 0)
- .saturating_add(Weight::from_parts(0, 13563))
+ // Measured: `106`
+ // Estimated: `13471`
+ // Minimum execution time: 16_624_000 picoseconds.
+ Weight::from_parts(17_031_000, 0)
+ .saturating_add(Weight::from_parts(0, 13471))
  .saturating_add(T::DbWeight::get().reads(5))
  }
  /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1)
@@ -241,11 +280,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
  fn notify_current_targets() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `142`
- // Estimated: `6082`
- // Minimum execution time: 27_318_000 picoseconds.
- Weight::from_parts(28_075_000, 0)
- .saturating_add(Weight::from_parts(0, 6082))
+ // Measured: `106`
+ // Estimated: `6046`
+ // Minimum execution time: 26_398_000 picoseconds.
+ Weight::from_parts(26_847_000, 0)
+ .saturating_add(Weight::from_parts(0, 6046))
  .saturating_add(T::DbWeight::get().reads(8))
  .saturating_add(T::DbWeight::get().writes(3))
  }
@@ -253,22 +292,22 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn notify_target_migration_fail() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `172`
- // Estimated: `8587`
- // Minimum execution time: 9_930_000 picoseconds.
- Weight::from_parts(10_192_000, 0)
- .saturating_add(Weight::from_parts(0, 8587))
+ // Measured: `136`
+ // Estimated: `8551`
+ // Minimum execution time: 8_741_000 picoseconds.
+ Weight::from_parts(8_954_000, 0)
+ .saturating_add(Weight::from_parts(0, 8551))
  .saturating_add(T::DbWeight::get().reads(3))
  }
  /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2)
  /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn migrate_version_notify_targets() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `198`
- // Estimated: `11088`
- // Minimum execution time: 18_305_000 picoseconds.
- Weight::from_parts(18_738_000, 0)
- .saturating_add(Weight::from_parts(0, 11088))
+ // Measured: `106`
+ // Estimated: `10996`
+ // Minimum execution time: 15_306_000 picoseconds.
+ Weight::from_parts(15_760_000, 0)
+ .saturating_add(Weight::from_parts(0, 10996))
  .saturating_add(T::DbWeight::get().reads(4))
  .saturating_add(T::DbWeight::get().writes(2))
  }
@@ -288,11 +327,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
  fn migrate_and_notify_old_targets() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `204`
- // Estimated: `11094`
- // Minimum execution time: 34_559_000 picoseconds.
- Weight::from_parts(35_241_000, 0)
- .saturating_add(Weight::from_parts(0, 11094))
+ // Measured: `112`
+ // Estimated: `11002`
+ // Minimum execution time: 33_127_000 picoseconds.
+ Weight::from_parts(33_938_000, 0)
+ .saturating_add(Weight::from_parts(0, 11002))
  .saturating_add(T::DbWeight::get().reads(10))
  .saturating_add(T::DbWeight::get().writes(4))
  }
@@ -302,11 +341,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
  /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
  fn new_query() -> Weight {
  // Proof Size summary in bytes:
- // Measured: `69`
- // Estimated: `1554`
- // Minimum execution time: 4_512_000 picoseconds.
- Weight::from_parts(4_671_000, 0)
- .saturating_add(Weight::from_parts(0, 1554))
+ // Measured: `32`
+ // Estimated: `1517`
+ // Minimum execution time: 4_290_000 picoseconds.
+ Weight::from_parts(4_450_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -314,11 +353,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn take_response() -> Weight { // Proof Size summary in bytes: - // Measured: `7706` - // Estimated: `11171` - // Minimum execution time: 26_473_000 picoseconds. - Weight::from_parts(26_960_000, 0) - .saturating_add(Weight::from_parts(0, 11171)) + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 26_408_000 picoseconds. + Weight::from_parts(26_900_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_ethereum_beacon_client.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_ethereum_beacon_client.rs new file mode 100644 index 0000000000000000000000000000000000000000..cd960597b4410fbacdf99a766eebffb94061b812 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_ethereum_beacon_client.rs @@ -0,0 +1,151 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `snowbridge_ethereum_beacon_client` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `ip-172-31-8-124`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --base-path +// /mnt/scratch/benchmark +// --chain=bridge-hub-rococo-dev +// --pallet=snowbridge_ethereum_beacon_client +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --steps +// 50 +// --repeat +// 20 +// --output +// ./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_ethereum_beacon_client.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `snowbridge_ethereum_beacon_client`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> snowbridge_ethereum_beacon_client::WeightInfo for WeightInfo<T> {
+	/// Storage: EthereumBeaconClient FinalizedBeaconStateIndex (r:1 w:1)
+	/// Proof: EthereumBeaconClient FinalizedBeaconStateIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient FinalizedBeaconStateMapping (r:1 w:1)
+	/// Proof: EthereumBeaconClient FinalizedBeaconStateMapping (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient NextSyncCommittee (r:0 w:1)
+	/// Proof: EthereumBeaconClient NextSyncCommittee (max_values: Some(1), max_size: Some(92372), added: 92867, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient InitialCheckpointRoot (r:0 w:1)
+	/// Proof: EthereumBeaconClient InitialCheckpointRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient ValidatorsRoot (r:0 w:1)
+	/// Proof: EthereumBeaconClient ValidatorsRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient LatestFinalizedBlockRoot (r:0 w:1)
+	/// Proof: EthereumBeaconClient LatestFinalizedBlockRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient CurrentSyncCommittee (r:0 w:1)
+	/// Proof: EthereumBeaconClient CurrentSyncCommittee (max_values: Some(1), max_size: Some(92372), added: 92867, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient LatestExecutionState (r:0 w:1)
+	/// Proof: EthereumBeaconClient LatestExecutionState (max_values: Some(1), max_size: Some(80), added: 575, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient FinalizedBeaconState (r:0 w:1)
+	/// Proof: EthereumBeaconClient FinalizedBeaconState (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
+	fn force_checkpoint() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `42`
+		// Estimated: `3501`
+		// Minimum execution time: 97_185_781_000 picoseconds.
+ Weight::from_parts(97_263_571_000, 0) + .saturating_add(Weight::from_parts(0, 3501)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(9)) + } + /// Storage: EthereumBeaconClient LatestFinalizedBlockRoot (r:1 w:1) + /// Proof: EthereumBeaconClient LatestFinalizedBlockRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient FinalizedBeaconState (r:1 w:1) + /// Proof: EthereumBeaconClient FinalizedBeaconState (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient LatestExecutionState (r:1 w:0) + /// Proof: EthereumBeaconClient LatestExecutionState (max_values: Some(1), max_size: Some(80), added: 575, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient NextSyncCommittee (r:1 w:0) + /// Proof: EthereumBeaconClient NextSyncCommittee (max_values: Some(1), max_size: Some(92372), added: 92867, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient CurrentSyncCommittee (r:1 w:0) + /// Proof: EthereumBeaconClient CurrentSyncCommittee (max_values: Some(1), max_size: Some(92372), added: 92867, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient ValidatorsRoot (r:1 w:0) + /// Proof: EthereumBeaconClient ValidatorsRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient FinalizedBeaconStateIndex (r:1 w:1) + /// Proof: EthereumBeaconClient FinalizedBeaconStateIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient FinalizedBeaconStateMapping (r:1 w:1) + /// Proof: EthereumBeaconClient FinalizedBeaconStateMapping (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `92753` + // Estimated: `93857` + // Minimum execution time: 25_999_968_000 picoseconds. + Weight::from_parts(26_051_019_000, 0) + .saturating_add(Weight::from_parts(0, 93857)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: EthereumBeaconClient LatestFinalizedBlockRoot (r:1 w:0) + /// Proof: EthereumBeaconClient LatestFinalizedBlockRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient FinalizedBeaconState (r:1 w:0) + /// Proof: EthereumBeaconClient FinalizedBeaconState (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient LatestExecutionState (r:1 w:0) + /// Proof: EthereumBeaconClient LatestExecutionState (max_values: Some(1), max_size: Some(80), added: 575, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient NextSyncCommittee (r:1 w:1) + /// Proof: EthereumBeaconClient NextSyncCommittee (max_values: Some(1), max_size: Some(92372), added: 92867, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient CurrentSyncCommittee (r:1 w:0) + /// Proof: EthereumBeaconClient CurrentSyncCommittee (max_values: Some(1), max_size: Some(92372), added: 92867, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient ValidatorsRoot (r:1 w:0) + /// Proof: EthereumBeaconClient ValidatorsRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + fn submit_with_sync_committee() -> Weight { + // Proof Size summary in bytes: + // Measured: `92717` + // Estimated: `93857` + // Minimum execution time: 122_354_917_000 picoseconds. 
+ Weight::from_parts(122_461_312_000, 0) + .saturating_add(Weight::from_parts(0, 93857)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: EthereumBeaconClient LatestFinalizedBlockRoot (r:1 w:0) + /// Proof: EthereumBeaconClient LatestFinalizedBlockRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient FinalizedBeaconState (r:1 w:0) + /// Proof: EthereumBeaconClient FinalizedBeaconState (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient LatestExecutionState (r:1 w:1) + /// Proof: EthereumBeaconClient LatestExecutionState (max_values: Some(1), max_size: Some(80), added: 575, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient ExecutionHeaderIndex (r:1 w:1) + /// Proof: EthereumBeaconClient ExecutionHeaderIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient ExecutionHeaderMapping (r:1 w:1) + /// Proof: EthereumBeaconClient ExecutionHeaderMapping (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient ExecutionHeaders (r:0 w:1) + /// Proof: EthereumBeaconClient ExecutionHeaders (max_values: None, max_size: Some(136), added: 2611, mode: MaxEncodedLen) + fn submit_execution_header() -> Weight { + // Proof Size summary in bytes: + // Measured: `386` + // Estimated: `3537` + // Minimum execution time: 108_761_000 picoseconds. + Weight::from_parts(113_158_000, 0) + .saturating_add(Weight::from_parts(0, 3537)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_inbound_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_inbound_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..f734227a4111f66e583560656fca434a02067815 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_inbound_queue.rs @@ -0,0 +1,69 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `snowbridge_inbound_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `macbook pro 14 m2`, CPU: `m2-arm64` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/release/polkadot-parachain
+// benchmark
+// pallet
+// --chain=bridge-hub-rococo-dev
+// --pallet=snowbridge_inbound_queue
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --steps
+// 50
+// --repeat
+// 20
+// --output
+// ./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_inbound_queue.rs

+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `snowbridge_inbound_queue`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> snowbridge_inbound_queue::WeightInfo for WeightInfo<T> {
+	/// Storage: EthereumInboundQueue PalletOperatingMode (r:1 w:0)
+	/// Proof: EthereumInboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
+	/// Storage: EthereumBeaconClient ExecutionHeaders (r:1 w:0)
+	/// Proof: EthereumBeaconClient ExecutionHeaders (max_values: None, max_size: Some(136), added: 2611, mode: MaxEncodedLen)
+	/// Storage: EthereumInboundQueue Nonce (r:1 w:1)
+	/// Proof: EthereumInboundQueue Nonce (max_values: None, max_size: Some(20), added: 2495, mode: MaxEncodedLen)
+	/// Storage: System Account (r:1 w:1)
+	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	fn submit() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `457`
+		// Estimated: `3601`
+		// Minimum execution time: 69_000_000 picoseconds.
+		Weight::from_parts(70_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 3601))
+			.saturating_add(T::DbWeight::get().reads(4))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_outbound_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_outbound_queue.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6cffbc5344a6bd231a19b2801a252a2e287ed081
--- /dev/null
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_outbound_queue.rs
@@ -0,0 +1,87 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `snowbridge_outbound_queue`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-10-20, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `192.168.1.13`, CPU: ``
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024
+
+// Executed Command:
+// ../target/release/polkadot-parachain
+// benchmark
+// pallet
+// --chain=bridge-hub-rococo-dev
+// --pallet=snowbridge_outbound_queue
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --output
+// ../parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_outbound_queue.rs

+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `snowbridge_outbound_queue`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> snowbridge_outbound_queue::WeightInfo for WeightInfo<T> {
+	/// Storage: EthereumOutboundQueue MessageLeaves (r:1 w:1)
+	/// Proof Skipped: EthereumOutboundQueue MessageLeaves (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: EthereumOutboundQueue PendingHighPriorityMessageCount (r:1 w:1)
+	/// Proof: EthereumOutboundQueue PendingHighPriorityMessageCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: EthereumOutboundQueue Nonce (r:1 w:1)
+	/// Proof: EthereumOutboundQueue Nonce (max_values: None, max_size: Some(20), added: 2495, mode: MaxEncodedLen)
+	/// Storage: EthereumOutboundQueue Messages (r:1 w:1)
+	/// Proof Skipped: EthereumOutboundQueue Messages (max_values: Some(1), max_size: None, mode: Measured)
+	fn do_process_message() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `42`
+		// Estimated: `3485`
+		// Minimum execution time: 39_000_000 picoseconds.
+		Weight::from_parts(39_000_000, 3485)
+			.saturating_add(T::DbWeight::get().reads(4_u64))
+			.saturating_add(T::DbWeight::get().writes(4_u64))
+	}
+	/// Storage: EthereumOutboundQueue MessageLeaves (r:1 w:0)
+	/// Proof Skipped: EthereumOutboundQueue MessageLeaves (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: System Digest (r:1 w:1)
+	/// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured)
+	fn commit() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `1094`
+		// Estimated: `2579`
+		// Minimum execution time: 28_000_000 picoseconds.
+		Weight::from_parts(28_000_000, 2579)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+
+	fn commit_single() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `1094`
+		// Estimated: `2579`
+		// Minimum execution time: 9_000_000 picoseconds.
+		Weight::from_parts(9_000_000, 1586)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_system.rs
new file mode 100644
index 0000000000000000000000000000000000000000..88c6c669c880299c6c4417b65e7baa1f3ea9922f
--- /dev/null
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_system.rs
@@ -0,0 +1,256 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `snowbridge_system`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-10-09, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `crake.local`, CPU: ``
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/release/polkadot-parachain
+// benchmark
+// pallet
+// --chain
+// bridge-hub-rococo-dev
+// --pallet=snowbridge_system
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --output
+// parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_system.rs

+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `snowbridge_system`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> snowbridge_system::WeightInfo for WeightInfo<T> {
+	/// Storage: ParachainInfo ParachainId (r:1 w:0)
+	/// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0)
+	/// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
+	/// Storage: MessageQueue BookStateFor (r:1 w:1)
+	/// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+	/// Storage: MessageQueue ServiceHead (r:1 w:1)
+	/// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen)
+	/// Storage: MessageQueue Pages (r:0 w:1)
+	/// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen)
+	fn upgrade() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `80`
+		// Estimated: `3517`
+		// Minimum execution time: 47_000_000 picoseconds.
+ Weight::from_parts(47_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: EthereumSystem Agents (r:1 w:1) + /// Proof: EthereumSystem Agents (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn create_agent() -> Weight { + // Proof Size summary in bytes: + // Measured: `187` + // Estimated: `6196` + // Minimum execution time: 87_000_000 picoseconds. + Weight::from_parts(87_000_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: EthereumSystem Agents (r:1 w:0) + /// Proof: EthereumSystem Agents (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: EthereumSystem Channels (r:1 w:1) + /// Proof: EthereumSystem Channels (max_values: None, max_size: Some(12), added: 2487, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn create_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `602` + // Estimated: `69050` + // Minimum execution time: 84_000_000 picoseconds. 
+ Weight::from_parts(84_000_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: EthereumSystem Channels (r:1 w:0) + /// Proof: EthereumSystem Channels (max_values: None, max_size: Some(12), added: 2487, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn update_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `256` + // Estimated: `6044` + // Minimum execution time: 41_000_000 picoseconds. + Weight::from_parts(41_000_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: EthereumSystem Channels (r:1 w:0) + /// Proof: EthereumSystem Channels (max_values: None, max_size: Some(12), added: 2487, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn force_update_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `256` + // Estimated: `6044` + // Minimum execution time: 41_000_000 picoseconds. + Weight::from_parts(41_000_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn set_operating_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 30_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: EthereumSystem Agents (r:1 w:0) + /// Proof: EthereumSystem Agents (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn transfer_native_from_agent() -> Weight { + // Proof Size summary in bytes: + // Measured: `252` + // Estimated: `6044` + // Minimum execution time: 43_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: EthereumSystem Agents (r:1 w:0) + /// Proof: EthereumSystem Agents (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn force_transfer_native_from_agent() -> Weight { + // Proof Size summary in bytes: + // Measured: `252` + // Estimated: `6044` + // Minimum execution time: 42_000_000 picoseconds. 
+ Weight::from_parts(42_000_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn set_token_transfer_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 31_000_000 picoseconds. + Weight::from_parts(42_000_000, 3517) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumOutboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:0 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn set_pricing_parameters() -> Weight { + // Proof Size summary in bytes: + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 31_000_000 picoseconds. + Weight::from_parts(42_000_000, 3517) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index cb7ad7a7803666f6a287e19f6b2d55caab993208..d7e8c41ff8ac41acfeb60f30774e8282939b6c1c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -53,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 19_037_000 picoseconds. - Weight::from_parts(19_602_000, 3593) + // Minimum execution time: 19_610_000 picoseconds. + Weight::from_parts(19_980_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -64,15 +65,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 43_115_000 picoseconds. - Weight::from_parts(43_897_000, 6196) + // Minimum execution time: 44_411_000 picoseconds. + Weight::from_parts(45_110_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -89,11 +88,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `294` + // Measured: `223` // Estimated: `8799` - // Minimum execution time: 90_267_000 picoseconds. - Weight::from_parts(91_460_000, 8799) - .saturating_add(T::DbWeight::get().reads(11)) + // Minimum execution time: 89_739_000 picoseconds. 
+ Weight::from_parts(91_256_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } // Storage: `Benchmark::Override` (r:0 w:0) @@ -105,8 +104,6 @@ impl WeightInfo { // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. Weight::from_parts(18_446_744_073_709_551_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -125,19 +122,19 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 60_477_000 picoseconds. - Weight::from_parts(61_314_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_045_000 picoseconds. + Weight::from_parts(60_710_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_996_000 picoseconds. - Weight::from_parts(3_107_000, 0) + // Minimum execution time: 3_257_000 picoseconds. + Weight::from_parts(3_392_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -145,15 +142,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 18_907_000 picoseconds. - Weight::from_parts(19_475_000, 3593) + // Minimum execution time: 19_423_000 picoseconds. + Weight::from_parts(19_823_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -170,15 +165,13 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `193` + // Measured: `122` // Estimated: `6196` - // Minimum execution time: 59_143_000 picoseconds. - Weight::from_parts(60_316_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_484_000 picoseconds. 
+ Weight::from_parts(61_634_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -197,11 +190,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `141` - // Estimated: `3606` - // Minimum execution time: 44_459_000 picoseconds. - Weight::from_parts(45_365_000, 3606) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `70` + // Estimated: `3593` + // Minimum execution time: 44_863_000 picoseconds. + Weight::from_parts(45_549_000, 3593) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 4eee8f0e613edce87cd47d905e72228d9dd06021..abd84f8e89b07799758c36b002c30db742305927 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -47,8 +48,6 @@ use sp_std::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -67,81 +66,79 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 62_732_000 picoseconds. - Weight::from_parts(64_581_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 61_813_000 picoseconds. + Weight::from_parts(62_996_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_987_000 picoseconds. - Weight::from_parts(2_107_000, 0) + // Minimum execution time: 2_044_000 picoseconds. + Weight::from_parts(2_112_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3568` - // Minimum execution time: 8_098_000 picoseconds. - Weight::from_parts(8_564_000, 3568) + // Measured: `32` + // Estimated: `3497` + // Minimum execution time: 7_472_000 picoseconds. + Weight::from_parts(7_723_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_539_000 picoseconds. - Weight::from_parts(9_085_000, 0) + // Minimum execution time: 8_414_000 picoseconds. + Weight::from_parts(8_765_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_205_000 picoseconds. - Weight::from_parts(2_369_000, 0) + // Minimum execution time: 2_192_000 picoseconds. + Weight::from_parts(2_243_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_828_000 picoseconds. - Weight::from_parts(1_994_000, 0) + // Minimum execution time: 1_866_000 picoseconds. + Weight::from_parts(1_931_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(1_946_000, 0) + // Minimum execution time: 1_847_000 picoseconds. + Weight::from_parts(1_921_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_842_000 picoseconds. - Weight::from_parts(1_949_000, 0) + // Minimum execution time: 1_797_000 picoseconds. 
+ Weight::from_parts(1_880_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_460_000 picoseconds. - Weight::from_parts(2_593_000, 0) + // Minimum execution time: 2_458_000 picoseconds. + Weight::from_parts(2_523_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 1_833_000 picoseconds. + Weight::from_parts(1_906_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -160,21 +157,21 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 56_813_000 picoseconds. - Weight::from_parts(57_728_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 54_659_000 picoseconds. + Weight::from_parts(56_025_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `160` - // Estimated: `3625` - // Minimum execution time: 11_364_000 picoseconds. - Weight::from_parts(11_872_000, 3625) + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 10_953_000 picoseconds. + Weight::from_parts(11_220_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -182,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_821_000 picoseconds. - Weight::from_parts(1_936_000, 0) + // Minimum execution time: 1_834_000 picoseconds. + Weight::from_parts(1_892_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -201,10 +198,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 23_081_000 picoseconds. - Weight::from_parts(23_512_000, 3574) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 22_238_000 picoseconds. + Weight::from_parts(22_690_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -214,47 +211,45 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_747_000 picoseconds. - Weight::from_parts(4_068_000, 0) + // Minimum execution time: 3_798_000 picoseconds. 
+ Weight::from_parts(3_936_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_045_000 picoseconds. - Weight::from_parts(3_208_000, 0) + // Minimum execution time: 2_985_000 picoseconds. + Weight::from_parts(3_099_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_962_000 picoseconds. - Weight::from_parts(2_284_000, 0) + // Minimum execution time: 1_955_000 picoseconds. + Weight::from_parts(2_050_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_951_000 picoseconds. - Weight::from_parts(2_026_000, 0) + // Minimum execution time: 1_939_000 picoseconds. + Weight::from_parts(1_990_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_837_000 picoseconds. - Weight::from_parts(2_084_000, 0) + // Minimum execution time: 1_841_000 picoseconds. + Weight::from_parts(1_900_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_042_000 picoseconds. + // Minimum execution time: 2_081_000 picoseconds. Weight::from_parts(2_145_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -273,22 +268,20 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 61_350_000 picoseconds. - Weight::from_parts(62_440_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 59_600_000 picoseconds. + Weight::from_parts(61_572_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_993_000 picoseconds. - Weight::from_parts(5_309_000, 0) + // Minimum execution time: 4_390_000 picoseconds. + Weight::from_parts(4_517_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -307,70 +300,70 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 57_133_000 picoseconds. 
- Weight::from_parts(58_100_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 53_864_000 picoseconds. + Weight::from_parts(55_527_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_899_000 picoseconds. - Weight::from_parts(2_153_000, 0) + // Minimum execution time: 1_879_000 picoseconds. + Weight::from_parts(1_947_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_880_000 picoseconds. - Weight::from_parts(1_960_000, 0) + // Minimum execution time: 1_827_000 picoseconds. + Weight::from_parts(1_900_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_825_000 picoseconds. - Weight::from_parts(1_960_000, 0) + // Minimum execution time: 1_824_000 picoseconds. + Weight::from_parts(1_898_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - // Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - // Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (r:1 w:0) - // Proof: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeRococoToWococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + // Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + // Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundLanesCongestedSignals` (r:1 w:0) + // Proof: `BridgeWestendMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:1) + // Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `3604` - // Minimum execution time: 28_419_000 picoseconds. 
- Weight::from_parts(29_387_791, 3604) - // Standard Error: 552 - .saturating_add(Weight::from_parts(316_277, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `190` + // Estimated: `6130` + // Minimum execution time: 41_598_000 picoseconds. + Weight::from_parts(42_219_173, 6130) + // Standard Error: 426 + .saturating_add(Weight::from_parts(452_460, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_903_000 picoseconds. - Weight::from_parts(2_023_000, 0) + // Minimum execution time: 1_812_000 picoseconds. + Weight::from_parts(1_898_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_963_000 picoseconds. - Weight::from_parts(2_143_000, 0) + // Minimum execution time: 1_915_000 picoseconds. + Weight::from_parts(1_976_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index d98012e061bbd61af66667fff23ef70d6de13931..ac5c4afd52d8885b396cf0fd5ef5253ccf929fe5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -16,12 +16,17 @@ use super::{ AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, ParachainInfo, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeFlavor, RuntimeOrigin, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, }; -use crate::bridge_common_config::{ - BridgeGrandpaRococoInstance, BridgeGrandpaWestendInstance, BridgeGrandpaWococoInstance, - DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, +use crate::{ + bridge_common_config::{ + BridgeGrandpaRococoBulletinInstance, BridgeGrandpaWestendInstance, + BridgeParachainWestendInstance, DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, + }, + bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, + EthereumGatewayAddress, }; use bp_messages::LaneId; use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; @@ -34,57 +39,48 @@ use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; use parachains_common::{ impls::ToStakingPot, - xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains}, + xcm_config::{ + AllSiblingSystemParachains, ConcreteAssetFromSystem, ParentRelayOrSiblingParachains, + RelayOrOtherSystemParachains, + }, TREASURY_PALLET_ID, }; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use rococo_runtime_constants::system_parachain; -use sp_core::Get; +use snowbridge_core::DescribeHere; +use snowbridge_rococo_common::EthereumNetwork; +use snowbridge_runtime_common::XcmExportFeeToSibling; +use sp_core::{Get, H256}; use sp_runtime::traits::AccountIdConversion; use sp_std::marker::PhantomData; use xcm::latest::prelude::*; +#[allow(deprecated)] use xcm_builder::{ deposit_or_burn_fee, AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - CurrencyAdapter, 
DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, HandleFee, - IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, + DescribeFamily, EnsureXcmOrigin, HandleFee, HashedDescription, IsConcrete, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeToAccount, }; use xcm_executor::{ - traits::{FeeReason, TransactAsset, WithOriginFilter}, + traits::{FeeManager, FeeReason, FeeReason::Export, TransactAsset, WithOriginFilter}, XcmExecutor, }; parameter_types! { - pub storage Flavor: RuntimeFlavor = RuntimeFlavor::default(); pub const TokenLocation: MultiLocation = MultiLocation::parent(); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); + pub RelayNetwork: NetworkId = NetworkId::Rococo; pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); -} - -/// Adapter for resolving `NetworkId` based on `pub storage Flavor: RuntimeFlavor`. -pub struct RelayNetwork; -impl Get> for RelayNetwork { - fn get() -> Option { - Some(Self::get()) - } -} -impl Get for RelayNetwork { - fn get() -> NetworkId { - match Flavor::get() { - RuntimeFlavor::Rococo => NetworkId::Rococo, - RuntimeFlavor::Wococo => NetworkId::Wococo, - } - } + pub SiblingPeople: MultiLocation = (Parent, Parachain(rococo_runtime_constants::system_parachain::PEOPLE_ID)).into(); } /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used @@ -100,6 +96,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. +#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, @@ -142,10 +139,6 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; } /// A call filter for the XCM Transact instruction. This is a temporary measure until we properly @@ -171,7 +164,7 @@ impl Contains for SafeCallFilter { if items.iter().all(|(k, _)| { k.eq(&DeliveryRewardInBalance::key()) | k.eq(&RequiredStakeForStakeAndSlash::key()) | - k.eq(&Flavor::key()) + k.eq(&EthereumGatewayAddress::key()) }) => return true, _ => (), @@ -179,38 +172,59 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. 
} | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::authorize_upgrade { .. } | + frame_system::Call::authorize_upgrade_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::CollatorSelection(..) | + RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | - RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< + RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< Runtime, - BridgeGrandpaRococoInstance, + BridgeGrandpaWestendInstance, >::initialize { .. }) | RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< Runtime, BridgeGrandpaWestendInstance, + >::set_operating_mode { .. }) | + RuntimeCall::BridgeWestendParachains(pallet_bridge_parachains::Call::< + Runtime, + BridgeParachainWestendInstance, + >::set_operating_mode { .. }) | + RuntimeCall::BridgeWestendMessages(pallet_bridge_messages::Call::< + Runtime, + WithBridgeHubWestendMessagesInstance, + >::set_operating_mode { .. }) | + RuntimeCall::BridgePolkadotBulletinGrandpa(pallet_bridge_grandpa::Call::< + Runtime, + BridgeGrandpaRococoBulletinInstance, >::initialize { .. }) | - RuntimeCall::BridgeWococoGrandpa(pallet_bridge_grandpa::Call::< + RuntimeCall::BridgePolkadotBulletinGrandpa(pallet_bridge_grandpa::Call::< + Runtime, + BridgeGrandpaRococoBulletinInstance, + >::set_operating_mode { .. }) | + RuntimeCall::BridgePolkadotBulletinMessages(pallet_bridge_messages::Call::< Runtime, - BridgeGrandpaWococoInstance, - >::initialize { .. }) + WithRococoBulletinMessagesInstance, + >::set_operating_mode { .. }) | + RuntimeCall::EthereumBeaconClient( + snowbridge_ethereum_beacon_client::Call::force_checkpoint { .. } | + snowbridge_ethereum_beacon_client::Call::set_operating_mode { .. }, + ) | RuntimeCall::EthereumInboundQueue( + snowbridge_inbound_queue::Call::set_operating_mode { .. }, + ) | RuntimeCall::EthereumOutboundQueue( + snowbridge_outbound_queue::Call::set_operating_mode { .. }, + ) | RuntimeCall::EthereumSystem(..) ) } } @@ -228,14 +242,15 @@ pub type Barrier = TrailingSetTopicAsId< // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, - // Parent, its pluralities (i.e. governance bodies) and relay treasury pallet - // get free execution. + // Parent, its pluralities (i.e. governance bodies), relay treasury pallet + // and sibling People get free execution. 
AllowExplicitUnpaidExecutionFrom<( ParentOrParentsPlurality, Equals, + Equals, )>, // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, + AllowSubscriptionsFrom, ), UniversalLocation, ConstU32<8>, @@ -244,25 +259,13 @@ pub type Barrier = TrailingSetTopicAsId< >, >; -match_types! { - pub type SystemParachains: impl Contains = { - MultiLocation { - parents: 1, - interior: X1(Parachain( - system_parachain::ASSET_HUB_ID | - system_parachain::BRIDGE_HUB_ID | - system_parachain::CONTRACTS_ID | - system_parachain::ENCOINTER_ID - )), - } - }; -} - /// Locations that will not be charged fees in the executor, /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. -pub type WaivedLocations = - (RelayOrOtherSystemParachains, Equals); +pub type WaivedLocations = ( + RelayOrOtherSystemParachains, + Equals, +); /// Cases where a remote origin is accepted as trusted Teleporter for a given asset: /// - NativeToken with the parent Relay Chain and sibling parachains. @@ -295,16 +298,9 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = PolkadotXcm; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = XcmFeeManagerFromComponents< + type FeeManager = XcmFeeManagerFromComponentsBridgeHub< WaivedLocations, ( - XcmExportFeeToRelayerRewardAccounts< - Self::AssetTransactor, - crate::bridge_to_wococo_config::WococoGlobalConsensusNetwork, - crate::bridge_to_wococo_config::AssetHubWococoParaId, - crate::bridge_to_wococo_config::BridgeHubWococoChainId, - crate::bridge_to_wococo_config::AssetHubRococoToAssetHubWococoMessagesLane, - >, XcmExportFeeToRelayerRewardAccounts< Self::AssetTransactor, crate::bridge_to_westend_config::WestendGlobalConsensusNetwork, @@ -312,20 +308,21 @@ impl xcm_executor::Config for XcmConfig { crate::bridge_to_westend_config::BridgeHubWestendChainId, crate::bridge_to_westend_config::AssetHubRococoToAssetHubWestendMessagesLane, >, - XcmExportFeeToRelayerRewardAccounts< + XcmExportFeeToSibling< + bp_rococo::Balance, + AccountId, + TokenLocation, + EthereumNetwork, Self::AssetTransactor, - crate::bridge_to_rococo_config::RococoGlobalConsensusNetwork, - crate::bridge_to_rococo_config::AssetHubRococoParaId, - crate::bridge_to_rococo_config::BridgeHubRococoChainId, - crate::bridge_to_rococo_config::AssetHubWococoToAssetHubRococoMessagesLane, + crate::EthereumOutboundQueue, >, XcmFeeToAccount, ), >; type MessageExporter = ( crate::bridge_to_westend_config::ToBridgeHubWestendHaulBlobExporter, - crate::bridge_to_wococo_config::ToBridgeHubWococoHaulBlobExporter, - crate::bridge_to_rococo_config::ToBridgeHubRococoHaulBlobExporter, + crate::bridge_to_bulletin_config::ToRococoBulletinHaulBlobExporter, + crate::bridge_to_ethereum_config::SnowbridgeExporter, ); type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; @@ -349,11 +346,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmRouter = XcmRouter; @@ -381,8 +373,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -393,6 +383,10 @@ impl cumulus_pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; } +/// Creates an AgentId from a MultiLocation. An AgentId is a unique mapping to a Agent contract on +/// Ethereum which acts as the sovereign account for the MultiLocation. +pub type AgentIdOf = HashedDescription)>; + /// A `HandleFee` implementation that simply deposits the fees for `ExportMessage` XCM instructions /// into the accounts that are used for paying the relayer rewards. /// Burns the fees in case of a failure. @@ -485,3 +479,41 @@ impl< fee } } + +pub struct XcmFeeManagerFromComponentsBridgeHub( + PhantomData<(WaivedLocations, HandleFee)>, +); +impl, FeeHandler: HandleFee> FeeManager + for XcmFeeManagerFromComponentsBridgeHub +{ + fn is_waived(origin: Option<&MultiLocation>, fee_reason: FeeReason) -> bool { + let Some(loc) = origin else { return false }; + if let Export { network, destination: Here } = fee_reason { + return !(network == EthereumNetwork::get()) + } + WaivedLocations::contains(loc) + } + + fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, reason: FeeReason) { + FeeHandler::handle_fee(fee, context, reason); + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmark_helpers { + use crate::{MultiAssets, MultiLocation, SendError, SendResult, SendXcm, Xcm, XcmHash}; + + pub struct DoNothingRouter; + impl SendXcm for DoNothingRouter { + type Ticket = (); + fn validate( + _dest: &mut Option, + _msg: &mut Option>, + ) -> SendResult<()> { + Ok(((), MultiAssets::new())) + } + fn deliver(_: ()) -> Result { + Ok([0; 32]) + } + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 65cca67dac198f541bb0965a58d8526a92ba8e04..0fba28c47b439a699e457e816b63ea4e76f0e0a2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -18,17 +18,16 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ - bridge_common_config, bridge_to_rococo_config, bridge_to_westend_config, - bridge_to_wococo_config, + bridge_common_config, bridge_to_bulletin_config, bridge_to_westend_config, xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, - TransactionPayment, UncheckedExtrinsic, + AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, EthereumGatewayAddress, + Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, + RuntimeEvent, RuntimeOrigin, SessionKeys, SignedExtra, TransactionPayment, UncheckedExtrinsic, }; use codec::{Decode, Encode}; -use frame_support::{dispatch::GetDispatchInfo, parameter_types}; -use frame_system::pallet_prelude::HeaderFor; +use 
frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8}; use parachains_common::{rococo::fee::WeightToFee, AccountId, AuraId, Balance}; +use sp_core::H160; use sp_keyring::AccountKeyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, @@ -36,9 +35,6 @@ use sp_runtime::{ }; use xcm::latest::prelude::*; -// Para id of sibling chain used in tests. -pub const SIBLING_PARACHAIN_ID: u32 = 1000; - parameter_types! { pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } @@ -47,27 +43,29 @@ fn construct_extrinsic( sender: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> UncheckedExtrinsic { + let account_id = AccountId32::from(sender.public()); let extra: SignedExtra = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), frame_system::CheckGenesis::::new(), frame_system::CheckEra::::from(Era::immortal()), - frame_system::CheckNonce::::from(0), + frame_system::CheckNonce::::from( + frame_system::Pallet::::account(&account_id).nonce, + ), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), ( - bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages::default(), bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages::default(), + bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); UncheckedExtrinsic::new_signed( call, - AccountId32::from(sender.public()).into(), + account_id.into(), Signature::Sr25519(signature.clone()), extra, ) @@ -75,10 +73,9 @@ fn construct_extrinsic( fn construct_and_apply_extrinsic( relayer_at_target: sp_keyring::AccountKeyring, - batch: pallet_utility::Call, + call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { - let batch_call = RuntimeCall::Utility(batch); - let xt = construct_extrinsic(relayer_at_target, batch_call); + let xt = construct_extrinsic(relayer_at_target, call); let r = Executive::apply_extrinsic(xt); r.unwrap() } @@ -90,10 +87,6 @@ fn construct_and_estimate_extrinsic_fee(batch: pallet_utility::Call) -> TransactionPayment::compute_fee(xt.encoded_size() as _, &batch_info, 0) } -fn executive_init_block(header: &HeaderFor) { - Executive::initialize_block(header) -} - fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys { bridge_hub_test_utils::CollatorSessionKeys::new( AccountId::from(Alice), @@ -102,127 +95,137 @@ fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID +); + +#[test] +fn change_required_stake_by_governance_works() { + bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< + Runtime, + bridge_common_config::RequiredStakeForStakeAndSlash, + Balance, + >( + collator_session_keys(), + bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, + Box::new(|call| RuntimeCall::System(call).encode()), + || { + ( + bridge_common_config::RequiredStakeForStakeAndSlash::key().to_vec(), + bridge_common_config::RequiredStakeForStakeAndSlash::get(), + ) + }, + |old_value| old_value.checked_mul(2).unwrap(), + ) +} 
+ +mod bridge_hub_westend_tests { use super::*; use bridge_common_config::{ - BridgeGrandpaWestendInstance, BridgeGrandpaWococoInstance, BridgeParachainWestendInstance, - BridgeParachainWococoInstance, DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, + BridgeGrandpaWestendInstance, BridgeParachainWestendInstance, DeliveryRewardInBalance, }; + use bridge_hub_test_utils::test_cases::from_parachain; use bridge_to_westend_config::{ - BridgeHubWestendChainId, WestendGlobalConsensusNetwork, WithBridgeHubWestendMessageBridge, - WithBridgeHubWestendMessagesInstance, XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, - }; - use bridge_to_wococo_config::{ - BridgeHubWococoChainId, WithBridgeHubWococoMessageBridge, - WithBridgeHubWococoMessagesInstance, WococoGlobalConsensusNetwork, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, + BridgeHubWestendChainId, BridgeHubWestendLocation, WestendGlobalConsensusNetwork, + WithBridgeHubWestendMessageBridge, WithBridgeHubWestendMessagesInstance, + XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, }; - bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( + // Para id of sibling chain used in tests. + pub const SIBLING_PARACHAIN_ID: u32 = 1000; + + // Runtime from tests PoV + type RuntimeTestsAdapter = from_parachain::WithRemoteParachainHelperAdapter< Runtime, AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID - ); + BridgeGrandpaWestendInstance, + BridgeParachainWestendInstance, + WithBridgeHubWestendMessagesInstance, + WithBridgeHubWestendMessageBridge, + >; #[test] fn initialize_bridge_by_governance_works() { - // for Wococo finality + // for RococoBulletin finality bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< Runtime, - BridgeGrandpaWococoInstance, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeWococoGrandpa(call).encode()), - ); + BridgeGrandpaWestendInstance, + >(collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID) + } + + #[test] + fn change_bridge_grandpa_pallet_mode_by_governance_works() { // for Westend finality - bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< + bridge_hub_test_utils::test_cases::change_bridge_grandpa_pallet_mode_by_governance_works::< Runtime, BridgeGrandpaWestendInstance, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeWestendGrandpa(call).encode()), - ) + >(collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID) } #[test] - fn change_delivery_reward_by_governance_works() { + fn change_bridge_parachains_pallet_mode_by_governance_works() { + // for Westend finality + bridge_hub_test_utils::test_cases::change_bridge_parachains_pallet_mode_by_governance_works::< + Runtime, + BridgeParachainWestendInstance, + >(collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID) + } + + #[test] + fn 
change_bridge_messages_pallet_mode_by_governance_works() { + // for Westend finality + bridge_hub_test_utils::test_cases::change_bridge_messages_pallet_mode_by_governance_works::< + Runtime, + WithBridgeHubWestendMessagesInstance, + >(collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID) + } + + #[test] + fn change_ethereum_gateway_by_governance_works() { bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< Runtime, - DeliveryRewardInBalance, - u64, + EthereumGatewayAddress, + H160, >( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, Box::new(|call| RuntimeCall::System(call).encode()), - || (DeliveryRewardInBalance::key().to_vec(), DeliveryRewardInBalance::get()), - |old_value| old_value.checked_mul(2).unwrap(), + || (EthereumGatewayAddress::key().to_vec(), EthereumGatewayAddress::get()), + |_| [1; 20].into(), ) } #[test] - fn change_required_stake_by_governance_works() { + fn change_delivery_reward_by_governance_works() { bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< Runtime, - RequiredStakeForStakeAndSlash, - Balance, + DeliveryRewardInBalance, + u64, >( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, Box::new(|call| RuntimeCall::System(call).encode()), - || { - ( - RequiredStakeForStakeAndSlash::key().to_vec(), - RequiredStakeForStakeAndSlash::get(), - ) - }, + || (DeliveryRewardInBalance::key().to_vec(), DeliveryRewardInBalance::get()), |old_value| old_value.checked_mul(2).unwrap(), ) } #[test] fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { - // for Wococo - bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< - Runtime, - XcmConfig, - WithBridgeHubWococoMessagesInstance, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::BridgeWococoMessages(event)) => Some(event), - _ => None, - } - }), - || ExportMessage { network: Wococo, destination: X1(Parachain(1234)), xcm: Xcm(vec![]) }, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - Some((TokenLocation::get(), ExistentialDeposit::get()).into()), - // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` - Some((TokenLocation::get(), bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs::get()).into()), - || (), - ); // for Westend bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< Runtime, @@ -238,54 +241,27 @@ mod bridge_hub_rococo_tests { _ => None, } }), - || ExportMessage { network: Westend, destination: X1(Parachain(1234)), xcm: Xcm(vec![]) }, + || ExportMessage { network: Westend, destination: X1(Parachain(bridge_to_westend_config::AssetHubWestendParaId::get().into())), xcm: Xcm(vec![]) }, XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, Some((TokenLocation::get(), ExistentialDeposit::get()).into()), // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` Some((TokenLocation::get(), bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs::get()).into()), - || (), + || PolkadotXcm::force_xcm_version(RuntimeOrigin::root(), Box::new(BridgeHubWestendLocation::get()), XCM_VERSION).expect("version saved!"), ) } #[test] fn 
message_dispatch_routing_works() { - // from Wococo - bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - WithBridgeHubWococoMessagesInstance, - RelayNetwork, - WococoGlobalConsensusNetwork, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ParachainSystem(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - || (), - ); // from Westend bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, ParachainSystem, - WithBridgeHubWococoMessagesInstance, + WithBridgeHubWestendMessagesInstance, RelayNetwork, WestendGlobalConsensusNetwork, + ConstU8<2>, >( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, @@ -309,82 +285,24 @@ mod bridge_hub_rococo_tests { #[test] fn relayed_incoming_message_works() { - // from Wococo - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaWococoInstance, - BridgeParachainWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Rococo, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - || (), - ); // from Westend - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaWestendInstance, - BridgeParachainWestendInstance, - WithBridgeHubWestendMessagesInstance, - WithBridgeHubWestendMessageBridge, - >( + from_parachain::relayed_incoming_message_works::( collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, + BridgeHubWestendChainId::get(), SIBLING_PARACHAIN_ID, Rococo, XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, || (), + construct_and_apply_extrinsic, ) } #[test] pub fn complex_relay_extrinsic_works() { - // for Wococo - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaWococoInstance, - BridgeParachainWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - BridgeHubWococoChainId::get(), - Rococo, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - ExistentialDeposit::get(), - executive_init_block, - construct_and_apply_extrinsic, - || (), - ); // for Westend - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaWestendInstance, - BridgeParachainWestendInstance, - WithBridgeHubWestendMessagesInstance, - WithBridgeHubWestendMessageBridge, - >( + from_parachain::complex_relay_extrinsic_works::( 
collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, @@ -392,10 +310,8 @@ mod bridge_hub_rococo_tests { BridgeHubWestendChainId::get(), Rococo, XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, - ExistentialDeposit::get(), - executive_init_block, - construct_and_apply_extrinsic, || (), + construct_and_apply_extrinsic, ); } @@ -419,16 +335,9 @@ mod bridge_hub_rococo_tests { #[test] pub fn can_calculate_fee_for_complex_message_delivery_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_delivery_transaction::< - Runtime, - BridgeGrandpaWestendInstance, - BridgeParachainWestendInstance, - WithBridgeHubWestendMessagesInstance, - WithBridgeHubWestendMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); + let estimated = from_parachain::can_calculate_fee_for_complex_message_delivery_transaction::< + RuntimeTestsAdapter, + >(collator_session_keys(), construct_and_estimate_extrinsic_fee); // check if estimated value is sane let max_expected = bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(); @@ -442,16 +351,10 @@ mod bridge_hub_rococo_tests { #[test] pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_confirmation_transaction::< - Runtime, - BridgeGrandpaWestendInstance, - BridgeParachainWestendInstance, - WithBridgeHubWestendMessagesInstance, - WithBridgeHubWestendMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); + let estimated = + from_parachain::can_calculate_fee_for_complex_message_confirmation_transaction::< + RuntimeTestsAdapter, + >(collator_session_keys(), construct_and_estimate_extrinsic_fee); // check if estimated value is sane let max_expected = bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(); @@ -464,153 +367,99 @@ mod bridge_hub_rococo_tests { } } -mod bridge_hub_wococo_tests { +mod bridge_hub_bulletin_tests { use super::*; - use bridge_common_config::{ - BridgeGrandpaRococoInstance, BridgeParachainRococoInstance, DeliveryRewardInBalance, - RequiredStakeForStakeAndSlash, - }; - use bridge_hub_rococo_runtime::{xcm_config, AllPalletsWithoutSystem, RuntimeFlavor}; - use bridge_to_rococo_config::{ - BridgeHubRococoChainId, RococoGlobalConsensusNetwork, WithBridgeHubRococoMessageBridge, - WithBridgeHubRococoMessagesInstance, XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, + use bridge_common_config::BridgeGrandpaRococoBulletinInstance; + use bridge_hub_test_utils::test_cases::from_grandpa_chain; + use bridge_to_bulletin_config::{ + RococoBulletinChainId, RococoBulletinGlobalConsensusNetwork, + RococoBulletinGlobalConsensusNetworkLocation, WithRococoBulletinMessageBridge, + WithRococoBulletinMessagesInstance, XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, }; - use frame_support::assert_ok; - - type RuntimeHelper = bridge_hub_test_utils::RuntimeHelper; - - pub(crate) fn set_wococo_flavor() { - let flavor_key = xcm_config::Flavor::key().to_vec(); - let flavor = RuntimeFlavor::Wococo; - - // encode `set_storage` call - let set_storage_call = RuntimeCall::System(frame_system::Call::::set_storage { - items: vec![(flavor_key, flavor.encode())], - }) - .encode(); - // estimate - storing just 1 value - use frame_system::WeightInfo; - let require_weight_at_most = - ::SystemWeightInfo::set_storage(1); + // Para id of sibling chain 
used in tests. + pub const SIBLING_PARACHAIN_ID: u32 = rococo_runtime_constants::system_parachain::PEOPLE_ID; - // execute XCM with Transact to `set_storage` as governance does - assert_ok!(RuntimeHelper::execute_as_governance(set_storage_call, require_weight_at_most) - .ensure_complete()); - - // check if stored - assert_eq!(flavor, xcm_config::Flavor::get()); - } - - bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( + // Runtime from tests PoV + type RuntimeTestsAdapter = from_grandpa_chain::WithRemoteGrandpaChainHelperAdapter< Runtime, AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID - ); + BridgeGrandpaRococoBulletinInstance, + WithRococoBulletinMessagesInstance, + WithRococoBulletinMessageBridge, + >; #[test] fn initialize_bridge_by_governance_works() { + // for Bulletin finality bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< Runtime, - BridgeGrandpaRococoInstance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeRococoGrandpa(call).encode()), - ) + BridgeGrandpaRococoBulletinInstance, + >(collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID) } #[test] - fn change_delivery_reward_by_governance_works() { - bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< + fn change_bridge_grandpa_pallet_mode_by_governance_works() { + // for Bulletin finality + bridge_hub_test_utils::test_cases::change_bridge_grandpa_pallet_mode_by_governance_works::< Runtime, - DeliveryRewardInBalance, - u64, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || (DeliveryRewardInBalance::key().to_vec(), DeliveryRewardInBalance::get()), - |old_value| old_value.checked_mul(2).unwrap(), - ) + BridgeGrandpaRococoBulletinInstance, + >(collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID) } #[test] - fn change_required_stake_by_governance_works() { - bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< + fn change_bridge_messages_pallet_mode_by_governance_works() { + // for Bulletin finality + bridge_hub_test_utils::test_cases::change_bridge_messages_pallet_mode_by_governance_works::< Runtime, - RequiredStakeForStakeAndSlash, - Balance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || { - ( - RequiredStakeForStakeAndSlash::key().to_vec(), - RequiredStakeForStakeAndSlash::get(), - ) - }, - |old_value| old_value.checked_mul(2).unwrap(), - ) + WithRococoBulletinMessagesInstance, + >(collator_session_keys(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID) } #[test] fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { + // for Bulletin bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< Runtime, XcmConfig, 
- WithBridgeHubRococoMessagesInstance, + WithRococoBulletinMessagesInstance, >( collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, + bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, SIBLING_PARACHAIN_ID, Box::new(|runtime_event_encoded: Vec| { match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::BridgeRococoMessages(event)) => Some(event), + Ok(RuntimeEvent::BridgePolkadotBulletinMessages(event)) => Some(event), _ => None, } }), - || ExportMessage { network: Rococo, destination: X1(Parachain(4321)), xcm: Xcm(vec![]) }, - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, + || ExportMessage { + network: RococoBulletinGlobalConsensusNetwork::get(), + destination: Here, + xcm: Xcm(vec![]), + }, + XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, Some((TokenLocation::get(), ExistentialDeposit::get()).into()), - // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` - Some((TokenLocation::get(), bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs::get()).into()), - set_wococo_flavor, + None, + || PolkadotXcm::force_xcm_version(RuntimeOrigin::root(), Box::new(RococoBulletinGlobalConsensusNetworkLocation::get()), XCM_VERSION).expect("version saved!"), ) } #[test] fn message_dispatch_routing_works() { + // from Bulletin bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, ParachainSystem, - WithBridgeHubRococoMessagesInstance, + WithRococoBulletinMessagesInstance, RelayNetwork, - RococoGlobalConsensusNetwork, + RococoBulletinGlobalConsensusNetwork, + ConstU8<2>, >( collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, + bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, SIBLING_PARACHAIN_ID, Box::new(|runtime_event_encoded: Vec| { match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { @@ -624,56 +473,38 @@ mod bridge_hub_wococo_tests { _ => None, } }), - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - set_wococo_flavor, + XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, + || (), ) } #[test] fn relayed_incoming_message_works() { - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( + // from Bulletin + from_grandpa_chain::relayed_incoming_message_works::( collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, + RococoBulletinChainId::get(), SIBLING_PARACHAIN_ID, - Wococo, - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - set_wococo_flavor, + Rococo, + XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, + || (), + construct_and_apply_extrinsic, ) } #[test] pub fn complex_relay_extrinsic_works() { - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( + // for Bulletin + from_grandpa_chain::complex_relay_extrinsic_works::( collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, SIBLING_PARACHAIN_ID, - BridgeHubRococoChainId::get(), - Wococo, - 
XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - ExistentialDeposit::get(), - executive_init_block, + RococoBulletinChainId::get(), + Rococo, + XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, + || (), construct_and_apply_extrinsic, - set_wococo_flavor, ); } @@ -686,10 +517,10 @@ mod bridge_hub_wococo_tests { >(); // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs::get(); + let max_expected = bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs::get(); assert!( estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs` value", + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs` value", estimated, max_expected ); @@ -697,22 +528,16 @@ mod bridge_hub_wococo_tests { #[test] pub fn can_calculate_fee_for_complex_message_delivery_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_delivery_transaction::< - Runtime, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); + let estimated = + from_grandpa_chain::can_calculate_fee_for_complex_message_delivery_transaction::< + RuntimeTestsAdapter, + >(collator_session_keys(), construct_and_estimate_extrinsic_fee); // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseDeliveryFeeInWocs::get(); + let max_expected = bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(); assert!( estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseDeliveryFeeInWocs` value", + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs` value", estimated, max_expected ); @@ -720,22 +545,16 @@ mod bridge_hub_wococo_tests { #[test] pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_confirmation_transaction::< - Runtime, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); + let estimated = + from_grandpa_chain::can_calculate_fee_for_complex_message_confirmation_transaction::< + RuntimeTestsAdapter, + >(collator_session_keys(), construct_and_estimate_extrinsic_fee); // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseConfirmationFeeInWocs::get(); + let max_expected = bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(); assert!( estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseConfirmationFeeInWocs` value", + "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs` value", estimated, max_expected ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 7e384126ab670148f8435bac01e9743fc2a72016..94e29fb90ac30dde084a79da585212e649b64885 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true description = "Westend's BridgeHub parachain runtime" license = "Apache-2.0" +[lints] +workspace = true + [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -14,58 +17,57 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +serde = { version = "1.0.193", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, 
optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = 
"../../../../../substrate/primitives/version", default-features = false } # Polkadot -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -75,6 +77,7 @@ parachain-info = { package = "staging-parachain-info", path = "../../../pallets/ parachains-common = { path = "../../../common", default-features = false } # Bridges +bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", 
default-features = false } bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } @@ -90,7 +93,9 @@ pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", defau pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } +pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false } bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } +bridge-hub-common = { path = "../../bridge-hubs/common", default-features = false } [dev-dependencies] static_assertions = "1.1" @@ -99,8 +104,9 @@ bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", fe sp-keyring = { path = "../../../../../substrate/primitives/keyring" } [features] -default = [ "std" ] +default = ["std"] std = [ + "bp-asset-hub-rococo/std", "bp-asset-hub-westend/std", "bp-bridge-hub-rococo/std", "bp-bridge-hub-westend/std", @@ -112,10 +118,10 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", + "bridge-hub-common/std", "bridge-runtime-common/std", "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -146,6 +152,7 @@ std = [ "pallet-transaction-payment/std", "pallet-utility/std", "pallet-xcm-benchmarks?/std", + "pallet-xcm-bridge-hub/std", "pallet-xcm/std", "parachain-info/std", "parachains-common/std", @@ -176,8 +183,8 @@ std = [ ] runtime-benchmarks = [ + "bridge-hub-common/runtime-benchmarks", "bridge-runtime-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -198,6 +205,7 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm-bridge-hub/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", @@ -209,7 +217,6 @@ runtime-benchmarks = [ try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -231,15 +238,16 @@ try-runtime = [ "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-utility/try-runtime", + "pallet-xcm-bridge-hub/try-runtime", "pallet-xcm/try-runtime", "parachain-info/try-runtime", "polkadot-runtime-common/try-runtime", "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index 70ff43c09e3f22ba746966136aa457ebd9c82b4f..eb5493872b40917dd7edf36994df45864a14c48f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -17,8 +17,9 @@ //! Bridge definitions used on BridgeHub with the Westend flavor. use crate::{ - bridge_common_config::DeliveryRewardInBalance, weights, AccountId, BridgeRococoMessages, - ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, XcmRouter, + bridge_common_config::DeliveryRewardInBalance, weights, xcm_config::UniversalLocation, + AccountId, BridgeRococoMessages, PolkadotXcm, Runtime, RuntimeEvent, RuntimeOrigin, + XcmOverBridgeHubRococo, XcmRouter, }; use bp_messages::LaneId; use bp_parachains::SingleParaStoredHeaderDataBuilder; @@ -31,7 +32,7 @@ use bridge_runtime_common::{ }, messages_xcm_extension::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, - XcmBlobMessageDispatch, + XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, refund_relayer_extension::{ ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, @@ -48,7 +49,7 @@ use xcm::{ latest::prelude::*, prelude::{InteriorMultiLocation, NetworkId}, }; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; +use xcm_builder::BridgeBlobDispatcher; parameter_types! { pub const RelayChainHeadersToKeep: u32 = 1024; @@ -62,24 +63,42 @@ parameter_types! 
{ pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = bp_bridge_hub_westend::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; pub const BridgeHubRococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pub BridgeHubWestendUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Westend), Parachain(ParachainInfo::parachain_id().into())); pub BridgeWestendToRococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(<BridgeRococoMessages as PalletInfoAccess>::index() as u8)); pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; - pub ActiveOutboundLanesToBridgeHubRococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO]; - pub const AssetHubWestendToAssetHubRococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO; + pub RococoGlobalConsensusNetworkLocation: MultiLocation = MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(RococoGlobalConsensusNetwork::get())) + }; // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; pub AssetHubWestendParaId: cumulus_primitives_core::ParaId = bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID.into(); + pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); + // Lanes + pub ActiveOutboundLanesToBridgeHubRococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO]; + pub const AssetHubWestendToAssetHubRococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO; pub FromAssetHubWestendToAssetHubRococoRoute: SenderAndLane = SenderAndLane::new( ParentThen(X1(Parachain(AssetHubWestendParaId::get().into()))).into(), XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, ); + pub ActiveLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorMultiLocation))> = sp_std::vec![ + ( + FromAssetHubWestendToAssetHubRococoRoute::get(), + (RococoGlobalConsensusNetwork::get(), X1(Parachain(AssetHubRococoParaId::get().into()))) + ) + ]; pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); - pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); + + pub BridgeHubRococoLocation: MultiLocation = MultiLocation { + parents: 2, + interior: X2( + GlobalConsensus(RococoGlobalConsensusNetwork::get()), + Parachain(<bp_bridge_hub_rococo::BridgeHubRococo as bp_runtime::Parachain>::PARACHAIN_ID) + ) + }; } pub const XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO: LaneId = LaneId([0, 0, 0, 2]); @@ -110,23 +129,16 @@ pub type ToRococoBridgeHubMessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof<bp_bridge_hub_rococo::Hash>; /// Dispatches received XCM messages from other bridge -type FromRococoMessageBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubWestendUniversalLocation, - BridgeWestendToRococoMessagesPalletInstance, ->; +type FromRococoMessageBlobDispatcher = + BridgeBlobDispatcher<XcmRouter, UniversalLocation, BridgeWestendToRococoMessagesPalletInstance>; /// Export XCM messages to be relayed to the other side -pub type ToBridgeHubRococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter<ToBridgeHubRococoXcmBlobHauler>, - RococoGlobalConsensusNetwork, - (), ->; +pub type ToBridgeHubRococoHaulBlobExporter = XcmOverBridgeHubRococo; + pub struct ToBridgeHubRococoXcmBlobHauler; impl XcmBlobHauler for ToBridgeHubRococoXcmBlobHauler { type Runtime = Runtime; type MessagesInstance = WithBridgeHubRococoMessagesInstance; - type SenderAndLane = FromAssetHubWestendToAssetHubRococoRoute; type ToSourceChainSender = XcmRouter; type CongestedMessage = CongestedMessage; @@ -134,7 +146,7 @@ impl XcmBlobHauler
for ToBridgeHubRococoXcmBlobHauler { } /// On messages delivered callback. -type OnMessagesDelivered = XcmBlobHaulerAdapter<ToBridgeHubRococoXcmBlobHauler>; +type OnMessagesDelivered = XcmBlobHaulerAdapter<ToBridgeHubRococoXcmBlobHauler, ActiveLanes>; /// Messaging Bridge configuration for BridgeHubWestend -> BridgeHubRococo pub struct WithBridgeHubRococoMessageBridge; @@ -256,6 +268,18 @@ impl pallet_bridge_messages::Config for Run type OnMessagesDelivered = OnMessagesDelivered; } +/// Add support for the export and dispatch of XCM programs. +pub type XcmOverBridgeHubRococoInstance = pallet_xcm_bridge_hub::Instance1; +impl pallet_xcm_bridge_hub::Config<XcmOverBridgeHubRococoInstance> for Runtime { + type UniversalLocation = UniversalLocation; + type BridgedNetwork = RococoGlobalConsensusNetworkLocation; + type BridgeMessagesPalletInstance = WithBridgeHubRococoMessagesInstance; + type MessageExportPrice = (); + type DestinationVersion = XcmVersionOfDestAndRemoteBridge<PolkadotXcm, BridgeHubRococoLocation>; + type Lanes = ActiveLanes; + type LanesSupport = ToBridgeHubRococoXcmBlobHauler; +} + #[cfg(test)] mod tests { use super::*; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 458876ce46c1fe3d159b7ec43c22b3e303cd516a..717cde6280dbf2f7ce714a279cb1a8df0b56b526 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -33,13 +33,12 @@ mod weights; pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; -use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; +use cumulus_primitives_core::ParaId; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::Block as BlockT, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -49,12 +48,16 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use bridge_hub_common::{ + message_queue::{NarrowOriginToSibling, ParaIdToSibling}, + AggregateMessageOrigin, +}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything, TransformOrigin}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -118,6 +121,8 @@ pub type Migrations = ( pallet_collator_selection::migration::v1::MigrateToV1<Runtime>, pallet_multisig::migrations::v1::MigrateToV1<Runtime>, InitStorageVersions, + // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>, ); /// Migration to initialize storage versions for pallets added after genesis. @@ -170,10 +175,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 3, + transaction_version: 4, state_version: 1, }; @@ -210,41 +215,24 @@ parameter_types! { // Configure FRAME pallets to include in runtime.
+#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup<AccountId, ()>; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; /// The data to be stored in an account. type AccountData = pallet_balances::AccountData<Balance>; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo<Runtime>; /// Block & extrinsics weights: base values and limits. @@ -343,9 +331,8 @@ impl pallet_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_message_queue::WeightInfo<Runtime>; #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; + type MessageProcessor = + pallet_message_queue::mock_helpers::NoopMessageProcessor<AggregateMessageOrigin>; #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = xcm_builder::ProcessXcmMessage< AggregateMessageOrigin, @@ -393,12 +380,6 @@ parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo<Runtime>; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>; -} - pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -497,9 +478,8 @@ construct_runtime!( // XCM helpers. XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event<T>} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event<T>, Origin, Config<T>} = 31, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event<T>, Origin, Config<T>} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event<T>, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 33, // Handy utilities.
Utility: pallet_utility::{Pallet, Call, Event} = 40, @@ -510,6 +490,7 @@ construct_runtime!( BridgeRococoGrandpa: pallet_bridge_grandpa::<Instance1>::{Pallet, Call, Storage, Event<T>, Config<T>} = 42, BridgeRococoParachains: pallet_bridge_parachains::<Instance1>::{Pallet, Call, Storage, Event<T>} = 43, BridgeRococoMessages: pallet_bridge_messages::<Instance1>::{Pallet, Call, Storage, Event<T>, Config<T>} = 44, + XcmOverBridgeHubRococo: pallet_xcm_bridge_hub::<Instance1>::{Pallet} = 45, // Message Queue. Importantly, is registered last so that messages are processed after // the `on_initialize` hooks of bridging pallets. @@ -544,7 +525,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -772,6 +753,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -813,6 +795,41 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option<MultiLocation> { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box<dyn FnOnce()>)> { + // BH only supports teleports to system parachain. + // Relay/native token can be teleported between BH and Relay. + let native_location = Parent.into(); + let dest = Parent.into(); + pallet_xcm::benchmarking::helpers::native_teleport_as_asset_transfer::<Runtime>( + native_location, + dest + ) + } + } + use xcm::latest::prelude::*; use xcm_config::WestendLocation; @@ -907,7 +924,28 @@ impl_runtime_apis!
{ fn export_message_origin_and_destination( ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Ok((WestendLocation::get(), NetworkId::Rococo, X1(Parachain(100)))) + // save XCM version for remote bridge hub + let _ = PolkadotXcm::force_xcm_version( + RuntimeOrigin::root(), + Box::new(bridge_to_rococo_config::BridgeHubRococoLocation::get()), + XCM_VERSION, + ).map_err(|e| { + log::error!( + "Failed to dispatch `force_xcm_version({:?}, {:?}, {:?})`, error: {:?}", + RuntimeOrigin::root(), + bridge_to_rococo_config::BridgeHubRococoLocation::get(), + XCM_VERSION, + e + ); + BenchmarkError::Stop("XcmVersion was not stored!") + })?; + Ok( + ( + bridge_to_rococo_config::FromAssetHubWestendToAssetHubRococoRoute::get().location, + NetworkId::Rococo, + X1(Parachain(bridge_to_rococo_config::AssetHubRococoParaId::get().into())) + ) + ) } fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. 
- Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system.rs index 3dec4cc7f182c9aede28084122747dca63b24431..7db371d6af93068467cbeafaa454330325da7791 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system.rs @@ -152,4 +152,31 @@ impl frame_system::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. 
+ Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index 833944ebfa52eb3def20271dc129b0b314aa8198..a65ee31d3e55ff8135fdd7dec35120e0a463409b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -21,7 +21,6 @@ use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; @@ -44,7 +43,6 @@ pub mod xcm; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; pub use rocksdb_weights::constants::RocksDbWeight; use crate::Runtime; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs index b0634ff2ccf499687ed14b9a833a02ea29f38019..e87ed668dfc7acb1a92a7535d92392a272370277 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -33,9 +33,9 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev +// --chain=bridge-hub-westend-dev // --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,21 +62,17 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo Weight { // Proof Size summary in bytes: - // Measured: `268 + p * (60 ±0)` + // Measured: `231 + p * (60 ±0)` // Estimated: `51735` - // Minimum execution time: 304_726_000 picoseconds. - Weight::from_parts(16_868_060, 0) + // Minimum execution time: 303_549_000 picoseconds. 
+ Weight::from_parts(306_232_000, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_802 - .saturating_add(Weight::from_parts(55_200_017, 0).saturating_mul(p.into())) - // Standard Error: 46_745 - .saturating_add(Weight::from_parts(2_689_151, 0).saturating_mul(v.into())) + // Standard Error: 4_641 + .saturating_add(Weight::from_parts(55_196_301, 0).saturating_mul(p.into())) + // Standard Error: 35_813 + .saturating_add(Weight::from_parts(70_584, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs index 5d229497f3eb477912dec9304dbca1aed38b7881..305a8726fa1bb67da8ac239d9f2b66e795582fe5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_bridge_messages` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -33,9 +33,9 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev +// --chain=bridge-hub-westend-dev // --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,170 +48,170 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_messages`. 
pub struct WeightInfo(PhantomData); impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `575` + // Measured: `502` // Estimated: `52645` - // Minimum execution time: 42_332_000 picoseconds. - Weight::from_parts(43_375_000, 0) + // Minimum execution time: 40_646_000 picoseconds. 
+ Weight::from_parts(41_754_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `575` + // Measured: `502` // Estimated: `52645` - // Minimum execution time: 53_139_000 picoseconds. - Weight::from_parts(54_236_000, 0) + // Minimum execution time: 50_898_000 picoseconds. 
+ Weight::from_parts(52_743_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `575` + // Measured: `502` // Estimated: `52645` - // Minimum execution time: 47_466_000 picoseconds. - Weight::from_parts(48_724_000, 0) + // Minimum execution time: 45_848_000 picoseconds. 
+ Weight::from_parts(47_036_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `543` + // Measured: `433` // Estimated: `52645` - // Minimum execution time: 40_962_000 picoseconds. - Weight::from_parts(42_002_000, 0) + // Minimum execution time: 39_085_000 picoseconds. 
+ Weight::from_parts(41_623_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `543` + // Measured: `433` // Estimated: `52645` - // Minimum execution time: 71_599_000 picoseconds. - Weight::from_parts(74_307_000, 0) + // Minimum execution time: 72_754_000 picoseconds. 
+ Weight::from_parts(74_985_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `3879` - // Minimum execution time: 31_206_000 picoseconds. - Weight::from_parts(32_045_000, 0) - .saturating_add(Weight::from_parts(0, 3879)) + // Measured: `337` + // Estimated: `3802` + // Minimum execution time: 31_479_000 picoseconds. 
+ Weight::from_parts(32_280_000, 0) + .saturating_add(Weight::from_parts(0, 3802)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `3879` - // Minimum execution time: 31_211_000 picoseconds. - Weight::from_parts(32_171_000, 0) - .saturating_add(Weight::from_parts(0, 3879)) + // Measured: `337` + // Estimated: `3802` + // Minimum execution time: 31_807_000 picoseconds. 
+ Weight::from_parts(32_219_000, 0) + .saturating_add(Weight::from_parts(0, 3802)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `414` + // Measured: `337` // Estimated: `6086` - // Minimum execution time: 33_790_000 picoseconds. - Weight::from_parts(34_708_000, 0) + // Minimum execution time: 36_450_000 picoseconds. 
+ Weight::from_parts(37_288_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `BridgeWestendToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) @@ -227,18 +227,15 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `777` + // Measured: `633` // Estimated: `52645` - // Minimum execution time: 61_938_000 picoseconds. - Weight::from_parts(63_009_714, 0) + // Minimum execution time: 67_047_000 picoseconds. + Weight::from_parts(68_717_105, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 23 - .saturating_add(Weight::from_parts(6_677, 0).saturating_mul(i.into())) + // Standard Error: 138 + .saturating_add(Weight::from_parts(8_056, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs index 81cb0a66b7d277731a5d87f386301058d2634588..9819bd4065411bec6799de3f2aa41c318f53a122 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_bridge_parachains` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -33,9 +33,9 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev +// --chain=bridge-hub-westend-dev // --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,65 +48,63 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_parachains`. pub struct WeightInfo(PhantomData); impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. + /// Storage: `BridgeRococoParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. 
 	fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `367`
+		// Measured: `291`
 		// Estimated: `2543`
-		// Minimum execution time: 31_241_000 picoseconds.
-		Weight::from_parts(32_488_584, 0)
+		// Minimum execution time: 29_994_000 picoseconds.
+		Weight::from_parts(31_005_636, 0)
 			.saturating_add(Weight::from_parts(0, 2543))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
-	/// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0)
-	/// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `BridgeRococoParachains::PalletOperatingMode` (r:1 w:0)
+	/// Proof: `BridgeRococoParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
 	/// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0)
 	/// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`)
-	/// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1)
-	/// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`)
-	/// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1)
-	/// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`)
-	/// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1)
-	/// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`)
+	/// Storage: `BridgeRococoParachains::ParasInfo` (r:1 w:1)
+	/// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`)
+	/// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1)
+	/// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`)
+	/// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1)
+	/// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`)
 	fn submit_parachain_heads_with_1kb_proof() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `367`
+		// Measured: `291`
 		// Estimated: `2543`
-		// Minimum execution time: 32_962_000 picoseconds.
-		Weight::from_parts(33_658_000, 0)
+		// Minimum execution time: 31_425_000 picoseconds.
+ Weight::from_parts(32_163_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeRococoParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `291` // Estimated: `2543` - // Minimum execution time: 62_685_000 picoseconds. - Weight::from_parts(64_589_000, 0) + // Minimum execution time: 60_062_000 picoseconds. + Weight::from_parts(61_201_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs index fde670ab927ce8d64cb7d8a2146cd90954a8f903..ed96f0cd87c9e73ee8c842ab9f4f5d60bf81c2ac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_bridge_relayers` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! 
HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -33,9 +33,9 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_bridge_relayers -// --chain=bridge-hub-rococo-dev +// --chain=bridge-hub-westend-dev // --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,8 +56,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `207` // Estimated: `3593` - // Minimum execution time: 45_338_000 picoseconds. - Weight::from_parts(45_836_000, 0) + // Minimum execution time: 45_732_000 picoseconds. + Weight::from_parts(46_282_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -72,8 +72,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `61` // Estimated: `4714` - // Minimum execution time: 23_561_000 picoseconds. - Weight::from_parts(24_012_000, 0) + // Minimum execution time: 22_934_000 picoseconds. + Weight::from_parts(23_531_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +86,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `160` // Estimated: `4714` - // Minimum execution time: 25_133_000 picoseconds. - Weight::from_parts(25_728_000, 0) + // Minimum execution time: 25_187_000 picoseconds. + Weight::from_parts(25_679_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -102,8 +102,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `263` // Estimated: `4714` - // Minimum execution time: 27_356_000 picoseconds. - Weight::from_parts(27_828_000, 0) + // Minimum execution time: 27_015_000 picoseconds. + Weight::from_parts(27_608_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -114,8 +114,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `6` // Estimated: `3538` - // Minimum execution time: 2_955_000 picoseconds. - Weight::from_parts(3_084_000, 0) + // Minimum execution time: 5_207_000 picoseconds. 
+ Weight::from_parts(5_394_000, 0) .saturating_add(Weight::from_parts(0, 3538)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs index 9cbfa6ce80e3e4b45338786c3705500bf27a220f..9dcee77082b99f586707a77a540cf6b13bd2be16 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs @@ -124,7 +124,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +178,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index 9f17d327024c4f792d0f61d4001067fece87f7f5..83e4260e77198355d23ea0c38481d7b8e68267c7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -62,24 +62,39 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 29_724_000 picoseconds. - Weight::from_parts(30_440_000, 0) - .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 23_219_000 picoseconds. 
+ Weight::from_parts(23_818_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 26_779_000 picoseconds. - Weight::from_parts(27_249_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `70` + // Estimated: `3593` + // Minimum execution time: 90_120_000 picoseconds. 
+ Weight::from_parts(92_545_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -91,6 +106,32 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3593` + // Minimum execution time: 91_339_000 picoseconds. + Weight::from_parts(93_204_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { @@ -107,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_170_000 picoseconds. - Weight::from_parts(9_629_000, 0) + // Minimum execution time: 6_976_000 picoseconds. + Weight::from_parts(7_284_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_769_000 picoseconds. - Weight::from_parts(2_933_000, 0) + // Minimum execution time: 2_044_000 picoseconds. 
+		Weight::from_parts(2_223_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -127,6 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo {
 	/// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1)
 	/// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
@@ -141,16 +184,18 @@ impl pallet_xcm::WeightInfo for WeightInfo {
 	/// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn force_subscribe_version_notify() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `75`
-		// Estimated: `3540`
-		// Minimum execution time: 34_547_000 picoseconds.
-		Weight::from_parts(35_653_000, 0)
-			.saturating_add(Weight::from_parts(0, 3540))
-			.saturating_add(T::DbWeight::get().reads(7))
+		// Measured: `38`
+		// Estimated: `3503`
+		// Minimum execution time: 27_778_000 picoseconds.
+		Weight::from_parts(28_318_000, 0)
+			.saturating_add(Weight::from_parts(0, 3503))
+			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(5))
 	}
 	/// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1)
 	/// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
@@ -165,12 +210,12 @@ impl pallet_xcm::WeightInfo for WeightInfo {
 	/// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn force_unsubscribe_version_notify() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `292`
-		// Estimated: `3757`
-		// Minimum execution time: 36_274_000 picoseconds.
-		Weight::from_parts(37_281_000, 0)
-			.saturating_add(Weight::from_parts(0, 3757))
-			.saturating_add(T::DbWeight::get().reads(6))
+		// Measured: `255`
+		// Estimated: `3720`
+		// Minimum execution time: 30_446_000 picoseconds.
+		Weight::from_parts(31_925_000, 0)
+			.saturating_add(Weight::from_parts(0, 3720))
+			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
 	/// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1)
@@ -179,8 +224,8 @@ impl pallet_xcm::WeightInfo for WeightInfo {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_749_000 picoseconds.
-		Weight::from_parts(2_917_000, 0)
+		// Minimum execution time: 2_037_000 picoseconds.
+ Weight::from_parts(2_211_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -188,11 +233,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `187` - // Estimated: `11077` - // Minimum execution time: 17_649_000 picoseconds. - Weight::from_parts(17_964_000, 0) - .saturating_add(Weight::from_parts(0, 11077)) + // Measured: `95` + // Estimated: `10985` + // Minimum execution time: 15_620_000 picoseconds. + Weight::from_parts(15_984_000, 0) + .saturating_add(Weight::from_parts(0, 10985)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -200,11 +245,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `191` - // Estimated: `11081` - // Minimum execution time: 17_551_000 picoseconds. - Weight::from_parts(18_176_000, 0) - .saturating_add(Weight::from_parts(0, 11081)) + // Measured: `99` + // Estimated: `10989` + // Minimum execution time: 15_689_000 picoseconds. + Weight::from_parts(16_093_000, 0) + .saturating_add(Weight::from_parts(0, 10989)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -212,15 +257,17 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `198` - // Estimated: `13563` - // Minimum execution time: 19_261_000 picoseconds. - Weight::from_parts(19_714_000, 0) - .saturating_add(Weight::from_parts(0, 13563)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 16_946_000 picoseconds. + Weight::from_parts(17_192_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -233,39 +280,41 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `6082` - // Minimum execution time: 31_630_000 picoseconds. - Weight::from_parts(32_340_000, 0) - .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `106` + // Estimated: `6046` + // Minimum execution time: 27_164_000 picoseconds. 
+ Weight::from_parts(27_760_000, 0) + .saturating_add(Weight::from_parts(0, 6046)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `8587` - // Minimum execution time: 9_218_000 picoseconds. - Weight::from_parts(9_558_000, 0) - .saturating_add(Weight::from_parts(0, 8587)) + // Measured: `136` + // Estimated: `8551` + // Minimum execution time: 8_689_000 picoseconds. + Weight::from_parts(8_874_000, 0) + .saturating_add(Weight::from_parts(0, 8551)) .saturating_add(T::DbWeight::get().reads(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `198` - // Estimated: `11088` - // Minimum execution time: 18_133_000 picoseconds. - Weight::from_parts(18_663_000, 0) - .saturating_add(Weight::from_parts(0, 11088)) + // Measured: `106` + // Estimated: `10996` + // Minimum execution time: 15_944_000 picoseconds. + Weight::from_parts(16_381_000, 0) + .saturating_add(Weight::from_parts(0, 10996)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -278,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `204` - // Estimated: `11094` - // Minimum execution time: 38_878_000 picoseconds. - Weight::from_parts(39_779_000, 0) - .saturating_add(Weight::from_parts(0, 11094)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `112` + // Estimated: `11002` + // Minimum execution time: 33_826_000 picoseconds. + Weight::from_parts(34_784_000, 0) + .saturating_add(Weight::from_parts(0, 11002)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -294,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_142_000 picoseconds. - Weight::from_parts(4_308_000, 0) + // Minimum execution time: 4_257_000 picoseconds. + Weight::from_parts(4_383_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -306,11 +355,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_814_000 picoseconds. 
- Weight::from_parts(26_213_000, 0) + // Minimum execution time: 26_924_000 picoseconds. + Weight::from_parts(27_455_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } - diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 7c686190208fd2fec4a00b7b34ef25038b4815b1..9281a880c7e1266d65d29436ca88e51e896c0363 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -33,10 +33,10 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::generic -// --chain=bridge-hub-rococo-dev +// --chain=bridge-hub-westend-dev // --header=./cumulus/file_header.txt // --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/ +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,8 +48,6 @@ use sp_std::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -68,81 +66,79 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `6196` - // Minimum execution time: 62_732_000 picoseconds. - Weight::from_parts(64_581_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 61_577_000 picoseconds. + Weight::from_parts(63_216_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_987_000 picoseconds. 
- Weight::from_parts(2_107_000, 0) + // Minimum execution time: 2_019_000 picoseconds. + Weight::from_parts(2_146_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3568` - // Minimum execution time: 8_098_000 picoseconds. - Weight::from_parts(8_564_000, 3568) + // Measured: `32` + // Estimated: `3497` + // Minimum execution time: 7_473_000 picoseconds. + Weight::from_parts(7_784_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_539_000 picoseconds. - Weight::from_parts(9_085_000, 0) + // Minimum execution time: 8_385_000 picoseconds. + Weight::from_parts(8_768_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_205_000 picoseconds. - Weight::from_parts(2_369_000, 0) + // Minimum execution time: 2_181_000 picoseconds. + Weight::from_parts(2_304_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_828_000 picoseconds. - Weight::from_parts(1_994_000, 0) + // Minimum execution time: 1_858_000 picoseconds. + Weight::from_parts(1_919_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(1_946_000, 0) + // Minimum execution time: 1_855_000 picoseconds. + Weight::from_parts(1_979_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_842_000 picoseconds. - Weight::from_parts(1_949_000, 0) + // Minimum execution time: 1_823_000 picoseconds. + Weight::from_parts(1_890_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_460_000 picoseconds. - Weight::from_parts(2_593_000, 0) + // Minimum execution time: 2_407_000 picoseconds. + Weight::from_parts(2_507_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 1_838_000 picoseconds. + Weight::from_parts(1_894_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -161,21 +157,21 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `6196` - // Minimum execution time: 56_813_000 picoseconds. - Weight::from_parts(57_728_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 54_847_000 picoseconds. 
+ Weight::from_parts(55_742_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `160` - // Estimated: `3625` - // Minimum execution time: 11_364_000 picoseconds. - Weight::from_parts(11_872_000, 3625) + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 10_614_000 picoseconds. + Weight::from_parts(11_344_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -183,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_821_000 picoseconds. - Weight::from_parts(1_936_000, 0) + // Minimum execution time: 1_826_000 picoseconds. + Weight::from_parts(1_899_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -202,10 +198,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 23_081_000 picoseconds. - Weight::from_parts(23_512_000, 3574) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 22_312_000 picoseconds. + Weight::from_parts(22_607_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -215,47 +211,45 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_747_000 picoseconds. - Weight::from_parts(4_068_000, 0) + // Minimum execution time: 3_728_000 picoseconds. + Weight::from_parts(3_914_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_045_000 picoseconds. - Weight::from_parts(3_208_000, 0) + // Minimum execution time: 3_054_000 picoseconds. + Weight::from_parts(3_140_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_962_000 picoseconds. - Weight::from_parts(2_284_000, 0) + // Minimum execution time: 1_996_000 picoseconds. + Weight::from_parts(2_148_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_951_000 picoseconds. - Weight::from_parts(2_026_000, 0) + // Minimum execution time: 2_008_000 picoseconds. + Weight::from_parts(2_077_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 1_837_000 picoseconds. - Weight::from_parts(2_084_000, 0) + Weight::from_parts(1_913_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_042_000 picoseconds. - Weight::from_parts(2_145_000, 0) + // Minimum execution time: 2_052_000 picoseconds. 
+ Weight::from_parts(2_120_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -274,22 +268,20 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `6196` - // Minimum execution time: 61_350_000 picoseconds. - Weight::from_parts(62_440_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 58_725_000 picoseconds. + Weight::from_parts(60_271_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_993_000 picoseconds. - Weight::from_parts(5_309_000, 0) + // Minimum execution time: 4_570_000 picoseconds. + Weight::from_parts(4_707_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -308,70 +300,70 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `6196` - // Minimum execution time: 57_133_000 picoseconds. - Weight::from_parts(58_100_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 54_903_000 picoseconds. + Weight::from_parts(55_711_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_899_000 picoseconds. - Weight::from_parts(2_153_000, 0) + // Minimum execution time: 1_872_000 picoseconds. + Weight::from_parts(1_938_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_880_000 picoseconds. - Weight::from_parts(1_960_000, 0) + // Minimum execution time: 1_836_000 picoseconds. + Weight::from_parts(1_903_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_825_000 picoseconds. - Weight::from_parts(1_960_000, 0) + // Minimum execution time: 1_847_000 picoseconds. 
+ Weight::from_parts(1_900_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - // Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - // Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (r:1 w:0) - // Proof: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeRococoToWococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) + // Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + // Storage: `BridgeRococoMessages::OutboundLanes` (r:1 w:1) + // Proof: `BridgeRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + // Storage: `BridgeRococoMessages::OutboundLanesCongestedSignals` (r:1 w:0) + // Proof: `BridgeRococoMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + // Storage: `BridgeRococoMessages::OutboundMessages` (r:0 w:1) + // Proof: `BridgeRococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `3604` - // Minimum execution time: 28_419_000 picoseconds. - Weight::from_parts(29_387_791, 3604) - // Standard Error: 552 - .saturating_add(Weight::from_parts(316_277, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `225` + // Estimated: `6165` + // Minimum execution time: 41_750_000 picoseconds. + Weight::from_parts(43_496_915, 6165) + // Standard Error: 623 + .saturating_add(Weight::from_parts(457_907, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_903_000 picoseconds. - Weight::from_parts(2_023_000, 0) + // Minimum execution time: 1_826_000 picoseconds. + Weight::from_parts(1_911_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_963_000 picoseconds. - Weight::from_parts(2_143_000, 0) + // Minimum execution time: 1_967_000 picoseconds. 
+ Weight::from_parts(2_096_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index a6abca42215ac4516c2ba1ea934a2aca5382ada5..397019190f3f167b9cbf68cb7886656b4db11c9e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -28,19 +28,23 @@ use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; use parachains_common::{ impls::ToStakingPot, - xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains}, + xcm_config::{ + AllSiblingSystemParachains, ConcreteAssetFromSystem, ParentRelayOrSiblingParachains, + RelayOrOtherSystemParachains, + }, TREASURY_PALLET_ID, }; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; -use westend_runtime_constants::system_parachain; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, + DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, @@ -72,6 +76,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. +#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, @@ -114,10 +119,6 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; } /// A call filter for the XCM Transact instruction. This is a temporary measure until we properly @@ -150,30 +151,39 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::authorize_upgrade { .. } | + frame_system::Call::authorize_upgrade_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) 
| - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::CollatorSelection(..) | + RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< Runtime, crate::bridge_to_rococo_config::BridgeGrandpaRococoInstance, - >::initialize { .. }) + >::initialize { .. }) | + RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< + Runtime, + crate::bridge_to_rococo_config::BridgeGrandpaRococoInstance, + >::set_operating_mode { .. }) | + RuntimeCall::BridgeRococoParachains(pallet_bridge_parachains::Call::< + Runtime, + crate::bridge_to_rococo_config::BridgeParachainRococoInstance, + >::set_operating_mode { .. }) | + RuntimeCall::BridgeRococoMessages(pallet_bridge_messages::Call::< + Runtime, + crate::bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, + >::set_operating_mode { .. }) ) } } @@ -198,7 +208,7 @@ pub type Barrier = TrailingSetTopicAsId< Equals, )>, // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, + AllowSubscriptionsFrom, ), UniversalLocation, ConstU32<8>, @@ -207,24 +217,13 @@ pub type Barrier = TrailingSetTopicAsId< >, >; -match_types! { - pub type SystemParachains: impl Contains = { - MultiLocation { - parents: 1, - interior: X1(Parachain( - system_parachain::ASSET_HUB_ID | - system_parachain::BRIDGE_HUB_ID | - system_parachain::COLLECTIVES_ID - )), - } - }; -} - /// Locations that will not be charged fees in the executor, /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. -pub type WaivedLocations = - (RelayOrOtherSystemParachains, Equals); +pub type WaivedLocations = ( + RelayOrOtherSystemParachains, + Equals, +); /// Cases where a remote origin is accepted as trusted Teleporter for a given asset: /// - NativeToken with the parent Relay Chain and sibling parachains. @@ -284,11 +283,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmRouter = XcmRouter; @@ -316,8 +310,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 16dcd10a2ca43bb074e5bedaf07dd8f33b6da256..0e58b7b408ebbb6b2e6261e343e9a56c85962ea7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -18,21 +18,21 @@ use bp_polkadot_core::Signature; use bridge_common_config::{DeliveryRewardInBalance, RequiredStakeForStakeAndSlash}; +use bridge_hub_test_utils::test_cases::from_parachain; use bridge_hub_westend_runtime::{ bridge_common_config, bridge_to_rococo_config, xcm_config::{RelayNetwork, WestendLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, - TransactionPayment, UncheckedExtrinsic, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, + SignedExtra, TransactionPayment, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ - BridgeGrandpaRococoInstance, BridgeHubRococoChainId, BridgeParachainRococoInstance, - WithBridgeHubRococoMessageBridge, WithBridgeHubRococoMessagesInstance, - XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, + BridgeGrandpaRococoInstance, BridgeHubRococoChainId, BridgeHubRococoLocation, + BridgeParachainRococoInstance, WithBridgeHubRococoMessageBridge, + WithBridgeHubRococoMessagesInstance, XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, }; use codec::{Decode, Encode}; -use frame_support::{dispatch::GetDispatchInfo, parameter_types}; -use frame_system::pallet_prelude::HeaderFor; +use frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8}; use parachains_common::{westend::fee::WeightToFee, AccountId, AuraId, Balance}; use sp_keyring::AccountKeyring::Alice; use sp_runtime::{ @@ -44,6 +44,16 @@ use xcm::latest::prelude::*; // Para id of sibling chain used in tests. pub const SIBLING_PARACHAIN_ID: u32 = 1000; +// Runtime from tests PoV +type RuntimeTestsAdapter = from_parachain::WithRemoteParachainHelperAdapter< + Runtime, + AllPalletsWithoutSystem, + BridgeGrandpaRococoInstance, + BridgeParachainRococoInstance, + WithBridgeHubRococoMessagesInstance, + WithBridgeHubRococoMessageBridge, +>; + parameter_types! 
{ pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } @@ -52,13 +62,16 @@ fn construct_extrinsic( sender: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> UncheckedExtrinsic { + let account_id = AccountId32::from(sender.public()); let extra: SignedExtra = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), frame_system::CheckGenesis::::new(), frame_system::CheckEra::::from(Era::immortal()), - frame_system::CheckNonce::::from(0), + frame_system::CheckNonce::::from( + frame_system::Pallet::::account(&account_id).nonce, + ), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), @@ -68,7 +81,7 @@ fn construct_extrinsic( let signature = payload.using_encoded(|e| sender.sign(e)); UncheckedExtrinsic::new_signed( call, - AccountId32::from(sender.public()).into(), + account_id.into(), Signature::Sr25519(signature.clone()), extra, ) @@ -76,10 +89,9 @@ fn construct_extrinsic( fn construct_and_apply_extrinsic( relayer_at_target: sp_keyring::AccountKeyring, - batch: pallet_utility::Call, + call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { - let batch_call = RuntimeCall::Utility(batch); - let xt = construct_extrinsic(relayer_at_target, batch_call); + let xt = construct_extrinsic(relayer_at_target, call); let r = Executive::apply_extrinsic(xt); r.unwrap() } @@ -91,10 +103,6 @@ fn construct_and_estimate_extrinsic_fee(batch: pallet_utility::Call) -> TransactionPayment::compute_fee(xt.encoded_size() as _, &batch_info, 0) } -fn executive_init_block(header: &HeaderFor) { - Executive::initialize_block(header) -} - fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys { bridge_hub_test_utils::CollatorSessionKeys::new( AccountId::from(Alice), @@ -118,12 +126,6 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID ); @@ -132,11 +134,31 @@ fn initialize_bridge_by_governance_works() { bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< Runtime, BridgeGrandpaRococoInstance, - >( - collator_session_keys(), - bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeRococoGrandpa(call).encode()), - ) + >(collator_session_keys(), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID) +} + +#[test] +fn change_bridge_grandpa_pallet_mode_by_governance_works() { + bridge_hub_test_utils::test_cases::change_bridge_grandpa_pallet_mode_by_governance_works::< + Runtime, + BridgeGrandpaRococoInstance, + >(collator_session_keys(), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID) +} + +#[test] +fn change_bridge_parachains_pallet_mode_by_governance_works() { + bridge_hub_test_utils::test_cases::change_bridge_parachains_pallet_mode_by_governance_works::< + Runtime, + BridgeParachainRococoInstance, + >(collator_session_keys(), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID) +} + +#[test] +fn change_bridge_messages_pallet_mode_by_governance_works() { + bridge_hub_test_utils::test_cases::change_bridge_messages_pallet_mode_by_governance_works::< + Runtime, + WithBridgeHubRococoMessagesInstance, + >(collator_session_keys(), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID) } 
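The three `change_bridge_*_pallet_mode_by_governance_works` tests above exercise the same `set_operating_mode` calls that this patch allow-lists in the runtime's XCM call filter earlier in the diff. As a rough sketch only (not taken from this patch; the shared test helpers' exact dispatch path is not shown here, and `halt_call` is a name invented for illustration), the call under test looks roughly like this when dispatched directly from the Root origin inside test externalities:

use bp_runtime::BasicOperatingMode;
use sp_runtime::traits::Dispatchable;

// Build the `RuntimeCall` that halts the bridged-GRANDPA pallet instance.
let halt_call = RuntimeCall::BridgeRococoGrandpa(
	pallet_bridge_grandpa::Call::<Runtime, BridgeGrandpaRococoInstance>::set_operating_mode {
		operating_mode: BasicOperatingMode::Halted,
	},
);
// The bridge pallets only accept this from the Root origin or the configured pallet owner.
assert!(halt_call.dispatch(RuntimeOrigin::root()).is_ok());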
#[test] @@ -185,12 +207,12 @@ fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { _ => None, } }), - || ExportMessage { network: Rococo, destination: X1(Parachain(4321)), xcm: Xcm(vec![]) }, + || ExportMessage { network: Rococo, destination: X1(Parachain(bridge_to_rococo_config::AssetHubRococoParaId::get().into())), xcm: Xcm(vec![]) }, XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, Some((WestendLocation::get(), ExistentialDeposit::get()).into()), // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` Some((WestendLocation::get(), bp_bridge_hub_westend::BridgeHubWestendBaseXcmFeeInWnds::get()).into()), - || (), + || PolkadotXcm::force_xcm_version(RuntimeOrigin::root(), Box::new(BridgeHubRococoLocation::get()), XCM_VERSION).expect("version saved!"), ) } @@ -204,6 +226,7 @@ fn message_dispatch_routing_works() { WithBridgeHubRococoMessagesInstance, RelayNetwork, bridge_to_rococo_config::RococoGlobalConsensusNetwork, + ConstU8<2>, >( collator_session_keys(), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, @@ -227,38 +250,22 @@ fn message_dispatch_routing_works() { #[test] fn relayed_incoming_message_works() { - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( + from_parachain::relayed_incoming_message_works::( collator_session_keys(), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, + BridgeHubRococoChainId::get(), SIBLING_PARACHAIN_ID, Westend, XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, || (), + construct_and_apply_extrinsic, ) } #[test] pub fn complex_relay_extrinsic_works() { - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( + from_parachain::complex_relay_extrinsic_works::( collator_session_keys(), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, @@ -266,10 +273,8 @@ pub fn complex_relay_extrinsic_works() { BridgeHubRococoChainId::get(), Westend, XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, - ExistentialDeposit::get(), - executive_init_block, - construct_and_apply_extrinsic, || (), + construct_and_apply_extrinsic, ); } @@ -293,16 +298,9 @@ pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { #[test] pub fn can_calculate_fee_for_complex_message_delivery_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_delivery_transaction::< - Runtime, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); + let estimated = from_parachain::can_calculate_fee_for_complex_message_delivery_transaction::< + RuntimeTestsAdapter, + >(collator_session_keys(), construct_and_estimate_extrinsic_fee); // check if estimated value is sane let max_expected = bp_bridge_hub_westend::BridgeHubWestendBaseDeliveryFeeInWnds::get(); @@ -316,16 +314,9 @@ pub fn 
can_calculate_fee_for_complex_message_delivery_transaction() { #[test] pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_confirmation_transaction::< - Runtime, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); + let estimated = from_parachain::can_calculate_fee_for_complex_message_confirmation_transaction::< + RuntimeTestsAdapter, + >(collator_session_keys(), construct_and_estimate_extrinsic_fee); // check if estimated value is sane let max_expected = bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds::get(); diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..0d75bb2213f8953af88ae20dadbba0858c98cf13 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "bridge-hub-common" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +description = "Bridge hub common utilities" +license = "Apache-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +snowbridge-core = { path = "../../../../../bridges/snowbridge/parachain/primitives/core", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "cumulus-primitives-core/std", + "frame-support/std", + "pallet-message-queue/std", + "scale-info/std", + "snowbridge-core/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", + "xcm/std", +] + +runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/digest_item.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/digest_item.rs new file mode 100644 index 0000000000000000000000000000000000000000..bdfcaedbe82daf89d015614c4c7aa0f4717efad5 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/digest_item.rs @@ -0,0 +1,34 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//! Custom digest items
+
+use codec::{Decode, Encode};
+use sp_core::{RuntimeDebug, H256};
+use sp_runtime::generic::DigestItem;
+
+/// Custom header digest items, inserted as DigestItem::Other
+#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug)]
+pub enum CustomDigestItem {
+	#[codec(index = 0)]
+	/// Merkle root of outbound Snowbridge messages.
+	Snowbridge(H256),
+}
+
+/// Convert custom application digest item into a concrete digest item
+impl From<CustomDigestItem> for DigestItem {
+	fn from(val: CustomDigestItem) -> Self {
+		DigestItem::Other(val.encode())
+	}
+}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/build.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/lib.rs
similarity index 76%
rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/build.rs
rename to cumulus/parachains/runtimes/bridge-hubs/common/src/lib.rs
index 60f8a125129ff1344a1799246e931acdb1d139d5..aac6eb036526af4414b6f46b9cf8874a899072bb 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/build.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/lib.rs
@@ -12,15 +12,10 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#![cfg_attr(not(feature = "std"), no_std)]
 
-#[cfg(feature = "std")]
-fn main() {
-	substrate_wasm_builder::WasmBuilder::new()
-		.with_current_project()
-		.export_heap_base()
-		.import_memory()
-		.build()
-}
+pub mod digest_item;
+pub mod message_queue;
 
-#[cfg(not(feature = "std"))]
-fn main() {}
+pub use digest_item::CustomDigestItem;
+pub use message_queue::{AggregateMessageOrigin, BridgeHubMessageRouter};
diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs
new file mode 100644
index 0000000000000000000000000000000000000000..651537ff8b719c7b47b1b2e85ef924c6bac49904
--- /dev/null
+++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs
@@ -0,0 +1,146 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//!
Runtime configuration for MessageQueue pallet +use codec::{Decode, Encode, MaxEncodedLen}; +use cumulus_primitives_core::{AggregateMessageOrigin as CumulusAggregateMessageOrigin, ParaId}; +use frame_support::{ + traits::{ProcessMessage, ProcessMessageError, QueueFootprint, QueuePausedQuery}, + weights::WeightMeter, +}; +use pallet_message_queue::OnQueueChanged; +use scale_info::TypeInfo; +use snowbridge_core::ChannelId; +use sp_std::{marker::PhantomData, prelude::*}; +use xcm::v3::{Junction, MultiLocation}; + +/// The aggregate origin of an inbound message. +/// This is specialized for BridgeHub, as the snowbridge-outbound-queue pallet is also using +/// the shared MessageQueue pallet. +#[derive(Encode, Decode, Copy, MaxEncodedLen, Clone, Eq, PartialEq, TypeInfo, Debug)] +pub enum AggregateMessageOrigin { + /// The message came from the para-chain itself. + Here, + /// The message came from the relay-chain. + /// + /// This is used by the DMP queue. + Parent, + /// The message came from a sibling para-chain. + /// + /// This is used by the HRMP queue. + Sibling(ParaId), + /// The message came from a snowbridge channel. + /// + /// This is used by Snowbridge inbound queue. + Snowbridge(ChannelId), +} + +impl From for MultiLocation { + fn from(origin: AggregateMessageOrigin) -> Self { + use AggregateMessageOrigin::*; + match origin { + Here => MultiLocation::here(), + Parent => MultiLocation::parent(), + Sibling(id) => MultiLocation::new(1, Junction::Parachain(id.into())), + // NOTE: We don't need this conversion for Snowbridge. However we have to + // implement it anyway as xcm_builder::ProcessXcmMessage requires it. + Snowbridge(_) => MultiLocation::default(), + } + } +} + +impl From for AggregateMessageOrigin { + fn from(origin: CumulusAggregateMessageOrigin) -> Self { + match origin { + CumulusAggregateMessageOrigin::Here => Self::Here, + CumulusAggregateMessageOrigin::Parent => Self::Parent, + CumulusAggregateMessageOrigin::Sibling(id) => Self::Sibling(id), + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +impl From for AggregateMessageOrigin { + fn from(x: u32) -> Self { + match x { + 0 => Self::Here, + 1 => Self::Parent, + p => Self::Sibling(ParaId::from(p)), + } + } +} + +/// Routes messages to either the XCMP or Snowbridge processor. +pub struct BridgeHubMessageRouter( + PhantomData<(XcmpProcessor, SnowbridgeProcessor)>, +) +where + XcmpProcessor: ProcessMessage, + SnowbridgeProcessor: ProcessMessage; + +impl ProcessMessage + for BridgeHubMessageRouter +where + XcmpProcessor: ProcessMessage, + SnowbridgeProcessor: ProcessMessage, +{ + type Origin = AggregateMessageOrigin; + + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + id: &mut [u8; 32], + ) -> Result { + use AggregateMessageOrigin::*; + match origin { + Here | Parent | Sibling(_) => + XcmpProcessor::process_message(message, origin, meter, id), + Snowbridge(_) => SnowbridgeProcessor::process_message(message, origin, meter, id), + } + } +} + +/// Narrow the scope of the `Inner` query from `AggregateMessageOrigin` to `ParaId`. +/// +/// All non-`Sibling` variants will be ignored. 
+pub struct NarrowOriginToSibling(PhantomData); +impl> QueuePausedQuery + for NarrowOriginToSibling +{ + fn is_paused(origin: &AggregateMessageOrigin) -> bool { + match origin { + AggregateMessageOrigin::Sibling(id) => Inner::is_paused(id), + _ => false, + } + } +} + +impl> OnQueueChanged + for NarrowOriginToSibling +{ + fn on_queue_changed(origin: AggregateMessageOrigin, fp: QueueFootprint) { + if let AggregateMessageOrigin::Sibling(id) = origin { + Inner::on_queue_changed(id, fp) + } + } +} + +/// Convert a sibling `ParaId` to an `AggregateMessageOrigin`. +pub struct ParaIdToSibling; +impl sp_runtime::traits::Convert for ParaIdToSibling { + fn convert(para_id: ParaId) -> AggregateMessageOrigin { + AggregateMessageOrigin::Sibling(para_id) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 18181ed3e05db28bd6bce31671555afa4f140e92..3049182cd4e269bba76786c3d18b355103f61d88 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -6,27 +6,32 @@ edition.workspace = true description = "Utils for BridgeHub testing" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +impl-trait-for-tuples = "0.2" log = { version = "0.4.20", default-features = false } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } sp-tracing = { path = "../../../../../substrate/primitives/tracing" } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } 
# Cumulus asset-test-utils = { path = "../../assets/test-utils" } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -34,15 +39,13 @@ parachains-common = { path = "../../../common", default-features = false } parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Bridges -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } @@ -57,11 +60,9 @@ pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", def bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "asset-test-utils/std", - "bp-bridge-hub-rococo/std", - "bp-bridge-hub-wococo/std", "bp-header-chain/std", "bp-messages/std", "bp-parachains/std", @@ -94,6 +95,7 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", + "sp-std/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs index 26eb09b73fa6c9755ee613ae6bf4ff776c541ec6..445f001f1a4c111920fc513be95f88d73a25a634 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs @@ -17,5 +17,7 @@ //! Module contains predefined test-case scenarios for "BridgeHub" `Runtime`s. 
pub mod test_cases; +pub mod test_data; + pub use bp_test_utils::test_header; pub use parachains_runtimes_test_utils::*; diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs deleted file mode 100644 index b421eea6bcf6f286c38d52be5098c746a17f06d1..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs +++ /dev/null @@ -1,1565 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Module contains predefined test-case scenarios for `Runtime` with bridging capabilities. - -use bp_messages::{ - source_chain::TargetHeaderChain, - target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch, SourceHeaderChain}, - LaneId, MessageKey, OutboundLaneData, UnrewardedRelayersState, Weight, -}; -use bp_parachains::{BestParaHeadHash, ParaInfo}; -use bp_polkadot_core::parachains::{ParaHash, ParaId}; -use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{HeaderOf, Parachain, StorageProofSize, UnderlyingChainOf}; -use bp_test_utils::{make_default_justification, prepare_parachain_heads_proof}; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, - }, - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, - prepare_messages_storage_proof, - }, - messages_xcm_extension::{XcmAsPlainPayload, XcmBlobMessageDispatchResult}, -}; -use codec::Encode; -use frame_support::{ - assert_ok, - traits::{Get, OnFinalize, OnInitialize, OriginTrait, PalletInfoAccess}, -}; -use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; -use pallet_bridge_grandpa::BridgedHeader; -use parachains_common::AccountId; -use parachains_runtimes_test_utils::{ - mock_open_hrmp_channel, AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder, ValidatorIdOf, - XcmReceivedFrom, -}; -use sp_core::H256; -use sp_keyring::AccountKeyring::*; -use sp_runtime::{ - traits::{Header as HeaderT, Zero}, - AccountId32, -}; -use xcm::latest::prelude::*; -use xcm_builder::DispatchBlobError; -use xcm_executor::{ - traits::{TransactAsset, WeightBounds}, - XcmExecutor, -}; - -// Re-export test_case from assets -pub use asset_test_utils::include_teleports_for_native_asset_works; - -type RuntimeHelper = - parachains_runtimes_test_utils::RuntimeHelper; - -// Re-export test_case from `parachains-runtimes-test-utils` -pub use parachains_runtimes_test_utils::test_cases::change_storage_constant_by_governance_works; - -/// Test-case makes sure that `Runtime` can process bridging initialize via governance-like call -pub fn initialize_bridge_by_governance_works( - collator_session_key: CollatorSessionKeys, - runtime_para_id: u32, - runtime_call_encode: Box< - 
dyn Fn(pallet_bridge_grandpa::Call) -> Vec, - >, -) where - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + pallet_bridge_grandpa::Config, - GrandpaPalletInstance: 'static, - ValidatorIdOf: From>, -{ - ExtBuilder::::default() - .with_collators(collator_session_key.collators()) - .with_session_keys(collator_session_key.session_keys()) - .with_para_id(runtime_para_id.into()) - .with_tracing() - .build() - .execute_with(|| { - // check mode before - assert_eq!( - pallet_bridge_grandpa::PalletOperatingMode::::try_get(), - Err(()) - ); - - // encode `initialize` call - let initialize_call = runtime_call_encode(pallet_bridge_grandpa::Call::< - Runtime, - GrandpaPalletInstance, - >::initialize { - init_data: test_data::initialization_data::(12345), - }); - - // overestimate - check weight for `pallet_bridge_grandpa::Pallet::initialize()` call - let require_weight_at_most = - ::DbWeight::get().reads_writes(7, 7); - - // execute XCM with Transacts to `initialize bridge` as governance does - assert_ok!(RuntimeHelper::::execute_as_governance( - initialize_call, - require_weight_at_most - ) - .ensure_complete()); - - // check mode after - assert_eq!( - pallet_bridge_grandpa::PalletOperatingMode::::try_get(), - Ok(bp_runtime::BasicOperatingMode::Normal) - ); - }) -} - -/// Test-case makes sure that `Runtime` can handle xcm `ExportMessage`: -/// Checks if received XCM messages is correctly added to the message outbound queue for delivery. -/// For SystemParachains we expect unpaid execution. -pub fn handle_export_message_from_system_parachain_to_outbound_queue_works< - Runtime, - XcmConfig, - MessagesPalletInstance, ->( - collator_session_key: CollatorSessionKeys, - runtime_para_id: u32, - sibling_parachain_id: u32, - unwrap_pallet_bridge_messages_event: Box< - dyn Fn(Vec) -> Option>, - >, - export_message_instruction: fn() -> Instruction, - expected_lane_id: LaneId, - existential_deposit: Option, - maybe_paid_export_message: Option, - prepare_configuration: impl Fn(), -) where - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + pallet_bridge_messages::Config, - XcmConfig: xcm_executor::Config, - MessagesPalletInstance: 'static, - ValidatorIdOf: From>, -{ - assert_ne!(runtime_para_id, sibling_parachain_id); - let sibling_parachain_location = MultiLocation::new(1, Parachain(sibling_parachain_id)); - - ExtBuilder::::default() - .with_collators(collator_session_key.collators()) - .with_session_keys(collator_session_key.session_keys()) - .with_para_id(runtime_para_id.into()) - .with_tracing() - .build() - .execute_with(|| { - prepare_configuration(); - - // check queue before - assert_eq!( - pallet_bridge_messages::OutboundLanes::::try_get( - expected_lane_id - ), - Err(()) - ); - - // prepare `ExportMessage` - let xcm = if let Some(fee) = maybe_paid_export_message { - // deposit ED to origin (if needed) - if let Some(ed) = existential_deposit { - XcmConfig::AssetTransactor::deposit_asset( - &ed, - &sibling_parachain_location, - Some(&XcmContext::with_message_id([0; 32])), - ) - .expect("deposited ed"); - } - // deposit fee to origin - XcmConfig::AssetTransactor::deposit_asset( - &fee, - &sibling_parachain_location, - Some(&XcmContext::with_message_id([0; 32])), - ) - 
.expect("deposited fee"); - - Xcm(vec![ - WithdrawAsset(MultiAssets::from(vec![fee.clone()])), - BuyExecution { fees: fee, weight_limit: Unlimited }, - export_message_instruction(), - ]) - } else { - Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - export_message_instruction(), - ]) - }; - - // execute XCM - let hash = xcm.using_encoded(sp_io::hashing::blake2_256); - assert_ok!(XcmExecutor::::execute_xcm( - sibling_parachain_location, - xcm, - hash, - RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), - ) - .ensure_complete()); - - // check queue after - assert_eq!( - pallet_bridge_messages::OutboundLanes::::try_get( - expected_lane_id - ), - Ok(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 0, - latest_generated_nonce: 1, - }) - ); - - // check events - let mut events = >::events() - .into_iter() - .filter_map(|e| unwrap_pallet_bridge_messages_event(e.event.encode())); - assert!( - events.any(|e| matches!(e, pallet_bridge_messages::Event::MessageAccepted { .. })) - ); - }) -} - -/// Test-case makes sure that Runtime can route XCM messages received in inbound queue, -/// We just test here `MessageDispatch` configuration. -/// We expect that runtime can route messages: -/// 1. to Parent (relay chain) -/// 2. to Sibling parachain -pub fn message_dispatch_routing_works< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - HrmpChannelOpener, - MessagesPalletInstance, - RuntimeNetwork, - BridgedNetwork, ->( - collator_session_key: CollatorSessionKeys, - runtime_para_id: u32, - sibling_parachain_id: u32, - unwrap_cumulus_pallet_parachain_system_event: Box< - dyn Fn(Vec) -> Option>, - >, - unwrap_cumulus_pallet_xcmp_queue_event: Box< - dyn Fn(Vec) -> Option>, - >, - expected_lane_id: LaneId, - prepare_configuration: impl Fn(), -) where - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config - + pallet_bridge_messages::Config, - AllPalletsWithoutSystem: - OnInitialize> + OnFinalize>, - ::AccountId: - Into<<::RuntimeOrigin as OriginTrait>::AccountId>, - XcmConfig: xcm_executor::Config, - MessagesPalletInstance: 'static, - ValidatorIdOf: From>, - ::AccountId: From, - HrmpChannelOpener: frame_support::inherent::ProvideInherent< - Call = cumulus_pallet_parachain_system::Call, - >, - // MessageDispatcher: MessageDispatch, DispatchLevelResult = - // XcmBlobMessageDispatchResult, DispatchPayload = XcmAsPlainPayload>, - RuntimeNetwork: Get, - BridgedNetwork: Get, -{ - assert_ne!(runtime_para_id, sibling_parachain_id); - - ExtBuilder::::default() - .with_collators(collator_session_key.collators()) - .with_session_keys(collator_session_key.session_keys()) - .with_safe_xcm_version(XCM_VERSION) - .with_para_id(runtime_para_id.into()) - .with_tracing() - .build() - .execute_with(|| { - prepare_configuration(); - - let mut alice = [0u8; 32]; - alice[0] = 1; - - let included_head = RuntimeHelper::::run_to_block( - 2, - AccountId::from(alice).into(), - ); - // 1. 
this message is sent from other global consensus with destination of this Runtime relay chain (UMP) - let bridging_message = - test_data::simulate_message_exporter_on_bridged_chain::( - (RuntimeNetwork::get(), Here) - ); - let result = <>::MessageDispatch>::dispatch( - test_data::dispatch_message(expected_lane_id, 1, bridging_message) - ); - assert_eq!(format!("{:?}", result.dispatch_level_result), format!("{:?}", XcmBlobMessageDispatchResult::Dispatched)); - - // check events - UpwardMessageSent - let mut events = >::events() - .into_iter() - .filter_map(|e| unwrap_cumulus_pallet_parachain_system_event(e.event.encode())); - assert!( - events.any(|e| matches!(e, cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. })) - ); - - // 2. this message is sent from other global consensus with destination of this Runtime sibling parachain (HRMP) - let bridging_message = - test_data::simulate_message_exporter_on_bridged_chain::( - (RuntimeNetwork::get(), X1(Parachain(sibling_parachain_id))), - ); - - // 2.1. WITHOUT opened hrmp channel -> RoutingError - let result = - <>::MessageDispatch>::dispatch( - DispatchMessage { - key: MessageKey { lane_id: expected_lane_id, nonce: 1 }, - data: DispatchMessageData { payload: Ok(bridging_message.clone()) }, - } - ); - assert_eq!(format!("{:?}", result.dispatch_level_result), format!("{:?}", XcmBlobMessageDispatchResult::NotDispatched(Some(DispatchBlobError::RoutingError)))); - - // check events - no XcmpMessageSent - assert_eq!(>::events() - .into_iter() - .filter_map(|e| unwrap_cumulus_pallet_xcmp_queue_event(e.event.encode())) - .count(), 0); - - // 2.1. WITH hrmp channel -> Ok - mock_open_hrmp_channel::(runtime_para_id.into(), sibling_parachain_id.into(), included_head, &alice); - let result = <>::MessageDispatch>::dispatch( - DispatchMessage { - key: MessageKey { lane_id: expected_lane_id, nonce: 1 }, - data: DispatchMessageData { payload: Ok(bridging_message) }, - } - ); - assert_eq!(format!("{:?}", result.dispatch_level_result), format!("{:?}", XcmBlobMessageDispatchResult::Dispatched)); - - // check events - XcmpMessageSent - let mut events = >::events() - .into_iter() - .filter_map(|e| unwrap_cumulus_pallet_xcmp_queue_event(e.event.encode())); - assert!( - events.any(|e| matches!(e, cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. })) - ); - }) -} - -/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, -/// with proofs (finality, para heads, message) independently submitted. 
-pub fn relayed_incoming_message_works( - collator_session_key: CollatorSessionKeys, - runtime_para_id: u32, - bridged_para_id: u32, - sibling_parachain_id: u32, - local_relay_chain_id: NetworkId, - lane_id: LaneId, - prepare_configuration: impl Fn(), -) where - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config - + pallet_bridge_grandpa::Config - + pallet_bridge_parachains::Config - + pallet_bridge_messages::Config, - AllPalletsWithoutSystem: OnInitialize> - + OnFinalize>, - GPI: 'static, - PPI: 'static, - MPI: 'static, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, - XcmConfig: xcm_executor::Config, - HrmpChannelOpener: frame_support::inherent::ProvideInherent< - Call = cumulus_pallet_parachain_system::Call, - >, - ValidatorIdOf: From>, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: From>, - <>::BridgedChain as bp_runtime::Chain>::Hash: From, - ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, - ::AccountId: - Into<<::RuntimeOrigin as OriginTrait>::AccountId>, - ::AccountId: From, - AccountIdOf: From, - >::InboundRelayer: From, -{ - assert_ne!(runtime_para_id, sibling_parachain_id); - - ExtBuilder::::default() - .with_collators(collator_session_key.collators()) - .with_session_keys(collator_session_key.session_keys()) - .with_safe_xcm_version(XCM_VERSION) - .with_para_id(runtime_para_id.into()) - .with_tracing() - .build() - .execute_with(|| { - prepare_configuration(); - - let mut alice = [0u8; 32]; - alice[0] = 1; - - let included_head = RuntimeHelper::::run_to_block( - 2, - AccountId::from(alice).into(), - ); - mock_open_hrmp_channel::( - runtime_para_id.into(), - sibling_parachain_id.into(), - included_head, - &alice, - ); - - // start with bridged chain block#0 - let init_data = test_data::initialization_data::(0); - pallet_bridge_grandpa::Pallet::::initialize( - RuntimeHelper::::root_origin(), - init_data, - ) - .unwrap(); - - // set up relayer details and proofs - - let message_destination = - X2(GlobalConsensus(local_relay_chain_id), Parachain(sibling_parachain_id)); - // some random numbers (checked by test) - let message_nonce = 1; - let para_header_number = 5; - let relay_header_number = 1; - - let relayer_at_target = Bob; - let relayer_id_on_target: AccountIdOf = relayer_at_target.public().into(); - let relayer_at_source = Dave; - let relayer_id_on_source: AccountId32 = relayer_at_source.public().into(); - - let xcm = vec![xcm::v3::Instruction::<()>::ClearOrigin; 42]; - let expected_dispatch = xcm::latest::Xcm::<()>({ - let mut expected_instructions = xcm.clone(); - // dispatch prepends bridge pallet instance - expected_instructions.insert( - 0, - DescendOrigin(X1(PalletInstance( - as PalletInfoAccess>::index() - as u8, - ))), - ); - expected_instructions - }); - // generate bridged relay chain finality, parachain heads and message proofs, - // to be submitted by relayer to this chain. 
- let ( - relay_chain_header, - grandpa_justification, - bridged_para_head, - parachain_heads, - para_heads_proof, - message_proof, - ) = test_data::make_complex_relayer_delivery_proofs::< - >::BridgedChain, - MB, - (), - >( - lane_id, - xcm.into(), - message_nonce, - message_destination, - para_header_number, - relay_header_number, - bridged_para_id, - ); - - // submit bridged relay chain finality proof - { - let result = pallet_bridge_grandpa::Pallet::::submit_finality_proof( - RuntimeHelper::::origin_of(relayer_id_on_target.clone()), - Box::new(relay_chain_header.clone()), - grandpa_justification, - ); - assert_ok!(result); - assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); - } - - // verify finality proof correctly imported - assert_eq!( - pallet_bridge_grandpa::BestFinalized::::get().unwrap().1, - relay_chain_header.hash() - ); - assert!(pallet_bridge_grandpa::ImportedHeaders::::contains_key( - relay_chain_header.hash() - )); - - // submit parachain heads proof - { - let result = - pallet_bridge_parachains::Pallet::::submit_parachain_heads( - RuntimeHelper::::origin_of(relayer_id_on_target.clone()), - (relay_header_number, relay_chain_header.hash().into()), - parachain_heads, - para_heads_proof, - ); - assert_ok!(result); - assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); - } - // verify parachain head proof correctly imported - assert_eq!( - pallet_bridge_parachains::ParasInfo::::get(ParaId(bridged_para_id)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: relay_header_number, - head_hash: bridged_para_head.hash() - }, - next_imported_hash_position: 1, - }) - ); - - // import message - assert!(RuntimeHelper::>::take_xcm( - sibling_parachain_id.into() - ) - .is_none()); - assert_eq!( - pallet_bridge_messages::InboundLanes::::get(lane_id) - .last_delivered_nonce(), - 0, - ); - // submit message proof - { - let result = pallet_bridge_messages::Pallet::::receive_messages_proof( - RuntimeHelper::::origin_of(relayer_id_on_target), - relayer_id_on_source.into(), - message_proof.into(), - 1, - Weight::MAX / 1000, - ); - assert_ok!(result); - assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); - } - // verify message correctly imported and dispatched - assert_eq!( - pallet_bridge_messages::InboundLanes::::get(lane_id) - .last_delivered_nonce(), - 1, - ); - - // verify relayed bridged XCM message is dispatched to destination sibling para - let dispatched = RuntimeHelper::>::take_xcm( - sibling_parachain_id.into(), - ) - .unwrap(); - // verify contains original message - let dispatched = xcm::latest::Xcm::<()>::try_from(dispatched).unwrap(); - let mut dispatched_clone = dispatched.clone(); - for (idx, expected_instr) in expected_dispatch.0.iter().enumerate() { - assert_eq!(expected_instr, &dispatched.0[idx]); - assert_eq!(expected_instr, &dispatched_clone.0.remove(0)); - } - match dispatched_clone.0.len() { - 0 => (), - 1 => assert!(matches!(dispatched_clone.0[0], SetTopic(_))), - count => assert!(false, "Unexpected messages count: {:?}", count), - } - }) -} - -/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, -/// with proofs (finality, para heads, message) batched together in signed extrinsic. -/// Also verifies relayer transaction signed extensions work as intended. 
-pub fn complex_relay_extrinsic_works( - collator_session_key: CollatorSessionKeys, - runtime_para_id: u32, - bridged_para_id: u32, - sibling_parachain_id: u32, - bridged_chain_id: bp_runtime::ChainId, - local_relay_chain_id: NetworkId, - lane_id: LaneId, - existential_deposit: BalanceOf, - executive_init_block: fn(&HeaderFor), - construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, - pallet_utility::Call:: - ) -> sp_runtime::DispatchOutcome, - prepare_configuration: impl Fn(), -) where - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_utility::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config - + pallet_bridge_grandpa::Config - + pallet_bridge_parachains::Config - + pallet_bridge_messages::Config - + pallet_bridge_relayers::Config, - AllPalletsWithoutSystem: OnInitialize> - + OnFinalize>, - GPI: 'static, - PPI: 'static, - MPI: 'static, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, - XcmConfig: xcm_executor::Config, - HrmpChannelOpener: frame_support::inherent::ProvideInherent< - Call = cumulus_pallet_parachain_system::Call, - >, - ValidatorIdOf: From>, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: From>, - <>::BridgedChain as bp_runtime::Chain>::Hash: From, - ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, - ::AccountId: - Into<<::RuntimeOrigin as OriginTrait>::AccountId>, - AccountIdOf: From, - ::AccountId: From, - >::InboundRelayer: From, - ::RuntimeCall: - From> - + From> - + From> -{ - assert_ne!(runtime_para_id, sibling_parachain_id); - - // Relayer account at local/this BH. - let relayer_at_target = Bob; - let relayer_id_on_target: AccountIdOf = relayer_at_target.public().into(); - let relayer_initial_balance = existential_deposit * 100000u32.into(); - // Relayer account at remote/bridged BH. 
- let relayer_at_source = Dave; - let relayer_id_on_source: AccountId32 = relayer_at_source.public().into(); - - ExtBuilder::::default() - .with_collators(collator_session_key.collators()) - .with_session_keys(collator_session_key.session_keys()) - .with_safe_xcm_version(XCM_VERSION) - .with_para_id(runtime_para_id.into()) - .with_balances(vec![(relayer_id_on_target.clone(), relayer_initial_balance)]) - .with_tracing() - .build() - .execute_with(|| { - prepare_configuration(); - - let mut alice = [0u8; 32]; - alice[0] = 1; - - let included_head = RuntimeHelper::::run_to_block( - 2, - AccountId::from(alice).into(), - ); - let zero: BlockNumberFor = 0u32.into(); - let genesis_hash = frame_system::Pallet::::block_hash(zero); - let mut header: HeaderFor = bp_test_utils::test_header(1u32.into()); - header.set_parent_hash(genesis_hash); - executive_init_block(&header); - - mock_open_hrmp_channel::( - runtime_para_id.into(), - sibling_parachain_id.into(), - included_head, - &alice, - ); - - // start with bridged chain block#0 - let init_data = test_data::initialization_data::(0); - pallet_bridge_grandpa::Pallet::::initialize( - RuntimeHelper::::root_origin(), - init_data, - ) - .unwrap(); - - // set up relayer details and proofs - - let message_destination = - X2(GlobalConsensus(local_relay_chain_id), Parachain(sibling_parachain_id)); - // some random numbers (checked by test) - let message_nonce = 1; - let para_header_number = 5; - let relay_header_number = 1; - - let xcm = vec![xcm::latest::Instruction::<()>::ClearOrigin; 42]; - let expected_dispatch = xcm::latest::Xcm::<()>({ - let mut expected_instructions = xcm.clone(); - // dispatch prepends bridge pallet instance - expected_instructions.insert( - 0, - DescendOrigin(X1(PalletInstance( - as PalletInfoAccess>::index() - as u8, - ))), - ); - expected_instructions - }); - // generate bridged relay chain finality, parachain heads and message proofs, - // to be submitted by relayer to this chain. 
- let ( - relay_chain_header, - grandpa_justification, - bridged_para_head, - parachain_heads, - para_heads_proof, - message_proof, - ) = test_data::make_complex_relayer_delivery_proofs::< - >::BridgedChain, - MB, - (), - >( - lane_id, - xcm.clone().into(), - message_nonce, - message_destination, - para_header_number, - relay_header_number, - bridged_para_id, - ); - - let relay_chain_header_hash = relay_chain_header.hash(); - let batch = test_data::make_complex_relayer_delivery_batch::( - relay_chain_header, - grandpa_justification, - parachain_heads, - para_heads_proof, - message_proof, - relayer_id_on_source, - ); - - // sanity checks - before relayer extrinsic - assert!(RuntimeHelper::>::take_xcm( - sibling_parachain_id.into() - ) - .is_none()); - assert_eq!( - pallet_bridge_messages::InboundLanes::::get(lane_id) - .last_delivered_nonce(), - 0, - ); - let msg_proofs_rewards_account = RewardsAccountParams::new( - lane_id, - bridged_chain_id, - RewardsAccountOwner::ThisChain, - ); - assert_eq!( - pallet_bridge_relayers::RelayerRewards::::get( - relayer_id_on_target.clone(), - msg_proofs_rewards_account - ), - None, - ); - - // construct and apply extrinsic containing batch calls: - // bridged relay chain finality proof - // + parachain heads proof - // + submit message proof - let dispatch_outcome = construct_and_apply_extrinsic(relayer_at_target, batch); - - // verify finality proof correctly imported - assert_ok!(dispatch_outcome); - assert_eq!( - >::get().unwrap().1, - relay_chain_header_hash - ); - assert!(>::contains_key( - relay_chain_header_hash - )); - // verify parachain head proof correctly imported - assert_eq!( - pallet_bridge_parachains::ParasInfo::::get(ParaId(bridged_para_id)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: relay_header_number, - head_hash: bridged_para_head.hash() - }, - next_imported_hash_position: 1, - }) - ); - // verify message correctly imported and dispatched - assert_eq!( - pallet_bridge_messages::InboundLanes::::get(lane_id) - .last_delivered_nonce(), - 1, - ); - // verify relayer is refunded - assert!(pallet_bridge_relayers::RelayerRewards::::get( - relayer_id_on_target, - msg_proofs_rewards_account - ) - .is_some()); - - // verify relayed bridged XCM message is dispatched to destination sibling para - let dispatched = RuntimeHelper::>::take_xcm( - sibling_parachain_id.into(), - ) - .unwrap(); - // verify contains original message - let dispatched = xcm::latest::Xcm::<()>::try_from(dispatched).unwrap(); - let mut dispatched_clone = dispatched.clone(); - for (idx, expected_instr) in expected_dispatch.0.iter().enumerate() { - assert_eq!(expected_instr, &dispatched.0[idx]); - assert_eq!(expected_instr, &dispatched_clone.0.remove(0)); - } - match dispatched_clone.0.len() { - 0 => (), - 1 => assert!(matches!(dispatched_clone.0[0], SetTopic(_))), - count => assert!(false, "Unexpected messages count: {:?}", count), - } - }) -} - -/// Estimates XCM execution fee for paid `ExportMessage` processing. 
-pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer< - Runtime, - XcmConfig, - WeightToFee, ->() -> u128 -where - Runtime: frame_system::Config + pallet_balances::Config, - XcmConfig: xcm_executor::Config, - WeightToFee: frame_support::weights::WeightToFee>, - ::Balance: From + Into, -{ - // data here are not relevant for weighing - let mut xcm = Xcm(vec![ - WithdrawAsset(MultiAssets::from(vec![MultiAsset { - id: Concrete(MultiLocation { parents: 1, interior: Here }), - fun: Fungible(34333299), - }])), - BuyExecution { - fees: MultiAsset { - id: Concrete(MultiLocation { parents: 1, interior: Here }), - fun: Fungible(34333299), - }, - weight_limit: Unlimited, - }, - ExportMessage { - network: Polkadot, - destination: X1(Parachain(1000)), - xcm: Xcm(vec![ - ReserveAssetDeposited(MultiAssets::from(vec![MultiAsset { - id: Concrete(MultiLocation { - parents: 2, - interior: X1(GlobalConsensus(Kusama)), - }), - fun: Fungible(1000000000000), - }])), - ClearOrigin, - BuyExecution { - fees: MultiAsset { - id: Concrete(MultiLocation { - parents: 2, - interior: X1(GlobalConsensus(Kusama)), - }), - fun: Fungible(1000000000000), - }, - weight_limit: Unlimited, - }, - DepositAsset { - assets: Wild(AllCounted(1)), - beneficiary: MultiLocation { - parents: 0, - interior: X1(xcm::latest::prelude::AccountId32 { - network: None, - id: [ - 212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, - 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, - 109, 162, 125, - ], - }), - }, - }, - SetTopic([ - 116, 82, 194, 132, 171, 114, 217, 165, 23, 37, 161, 177, 165, 179, 247, 114, - 137, 101, 147, 70, 28, 157, 168, 32, 154, 63, 74, 228, 152, 180, 5, 63, - ]), - ]), - }, - RefundSurplus, - DepositAsset { - assets: Wild(All), - beneficiary: MultiLocation { parents: 1, interior: X1(Parachain(1000)) }, - }, - SetTopic([ - 36, 224, 250, 165, 82, 195, 67, 110, 160, 170, 140, 87, 217, 62, 201, 164, 42, 98, 219, - 157, 124, 105, 248, 25, 131, 218, 199, 36, 109, 173, 100, 122, - ]), - ]); - - // get weight - let weight = XcmConfig::Weigher::weight(&mut xcm); - assert_ok!(weight); - let weight = weight.unwrap(); - // check if sane - let max_expected = Runtime::BlockWeights::get().max_block / 10; - assert!( - weight.all_lte(max_expected), - "calculated weight: {:?}, max_expected: {:?}", - weight, - max_expected - ); - - // check fee, should not be 0 - let estimated_fee = WeightToFee::weight_to_fee(&weight); - assert!(estimated_fee > BalanceOf::::zero()); - - sp_tracing::try_init_simple(); - log::error!( - target: "bridges::estimate", - "Estimate fee: {:?} for `ExportMessage` for runtime: {:?}", - estimated_fee, - Runtime::Version::get(), - ); - - estimated_fee.into() -} - -/// Estimates transaction fee for default message delivery transaction (batched with required -/// proofs) from bridged parachain. 
-pub fn can_calculate_fee_for_complex_message_delivery_transaction( - collator_session_key: CollatorSessionKeys, - compute_extrinsic_fee: fn(pallet_utility::Call::) -> u128, -) -> u128 -where - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + pallet_bridge_grandpa::Config - + pallet_bridge_parachains::Config - + pallet_bridge_messages::Config - + pallet_utility::Config, - GPI: 'static, - PPI: 'static, - MPI: 'static, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, - ValidatorIdOf: From>, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: - From>, - <>::BridgedChain as bp_runtime::Chain>::Hash: From, - ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, - ::AccountId: - Into<<::RuntimeOrigin as OriginTrait>::AccountId>, - ::AccountId: From, - AccountIdOf: From, - >::InboundRelayer: From, - ::RuntimeCall: - From> - + From> - + From>, -{ - ExtBuilder::::default() - .with_collators(collator_session_key.collators()) - .with_session_keys(collator_session_key.session_keys()) - .with_safe_xcm_version(XCM_VERSION) - .with_para_id(1000.into()) - .with_tracing() - .build() - .execute_with(|| { - // generate bridged relay chain finality, parachain heads and message proofs, - // to be submitted by relayer to this chain. - // - // we don't care about parameter values here, apart from the XCM message size. But we - // do not need to have a large message here, because we're charging for every byte of - // the message additionally - let ( - relay_chain_header, - grandpa_justification, - _, - parachain_heads, - para_heads_proof, - message_proof, - ) = test_data::make_complex_relayer_delivery_proofs::< - >::BridgedChain, - MB, - (), - >( - LaneId::default(), - vec![xcm::v3::Instruction::<()>::ClearOrigin; 1_024].into(), - 1, - X2(GlobalConsensus(Polkadot), Parachain(1_000)), - 1, - 5, - 1_000, - ); - - // generate batch call that provides finality for bridged relay and parachains + message - // proof - let batch = test_data::make_complex_relayer_delivery_batch::( - relay_chain_header, - grandpa_justification, - parachain_heads, - para_heads_proof, - message_proof, - Dave.public().into(), - ); - let estimated_fee = compute_extrinsic_fee(batch); - - log::error!( - target: "bridges::estimate", - "Estimate fee: {:?} for single message delivery for runtime: {:?}", - estimated_fee, - Runtime::Version::get(), - ); - - estimated_fee - }) -} - -/// Estimates transaction fee for default message confirmation transaction (batched with required -/// proofs) from bridged parachain. 
-pub fn can_calculate_fee_for_complex_message_confirmation_transaction( - collator_session_key: CollatorSessionKeys, - compute_extrinsic_fee: fn(pallet_utility::Call::) -> u128, -) -> u128 -where - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + pallet_bridge_grandpa::Config - + pallet_bridge_parachains::Config - + pallet_bridge_messages::Config - + pallet_utility::Config, - GPI: 'static, - PPI: 'static, - MPI: 'static, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - <::ThisChain as bp_runtime::Chain>::AccountId: From, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, - ValidatorIdOf: From>, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: - From>, - <>::BridgedChain as bp_runtime::Chain>::Hash: From, - ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, - ::AccountId: - Into<<::RuntimeOrigin as OriginTrait>::AccountId>, - ::AccountId: From, - AccountIdOf: From, - >::InboundRelayer: From, - <>::TargetHeaderChain as TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - >>::MessagesDeliveryProof: From>, - ::RuntimeCall: - From> - + From> - + From>, -{ - ExtBuilder::::default() - .with_collators(collator_session_key.collators()) - .with_session_keys(collator_session_key.session_keys()) - .with_safe_xcm_version(XCM_VERSION) - .with_para_id(1000.into()) - .with_tracing() - .build() - .execute_with(|| { - // generate bridged relay chain finality, parachain heads and message proofs, - // to be submitted by relayer to this chain. - let unrewarded_relayers = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }; - let ( - relay_chain_header, - grandpa_justification, - _, - parachain_heads, - para_heads_proof, - message_delivery_proof, - ) = test_data::make_complex_relayer_confirmation_proofs::< - >::BridgedChain, - MB, - (), - >(LaneId::default(), 1, 5, 1_000, Alice.public().into(), unrewarded_relayers.clone()); - - // generate batch call that provides finality for bridged relay and parachains + message - // proof - let batch = test_data::make_complex_relayer_confirmation_batch::( - relay_chain_header, - grandpa_justification, - parachain_heads, - para_heads_proof, - message_delivery_proof, - unrewarded_relayers, - ); - let estimated_fee = compute_extrinsic_fee(batch); - - log::error!( - target: "bridges::estimate", - "Estimate fee: {:?} for single message confirmation for runtime: {:?}", - estimated_fee, - Runtime::Version::get(), - ); - - estimated_fee - }) -} - -pub mod test_data { - use super::*; - use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; - use bp_messages::{DeliveredMessages, InboundLaneData, MessageNonce, UnrewardedRelayer}; - use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; - use bp_runtime::{BasicOperatingMode, HashOf}; - use bp_test_utils::authority_list; - use sp_runtime::{DigestItem, SaturatedConversion}; - use xcm_builder::{HaulBlob, HaulBlobError, HaulBlobExporter}; - use xcm_executor::traits::{validate_export, ExportXcm}; - - pub fn prepare_inbound_xcm( - xcm_message: Xcm, - destination: InteriorMultiLocation, - ) -> Vec { - let location = xcm::VersionedInteriorMultiLocation::V3(destination); - let xcm = xcm::VersionedXcm::::V3(xcm_message); - // this is the `BridgeMessage` from polkadot xcm builder, but it 
has no constructor - // or public fields, so just tuple - // (double encoding, because `.encode()` is called on original Xcm BLOB when it is pushed - // to the storage) - (location, xcm).encode().encode() - } - - /// Prepare a batch call with relay finality proof, parachain head proof and message proof. - pub fn make_complex_relayer_delivery_batch( - relay_chain_header: BridgedHeader, - grandpa_justification: GrandpaJustification>, - parachain_heads: Vec<(ParaId, ParaHash)>, - para_heads_proof: ParaHeadsProof, - message_proof: FromBridgedChainMessagesProof, - relayer_id_at_bridged_chain: AccountId32, - ) -> pallet_utility::Call where - Runtime:pallet_bridge_grandpa::Config - + pallet_bridge_parachains::Config - + pallet_bridge_messages::Config - + pallet_utility::Config, - GPI: 'static, - PPI: 'static, - MPI: 'static, - ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, - <>::BridgedChain as bp_runtime::Chain>::Hash: From, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: - From>, - >::InboundRelayer: From, - ::RuntimeCall: - From> - + From> - + From>, - { - let relay_chain_header_hash = relay_chain_header.hash(); - let relay_chain_header_number = *relay_chain_header.number(); - let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { - finality_target: Box::new(relay_chain_header), - justification: grandpa_justification, - }; - let submit_para_head = - pallet_bridge_parachains::Call::::submit_parachain_heads { - at_relay_block: ( - relay_chain_header_number.saturated_into(), - relay_chain_header_hash.into(), - ), - parachains: parachain_heads, - parachain_heads_proof: para_heads_proof, - }; - let submit_message = pallet_bridge_messages::Call::::receive_messages_proof { - relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), - proof: message_proof.into(), - messages_count: 1, - dispatch_weight: Weight::from_parts(1000000000, 0), - }; - pallet_utility::Call::::batch_all { - calls: vec![submit_grandpa.into(), submit_para_head.into(), submit_message.into()], - } - } - - /// Prepare a batch call with relay finality proof, parachain head proof and message delivery - /// proof. 
- pub fn make_complex_relayer_confirmation_batch( - relay_chain_header: BridgedHeader, - grandpa_justification: GrandpaJustification>, - parachain_heads: Vec<(ParaId, ParaHash)>, - para_heads_proof: ParaHeadsProof, - message_delivery_proof: FromBridgedChainMessagesDeliveryProof, - relayers_state: UnrewardedRelayersState, - ) -> pallet_utility::Call where - Runtime:pallet_bridge_grandpa::Config - + pallet_bridge_parachains::Config - + pallet_bridge_messages::Config - + pallet_utility::Config, - GPI: 'static, - PPI: 'static, - MPI: 'static, - ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, - <>::BridgedChain as bp_runtime::Chain>::Hash: From, - <>::TargetHeaderChain as TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - >>::MessagesDeliveryProof: From>, - ::RuntimeCall: - From> - + From> - + From>, - { - let relay_chain_header_hash = relay_chain_header.hash(); - let relay_chain_header_number = *relay_chain_header.number(); - let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { - finality_target: Box::new(relay_chain_header), - justification: grandpa_justification, - }; - let submit_para_head = - pallet_bridge_parachains::Call::::submit_parachain_heads { - at_relay_block: ( - relay_chain_header_number.saturated_into(), - relay_chain_header_hash.into(), - ), - parachains: parachain_heads, - parachain_heads_proof: para_heads_proof, - }; - let submit_message_delivery_proof = - pallet_bridge_messages::Call::::receive_messages_delivery_proof { - proof: message_delivery_proof.into(), - relayers_state, - }; - pallet_utility::Call::::batch_all { - calls: vec![ - submit_grandpa.into(), - submit_para_head.into(), - submit_message_delivery_proof.into(), - ], - } - } - - /// Prepare storage proofs of messages, stored at the source chain. - pub fn make_complex_relayer_delivery_proofs( - lane_id: LaneId, - xcm_message: Xcm, - message_nonce: MessageNonce, - message_destination: Junctions, - para_header_number: u32, - relay_header_number: u32, - bridged_para_id: u32, - ) -> ( - HeaderOf, - GrandpaJustification>, - ParaHead, - Vec<(ParaId, ParaHash)>, - ParaHeadsProof, - FromBridgedChainMessagesProof, - ) - where - BridgedRelayChain: ChainWithGrandpa, - HashOf: From, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, - { - let message_payload = prepare_inbound_xcm(xcm_message, message_destination); - let message_size = StorageProofSize::Minimal(message_payload.len() as u32); - // prepare para storage proof containing message - let (para_state_root, para_storage_proof) = prepare_messages_storage_proof::( - lane_id, - message_nonce..=message_nonce, - None, - message_size, - message_payload, - encode_all_messages, - encode_lane_data, - ); - - let ( - relay_chain_header, - justification, - bridged_para_head, - parachain_heads, - para_heads_proof, - ) = make_complex_bridged_heads_proof::( - para_state_root, - para_header_number, - relay_header_number, - bridged_para_id, - ); - - let message_proof = FromBridgedChainMessagesProof { - bridged_header_hash: bridged_para_head.hash(), - storage_proof: para_storage_proof, - lane: lane_id, - nonces_start: message_nonce, - nonces_end: message_nonce, - }; - - ( - relay_chain_header, - justification, - bridged_para_head, - parachain_heads, - para_heads_proof, - message_proof, - ) - } - - /// Prepare storage proofs of message confirmations, stored at the target chain. 
- pub fn make_complex_relayer_confirmation_proofs( - lane_id: LaneId, - para_header_number: u32, - relay_header_number: u32, - bridged_para_id: u32, - relayer_id_at_this_chain: AccountId32, - relayers_state: UnrewardedRelayersState, - ) -> ( - HeaderOf, - GrandpaJustification>, - ParaHead, - Vec<(ParaId, ParaHash)>, - ParaHeadsProof, - FromBridgedChainMessagesDeliveryProof, - ) - where - BridgedRelayChain: ChainWithGrandpa, - HashOf: From, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - <::ThisChain as bp_runtime::Chain>::AccountId: From, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, - { - // prepare para storage proof containing message delivery proof - let (para_state_root, para_storage_proof) = prepare_message_delivery_storage_proof::( - lane_id, - InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: relayer_id_at_this_chain.into(), - messages: DeliveredMessages::new(1) - }; - relayers_state.unrewarded_relayer_entries as usize - ] - .into(), - last_confirmed_nonce: 1, - }, - StorageProofSize::Minimal(0), - ); - - let ( - relay_chain_header, - justification, - bridged_para_head, - parachain_heads, - para_heads_proof, - ) = make_complex_bridged_heads_proof::( - para_state_root, - para_header_number, - relay_header_number, - bridged_para_id, - ); - - let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: bridged_para_head.hash(), - storage_proof: para_storage_proof, - lane: lane_id, - }; - - ( - relay_chain_header, - justification, - bridged_para_head, - parachain_heads, - para_heads_proof, - message_delivery_proof, - ) - } - - /// Make bridged parachain header with given state root and relay header that is finalizing it. - pub fn make_complex_bridged_heads_proof( - para_state_root: ParaHash, - para_header_number: u32, - relay_header_number: u32, - bridged_para_id: u32, - ) -> ( - HeaderOf, - GrandpaJustification>, - ParaHead, - Vec<(ParaId, ParaHash)>, - ParaHeadsProof, - ) - where - BridgedRelayChain: ChainWithGrandpa, - HashOf: From, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, - { - let bridged_para_head = ParaHead( - bp_test_utils::test_header_with_root::>( - para_header_number.into(), - para_state_root.into(), - ) - .encode(), - ); - let (relay_state_root, para_heads_proof, parachain_heads) = - prepare_parachain_heads_proof::>(vec![( - bridged_para_id, - bridged_para_head.clone(), - )]); - assert_eq!(bridged_para_head.hash(), parachain_heads[0].1); - - // import bridged relay chain block#1 with state root containing head#5 of bridged parachain - let mut relay_chain_header: BridgedRelayChain::Header = - bp_test_utils::test_header_with_root( - relay_header_number.into(), - relay_state_root.into(), - ); - // to compute proper cost of GRANDPA call, let's add some dummy bytes to header, so that the - // `submit_finality_proof` call size would be close to maximal expected (and refundable) - let expected_bytes_in_grandpa_call = BridgedRelayChain::AVERAGE_HEADER_SIZE_IN_JUSTIFICATION - .saturating_mul(BridgedRelayChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY) - .saturating_add(BridgedRelayChain::MAX_HEADER_SIZE) - as usize; - let extra_bytes_required = - expected_bytes_in_grandpa_call.saturating_sub(relay_chain_header.encoded_size()); - relay_chain_header - .digest_mut() - .push(DigestItem::Other(vec![42; extra_bytes_required])); - - let justification = 
make_default_justification(&relay_chain_header); - (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) - } - - /// Helper that creates InitializationData mock data, that can be used to initialize bridge - /// GRANDPA pallet - pub fn initialization_data< - Runtime: pallet_bridge_grandpa::Config, - GrandpaPalletInstance: 'static, - >( - block_number: u32, - ) -> bp_header_chain::InitializationData> { - bp_header_chain::InitializationData { - header: Box::new(bp_test_utils::test_header(block_number.into())), - authority_list: authority_list(), - set_id: 1, - operating_mode: BasicOperatingMode::Normal, - } - } - - /// Dummy xcm - pub(crate) fn dummy_xcm() -> Xcm<()> { - vec![Trap(42)].into() - } - - pub(crate) fn dispatch_message( - lane_id: LaneId, - nonce: MessageNonce, - payload: Vec, - ) -> DispatchMessage> { - DispatchMessage { - key: MessageKey { lane_id, nonce }, - data: DispatchMessageData { payload: Ok(payload) }, - } - } - - /// Macro used for simulate_export_message and capturing bytes - macro_rules! grab_haul_blob ( - ($name:ident, $grabbed_payload:ident) => { - std::thread_local! { - static $grabbed_payload: std::cell::RefCell>> = std::cell::RefCell::new(None); - } - - struct $name; - impl HaulBlob for $name { - fn haul_blob(blob: Vec) -> Result<(), HaulBlobError>{ - $grabbed_payload.with(|rm| *rm.borrow_mut() = Some(blob)); - Ok(()) - } - } - } - ); - - /// Simulates `HaulBlobExporter` and all its wrapping and captures generated plain bytes, - /// which are transferred over bridge. - pub(crate) fn simulate_message_exporter_on_bridged_chain< - SourceNetwork: Get, - DestinationNetwork: Get, - >( - (destination_network, destination_junctions): (NetworkId, Junctions), - ) -> Vec { - grab_haul_blob!(GrabbingHaulBlob, GRABBED_HAUL_BLOB_PAYLOAD); - - // lets pretend that some parachain on bridged chain exported the message - let universal_source_on_bridged_chain = - X2(GlobalConsensus(SourceNetwork::get()), Parachain(5678)); - let channel = 1_u32; - - // simulate XCM message export - let (ticket, fee) = - validate_export::>( - destination_network, - channel, - universal_source_on_bridged_chain, - destination_junctions, - dummy_xcm(), - ) - .expect("validate_export to pass"); - log::info!( - target: "simulate_message_exporter_on_bridged_chain", - "HaulBlobExporter::validate fee: {:?}", - fee - ); - let xcm_hash = - HaulBlobExporter::::deliver(ticket) - .expect("deliver to pass"); - log::info!( - target: "simulate_message_exporter_on_bridged_chain", - "HaulBlobExporter::deliver xcm_hash: {:?}", - xcm_hash - ); - - GRABBED_HAUL_BLOB_PAYLOAD.with(|r| r.take().expect("Encoded message should be here")) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs new file mode 100644 index 0000000000000000000000000000000000000000..e0e75f093cfc65c0c6c58243f3483db1a252518e --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -0,0 +1,437 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Module contains predefined test-case scenarios for `Runtime` with bridging capabilities +//! with remote GRANDPA chain. + +use crate::{ + test_cases::{bridges_prelude::*, helpers, run_test}, + test_data, +}; + +use bp_header_chain::ChainWithGrandpa; +use bp_messages::{ + source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, + UnrewardedRelayersState, +}; +use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; +use bp_runtime::{HashOf, UnderlyingChainOf}; +use bridge_runtime_common::{ + messages::{ + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, + BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, + }, + messages_xcm_extension::XcmAsPlainPayload, +}; +use frame_support::traits::{Get, OnFinalize, OnInitialize}; +use frame_system::pallet_prelude::BlockNumberFor; +use parachains_runtimes_test_utils::{ + AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, +}; +use sp_keyring::AccountKeyring::*; +use sp_runtime::{traits::Header as HeaderT, AccountId32}; +use xcm::latest::prelude::*; + +/// Helper trait to test bridges with remote GRANDPA chain. +/// +/// This is only used to decrease amount of lines, dedicated to bounds. +pub trait WithRemoteGrandpaChainHelper { + /// This chain runtime. + type Runtime: BasicParachainRuntime + + cumulus_pallet_xcmp_queue::Config + + BridgeGrandpaConfig< + Self::GPI, + BridgedChain = UnderlyingChainOf>, + > + BridgeMessagesConfig< + Self::MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = bp_runtime::AccountIdOf>, + OutboundPayload = XcmAsPlainPayload, + > + pallet_bridge_relayers::Config; + /// All pallets of this chain, excluding system pallet. + type AllPalletsWithoutSystem: OnInitialize> + + OnFinalize>; + /// Instance of the `pallet-bridge-grandpa`, used to bridge with remote GRANDPA chain. + type GPI: 'static; + /// Instance of the `pallet-bridge-messages`, used to bridge with remote GRANDPA chain. + type MPI: 'static; + /// Messages bridge definition. + type MB: MessageBridge; +} + +/// Adapter struct that implements [`WithRemoteGrandpaChainHelper`]. +pub struct WithRemoteGrandpaChainHelperAdapter( + sp_std::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, MPI, MB)>, +); + +impl WithRemoteGrandpaChainHelper + for WithRemoteGrandpaChainHelperAdapter +where + Runtime: BasicParachainRuntime + + cumulus_pallet_xcmp_queue::Config + + BridgeGrandpaConfig>> + + BridgeMessagesConfig< + MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = bp_runtime::AccountIdOf>, + OutboundPayload = XcmAsPlainPayload, + > + pallet_bridge_relayers::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + GPI: 'static, + MPI: 'static, + MB: MessageBridge, +{ + type Runtime = Runtime; + type AllPalletsWithoutSystem = AllPalletsWithoutSystem; + type GPI = GPI; + type MPI = MPI; + type MB = MB; +} + +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, message) independently submitted. +/// Also verifies relayer transaction signed extensions work as intended. 
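+///
+/// # Example (illustrative sketch)
+///
+/// A runtime's test crate would normally bind the generic parameters once through the
+/// adapter above and then call this test-case. All `My*` names, `collator_session_keys()`
+/// and `construct_and_apply_extrinsic` are placeholders for items the concrete runtime
+/// test crate is assumed to provide:
+///
+/// ```ignore
+/// type RuntimeTestsAdapter = from_grandpa_chain::WithRemoteGrandpaChainHelperAdapter<
+///     MyRuntime,
+///     MyAllPalletsWithoutSystem,
+///     MyBridgeGrandpaInstance,
+///     MyBridgeMessagesInstance,
+///     MyMessageBridge,
+/// >;
+///
+/// #[test]
+/// fn relayed_incoming_message_works() {
+///     from_grandpa_chain::relayed_incoming_message_works::<RuntimeTestsAdapter>(
+///         collator_session_keys(),
+///         MY_BRIDGE_HUB_PARA_ID,
+///         MY_BRIDGED_CHAIN_ID,
+///         SIBLING_PARACHAIN_ID,
+///         NetworkId::Rococo,
+///         LaneId([0, 0, 0, 1]),
+///         || (),
+///         construct_and_apply_extrinsic,
+///     )
+/// }
+/// ```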
+pub fn relayed_incoming_message_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + bridged_chain_id: bp_runtime::ChainId, + sibling_parachain_id: u32, + local_relay_chain_id: NetworkId, + lane_id: LaneId, + prepare_configuration: impl Fn(), + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + RuntimeCallOf, + ) -> sp_runtime::DispatchOutcome, +) where + RuntimeHelper: WithRemoteGrandpaChainHelper, + AccountIdOf: From, + RuntimeCallOf: From> + + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + helpers::relayed_incoming_message_works::< + RuntimeHelper::Runtime, + RuntimeHelper::AllPalletsWithoutSystem, + RuntimeHelper::MPI, + >( + collator_session_key, + runtime_para_id, + sibling_parachain_id, + local_relay_chain_id, + construct_and_apply_extrinsic, + |relayer_id_at_this_chain, + relayer_id_at_bridged_chain, + message_destination, + message_nonce, + xcm| { + let relay_header_number = 5u32.into(); + + prepare_configuration(); + + // start with bridged relay chain block#0 + helpers::initialize_bridge_grandpa_pallet::( + test_data::initialization_data::(0), + ); + + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + let (relay_chain_header, grandpa_justification, message_proof) = + test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< + RuntimeHelper::MB, + (), + >(lane_id, xcm.into(), message_nonce, message_destination, relay_header_number); + + let relay_chain_header_hash = relay_chain_header.hash(); + vec![ + ( + BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }.into(), + helpers::VerifySubmitGrandpaFinalityProofOutcome::::expect_best_header_hash( + relay_chain_header_hash, + ), + ), + ( + BridgeMessagesCall::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }.into(), + Box::new(( + helpers::VerifySubmitMessagesProofOutcome::::expect_last_delivered_nonce( + lane_id, + 1, + ), + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), + )), + ), + ] + }, + ); +} + +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, message) batched together in signed extrinsic. +/// Also verifies relayer transaction signed extensions work as intended. 
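+///
+/// # Example (illustrative sketch)
+///
+/// Invocation mirrors [`relayed_incoming_message_works`], with the sibling parachain id
+/// passed before the bridged chain id; names below are placeholders assumed to exist in
+/// the concrete runtime test crate:
+///
+/// ```ignore
+/// from_grandpa_chain::complex_relay_extrinsic_works::<RuntimeTestsAdapter>(
+///     collator_session_keys(),
+///     MY_BRIDGE_HUB_PARA_ID,
+///     SIBLING_PARACHAIN_ID,
+///     MY_BRIDGED_CHAIN_ID,
+///     NetworkId::Rococo,
+///     LaneId([0, 0, 0, 1]),
+///     || (),
+///     construct_and_apply_extrinsic,
+/// );
+/// ```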
+pub fn complex_relay_extrinsic_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + sibling_parachain_id: u32, + bridged_chain_id: bp_runtime::ChainId, + local_relay_chain_id: NetworkId, + lane_id: LaneId, + prepare_configuration: impl Fn(), + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + RuntimeCallOf, + ) -> sp_runtime::DispatchOutcome, +) where + RuntimeHelper: WithRemoteGrandpaChainHelper, + RuntimeHelper::Runtime: + pallet_utility::Config>, + AccountIdOf: From, + RuntimeCallOf: From> + + From> + + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + helpers::relayed_incoming_message_works::< + RuntimeHelper::Runtime, + RuntimeHelper::AllPalletsWithoutSystem, + RuntimeHelper::MPI, + >( + collator_session_key, + runtime_para_id, + sibling_parachain_id, + local_relay_chain_id, + construct_and_apply_extrinsic, + |relayer_id_at_this_chain, + relayer_id_at_bridged_chain, + message_destination, + message_nonce, + xcm| { + let relay_header_number = 1u32.into(); + + prepare_configuration(); + + // start with bridged relay chain block#0 + helpers::initialize_bridge_grandpa_pallet::( + test_data::initialization_data::(0), + ); + + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + let (relay_chain_header, grandpa_justification, message_proof) = + test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< + RuntimeHelper::MB, + (), + >(lane_id, xcm.into(), message_nonce, message_destination, relay_header_number); + + let relay_chain_header_hash = relay_chain_header.hash(); + vec![( + pallet_utility::Call::::batch_all { + calls: vec![ + BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }.into(), + BridgeMessagesCall::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }.into(), + ], + } + .into(), + Box::new(( + helpers::VerifySubmitGrandpaFinalityProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + >::expect_best_header_hash(relay_chain_header_hash), + helpers::VerifySubmitMessagesProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::MPI, + >::expect_last_delivered_nonce(lane_id, 1), + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), + )), + )] + }, + ); +} + +/// Estimates transaction fee for default message delivery transaction (batched with required +/// proofs) from bridged GRANDPA chain. +pub fn can_calculate_fee_for_complex_message_delivery_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn(pallet_utility::Call) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteGrandpaChainHelper, + RuntimeHelper::Runtime: + pallet_utility::Config>, + RuntimeCallOf: From> + + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
+ // + // we don't care about parameter values here, apart from the XCM message size. But we + // do not need to have a large message here, because we're charging for every byte of + // the message additionally + let (relay_chain_header, grandpa_justification, message_proof) = + test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< + RuntimeHelper::MB, + (), + >( + LaneId::default(), + vec![xcm::v3::Instruction::<()>::ClearOrigin; 1_024].into(), + 1, + X2(GlobalConsensus(Polkadot), Parachain(1_000)), + 1u32.into(), + ); + + // generate batch call that provides finality for bridged relay and parachains + message + // proof + let batch = test_data::from_grandpa_chain::make_complex_relayer_delivery_batch::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + RuntimeHelper::MPI, + >( + relay_chain_header, + grandpa_justification, + message_proof, + helpers::relayer_id_at_bridged_chain::(), + ); + let estimated_fee = compute_extrinsic_fee(batch); + + log::error!( + target: "bridges::estimate", + "Estimate fee: {:?} for single message delivery for runtime: {:?}", + estimated_fee, + ::Version::get(), + ); + + estimated_fee + }) +} + +/// Estimates transaction fee for default message confirmation transaction (batched with required +/// proofs) from bridged GRANDPA chain. +pub fn can_calculate_fee_for_complex_message_confirmation_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn(pallet_utility::Call) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteGrandpaChainHelper, + AccountIdOf: From, + RuntimeHelper::Runtime: + pallet_utility::Config>, + MessageThisChain: + bp_runtime::Chain>, + RuntimeCallOf: From> + + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::TargetHeaderChain: + TargetHeaderChain< + XcmAsPlainPayload, + AccountIdOf, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
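+		// a single relayer entry with a single delivered message is the smallest realistic
+		// confirmation; more entries would only grow the proof and, presumably, the
+		// estimated fee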
+ let unrewarded_relayers = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }; + let (relay_chain_header, grandpa_justification, message_delivery_proof) = + test_data::from_grandpa_chain::make_complex_relayer_confirmation_proofs::< + RuntimeHelper::MB, + (), + >( + LaneId::default(), + 1u32.into(), + AccountId32::from(Alice.public()).into(), + unrewarded_relayers.clone(), + ); + + // generate batch call that provides finality for bridged relay and parachains + message + // proof + let batch = test_data::from_grandpa_chain::make_complex_relayer_confirmation_batch::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + RuntimeHelper::MPI, + >( + relay_chain_header, + grandpa_justification, + message_delivery_proof, + unrewarded_relayers, + ); + let estimated_fee = compute_extrinsic_fee(batch); + + log::error!( + target: "bridges::estimate", + "Estimate fee: {:?} for single message confirmation for runtime: {:?}", + estimated_fee, + ::Version::get(), + ); + + estimated_fee + }) +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs new file mode 100644 index 0000000000000000000000000000000000000000..91bebb36b18760b91125b5775a5a3111b801b0a0 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -0,0 +1,541 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Module contains predefined test-case scenarios for `Runtime` with bridging capabilities +//! with remote parachain. + +use crate::{ + test_cases::{bridges_prelude::*, helpers, run_test}, + test_data, +}; + +use bp_header_chain::ChainWithGrandpa; +use bp_messages::{ + source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, + UnrewardedRelayersState, +}; +use bp_polkadot_core::parachains::ParaHash; +use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; +use bp_runtime::{HashOf, Parachain, UnderlyingChainOf}; +use bridge_runtime_common::{ + messages::{ + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, + BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, + }, + messages_xcm_extension::XcmAsPlainPayload, +}; +use frame_support::traits::{Get, OnFinalize, OnInitialize}; +use frame_system::pallet_prelude::BlockNumberFor; +use parachains_runtimes_test_utils::{ + AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, +}; +use sp_keyring::AccountKeyring::*; +use sp_runtime::{traits::Header as HeaderT, AccountId32}; +use xcm::latest::prelude::*; + +/// Helper trait to test bridges with remote parachain. +/// +/// This is only used to decrease amount of lines, dedicated to bounds. +pub trait WithRemoteParachainHelper { + /// This chain runtime. 
+ type Runtime: BasicParachainRuntime + + cumulus_pallet_xcmp_queue::Config + + BridgeGrandpaConfig + + BridgeParachainsConfig + + BridgeMessagesConfig< + Self::MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = bp_runtime::AccountIdOf>, + OutboundPayload = XcmAsPlainPayload, + > + pallet_bridge_relayers::Config; + /// All pallets of this chain, excluding system pallet. + type AllPalletsWithoutSystem: OnInitialize> + + OnFinalize>; + /// Instance of the `pallet-bridge-grandpa`, used to bridge with remote relay chain. + type GPI: 'static; + /// Instance of the `pallet-bridge-parachains`, used to bridge with remote parachain. + type PPI: 'static; + /// Instance of the `pallet-bridge-messages`, used to bridge with remote parachain. + type MPI: 'static; + /// Messages bridge definition. + type MB: MessageBridge; +} + +/// Adapter struct that implements `WithRemoteParachainHelper`. +pub struct WithRemoteParachainHelperAdapter( + sp_std::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, PPI, MPI, MB)>, +); + +impl WithRemoteParachainHelper + for WithRemoteParachainHelperAdapter +where + Runtime: BasicParachainRuntime + + cumulus_pallet_xcmp_queue::Config + + BridgeGrandpaConfig + + BridgeParachainsConfig + + BridgeMessagesConfig< + MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = bp_runtime::AccountIdOf>, + OutboundPayload = XcmAsPlainPayload, + > + pallet_bridge_relayers::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + GPI: 'static, + PPI: 'static, + MPI: 'static, + MB: MessageBridge, +{ + type Runtime = Runtime; + type AllPalletsWithoutSystem = AllPalletsWithoutSystem; + type GPI = GPI; + type PPI = PPI; + type MPI = MPI; + type MB = MB; +} + +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, para heads, message) independently submitted. +/// Also verifies relayer transaction signed extensions work as intended. +pub fn relayed_incoming_message_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + bridged_para_id: u32, + bridged_chain_id: bp_runtime::ChainId, + sibling_parachain_id: u32, + local_relay_chain_id: NetworkId, + lane_id: LaneId, + prepare_configuration: impl Fn(), + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + ::RuntimeCall, + ) -> sp_runtime::DispatchOutcome, +) where + RuntimeHelper: WithRemoteParachainHelper, + AccountIdOf: From, + RuntimeCallOf: From> + + From> + + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + helpers::relayed_incoming_message_works::< + RuntimeHelper::Runtime, + RuntimeHelper::AllPalletsWithoutSystem, + RuntimeHelper::MPI, + >( + collator_session_key, + runtime_para_id, + sibling_parachain_id, + local_relay_chain_id, + construct_and_apply_extrinsic, + |relayer_id_at_this_chain, + relayer_id_at_bridged_chain, + message_destination, + message_nonce, + xcm| { + let para_header_number = 5; + let relay_header_number = 1; + + prepare_configuration(); + + // start with bridged relay chain block#0 + helpers::initialize_bridge_grandpa_pallet::( + test_data::initialization_data::(0), + ); + + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
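+			// each of the three proofs is submitted as a separate extrinsic below, paired with
+			// a verifier that checks its on-chain effect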
+ let ( + relay_chain_header, + grandpa_justification, + parachain_head, + parachain_heads, + para_heads_proof, + message_proof, + ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + para_header_number, + relay_header_number, + bridged_para_id, + ); + + let parachain_head_hash = parachain_head.hash(); + let relay_chain_header_hash = relay_chain_header.hash(); + let relay_chain_header_number = *relay_chain_header.number(); + vec![ + ( + BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }.into(), + helpers::VerifySubmitGrandpaFinalityProofOutcome::::expect_best_header_hash( + relay_chain_header_hash, + ), + ), + ( + BridgeParachainsCall::::submit_parachain_heads { + at_relay_block: (relay_chain_header_number, relay_chain_header_hash), + parachains: parachain_heads, + parachain_heads_proof: para_heads_proof, + }.into(), + helpers::VerifySubmitParachainHeaderProofOutcome::::expect_best_header_hash( + bridged_para_id, + parachain_head_hash, + ), + ), + ( + BridgeMessagesCall::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }.into(), + Box::new(( + helpers::VerifySubmitMessagesProofOutcome::::expect_last_delivered_nonce( + lane_id, + 1, + ), + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), + )), + ), + ] + }, + ); +} + +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, para heads, message) batched together in signed extrinsic. +/// Also verifies relayer transaction signed extensions work as intended. +pub fn complex_relay_extrinsic_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + bridged_para_id: u32, + sibling_parachain_id: u32, + bridged_chain_id: bp_runtime::ChainId, + local_relay_chain_id: NetworkId, + lane_id: LaneId, + prepare_configuration: impl Fn(), + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + ::RuntimeCall, + ) -> sp_runtime::DispatchOutcome, +) where + RuntimeHelper: WithRemoteParachainHelper, + RuntimeHelper::Runtime: + pallet_utility::Config>, + AccountIdOf: From, + RuntimeCallOf: From> + + From> + + From> + + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + helpers::relayed_incoming_message_works::< + RuntimeHelper::Runtime, + RuntimeHelper::AllPalletsWithoutSystem, + RuntimeHelper::MPI, + >( + collator_session_key, + runtime_para_id, + sibling_parachain_id, + local_relay_chain_id, + construct_and_apply_extrinsic, + |relayer_id_at_this_chain, + relayer_id_at_bridged_chain, + message_destination, + message_nonce, + xcm| { + let para_header_number = 5; + let relay_header_number = 1; + + prepare_configuration(); + + // start with bridged relay chain block#0 + helpers::initialize_bridge_grandpa_pallet::( + test_data::initialization_data::(0), + ); + + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
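+			// unlike the test-case above, all three proofs are submitted in a single
+			// `pallet_utility::batch_all` extrinsic and their outcomes are verified together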
+ let ( + relay_chain_header, + grandpa_justification, + parachain_head, + parachain_heads, + para_heads_proof, + message_proof, + ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + para_header_number, + relay_header_number, + bridged_para_id, + ); + + let parachain_head_hash = parachain_head.hash(); + let relay_chain_header_hash = relay_chain_header.hash(); + let relay_chain_header_number = *relay_chain_header.number(); + vec![( + pallet_utility::Call::::batch_all { + calls: vec![ + BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }.into(), + BridgeParachainsCall::::submit_parachain_heads { + at_relay_block: (relay_chain_header_number, relay_chain_header_hash), + parachains: parachain_heads, + parachain_heads_proof: para_heads_proof, + }.into(), + BridgeMessagesCall::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }.into(), + ], + } + .into(), + Box::new(( + helpers::VerifySubmitGrandpaFinalityProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + >::expect_best_header_hash(relay_chain_header_hash), + helpers::VerifySubmitParachainHeaderProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::PPI, + >::expect_best_header_hash(bridged_para_id, parachain_head_hash), + helpers::VerifySubmitMessagesProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::MPI, + >::expect_last_delivered_nonce(lane_id, 1), + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), + )), + )] + }, + ); +} + +/// Estimates transaction fee for default message delivery transaction (batched with required +/// proofs) from bridged parachain. +pub fn can_calculate_fee_for_complex_message_delivery_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn(pallet_utility::Call) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteParachainHelper, + RuntimeHelper::Runtime: + pallet_utility::Config>, + RuntimeCallOf: From> + + From> + + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + // + // we don't care about parameter values here, apart from the XCM message size. 
But we + // do not need to have a large message here, because we're charging for every byte of + // the message additionally + let ( + relay_chain_header, + grandpa_justification, + _, + parachain_heads, + para_heads_proof, + message_proof, + ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + LaneId::default(), + vec![xcm::v3::Instruction::<()>::ClearOrigin; 1_024].into(), + 1, + X2(GlobalConsensus(Polkadot), Parachain(1_000)), + 1, + 5, + 1_000, + ); + + // generate batch call that provides finality for bridged relay and parachains + message + // proof + let batch = test_data::from_parachain::make_complex_relayer_delivery_batch::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + RuntimeHelper::PPI, + RuntimeHelper::MPI, + _, + >( + relay_chain_header, + grandpa_justification, + parachain_heads, + para_heads_proof, + message_proof, + helpers::relayer_id_at_bridged_chain::(), + ); + let estimated_fee = compute_extrinsic_fee(batch); + + log::error!( + target: "bridges::estimate", + "Estimate fee: {:?} for single message delivery for runtime: {:?}", + estimated_fee, + ::Version::get(), + ); + + estimated_fee + }) +} + +/// Estimates transaction fee for default message confirmation transaction (batched with required +/// proofs) from bridged parachain. +pub fn can_calculate_fee_for_complex_message_confirmation_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn(pallet_utility::Call) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteParachainHelper, + AccountIdOf: From, + RuntimeHelper::Runtime: + pallet_utility::Config>, + MessageThisChain: + bp_runtime::Chain>, + RuntimeCallOf: From> + + From> + + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::TargetHeaderChain: + TargetHeaderChain< + XcmAsPlainPayload, + AccountIdOf, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
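+		// compared to the remote GRANDPA chain variant, the batch here also carries the
+		// parachain heads proof, so the estimated fee is expected to be slightly higher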
+ let unrewarded_relayers = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }; + let ( + relay_chain_header, + grandpa_justification, + _, + parachain_heads, + para_heads_proof, + message_delivery_proof, + ) = test_data::from_parachain::make_complex_relayer_confirmation_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + LaneId::default(), + 1, + 5, + 1_000, + AccountId32::from(Alice.public()).into(), + unrewarded_relayers.clone(), + ); + + // generate batch call that provides finality for bridged relay and parachains + message + // proof + let batch = test_data::from_parachain::make_complex_relayer_confirmation_batch::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + RuntimeHelper::PPI, + RuntimeHelper::MPI, + >( + relay_chain_header, + grandpa_justification, + parachain_heads, + para_heads_proof, + message_delivery_proof, + unrewarded_relayers, + ); + let estimated_fee = compute_extrinsic_fee(batch); + + log::error!( + target: "bridges::estimate", + "Estimate fee: {:?} for single message confirmation for runtime: {:?}", + estimated_fee, + ::Version::get(), + ); + + estimated_fee + }) +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..69aa61db3cc66c3b1e93d04e2b7c7b0317da344a --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -0,0 +1,345 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Module contains tests code, that is shared by all types of bridges + +use crate::test_cases::{bridges_prelude::*, run_test, RuntimeHelper}; + +use asset_test_utils::BasicParachainRuntime; +use bp_messages::{LaneId, MessageNonce}; +use bp_polkadot_core::parachains::{ParaHash, ParaId}; +use bp_relayers::RewardsAccountParams; +use codec::Decode; +use frame_support::{ + assert_ok, + traits::{OnFinalize, OnInitialize, PalletInfoAccess}, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use pallet_bridge_grandpa::{BridgedBlockHash, BridgedHeader}; +use parachains_common::AccountId; +use parachains_runtimes_test_utils::{ + mock_open_hrmp_channel, AccountIdOf, CollatorSessionKeys, RuntimeCallOf, +}; +use sp_core::Get; +use sp_keyring::AccountKeyring::*; +use sp_runtime::{traits::TrailingZeroInput, AccountId32}; +use sp_std::marker::PhantomData; +use xcm::latest::prelude::*; + +/// Verify that the transaction has succeeded. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait VerifyTransactionOutcome { + fn verify_outcome(&self); +} + +impl VerifyTransactionOutcome for Box { + fn verify_outcome(&self) { + VerifyTransactionOutcome::verify_outcome(&**self) + } +} + +/// Checks that the best finalized header hash in the bridge GRANDPA pallet equals to given one. 
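+///
+/// # Example (illustrative sketch)
+///
+/// Verifiers are normally returned next to the call they check (see [`CallsAndVerifiers`]),
+/// but may also be used directly; `MyRuntime` and `MyGrandpaInstance` are placeholders:
+///
+/// ```ignore
+/// let verifier = VerifySubmitGrandpaFinalityProofOutcome::<MyRuntime, MyGrandpaInstance>::
+///     expect_best_header_hash(expected_relay_header_hash);
+/// // ... apply the `submit_finality_proof` extrinsic ...
+/// verifier.verify_outcome();
+/// ```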
+pub struct VerifySubmitGrandpaFinalityProofOutcome +where + Runtime: BridgeGrandpaConfig, + GPI: 'static, +{ + expected_best_hash: BridgedBlockHash, +} + +impl VerifySubmitGrandpaFinalityProofOutcome +where + Runtime: BridgeGrandpaConfig, + GPI: 'static, +{ + /// Expect given header hash to be the best after transaction. + pub fn expect_best_header_hash( + expected_best_hash: BridgedBlockHash, + ) -> Box { + Box::new(Self { expected_best_hash }) + } +} + +impl VerifyTransactionOutcome + for VerifySubmitGrandpaFinalityProofOutcome +where + Runtime: BridgeGrandpaConfig, + GPI: 'static, +{ + fn verify_outcome(&self) { + assert_eq!( + pallet_bridge_grandpa::BestFinalized::::get().unwrap().1, + self.expected_best_hash + ); + assert!(pallet_bridge_grandpa::ImportedHeaders::::contains_key( + self.expected_best_hash + )); + } +} + +/// Checks that the best parachain header hash in the bridge parachains pallet equals to given one. +pub struct VerifySubmitParachainHeaderProofOutcome { + bridged_para_id: u32, + expected_best_hash: ParaHash, + _marker: PhantomData<(Runtime, PPI)>, +} + +impl VerifySubmitParachainHeaderProofOutcome +where + Runtime: BridgeParachainsConfig, + PPI: 'static, +{ + /// Expect given header hash to be the best after transaction. + pub fn expect_best_header_hash( + bridged_para_id: u32, + expected_best_hash: ParaHash, + ) -> Box { + Box::new(Self { bridged_para_id, expected_best_hash, _marker: PhantomData }) + } +} + +impl VerifyTransactionOutcome + for VerifySubmitParachainHeaderProofOutcome +where + Runtime: BridgeParachainsConfig, + PPI: 'static, +{ + fn verify_outcome(&self) { + assert_eq!( + pallet_bridge_parachains::ParasInfo::::get(ParaId(self.bridged_para_id)) + .map(|info| info.best_head_hash.head_hash), + Some(self.expected_best_hash), + ); + } +} + +/// Checks that the latest delivered nonce in the bridge messages pallet equals to given one. +pub struct VerifySubmitMessagesProofOutcome { + lane: LaneId, + expected_nonce: MessageNonce, + _marker: PhantomData<(Runtime, MPI)>, +} + +impl VerifySubmitMessagesProofOutcome +where + Runtime: BridgeMessagesConfig, + MPI: 'static, +{ + /// Expect given delivered nonce to be the latest after transaction. + pub fn expect_last_delivered_nonce( + lane: LaneId, + expected_nonce: MessageNonce, + ) -> Box { + Box::new(Self { lane, expected_nonce, _marker: PhantomData }) + } +} + +impl VerifyTransactionOutcome for VerifySubmitMessagesProofOutcome +where + Runtime: BridgeMessagesConfig, + MPI: 'static, +{ + fn verify_outcome(&self) { + assert_eq!( + pallet_bridge_messages::InboundLanes::::get(self.lane) + .last_delivered_nonce(), + self.expected_nonce, + ); + } +} + +/// Verifies that relayer is rewarded at this chain. +pub struct VerifyRelayerRewarded { + relayer: Runtime::AccountId, + reward_params: RewardsAccountParams, +} + +impl VerifyRelayerRewarded +where + Runtime: pallet_bridge_relayers::Config, +{ + /// Expect given delivered nonce to be the latest after transaction. + pub fn expect_relayer_reward( + relayer: Runtime::AccountId, + reward_params: RewardsAccountParams, + ) -> Box { + Box::new(Self { relayer, reward_params }) + } +} + +impl VerifyTransactionOutcome for VerifyRelayerRewarded +where + Runtime: pallet_bridge_relayers::Config, +{ + fn verify_outcome(&self) { + assert!(pallet_bridge_relayers::RelayerRewards::::get( + &self.relayer, + &self.reward_params, + ) + .is_some()); + } +} + +/// Initialize bridge GRANDPA pallet. 
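+///
+/// # Example (illustrative sketch)
+///
+/// Test-cases in this crate start bridged relay chain finality at block #0, roughly:
+///
+/// ```ignore
+/// initialize_bridge_grandpa_pallet::<MyRuntime, MyGrandpaInstance>(
+///     test_data::initialization_data::<MyRuntime, MyGrandpaInstance>(0),
+/// );
+/// ```
+///
+/// where `MyRuntime`/`MyGrandpaInstance` stand for the concrete runtime and its bridge
+/// GRANDPA pallet instance.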
+pub(crate) fn initialize_bridge_grandpa_pallet( + init_data: bp_header_chain::InitializationData>, +) where + Runtime: BridgeGrandpaConfig, +{ + pallet_bridge_grandpa::Pallet::::initialize( + RuntimeHelper::::root_origin(), + init_data, + ) + .unwrap(); +} + +/// Runtime calls and their verifiers. +pub type CallsAndVerifiers = + Vec<(RuntimeCallOf, Box)>; + +/// Returns relayer id at the bridged chain. +pub fn relayer_id_at_bridged_chain, MPI>( +) -> Runtime::InboundRelayer { + Runtime::InboundRelayer::decode(&mut TrailingZeroInput::zeroes()).unwrap() +} + +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, message) independently submitted. +pub fn relayed_incoming_message_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + sibling_parachain_id: u32, + local_relay_chain_id: NetworkId, + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + RuntimeCallOf, + ) -> sp_runtime::DispatchOutcome, + prepare_message_proof_import: impl FnOnce( + Runtime::AccountId, + Runtime::InboundRelayer, + InteriorMultiLocation, + MessageNonce, + Xcm<()>, + ) -> CallsAndVerifiers, +) where + Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config + BridgeMessagesConfig, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + MPI: 'static, + AccountIdOf: From, +{ + let relayer_at_target = Bob; + let relayer_id_on_target: AccountId32 = relayer_at_target.public().into(); + let relayer_id_on_source = relayer_id_at_bridged_chain::(); + + assert_ne!(runtime_para_id, sibling_parachain_id); + + run_test::( + collator_session_key, + runtime_para_id, + vec![( + relayer_id_on_target.clone().into(), + // this value should be enough to cover all transaction costs, but computing the actual + // value here is tricky - there are several transaction payment pallets and we don't + // want to introduce additional bounds and traits here just for that, so let's just + // select some presumably large value + sp_std::cmp::max::(Runtime::ExistentialDeposit::get(), 1u32.into()) * + 100_000_000u32.into(), + )], + || { + let mut alice = [0u8; 32]; + alice[0] = 1; + + let included_head = RuntimeHelper::::run_to_block( + 2, + AccountId::from(alice).into(), + ); + mock_open_hrmp_channel::>( + runtime_para_id.into(), + sibling_parachain_id.into(), + included_head, + &alice, + ); + + // set up relayer details and proofs + + let message_destination = + X2(GlobalConsensus(local_relay_chain_id), Parachain(sibling_parachain_id)); + // some random numbers (checked by test) + let message_nonce = 1; + + let xcm = vec![xcm::v3::Instruction::<()>::ClearOrigin; 42]; + let expected_dispatch = xcm::latest::Xcm::<()>({ + let mut expected_instructions = xcm.clone(); + // dispatch prepends bridge pallet instance + expected_instructions.insert( + 0, + DescendOrigin(X1(PalletInstance( + as PalletInfoAccess>::index() + as u8, + ))), + ); + expected_instructions + }); + + execute_and_verify_calls::( + relayer_at_target, + construct_and_apply_extrinsic, + prepare_message_proof_import( + relayer_id_on_target.clone().into(), + relayer_id_on_source.clone().into(), + message_destination, + message_nonce, + xcm.clone().into(), + ), + ); + + // verify that imported XCM contains original message + let imported_xcm = + RuntimeHelper::>::take_xcm( + sibling_parachain_id.into(), + ) + .unwrap(); + let dispatched = xcm::latest::Xcm::<()>::try_from(imported_xcm).unwrap(); + let mut dispatched_clone = dispatched.clone(); + for (idx, expected_instr) in 
expected_dispatch.0.iter().enumerate() { + assert_eq!(expected_instr, &dispatched.0[idx]); + assert_eq!(expected_instr, &dispatched_clone.0.remove(0)); + } + match dispatched_clone.0.len() { + 0 => (), + 1 => assert!(matches!(dispatched_clone.0[0], SetTopic(_))), + count => assert!(false, "Unexpected messages count: {:?}", count), + } + }, + ) +} + +/// Execute every call and verify its outcome. +fn execute_and_verify_calls( + submitter: sp_keyring::AccountKeyring, + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + RuntimeCallOf, + ) -> sp_runtime::DispatchOutcome, + calls_and_verifiers: CallsAndVerifiers, +) { + for (call, verifier) in calls_and_verifiers { + let dispatch_outcome = construct_and_apply_extrinsic(submitter, call); + assert_ok!(dispatch_outcome); + verifier.verify_outcome(); + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..64ec8726599282671cbb50b26b31ebb307e2dcb7 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs @@ -0,0 +1,657 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Module contains predefined test-case scenarios for `Runtime` with bridging capabilities. +//! +//! This file contains tests, suitable for all bridge runtimes. See `from_parachain` and +//! `from_grandpa_chain` submodules for tests, that are specific to the bridged chain type. + +pub mod from_grandpa_chain; +pub mod from_parachain; + +pub(crate) mod helpers; + +use crate::{test_cases::bridges_prelude::*, test_data}; + +use asset_test_utils::BasicParachainRuntime; +use bp_messages::{ + target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, + LaneId, MessageKey, MessagesOperatingMode, OutboundLaneData, +}; +use bp_runtime::BasicOperatingMode; +use bridge_runtime_common::messages_xcm_extension::{ + XcmAsPlainPayload, XcmBlobMessageDispatchResult, +}; +use codec::Encode; +use frame_support::{ + assert_ok, + dispatch::GetDispatchInfo, + traits::{Get, OnFinalize, OnInitialize, OriginTrait}, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use parachains_common::AccountId; +use parachains_runtimes_test_utils::{ + mock_open_hrmp_channel, AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder, RuntimeCallOf, + XcmReceivedFrom, +}; +use sp_runtime::{traits::Zero, AccountId32}; +use xcm::{latest::prelude::*, AlwaysLatest}; +use xcm_builder::DispatchBlobError; +use xcm_executor::{ + traits::{TransactAsset, WeightBounds}, + XcmExecutor, +}; + +/// Common bridges exports. 
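+///
+/// Submodules typically pull these in together with the shared helpers, e.g.:
+///
+/// ```ignore
+/// use crate::{
+///     test_cases::{bridges_prelude::*, helpers, run_test},
+///     test_data,
+/// };
+/// ```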
+pub(crate) mod bridges_prelude { + pub use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig}; + pub use pallet_bridge_messages::{Call as BridgeMessagesCall, Config as BridgeMessagesConfig}; + pub use pallet_bridge_parachains::{ + Call as BridgeParachainsCall, Config as BridgeParachainsConfig, RelayBlockHash, + RelayBlockNumber, + }; +} + +// Re-export test_case from assets +pub use asset_test_utils::include_teleports_for_native_asset_works; + +pub type RuntimeHelper = + parachains_runtimes_test_utils::RuntimeHelper; + +// Re-export test_case from `parachains-runtimes-test-utils` +pub use parachains_runtimes_test_utils::test_cases::change_storage_constant_by_governance_works; + +/// Prepare default runtime storage and run test within this context. +pub fn run_test( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + balances: Vec<(Runtime::AccountId, Runtime::Balance)>, + test: impl FnOnce() -> T, +) -> T +where + Runtime: BasicParachainRuntime, +{ + ExtBuilder::::default() + .with_collators(collator_session_key.collators()) + .with_session_keys(collator_session_key.session_keys()) + .with_safe_xcm_version(XCM_VERSION) + .with_para_id(runtime_para_id.into()) + .with_balances(balances) + .with_tracing() + .build() + .execute_with(|| test()) +} + +/// Test-case makes sure that `Runtime` can process bridging initialize via governance-like call +pub fn initialize_bridge_by_governance_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, +) where + Runtime: BasicParachainRuntime + BridgeGrandpaConfig, + GrandpaPalletInstance: 'static, + RuntimeCallOf: + GetDispatchInfo + From>, +{ + run_test::(collator_session_key, runtime_para_id, vec![], || { + // check mode before + assert_eq!( + pallet_bridge_grandpa::PalletOperatingMode::::try_get(), + Err(()) + ); + + // prepare the `initialize` call + let initialize_call = RuntimeCallOf::::from(BridgeGrandpaCall::< + Runtime, + GrandpaPalletInstance, + >::initialize { + init_data: test_data::initialization_data::(12345), + }); + + // execute XCM with Transacts to `initialize bridge` as governance does + assert_ok!(RuntimeHelper::::execute_as_governance( + initialize_call.encode(), + initialize_call.get_dispatch_info().weight, + ) + .ensure_complete()); + + // check mode after + assert_eq!( + pallet_bridge_grandpa::PalletOperatingMode::::try_get(), + Ok(BasicOperatingMode::Normal) + ); + }) +} + +/// Test-case makes sure that `Runtime` can change bridge GRANDPA pallet operating mode via +/// governance-like call. 
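+///
+/// # Example (illustrative sketch)
+///
+/// A hypothetical invocation from a bridge-hub runtime's tests (placeholder names):
+///
+/// ```ignore
+/// #[test]
+/// fn change_bridge_grandpa_pallet_mode_by_governance_works() {
+///     test_cases::change_bridge_grandpa_pallet_mode_by_governance_works::<
+///         MyRuntime,
+///         MyBridgeGrandpaInstance,
+///     >(collator_session_keys(), MY_BRIDGE_HUB_PARA_ID)
+/// }
+/// ```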
+pub fn change_bridge_grandpa_pallet_mode_by_governance_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, +) where + Runtime: BasicParachainRuntime + BridgeGrandpaConfig, + GrandpaPalletInstance: 'static, + RuntimeCallOf: + GetDispatchInfo + From>, +{ + run_test::(collator_session_key, runtime_para_id, vec![], || { + let dispatch_set_operating_mode_call = |old_mode, new_mode| { + // check old mode + assert_eq!( + pallet_bridge_grandpa::PalletOperatingMode::::get(), + old_mode, + ); + + // prepare the `set_operating_mode` call + let set_operating_mode_call = ::RuntimeCall::from( + pallet_bridge_grandpa::Call::::set_operating_mode { + operating_mode: new_mode, + }, + ); + + // execute XCM with Transacts to `initialize bridge` as governance does + assert_ok!(RuntimeHelper::::execute_as_governance( + set_operating_mode_call.encode(), + set_operating_mode_call.get_dispatch_info().weight, + ) + .ensure_complete()); + + // check mode after + assert_eq!( + pallet_bridge_grandpa::PalletOperatingMode::::try_get(), + Ok(new_mode) + ); + }; + + // check mode before + assert_eq!( + pallet_bridge_grandpa::PalletOperatingMode::::try_get(), + Err(()) + ); + + dispatch_set_operating_mode_call(BasicOperatingMode::Normal, BasicOperatingMode::Halted); + dispatch_set_operating_mode_call(BasicOperatingMode::Halted, BasicOperatingMode::Normal); + }); +} + +/// Test-case makes sure that `Runtime` can change bridge parachains pallet operating mode via +/// governance-like call. +pub fn change_bridge_parachains_pallet_mode_by_governance_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, +) where + Runtime: BasicParachainRuntime + BridgeParachainsConfig, + ParachainsPalletInstance: 'static, + RuntimeCallOf: + GetDispatchInfo + From>, +{ + run_test::(collator_session_key, runtime_para_id, vec![], || { + let dispatch_set_operating_mode_call = |old_mode, new_mode| { + // check old mode + assert_eq!( + pallet_bridge_parachains::PalletOperatingMode::::get(), + old_mode, + ); + + // prepare the `set_operating_mode` call + let set_operating_mode_call = + RuntimeCallOf::::from(pallet_bridge_parachains::Call::< + Runtime, + ParachainsPalletInstance, + >::set_operating_mode { + operating_mode: new_mode, + }); + + // execute XCM with Transacts to `initialize bridge` as governance does + assert_ok!(RuntimeHelper::::execute_as_governance( + set_operating_mode_call.encode(), + set_operating_mode_call.get_dispatch_info().weight, + ) + .ensure_complete()); + + // check mode after + assert_eq!( + pallet_bridge_parachains::PalletOperatingMode::::try_get(), + Ok(new_mode) + ); + }; + + // check mode before + assert_eq!( + pallet_bridge_parachains::PalletOperatingMode::::try_get(), + Err(()) + ); + + dispatch_set_operating_mode_call(BasicOperatingMode::Normal, BasicOperatingMode::Halted); + dispatch_set_operating_mode_call(BasicOperatingMode::Halted, BasicOperatingMode::Normal); + }); +} + +/// Test-case makes sure that `Runtime` can change bridge messaging pallet operating mode via +/// governance-like call. 
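+///
+/// Unlike the GRANDPA and parachains pallets, the messages pallet has a richer operating
+/// mode, so the test walks it through `Basic(Normal)` -> `RejectingOutboundMessages` ->
+/// `Basic(Halted)` -> `Basic(Normal)`.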
+pub fn change_bridge_messages_pallet_mode_by_governance_works( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, +) where + Runtime: BasicParachainRuntime + BridgeMessagesConfig, + MessagesPalletInstance: 'static, + RuntimeCallOf: + GetDispatchInfo + From>, +{ + run_test::(collator_session_key, runtime_para_id, vec![], || { + let dispatch_set_operating_mode_call = |old_mode, new_mode| { + // check old mode + assert_eq!( + pallet_bridge_messages::PalletOperatingMode::::get( + ), + old_mode, + ); + + // encode `set_operating_mode` call + let set_operating_mode_call = RuntimeCallOf::::from(BridgeMessagesCall::< + Runtime, + MessagesPalletInstance, + >::set_operating_mode { + operating_mode: new_mode, + }); + + // execute XCM with Transacts to `initialize bridge` as governance does + assert_ok!(RuntimeHelper::::execute_as_governance( + set_operating_mode_call.encode(), + set_operating_mode_call.get_dispatch_info().weight, + ) + .ensure_complete()); + + // check mode after + assert_eq!( + pallet_bridge_messages::PalletOperatingMode::::try_get(), + Ok(new_mode) + ); + }; + + // check mode before + assert_eq!( + pallet_bridge_messages::PalletOperatingMode::::try_get( + ), + Err(()) + ); + + dispatch_set_operating_mode_call( + MessagesOperatingMode::Basic(BasicOperatingMode::Normal), + MessagesOperatingMode::RejectingOutboundMessages, + ); + dispatch_set_operating_mode_call( + MessagesOperatingMode::RejectingOutboundMessages, + MessagesOperatingMode::Basic(BasicOperatingMode::Halted), + ); + dispatch_set_operating_mode_call( + MessagesOperatingMode::Basic(BasicOperatingMode::Halted), + MessagesOperatingMode::Basic(BasicOperatingMode::Normal), + ); + }); +} + +/// Test-case makes sure that `Runtime` can handle xcm `ExportMessage`: +/// Checks if received XCM messages is correctly added to the message outbound queue for delivery. +/// For SystemParachains we expect unpaid execution. 
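// Illustrative sketch (not part of this diff): a bridge-hub runtime usually wires the
// `initialize_bridge_by_governance_works` and `change_*_pallet_mode_by_governance_works`
// helpers above into plain `#[test]` functions in its own test crate. The module path,
// `Runtime`, `WithBridgedChainMessagesInstance`, `collator_session_keys()` and
// `THIS_PARA_ID` below are assumed names, not defined in this file.
#[test]
fn change_bridge_messages_pallet_mode_by_governance_works() {
	bridge_hub_test_utils::test_cases::change_bridge_messages_pallet_mode_by_governance_works::<
		Runtime,
		WithBridgedChainMessagesInstance,
	>(collator_session_keys(), THIS_PARA_ID)
}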
+pub fn handle_export_message_from_system_parachain_to_outbound_queue_works< + Runtime, + XcmConfig, + MessagesPalletInstance, +>( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + sibling_parachain_id: u32, + unwrap_pallet_bridge_messages_event: Box< + dyn Fn(Vec) -> Option>, + >, + export_message_instruction: fn() -> Instruction, + expected_lane_id: LaneId, + existential_deposit: Option, + maybe_paid_export_message: Option, + prepare_configuration: impl Fn(), +) where + Runtime: BasicParachainRuntime + BridgeMessagesConfig, + XcmConfig: xcm_executor::Config, + MessagesPalletInstance: 'static, +{ + assert_ne!(runtime_para_id, sibling_parachain_id); + let sibling_parachain_location = MultiLocation::new(1, Parachain(sibling_parachain_id)); + + run_test::(collator_session_key, runtime_para_id, vec![], || { + prepare_configuration(); + + // check queue before + assert_eq!( + pallet_bridge_messages::OutboundLanes::::try_get( + expected_lane_id + ), + Err(()) + ); + + // prepare `ExportMessage` + let xcm = if let Some(fee) = maybe_paid_export_message { + // deposit ED to origin (if needed) + if let Some(ed) = existential_deposit { + XcmConfig::AssetTransactor::deposit_asset( + &ed, + &sibling_parachain_location, + Some(&XcmContext::with_message_id([0; 32])), + ) + .expect("deposited ed"); + } + // deposit fee to origin + XcmConfig::AssetTransactor::deposit_asset( + &fee, + &sibling_parachain_location, + Some(&XcmContext::with_message_id([0; 32])), + ) + .expect("deposited fee"); + + Xcm(vec![ + WithdrawAsset(MultiAssets::from(vec![fee.clone()])), + BuyExecution { fees: fee, weight_limit: Unlimited }, + export_message_instruction(), + ]) + } else { + Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + export_message_instruction(), + ]) + }; + + // execute XCM + let hash = xcm.using_encoded(sp_io::hashing::blake2_256); + assert_ok!(XcmExecutor::::execute_xcm( + sibling_parachain_location, + xcm, + hash, + RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), + ) + .ensure_complete()); + + // check queue after + assert_eq!( + pallet_bridge_messages::OutboundLanes::::try_get( + expected_lane_id + ), + Ok(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 0, + latest_generated_nonce: 1, + }) + ); + + // check events + let mut events = >::events() + .into_iter() + .filter_map(|e| unwrap_pallet_bridge_messages_event(e.event.encode())); + assert!(events.any(|e| matches!(e, pallet_bridge_messages::Event::MessageAccepted { .. }))); + }) +} + +/// Test-case makes sure that Runtime can route XCM messages received in inbound queue, +/// We just test here `MessageDispatch` configuration. +/// We expect that runtime can route messages: +/// 1. to Parent (relay chain) +/// 2. 
to Sibling parachain +pub fn message_dispatch_routing_works< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + HrmpChannelOpener, + MessagesPalletInstance, + RuntimeNetwork, + BridgedNetwork, + NetworkDistanceAsParentCount, +>( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + sibling_parachain_id: u32, + unwrap_cumulus_pallet_parachain_system_event: Box< + dyn Fn(Vec) -> Option>, + >, + unwrap_cumulus_pallet_xcmp_queue_event: Box< + dyn Fn(Vec) -> Option>, + >, + expected_lane_id: LaneId, + prepare_configuration: impl Fn(), +) where + Runtime: BasicParachainRuntime + + cumulus_pallet_xcmp_queue::Config + + BridgeMessagesConfig, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + AccountIdOf: From + + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + XcmConfig: xcm_executor::Config, + MessagesPalletInstance: 'static, + HrmpChannelOpener: frame_support::inherent::ProvideInherent< + Call = cumulus_pallet_parachain_system::Call, + >, + RuntimeNetwork: Get, + BridgedNetwork: Get, + NetworkDistanceAsParentCount: Get, +{ + struct NetworkWithParentCount(core::marker::PhantomData<(N, C)>); + impl, C: Get> Get for NetworkWithParentCount { + fn get() -> MultiLocation { + MultiLocation { parents: C::get(), interior: X1(GlobalConsensus(N::get())) } + } + } + + assert_ne!(runtime_para_id, sibling_parachain_id); + + run_test::(collator_session_key, runtime_para_id, vec![], || { + prepare_configuration(); + + let mut alice = [0u8; 32]; + alice[0] = 1; + + let included_head = RuntimeHelper::::run_to_block( + 2, + AccountId::from(alice).into(), + ); + // 1. this message is sent from other global consensus with destination of this Runtime + // relay chain (UMP) + let bridging_message = test_data::simulate_message_exporter_on_bridged_chain::< + BridgedNetwork, + NetworkWithParentCount, + AlwaysLatest, + >((RuntimeNetwork::get(), Here)); + let result = + <>::MessageDispatch>::dispatch( + test_data::dispatch_message(expected_lane_id, 1, bridging_message), + ); + assert_eq!( + format!("{:?}", result.dispatch_level_result), + format!("{:?}", XcmBlobMessageDispatchResult::Dispatched) + ); + + // check events - UpwardMessageSent + let mut events = >::events() + .into_iter() + .filter_map(|e| unwrap_cumulus_pallet_parachain_system_event(e.event.encode())); + assert!(events.any(|e| matches!( + e, + cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } + ))); + + // 2. this message is sent from other global consensus with destination of this Runtime + // sibling parachain (HRMP) + let bridging_message = test_data::simulate_message_exporter_on_bridged_chain::< + BridgedNetwork, + NetworkWithParentCount, + AlwaysLatest, + >((RuntimeNetwork::get(), X1(Parachain(sibling_parachain_id)))); + + // 2.1. WITHOUT opened hrmp channel -> RoutingError + let result = + <>::MessageDispatch>::dispatch( + DispatchMessage { + key: MessageKey { lane_id: expected_lane_id, nonce: 1 }, + data: DispatchMessageData { payload: Ok(bridging_message.clone()) }, + }, + ); + assert_eq!( + format!("{:?}", result.dispatch_level_result), + format!( + "{:?}", + XcmBlobMessageDispatchResult::NotDispatched(Some(DispatchBlobError::RoutingError)) + ) + ); + + // check events - no XcmpMessageSent + assert_eq!( + >::events() + .into_iter() + .filter_map(|e| unwrap_cumulus_pallet_xcmp_queue_event(e.event.encode())) + .count(), + 0 + ); + + // 2.1. 
WITH hrmp channel -> Ok + mock_open_hrmp_channel::( + runtime_para_id.into(), + sibling_parachain_id.into(), + included_head, + &alice, + ); + let result = + <>::MessageDispatch>::dispatch( + DispatchMessage { + key: MessageKey { lane_id: expected_lane_id, nonce: 1 }, + data: DispatchMessageData { payload: Ok(bridging_message) }, + }, + ); + assert_eq!( + format!("{:?}", result.dispatch_level_result), + format!("{:?}", XcmBlobMessageDispatchResult::Dispatched) + ); + + // check events - XcmpMessageSent + let mut events = >::events() + .into_iter() + .filter_map(|e| unwrap_cumulus_pallet_xcmp_queue_event(e.event.encode())); + assert!( + events.any(|e| matches!(e, cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. })) + ); + }) +} + +/// Estimates XCM execution fee for paid `ExportMessage` processing. +pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer< + Runtime, + XcmConfig, + WeightToFee, +>() -> u128 +where + Runtime: frame_system::Config + pallet_balances::Config, + XcmConfig: xcm_executor::Config, + WeightToFee: frame_support::weights::WeightToFee>, + ::Balance: From + Into, +{ + // data here are not relevant for weighing + let mut xcm = Xcm(vec![ + WithdrawAsset(MultiAssets::from(vec![MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(34333299), + }])), + BuyExecution { + fees: MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(34333299), + }, + weight_limit: Unlimited, + }, + ExportMessage { + network: Polkadot, + destination: X1(Parachain(1000)), + xcm: Xcm(vec![ + ReserveAssetDeposited(MultiAssets::from(vec![MultiAsset { + id: Concrete(MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(Kusama)), + }), + fun: Fungible(1000000000000), + }])), + ClearOrigin, + BuyExecution { + fees: MultiAsset { + id: Concrete(MultiLocation { + parents: 2, + interior: X1(GlobalConsensus(Kusama)), + }), + fun: Fungible(1000000000000), + }, + weight_limit: Unlimited, + }, + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: MultiLocation { + parents: 0, + interior: X1(xcm::latest::prelude::AccountId32 { + network: None, + id: [ + 212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, + 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, + 109, 162, 125, + ], + }), + }, + }, + SetTopic([ + 116, 82, 194, 132, 171, 114, 217, 165, 23, 37, 161, 177, 165, 179, 247, 114, + 137, 101, 147, 70, 28, 157, 168, 32, 154, 63, 74, 228, 152, 180, 5, 63, + ]), + ]), + }, + DepositAsset { + assets: Wild(All), + beneficiary: MultiLocation { parents: 1, interior: X1(Parachain(1000)) }, + }, + SetTopic([ + 36, 224, 250, 165, 82, 195, 67, 110, 160, 170, 140, 87, 217, 62, 201, 164, 42, 98, 219, + 157, 124, 105, 248, 25, 131, 218, 199, 36, 109, 173, 100, 122, + ]), + ]); + + // get weight + let weight = XcmConfig::Weigher::weight(&mut xcm); + assert_ok!(weight); + let weight = weight.unwrap(); + // check if sane + let max_expected = Runtime::BlockWeights::get().max_block / 10; + assert!( + weight.all_lte(max_expected), + "calculated weight: {:?}, max_expected: {:?}", + weight, + max_expected + ); + + // check fee, should not be 0 + let estimated_fee = WeightToFee::weight_to_fee(&weight); + assert!(estimated_fee > BalanceOf::::zero()); + + sp_tracing::try_init_simple(); + log::error!( + target: "bridges::estimate", + "Estimate fee: {:?} for `ExportMessage` for runtime: {:?}", + estimated_fee, + Runtime::Version::get(), + ); + + estimated_fee.into() +} diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs new file mode 100644 index 0000000000000000000000000000000000000000..017ec0fd54052ae0b00c19a2c474a8e265c768b0 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs @@ -0,0 +1,244 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Generating test data for bridges with remote GRANDPA chains. + +use crate::test_data::prepare_inbound_xcm; + +use bp_messages::{ + source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, MessageNonce, + UnrewardedRelayersState, +}; +use bp_runtime::{AccountIdOf, BlockNumberOf, HeaderOf, StorageProofSize, UnderlyingChainOf}; +use bp_test_utils::make_default_justification; +use bridge_runtime_common::{ + messages::{ + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, + BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, + }, + messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, + }, + messages_xcm_extension::XcmAsPlainPayload, +}; +use codec::Encode; +use pallet_bridge_grandpa::{BridgedChain, BridgedHeader}; +use sp_runtime::traits::Header as HeaderT; +use xcm::latest::prelude::*; + +use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; +use bp_messages::{DeliveredMessages, InboundLaneData, UnrewardedRelayer}; +use bp_runtime::HashOf; +use sp_runtime::DigestItem; + +/// Prepare a batch call with bridged GRANDPA finality and message proof. +pub fn make_complex_relayer_delivery_batch( + bridged_header: BridgedHeader, + bridged_justification: GrandpaJustification>, + message_proof: FromBridgedChainMessagesProof>>, + relayer_id_at_bridged_chain: AccountIdOf>, +) -> pallet_utility::Call +where + Runtime: pallet_bridge_grandpa::Config + + pallet_bridge_messages::Config< + MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = AccountIdOf>, + > + pallet_utility::Config, + GPI: 'static, + MPI: 'static, + >::SourceHeaderChain: SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof>>, + >, + ::RuntimeCall: From> + + From>, +{ + let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { + finality_target: Box::new(bridged_header), + justification: bridged_justification, + }; + let submit_message = pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }; + pallet_utility::Call::::batch_all { + calls: vec![submit_grandpa.into(), submit_message.into()], + } +} + +/// Prepare a batch call with bridged GRANDPA finality and message delivery proof. 
+pub fn make_complex_relayer_confirmation_batch( + bridged_header: BridgedHeader, + bridged_justification: GrandpaJustification>, + message_delivery_proof: FromBridgedChainMessagesDeliveryProof< + HashOf>, + >, + relayers_state: UnrewardedRelayersState, +) -> pallet_utility::Call +where + Runtime: pallet_bridge_grandpa::Config + + pallet_bridge_messages::Config + + pallet_utility::Config, + GPI: 'static, + MPI: 'static, + >::TargetHeaderChain: TargetHeaderChain< + XcmAsPlainPayload, + Runtime::AccountId, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>, + >, + >, + ::RuntimeCall: From> + + From>, +{ + let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { + finality_target: Box::new(bridged_header), + justification: bridged_justification, + }; + let submit_message_delivery_proof = + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: message_delivery_proof, + relayers_state, + }; + pallet_utility::Call::::batch_all { + calls: vec![submit_grandpa.into(), submit_message_delivery_proof.into()], + } +} + +/// Prepare storage proofs of messages, stored at the (bridged) source GRANDPA chain. +pub fn make_complex_relayer_delivery_proofs( + lane_id: LaneId, + xcm_message: Xcm, + message_nonce: MessageNonce, + message_destination: Junctions, + header_number: BlockNumberOf>, +) -> ( + HeaderOf>, + GrandpaJustification>>, + FromBridgedChainMessagesProof>>, +) +where + MB: MessageBridge, + MessageBridgedChain: Send + Sync + 'static, + UnderlyingChainOf>: ChainWithGrandpa, +{ + let message_payload = prepare_inbound_xcm(xcm_message, message_destination); + let message_size = StorageProofSize::Minimal(message_payload.len() as u32); + // prepare para storage proof containing message + let (state_root, storage_proof) = prepare_messages_storage_proof::( + lane_id, + message_nonce..=message_nonce, + None, + message_size, + message_payload, + encode_all_messages, + encode_lane_data, + ); + + let (header, justification) = make_complex_bridged_grandpa_header_proof::< + MessageBridgedChain, + >(state_root, header_number); + + let message_proof = FromBridgedChainMessagesProof { + bridged_header_hash: header.hash(), + storage_proof, + lane: lane_id, + nonces_start: message_nonce, + nonces_end: message_nonce, + }; + + (header, justification, message_proof) +} + +/// Prepare storage proofs of message confirmations, stored at the (bridged) target GRANDPA chain. 
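// Illustrative sketch (not part of this diff): composing the delivery helpers above in
// a runtime test. The header, justification and message proof come from
// `make_complex_relayer_delivery_proofs`; the batch (a `pallet_utility::Call`) is then
// converted into the runtime call and dispatched as a signed relayer extrinsic,
// typically inside a prepared `run_test` environment. `Runtime`, `GrandpaInstance`,
// `MessagesInstance`, `relayer_at_bridged_chain`, `relayer_on_this_chain` and the proof
// variables are assumed names; `sp_runtime::traits::Dispatchable` must be in scope.
let batch = make_complex_relayer_delivery_batch::<Runtime, GrandpaInstance, MessagesInstance>(
	bridged_header,        // from `make_complex_relayer_delivery_proofs`
	bridged_justification, // from `make_complex_relayer_delivery_proofs`
	message_proof,         // from `make_complex_relayer_delivery_proofs`
	relayer_at_bridged_chain,
);
let call: <Runtime as frame_system::Config>::RuntimeCall = batch.into();
assert_ok!(call.dispatch(RuntimeOrigin::signed(relayer_on_this_chain)));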
+pub fn make_complex_relayer_confirmation_proofs( + lane_id: LaneId, + header_number: BlockNumberOf>, + relayer_id_at_this_chain: AccountIdOf>, + relayers_state: UnrewardedRelayersState, +) -> ( + HeaderOf>, + GrandpaJustification>>, + FromBridgedChainMessagesDeliveryProof>>, +) +where + MB: MessageBridge, + MessageBridgedChain: Send + Sync + 'static, + MessageThisChain: Send + Sync + 'static, + UnderlyingChainOf>: ChainWithGrandpa, +{ + // prepare storage proof containing message delivery proof + let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( + lane_id, + InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: relayer_id_at_this_chain, + messages: DeliveredMessages::new(1) + }; + relayers_state.unrewarded_relayer_entries as usize + ] + .into(), + last_confirmed_nonce: 1, + }, + StorageProofSize::Minimal(0), + ); + + let (header, justification) = + make_complex_bridged_grandpa_header_proof::(state_root, header_number); + + let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: header.hash(), + storage_proof, + lane: lane_id, + }; + + (header, justification, message_delivery_proof) +} + +/// Make bridged GRANDPA chain header with given state root. +pub fn make_complex_bridged_grandpa_header_proof( + state_root: HashOf, + header_number: BlockNumberOf, +) -> (HeaderOf, GrandpaJustification>) +where + BridgedChain: ChainWithGrandpa, +{ + let mut header = bp_test_utils::test_header_with_root::>( + header_number.into(), + state_root.into(), + ); + + // to compute proper cost of GRANDPA call, let's add some dummy bytes to header, so that the + // `submit_finality_proof` call size would be close to maximal expected (and refundable) + let extra_bytes_required = maximal_expected_submit_finality_proof_call_size::() + .saturating_sub(header.encoded_size()); + header.digest_mut().push(DigestItem::Other(vec![42; extra_bytes_required])); + + let justification = make_default_justification(&header); + (header, justification) +} + +/// Maximal expected `submit_finality_proof` call size. +pub fn maximal_expected_submit_finality_proof_call_size() -> usize { + bp_header_chain::max_expected_submit_finality_proof_arguments_size::( + false, + BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1, + ) as usize +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs new file mode 100644 index 0000000000000000000000000000000000000000..932ba231239973db8b46ccea56faacc5628a4ffb --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs @@ -0,0 +1,327 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Generating test data for bridges with remote parachains. 
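// Illustrative note (not part of this diff) on `maximal_expected_submit_finality_proof_call_size`
// in `from_grandpa_chain.rs` above: the header padding targets the worst-case justification,
// i.e. one signed by two thirds of the authority set plus one. For an assumed
// `MAX_AUTHORITIES_COUNT` of 100_000 that worst-case precommit count is:
const MAX_AUTHORITIES_COUNT: u32 = 100_000; // assumed value, for illustration only
const WORST_CASE_PRECOMMITS: u32 = MAX_AUTHORITIES_COUNT * 2 / 3 + 1; // = 66_667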
+ +use super::{from_grandpa_chain::make_complex_bridged_grandpa_header_proof, prepare_inbound_xcm}; + +use bp_messages::{ + source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, + UnrewardedRelayersState, Weight, +}; +use bp_runtime::{ + AccountIdOf, BlockNumberOf, HeaderOf, Parachain, StorageProofSize, UnderlyingChainOf, +}; +use bp_test_utils::prepare_parachain_heads_proof; +use bridge_runtime_common::{ + messages::{ + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, + BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, + }, + messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, + }, + messages_xcm_extension::XcmAsPlainPayload, +}; +use codec::Encode; +use pallet_bridge_grandpa::BridgedHeader; +use pallet_bridge_parachains::{RelayBlockHash, RelayBlockNumber}; +use sp_runtime::traits::Header as HeaderT; +use xcm::latest::prelude::*; + +use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; +use bp_messages::{DeliveredMessages, InboundLaneData, MessageNonce, UnrewardedRelayer}; +use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; +use sp_runtime::SaturatedConversion; + +/// Prepare a batch call with relay finality proof, parachain head proof and message proof. +pub fn make_complex_relayer_delivery_batch( + relay_chain_header: BridgedHeader, + grandpa_justification: GrandpaJustification>, + parachain_heads: Vec<(ParaId, ParaHash)>, + para_heads_proof: ParaHeadsProof, + message_proof: FromBridgedChainMessagesProof, + relayer_id_at_bridged_chain: InboundRelayer, +) -> pallet_utility::Call where + Runtime:pallet_bridge_grandpa::Config + + pallet_bridge_parachains::Config + + pallet_bridge_messages::Config< + MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = InboundRelayer, + > + + pallet_utility::Config, + GPI: 'static, + PPI: 'static, + MPI: 'static, + ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, + <>::BridgedChain as bp_runtime::Chain>::Hash: From, + <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: + From>, + ::RuntimeCall: + From> + + From> + + From>, +{ + let relay_chain_header_hash = relay_chain_header.hash(); + let relay_chain_header_number = *relay_chain_header.number(); + let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }; + let submit_para_head = pallet_bridge_parachains::Call::::submit_parachain_heads { + at_relay_block: ( + relay_chain_header_number.saturated_into(), + relay_chain_header_hash.into(), + ), + parachains: parachain_heads, + parachain_heads_proof: para_heads_proof, + }; + let submit_message = pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), + proof: message_proof.into(), + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }; + pallet_utility::Call::::batch_all { + calls: vec![submit_grandpa.into(), submit_para_head.into(), submit_message.into()], + } +} + +/// Prepare a batch call with relay finality proof, parachain head proof and message delivery +/// proof. 
+pub fn make_complex_relayer_confirmation_batch( + relay_chain_header: BridgedHeader, + grandpa_justification: GrandpaJustification>, + parachain_heads: Vec<(ParaId, ParaHash)>, + para_heads_proof: ParaHeadsProof, + message_delivery_proof: FromBridgedChainMessagesDeliveryProof, + relayers_state: UnrewardedRelayersState, +) -> pallet_utility::Call +where + Runtime: pallet_bridge_grandpa::Config + + pallet_bridge_parachains::Config + + pallet_bridge_messages::Config + + pallet_utility::Config, + GPI: 'static, + PPI: 'static, + MPI: 'static, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::TargetHeaderChain: TargetHeaderChain< + XcmAsPlainPayload, + Runtime::AccountId, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, + >, + ::RuntimeCall: From> + + From> + + From>, +{ + let relay_chain_header_hash = relay_chain_header.hash(); + let relay_chain_header_number = *relay_chain_header.number(); + let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }; + let submit_para_head = pallet_bridge_parachains::Call::::submit_parachain_heads { + at_relay_block: ( + relay_chain_header_number.saturated_into(), + relay_chain_header_hash.into(), + ), + parachains: parachain_heads, + parachain_heads_proof: para_heads_proof, + }; + let submit_message_delivery_proof = + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: message_delivery_proof, + relayers_state, + }; + pallet_utility::Call::::batch_all { + calls: vec![ + submit_grandpa.into(), + submit_para_head.into(), + submit_message_delivery_proof.into(), + ], + } +} + +/// Prepare storage proofs of messages, stored at the source chain. +pub fn make_complex_relayer_delivery_proofs( + lane_id: LaneId, + xcm_message: Xcm, + message_nonce: MessageNonce, + message_destination: Junctions, + para_header_number: u32, + relay_header_number: u32, + bridged_para_id: u32, +) -> ( + HeaderOf, + GrandpaJustification>, + ParaHead, + Vec<(ParaId, ParaHash)>, + ParaHeadsProof, + FromBridgedChainMessagesProof, +) +where + BridgedRelayChain: + bp_runtime::Chain + ChainWithGrandpa, + MB: MessageBridge, + UnderlyingChainOf>: bp_runtime::Chain + Parachain, +{ + let message_payload = prepare_inbound_xcm(xcm_message, message_destination); + let message_size = StorageProofSize::Minimal(message_payload.len() as u32); + // prepare para storage proof containing message + let (para_state_root, para_storage_proof) = prepare_messages_storage_proof::( + lane_id, + message_nonce..=message_nonce, + None, + message_size, + message_payload, + encode_all_messages, + encode_lane_data, + ); + + let (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) = + make_complex_bridged_parachain_heads_proof::( + para_state_root, + para_header_number, + relay_header_number, + bridged_para_id, + ); + + let message_proof = FromBridgedChainMessagesProof { + bridged_header_hash: bridged_para_head.hash(), + storage_proof: para_storage_proof, + lane: lane_id, + nonces_start: message_nonce, + nonces_end: message_nonce, + }; + + ( + relay_chain_header, + justification, + bridged_para_head, + parachain_heads, + para_heads_proof, + message_proof, + ) +} + +/// Prepare storage proofs of message confirmations, stored at the target parachain. 
+pub fn make_complex_relayer_confirmation_proofs( + lane_id: LaneId, + para_header_number: u32, + relay_header_number: u32, + bridged_para_id: u32, + relayer_id_at_this_chain: AccountIdOf>, + relayers_state: UnrewardedRelayersState, +) -> ( + HeaderOf, + GrandpaJustification>, + ParaHead, + Vec<(ParaId, ParaHash)>, + ParaHeadsProof, + FromBridgedChainMessagesDeliveryProof, +) +where + BridgedRelayChain: + bp_runtime::Chain + ChainWithGrandpa, + MB: MessageBridge, + UnderlyingChainOf>: bp_runtime::Chain + Parachain, +{ + // prepare para storage proof containing message delivery proof + let (para_state_root, para_storage_proof) = prepare_message_delivery_storage_proof::( + lane_id, + InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: relayer_id_at_this_chain.into(), + messages: DeliveredMessages::new(1) + }; + relayers_state.unrewarded_relayer_entries as usize + ] + .into(), + last_confirmed_nonce: 1, + }, + StorageProofSize::Minimal(0), + ); + + let (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) = + make_complex_bridged_parachain_heads_proof::( + para_state_root, + para_header_number, + relay_header_number, + bridged_para_id, + ); + + let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: bridged_para_head.hash(), + storage_proof: para_storage_proof, + lane: lane_id, + }; + + ( + relay_chain_header, + justification, + bridged_para_head, + parachain_heads, + para_heads_proof, + message_delivery_proof, + ) +} + +/// Make bridged parachain header with given state root and relay header that is finalizing it. +pub fn make_complex_bridged_parachain_heads_proof( + para_state_root: ParaHash, + para_header_number: u32, + relay_header_number: BlockNumberOf, + bridged_para_id: u32, +) -> ( + HeaderOf, + GrandpaJustification>, + ParaHead, + Vec<(ParaId, ParaHash)>, + ParaHeadsProof, +) +where + BridgedRelayChain: + bp_runtime::Chain + ChainWithGrandpa, + MB: MessageBridge, + ::BridgedChain: Send + Sync + 'static, + ::ThisChain: Send + Sync + 'static, + UnderlyingChainOf>: bp_runtime::Chain + Parachain, +{ + let bridged_para_head = ParaHead( + bp_test_utils::test_header_with_root::>( + para_header_number.into(), + para_state_root, + ) + .encode(), + ); + let (relay_state_root, para_heads_proof, parachain_heads) = + prepare_parachain_heads_proof::>(vec![( + bridged_para_id, + bridged_para_head.clone(), + )]); + assert_eq!(bridged_para_head.hash(), parachain_heads[0].1); + + let (relay_chain_header, justification) = make_complex_bridged_grandpa_header_proof::< + BridgedRelayChain, + >(relay_state_root, relay_header_number); + + (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..f905d21b1871924e1fe9dc4f523471afaf81f536 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs @@ -0,0 +1,144 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Generating test data, used by all tests. + +pub mod from_grandpa_chain; +pub mod from_parachain; + +use bp_messages::{ + target_chain::{DispatchMessage, DispatchMessageData}, + LaneId, MessageKey, +}; +use codec::Encode; +use frame_support::traits::Get; +use pallet_bridge_grandpa::BridgedHeader; +use xcm::latest::prelude::*; + +use bp_messages::MessageNonce; +use bp_runtime::BasicOperatingMode; +use bp_test_utils::authority_list; +use xcm::GetVersion; +use xcm_builder::{HaulBlob, HaulBlobError, HaulBlobExporter}; +use xcm_executor::traits::{validate_export, ExportXcm}; + +pub fn prepare_inbound_xcm( + xcm_message: Xcm, + destination: InteriorMultiLocation, +) -> Vec { + let location = xcm::VersionedInteriorMultiLocation::V3(destination); + let xcm = xcm::VersionedXcm::::V3(xcm_message); + // this is the `BridgeMessage` from polkadot xcm builder, but it has no constructor + // or public fields, so just tuple + // (double encoding, because `.encode()` is called on original Xcm BLOB when it is pushed + // to the storage) + (location, xcm).encode().encode() +} + +/// Helper that creates InitializationData mock data, that can be used to initialize bridge +/// GRANDPA pallet +pub fn initialization_data< + Runtime: pallet_bridge_grandpa::Config, + GrandpaPalletInstance: 'static, +>( + block_number: u32, +) -> bp_header_chain::InitializationData> { + bp_header_chain::InitializationData { + header: Box::new(bp_test_utils::test_header(block_number.into())), + authority_list: authority_list(), + set_id: 1, + operating_mode: BasicOperatingMode::Normal, + } +} + +/// Dummy xcm +pub(crate) fn dummy_xcm() -> Xcm<()> { + vec![Trap(42)].into() +} + +pub(crate) fn dispatch_message( + lane_id: LaneId, + nonce: MessageNonce, + payload: Vec, +) -> DispatchMessage> { + DispatchMessage { + key: MessageKey { lane_id, nonce }, + data: DispatchMessageData { payload: Ok(payload) }, + } +} + +/// Macro used for simulate_export_message and capturing bytes +macro_rules! grab_haul_blob ( + ($name:ident, $grabbed_payload:ident) => { + std::thread_local! { + static $grabbed_payload: std::cell::RefCell>> = std::cell::RefCell::new(None); + } + + struct $name; + impl HaulBlob for $name { + fn haul_blob(blob: Vec) -> Result<(), HaulBlobError>{ + $grabbed_payload.with(|rm| *rm.borrow_mut() = Some(blob)); + Ok(()) + } + } + } +); + +/// Simulates `HaulBlobExporter` and all its wrapping and captures generated plain bytes, +/// which are transferred over bridge. 
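// Illustrative sketch (not part of this diff): the payload built by `prepare_inbound_xcm`
// above is the double-encoded `(VersionedInteriorMultiLocation, VersionedXcm)` tuple, so a
// test can peel it back by decoding a `Vec<u8>` first and then the tuple (`codec::Decode`
// must be in scope). The concrete XCM and destination below are arbitrary examples.
let blob = prepare_inbound_xcm(Xcm::<()>(vec![Trap(42)]), X1(Parachain(1_000)));
let inner: Vec<u8> = Decode::decode(&mut &blob[..]).expect("outer encoding decodes");
let (_location, xcm): (xcm::VersionedInteriorMultiLocation, xcm::VersionedXcm<()>) =
	Decode::decode(&mut &inner[..]).expect("inner tuple decodes");
assert_eq!(xcm, xcm::VersionedXcm::V3(Xcm(vec![Trap(42)])));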
+pub(crate) fn simulate_message_exporter_on_bridged_chain< + SourceNetwork: Get, + DestinationNetwork: Get, + DestinationVersion: GetVersion, +>( + (destination_network, destination_junctions): (NetworkId, Junctions), +) -> Vec { + grab_haul_blob!(GrabbingHaulBlob, GRABBED_HAUL_BLOB_PAYLOAD); + + // lets pretend that some parachain on bridged chain exported the message + let universal_source_on_bridged_chain = + X2(GlobalConsensus(SourceNetwork::get()), Parachain(5678)); + let channel = 1_u32; + + // simulate XCM message export + let (ticket, fee) = validate_export::< + HaulBlobExporter, + >( + destination_network, + channel, + universal_source_on_bridged_chain, + destination_junctions, + dummy_xcm(), + ) + .expect("validate_export to pass"); + log::info!( + target: "simulate_message_exporter_on_bridged_chain", + "HaulBlobExporter::validate fee: {:?}", + fee + ); + let xcm_hash = + HaulBlobExporter::::deliver( + ticket, + ) + .expect("deliver to pass"); + log::info!( + target: "simulate_message_exporter_on_bridged_chain", + "HaulBlobExporter::deliver xcm_hash: {:?}", + xcm_hash + ); + + GRABBED_HAUL_BLOB_PAYLOAD.with(|r| r.take().expect("Encoded message should be here")) +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/migration.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/migration.rs deleted file mode 100644 index 9350d03a2c9fbdd0806a80d94f2ccfa080e34848..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/migration.rs +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Migrations. - -use frame_support::{pallet_prelude::*, traits::OnRuntimeUpgrade, weights::Weight}; -use log; - -/// Initial import of the Kusama Technical Fellowship. -pub(crate) mod import_kusama_fellowship { - use super::*; - use frame_support::{parameter_types, traits::RankedMembers}; - use pallet_ranked_collective::{Config, MemberCount, Pallet as RankedCollective, Rank}; - #[cfg(feature = "try-runtime")] - use sp_std::vec::Vec; - - const TARGET: &str = "runtime::migration::import_fellowship"; - - parameter_types! { - // The Fellowship addresses from Kusama state. 
- pub const FellowshipAddresses: [(Rank, [u8; 32]); 47] = [ - (6, hex_literal::hex!("f0673d30606ee26672707e4fd2bc8b58d3becb7aba2d5f60add64abb5fea4710"),), - (6, hex_literal::hex!("3c235e80e35082b668682531b9b062fda39a46edb94f884d9122d86885fd5f1b"),), - (6, hex_literal::hex!("7628a5be63c4d3c8dbb96c2904b1a9682e02831a1af836c7efc808020b92fa63"),), - (5, hex_literal::hex!("9c84f75e0b1b92f6b003bde6212a8b2c9b776f3720f942b33fed8709f103a268"),), - (5, hex_literal::hex!("bc64065524532ed9e805fb0d39a5c0199216b52871168e5e4d0ab612f8797d61"),), - (5, hex_literal::hex!("2e1884c53071526483b14004e894415f02b55fc2e2aef8e1df8ccf7ce5bd5570"),), - (5, hex_literal::hex!("5c5062779d44ea2ab0469e155b8cf3e004fce71b3b3d38263cd9fa9478f12f28"),), - (4, hex_literal::hex!("4adf51a47b72795366d52285e329229c836ea7bbfe139dbe8fa0700c4f86fc56"),), - (4, hex_literal::hex!("1c90e3dabd3fd0f6bc648045018f78fcee8fe24122c22d8d2a14e9905073d10f"),), - (4, hex_literal::hex!("8e851ed992228f2268ee8c614fe6075d3800060ae14098e0309413a0a81c4470"),), - (3, hex_literal::hex!("720d807d46b941703ffe0278e8b173dc6738c5af8af812ceffc90c69390bbf1f"),), - (3, hex_literal::hex!("c4965f7fe7be8174717a24ffddf684986d122c7e293ddf875cdf9700a07b6812"),), - (3, hex_literal::hex!("beae5bcad1a8c156291b7ddf46b38b0c61a6aaacebd57b21c75627bfe7f9ab71"),), - (3, hex_literal::hex!("ccd87fa65729f7bdaa8305581a7a499aa24c118e83f5714152c0e22617c6fc63"),), - (3, hex_literal::hex!("e0f0f94962fc0a8c1a0f0527dc8e592c67939c46c903b6016cc0a8515da0044d"),), - (3, hex_literal::hex!("984e16482c99cfad1436111e321a86d87d0fac203bf64538f888e45d793b5413"),), - (3, hex_literal::hex!("44a3efb5bfa9023d4ef27b7d31d76f531b4d7772b1679b7fb32b6263ac39100e"),), - (2, hex_literal::hex!("2eba9a39dbfdd5f3cba964355d45e27319f0271023c0353d97dc6df2401b0e3d"),), - (2, hex_literal::hex!("ba3e9b87792bcfcc237fa8181185b8883c77f3e24f45e4a92ab31d07a4703520"),), - (2, hex_literal::hex!("9e6eb74b0a6b39de36fb58d1fab20bc2b3fea96023ce5a47941c20480d99f92e"),), - (2, hex_literal::hex!("ee3d9d8c48ee88dce78fd7bafe3ce2052900eb465085b9324d4f5da26b145f2b"),), - (2, hex_literal::hex!("d8290537d6e31fe1ff165eaa62b63f6f3556dcc720b0d3a6d7eab96275617304"),), - (2, hex_literal::hex!("5a090c88f0438b46b451026597cee760a7bac9d396c9c7b529b68fb78aec5f43"),), - (2, hex_literal::hex!("18d30040a8245c5ff17afc9a8169d7d0771fe7ab4135a64a022c254117340720"),), - (1, hex_literal::hex!("b4f7f03bebc56ebe96bc52ea5ed3159d45a0ce3a8d7f082983c33ef133274747"),), - (1, hex_literal::hex!("caafae0aaa6333fcf4dc193146945fe8e4da74aa6c16d481eef0ca35b8279d73"),), - (1, hex_literal::hex!("a66e0f4e1a121cc83fddf3096e8ec8c9e9c85989f276e39e951fb0e4a5398763"),), - (1, hex_literal::hex!("f65f3cade8f68e8f34c6266b0d37e58a754059ca96816e964f98e17c79505073"),), - (1, hex_literal::hex!("8c232c91ef2a9983ba65c4b75bb86fcbae4d909900ea8aa06c3644ca1161db48"),), - (1, hex_literal::hex!("78e4813814891bd48bc745b79254a978833d41fbe0f387df93cd87eae2468926"),), - (1, hex_literal::hex!("d44824ac8d1edecca67639ca74d208bd2044a10e67c9677e288080191e3fec13"),), - (1, hex_literal::hex!("585e982d74da4f4290d20a73800cfd705cf59e1f5880aaee5506b5eaaf544f49"),), - (1, hex_literal::hex!("d851f44a6f0d0d2f3439a51f2f75f66f4ea1a8e6c33c32f9af75fc188afb7546"),), - (1, hex_literal::hex!("dca89b135d1a6aee0a498610a70eeaed056727c8a4d220da245842e540a54a74"),), - (1, hex_literal::hex!("aa91fc0201f26b713a018669bcd269babf25368eee2493323b1ce0190a178a27"),), - (1, hex_literal::hex!("dc20836f2e4b88c1858d1e3f918e7358043b4a8abcd2874e74d91d26c52eca2a"),), - (1, 
hex_literal::hex!("145d6c503d0cf97f4c7725ca773741bd02e1760bfb52e021af5a9f2de283012c"),), - (1, hex_literal::hex!("307183930b2264c5165f4a210a99520c5f1672b0413d57769fabc19e6866fb25"),), - (1, hex_literal::hex!("6201961514cf5ad87f1c4dd0c392ee28231f805f77975147bf2c33bd671b9822"),), - (1, hex_literal::hex!("c6f57237cd4abfbeed99171495fc784e45a9d5d2814d435de40de00991a73c06"),), - (1, hex_literal::hex!("c1df5c7e8ca56037450c58734326ebe34aec8f7d1928322a12164856365fea73"),), - (1, hex_literal::hex!("12c039004da5e1e846aae808277098c719cef1f4985aed00161a42ac4f0e002f"),), - (1, hex_literal::hex!("7460ac178015d2a7c289bb68ef9fdaac071596ab4425c276a0040aaac7055566"),), - (1, hex_literal::hex!("eec4bd650a277342ebba0954ac786df2623bd6a9d6d3e69b484482336c549f79"),), - (1, hex_literal::hex!("e287c7494655d636a846f5c3347ad2cb3c462a8d46e0832be70fcc0ab54ee62d"),), - (1, hex_literal::hex!("82bf733f44a840f0a5c1935a002d4e541d81298fad6d1da8124073485983860e"),), - (1, hex_literal::hex!("d5b89078eed9b9dfec5c7d8413bac0b720bad3bd4078c4d8c894325713192502"),), - ]; - } - - /// Implements `OnRuntimeUpgrade` trait. - pub struct Migration(PhantomData<(T, I)>); - - impl, I: 'static> OnRuntimeUpgrade for Migration - where - ::AccountId: From<[u8; 32]>, - { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - let onchain_version = RankedCollective::::on_chain_storage_version(); - ensure!(onchain_version == 0, "the storage version must be 0."); - let member_count = MemberCount::::get(0); - ensure!(member_count == 0, "the collective must be uninitialized."); - - Ok(Vec::new()) - } - - fn on_runtime_upgrade() -> Weight { - let current_version = RankedCollective::::current_storage_version(); - let onchain_version = RankedCollective::::on_chain_storage_version(); - let mut weight = T::DbWeight::get().reads(1); - log::info!( - target: TARGET, - "running migration with current storage version {:?} / onchain {:?}.", - current_version, - onchain_version - ); - if onchain_version != 0 { - log::warn!( - target: TARGET, - "unsupported storage version, skipping import_fellowship migration." - ); - return weight - } - let member_count = MemberCount::::get(0); - weight.saturating_accrue(T::DbWeight::get().reads(1)); - if member_count != 0 { - log::warn!( - target: TARGET, - "the collective already initialized, skipping import_fellowship migration." - ); - return weight - } - - for (rank, account_id32) in FellowshipAddresses::get() { - let who: T::AccountId = account_id32.into(); - let _ = as RankedMembers>::induct(&who); - for _ in 0..rank { - let _ = as RankedMembers>::promote(&who); - // 1 write to `IdToIndex` and `IndexToId` per member on each rank. - weight.saturating_accrue(T::DbWeight::get().writes(2)); - } - // 1 write to `IdToIndex` and `IndexToId` per member on each rank. - weight.saturating_accrue(T::DbWeight::get().writes(2)); - // 1 read and 1 write to `Members` and `MemberCount` per member. 
- weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - weight - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - ensure!(MemberCount::::get(0) == 47, "invalid members count at rank 0."); - ensure!(MemberCount::::get(1) == 47, "invalid members count at rank 1."); - ensure!(MemberCount::::get(2) == 24, "invalid members count at rank 2."); - ensure!(MemberCount::::get(3) == 17, "invalid members count at rank 3."); - ensure!(MemberCount::::get(4) == 10, "invalid members count at rank 4."); - ensure!(MemberCount::::get(5) == 7, "invalid members count at rank 5."); - ensure!(MemberCount::::get(6) == 3, "invalid members count at rank 6."); - ensure!(MemberCount::::get(7) == 0, "invalid members count at rank 7."); - Ok(()) - } - } -} - -#[cfg(test)] -pub mod tests { - use super::import_kusama_fellowship::FellowshipAddresses; - use crate::{FellowshipCollectiveInstance as Fellowship, Runtime, System}; - use frame_support::traits::OnRuntimeUpgrade; - use pallet_ranked_collective::Rank; - use parachains_common::AccountId; - use sp_core::crypto::Ss58Codec; - use sp_runtime::{AccountId32, BuildStorage}; - - #[test] - fn check_fellowship_addresses() { - let fellowship_addresses = FellowshipAddresses::get(); - let kusama_fellowship_ss58: [(Rank, _); 47] = [ - (6, "16SDAKg9N6kKAbhgDyxBXdHEwpwHUHs2CNEiLNGeZV55qHna"), /* proof https://kusama.subscan.io/extrinsic/16832707-4 */ - (6, "12MrP337azmkTdfCUKe5XLnSQrbgEKqqfZ4PQC7CZTJKAWR3"), /* proof https://kusama.subscan.io/extrinsic/16967809-2 */ - (6, "FFFF3gBSSDFSvK2HBq4qgLH75DHqXWPHeCnR1BSksAMacBs"), - (5, "G7YVCdxZb8JLpAm9WMnJdNuojNT84AzU62zmvx5P1FMNtg2"), - (5, "15G1iXDLgFyfnJ51FKq1ts44TduMyUtekvzQi9my4hgYt2hs"), /* proof https://kusama.subscan.io/extrinsic/16917610-2 */ - (5, "Dcm1BqR4N7nHuV43TXdET7pNibt1Nzm42FggPHpxKRven53"), - (5, "1363HWTPzDrzAQ6ChFiMU6mP4b6jmQid2ae55JQcKtZnpLGv"), /* proof https://kusama.subscan.io/extrinsic/16961180-2 */ - (4, "EGVQCe73TpFyAZx5uKfE1222XfkT3BSKozjgcqzLBnc5eYo"), - (4, "1eTPAR2TuqLyidmPT9rMmuycHVm9s9czu78sePqg2KHMDrE"), /* proof https://kusama.subscan.io/extrinsic/16921712-3 */ - (4, "14DsLzVyTUTDMm2eP3czwPbH53KgqnQRp3CJJZS9GR7yxGDP"), /* proof https://kusama.subscan.io/extrinsic/16917519-2 */ - (3, "13aYUFHB3umoPoxBEAHSv451iR3RpsNi3t5yBZjX2trCtTp6"), /* proof https://kusama.subscan.io/extrinsic/16917832-3 */ - (3, "H25aCspunTUqAt4D1gC776vKZ8FX3MvQJ3Jde6qDXPQaFxk"), - (3, "GtLQoW4ZqcjExMPq6qB22bYc6NaX1yMzRuGWpSRiHqnzRb9"), - (3, "15db5ksZgmhWE9U8MDq4wLKUdFivLVBybztWV8nmaJvv3NU1"), /* proof https://kusama.subscan.io/extrinsic/16876631-2 */ - (3, "HfFpz4QUxfbocHudf8UU7cMgHqkHpf855Me5X846PZAsAYE"), - (3, "14ShUZUYUR35RBZW6uVVt1zXDxmSQddkeDdXf1JkMA6P721N"), /* proof https://kusama.subscan.io/extrinsic/16918890-8 */ - (3, "12YzxR5TvGzfMVZNnhAJ5Hwi5zExpRWMKv2MuMwZTrddvgoi"), /* proof https://kusama.subscan.io/extrinsic/16924324-3 */ - (2, "Ddb9puChKMHq4gM6o47E551wAmaNeu6kHngX1jzNNqAw782"), - (2, "15DCWHQknBjc5YPFoVj8Pn2KoqrqYywJJ95BYNYJ4Fj3NLqz"), /* proof https://kusama.subscan.io/extrinsic/16834952-2 */ - (2, "14ajTQdrtCA8wZmC4PgD8Y1B2Gy8L4Z3oi2fodxq9FehcFrM"), /* proof https://kusama.subscan.io/extrinsic/16944257-2 */ - (2, "HxhDbS3grLurk1dhDgPiuDaRowHY1xHCU8Vu8on3fdg85tx"), - (2, "HTk3eccL7WBkiyxz1gBcqQRghsJigoDMD7mnQaz1UAbMpQV"), - (2, "EcNWrSPSDcVBRymwr26kk4JVFg92PdoU5Xwp87W2FgFSt9c"), - (2, "D8sM6vKjWaeKy2zCPYWGkLLbWdUtWQrXBTQqr4dSYnVQo21"), - (1, "GfbnnEgRU94n9ed4RFZ6Z9dBAWs5obykigJSwXKU9hsT2uU"), - (1, 
"HA5NtttvyZsxo4wGxGoJJSMaWtdEFZAuGUMFHVWD7fgenPv"), - (1, "14mDeKZ7qp9hqBjjDg51c8BFrf9o69om8piSSRwj2fT5Yb1i"), /* proof https://kusama.subscan.io/extrinsic/16919020-4 */ - (1, "16a357f5Sxab3V2ne4emGQvqJaCLeYpTMx3TCjnQhmJQ71DX"), /* proof https://kusama.subscan.io/extrinsic/16836396-5 */ - (1, "14Ak9rrF6RKHHoLLRUYMnzcvvi1t8E1yAMa7tcmiwUfaqzYK"), /* proof https://kusama.subscan.io/extrinsic/16921990-3 */ - (1, "FJq9JpA9P7EXbmfsN9YiewJaDbQyL6vQyksGtJvzfbn6zf8"), - (1, "15oLanodWWweiZJSoDTEBtrX7oGfq6e8ct5y5E6fVRDPhUgj"), /* proof https://kusama.subscan.io/extrinsic/16876423-7 */ - (1, "EaBqDJJNsZmYdQ4xn1vomPJVNh7fjA6UztZeEjn7ZzdeT7V"), - (1, "HTxCvXKVvUZ7PQq175kCRRLu7XkGfTfErrdNXr1ZuuwVZWv"), - (1, "HZe91A6a1xqbKaw6ofx3GFepJjhVXHrwHEwn6YUDDFphpX9"), - (1, "GRy2P3kBEzSHCbmDJfquku1cyUyhZaAqojRcNE4A4U3MnLd"), - (1, "HYwiBo7Mcv7uUDg4MUoKm2fxzv4dMLAtmmNfzHV8qcQJpAE"), - (1, "1ThiBx5DDxFhoD9GY6tz5Fp4Y7Xn1xfLmDddcoFQghDvvjg"), /* proof https://kusama.subscan.io/extrinsic/16918130-2 */ - (1, "DfqY6XQUSETTszBQ1juocTcG9iiDoXhvq1CoVadBSUqTGJS"), - (1, "EnpgVWGGQVrFdSB2qeXRVdtccV6U5ZscNELBoERbkFD8Wi6"), - (1, "H5BuqCmucJhUUuvjAzPazeVwVCtUSXVQdc5Dnx2q5zD7rVn"), - (1, "GxX7S1pTDdeaGUjpEPPF2we6tgHDhbatFG25pVmVFtGHLH6"), - (1, "CzuUtvKhZNZBjyAXeYviaRXwrLhVrsupJ9PrWmdq7BJTjGR"), - (1, "FCunn2Rx8JqfT5g6noUKKazph4jLDba5rUee7o3ZmJ362Ju"), - (1, "HyPMjWRHCpJS7x2SZ2R6M2XG5ZiCiZag4U4r7gBHRsE5mTc"), - (1, "1682A5hxfiS1Kn1jrUnMYv14T9EuEnsgnBbujGfYbeEbSK3w"), /* proof https://kusama.subscan.io/extrinsic/16919077-2 */ - (1, "13xS6fK6MHjApLnjdX7TJYw1niZmiXasSN91bNtiXQjgEtNx"), /* proof https://kusama.subscan.io/extrinsic/16918212-7 */ - (1, "15qE2YAQCs5Y962RHE7RzNjQxU6Pei21nhkkSM9Sojq1hHps"), /* https://kusama.subscan.io/extrinsic/17352973-2 */ - ]; - - for (index, val) in kusama_fellowship_ss58.iter().enumerate() { - let account: AccountId32 = ::from_string(val.1).unwrap(); - let account32: [u8; 32] = account.clone().into(); - assert_eq!( - fellowship_addresses[index].0, kusama_fellowship_ss58[index].0, - "ranks must be equal." 
- ); - assert_eq!(fellowship_addresses[index].1, account32, "accounts must be equal."); - } - } - - #[test] - fn test_fellowship_import() { - use super::import_kusama_fellowship::Migration; - use pallet_ranked_collective::{IdToIndex, IndexToId, MemberCount, MemberRecord, Members}; - - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext.execute_with(|| { - assert_eq!(MemberCount::::get(0), 0); - Migration::::on_runtime_upgrade(); - assert_eq!(MemberCount::::get(0), 47); - assert_eq!(MemberCount::::get(6), 3); - assert_eq!(MemberCount::::get(7), 0); - for (rank, account_id32) in FellowshipAddresses::get() { - let who = ::AccountId::from(account_id32); - assert!(IdToIndex::::get(0, &who).is_some()); - assert!(IdToIndex::::get(rank + 1, &who).is_none()); - let index = IdToIndex::::get(rank, &who).unwrap(); - assert_eq!(IndexToId::::get(rank, index).unwrap(), who); - assert_eq!( - Members::::get(&who).unwrap(), - MemberRecord::new(rank) - ); - } - }); - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/block_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/block_weights.rs deleted file mode 100644 index e7fdb2aae2a01ec06076de83d94817e540e205dd..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/block_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Importing a block with 0 Extrinsics. - pub const BlockExecutionWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::BlockExecutionWeight::get(); - - // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 100 µs." - ); - // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 50 ms." 
- ); - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbbb62c1392ae2e7517b5dfe920a5b85..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/extrinsic_weights.rs deleted file mode 100644 index 1a4adb968bb7195428ea00d59cd92dcd3b6eea5f..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/extrinsic_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Executing a NO-OP `System::remarks` Extrinsic. - pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::ExtrinsicBaseWeight::get(); - - // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 10 µs." - ); - // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/paritydb_weights.rs deleted file mode 100644 index 25679703831a13b8d1bb7fb7dd4d92fa84b1f255..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/paritydb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights - /// are available for brave runtime engineers who may want to try this out as default. - pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::ParityDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. 
- assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml similarity index 77% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml rename to cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index ca83b84cd8fa60f4078fd6b341bb69850e55c27b..dd526a9e044cde5db6ad47084d14ac146c5ee8d2 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -1,10 +1,13 @@ [package] -name = "collectives-polkadot-runtime" +name = "collectives-westend-runtime" version = "1.0.0" authors.workspace = true edition.workspace = true -description = "Polkadot Collectives Parachain Runtime" license = "Apache-2.0" +description = "Westend Collectives Parachain Runtime" + +[lints] +workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -14,61 +17,63 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } 
+frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-asset-rate = { path = "../../../../../substrate/frame/asset-rate", default-features = false } +pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } pallet-preimage = { path = "../../../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } pallet-scheduler = { path = "../../../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false} -pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false} -pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false} -pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-treasury = { path = "../../../../../substrate/frame/treasury", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false } +pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false } +pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false } +pallet-salary = { path = 
"../../../../../substrate/frame/salary", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } 
+polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -82,12 +87,11 @@ parachains-common = { path = "../../../common", default-features = false } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dev-dependencies] -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} +sp-io = { path = "../../../../../substrate/primitives/io", features = ["std"] } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -98,6 +102,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-alliance/runtime-benchmarks", + "pallet-asset-rate/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", "pallet-collective-content/runtime-benchmarks", @@ -112,6 +117,7 @@ runtime-benchmarks = [ "pallet-salary/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", @@ -123,7 +129,6 @@ runtime-benchmarks = [ ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -132,6 +137,7 @@ try-runtime = [ "frame-system/try-runtime", "frame-try-runtime/try-runtime", "pallet-alliance/try-runtime", + "pallet-asset-rate/try-runtime", 
"pallet-aura/try-runtime", "pallet-authorship/try-runtime", "pallet-balances/try-runtime", @@ -150,6 +156,7 @@ try-runtime = [ "pallet-session/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", + "pallet-treasury/try-runtime", "pallet-utility/try-runtime", "pallet-xcm/try-runtime", "parachain-info/try-runtime", @@ -159,7 +166,6 @@ try-runtime = [ std = [ "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -175,6 +181,7 @@ std = [ "frame-try-runtime?/std", "log/std", "pallet-alliance/std", + "pallet-asset-rate/std", "pallet-aura/std", "pallet-authorship/std", "pallet-balances/std", @@ -194,6 +201,7 @@ std = [ "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", + "pallet-treasury/std", "pallet-utility/std", "pallet-xcm/std", "parachain-info/std", @@ -209,7 +217,6 @@ std = [ "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", - "sp-io/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", @@ -218,9 +225,15 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", + "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/build.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs similarity index 100% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/build.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/build.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs similarity index 91% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs index b055ffc8abf1337e1260156cd149fe21b0573ba0..18c1466bf3624088ded34e4442691d65e6d59afd 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs @@ -32,7 +32,7 @@ pub mod origins; mod tracks; use super::*; -use crate::xcm_config::{DotAssetHub, FellowshipAdminBodyId}; +use crate::xcm_config::{FellowshipAdminBodyId, WndAssetHub}; use frame_support::traits::{EitherOf, MapSuccess, TryMapSuccess}; pub use origins::pallet_origins as pallet_ambassador_origins; use origins::pallet_origins::{ @@ -66,7 +66,7 @@ impl pallet_ambassador_origins::Config for Runtime {} pub type AmbassadorCollectiveInstance = pallet_ranked_collective::Instance2; /// Demotion is by any of: -/// - Root can promote arbitrarily. +/// - Root can demote arbitrarily. /// - the FellowshipAdmin origin (i.e. token holder referendum); /// - a senior members vote by the rank two above the current rank. pub type DemoteOrigin = EitherOf< @@ -114,7 +114,8 @@ parameter_types! 
{ pub const AlarmInterval: BlockNumber = 1; pub const SubmissionDeposit: Balance = 0; pub const UndecidingTimeout: BlockNumber = 7 * DAYS; - // The Ambassador Referenda pallet account, used as a temporarily place to deposit a slashed imbalance before teleport to the treasury. + // The Ambassador Referenda pallet account, used as a temporary place to deposit a slashed + // imbalance before teleport to the treasury. pub AmbassadorPalletAccount: AccountId = account::AMBASSADOR_REFERENDA_PALLET_ID.into_account_truncating(); } @@ -135,7 +136,7 @@ impl pallet_referenda::Config for Runtime { >; type CancelOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; type KillOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; - type Slash = ToParentTreasury; + type Slash = ToParentTreasury; type Votes = pallet_ranked_collective::Votes; type Tally = pallet_ranked_collective::TallyOf; type SubmissionDeposit = SubmissionDeposit; @@ -183,8 +184,11 @@ impl pallet_core_fellowship::Config for Runtime { // - the FellowshipAdmin origin (i.e. token holder referendum); // - a vote among all Head Ambassadors. type ParamsOrigin = EitherOfDiverse< - EnsureXcm>, - EnsureHeadAmbassadorsVoice, + EnsureRoot, + EitherOfDiverse< + EnsureXcm>, + EnsureHeadAmbassadorsVoice, + >, >; // Induction (creating a candidate) is by any of: // - Root; @@ -192,14 +196,17 @@ impl pallet_core_fellowship::Config for Runtime { // - a single Head Ambassador; // - a vote among all senior members. type InductOrigin = EitherOfDiverse< - EnsureXcm>, + EnsureRoot, EitherOfDiverse< - pallet_ranked_collective::EnsureMember< - Runtime, - AmbassadorCollectiveInstance, - { ranks::HEAD_AMBASSADOR_TIER_5 }, + EnsureXcm>, + EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::HEAD_AMBASSADOR_TIER_5 }, + >, + EnsureAmbassadorsVoiceFrom>, >, - EnsureAmbassadorsVoiceFrom>, >, >; type ApproveOrigin = PromoteOrigin; @@ -215,7 +222,7 @@ parameter_types! { pub AmbassadorSalaryLocation: InteriorMultiLocation = PalletInstance(74).into(); } -/// [`PayOverXcm`] setup to pay the Ambassador salary on the AssetHub in DOT. +/// [`PayOverXcm`] setup to pay the Ambassador salary on the AssetHub in WND. 
pub type AmbassadorSalaryPaymaster = PayOverXcm< AmbassadorSalaryLocation, crate::xcm_config::XcmRouter, @@ -223,7 +230,7 @@ pub type AmbassadorSalaryPaymaster = PayOverXcm< ConstU32<{ 6 * HOURS }>, AccountId, (), - ConvertToValue, + ConvertToValue, AliasesIntoAccountId32<(), AccountId>, >; diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs similarity index 97% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs index 073d8e6ee362ad97f9fa66c612fca5e1a8123ab2..d4a2d3bbf1c7becba8ea6ed560496a0a8114b185 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs @@ -56,7 +56,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -78,7 +78,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -100,7 +100,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -122,7 +122,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -144,7 +144,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -166,7 +166,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -188,7 +188,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * 
HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -210,7 +210,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -232,7 +232,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 24 * HOURS, min_enactment_period: 1 * HOURS, min_approval: pallet_referenda::Curve::LinearDecreasing { diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs similarity index 62% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs index 2a2757ea5cebc28a1f85bbc28d50e0c523b54d90..3fd108c0a5cfb9221641ae05489c00c6e3bd5994 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -14,36 +14,48 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! The Polkadot Technical Fellowship. +//! The Westend Technical Fellowship. -pub(crate) mod migration; mod origins; mod tracks; use crate::{ impls::ToParentTreasury, weights, - xcm_config::{FellowshipAdminBodyId, UsdtAssetHub}, - AccountId, Balance, Balances, FellowshipReferenda, GovernanceLocation, PolkadotTreasuryAccount, - Preimage, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, DAYS, + xcm_config::{FellowshipAdminBodyId, TreasurerBodyId, UsdtAssetHub}, + AccountId, AssetRate, Balance, Balances, FellowshipReferenda, GovernanceLocation, Preimage, + Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, WestendTreasuryAccount, DAYS, }; use frame_support::{ parameter_types, - traits::{EitherOf, EitherOfDiverse, MapSuccess, OriginTrait, TryWithMorphedArg}, + traits::{ + EitherOf, EitherOfDiverse, MapSuccess, NeverEnsureOrigin, OriginTrait, TryWithMorphedArg, + }, + PalletId, }; -use frame_system::EnsureRootWithSuccess; +use frame_system::{EnsureRoot, EnsureRootWithSuccess}; pub use origins::{ pallet_origins as pallet_fellowship_origins, Architects, EnsureCanPromoteTo, EnsureCanRetainAt, EnsureFellowship, Fellows, Masters, Members, ToVoice, }; use pallet_ranked_collective::EnsureOfRank; use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use parachains_common::{polkadot::account, HOURS}; +use parachains_common::westend::{account, currency::GRAND}; +use polkadot_runtime_common::impls::{ + LocatableAssetConverter, VersionedLocatableAsset, VersionedMultiLocationConverter, +}; +use sp_arithmetic::Permill; use sp_core::{ConstU128, ConstU32}; -use sp_runtime::traits::{AccountIdConversion, ConstU16, ConvertToValue, Replace, TakeFirst}; +use sp_runtime::traits::{ + AccountIdConversion, ConstU16, ConvertToValue, IdentityLookup, Replace, TakeFirst, +}; +use westend_runtime_constants::time::HOURS; +use xcm::prelude::*; use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; #[cfg(feature = "runtime-benchmarks")] use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure}; 
+#[cfg(feature = "runtime-benchmarks")] +use parachains_common::westend::currency::DOLLARS; /// The Fellowship members' ranks. pub mod ranks { @@ -91,7 +103,7 @@ impl pallet_referenda::Config for Runtime { >; type CancelOrigin = Architects; type KillOrigin = Masters; - type Slash = ToParentTreasury; + type Slash = ToParentTreasury; type Votes = pallet_ranked_collective::Votes; type Tally = pallet_ranked_collective::TallyOf; type SubmissionDeposit = ConstU128<0>; @@ -192,8 +204,6 @@ impl pallet_core_fellowship::Config for Runtime { pub type FellowshipSalaryInstance = pallet_salary::Instance1; -use xcm::prelude::*; - parameter_types! { // The interior location on AssetHub for the paying account. This is the Fellowship Salary // pallet instance (which sits at index 64). This sovereign account will need funding. @@ -237,3 +247,102 @@ impl pallet_salary::Config for Runtime { // Total monthly salary budget. type Budget = ConstU128<{ 100_000 * USDT_UNITS }>; } + +parameter_types! { + pub const FellowshipTreasuryPalletId: PalletId = account::FELLOWSHIP_TREASURY_PALLET_ID; + pub const HundredPercent: Permill = Permill::from_percent(100); + pub const Burn: Permill = Permill::from_percent(0); + pub const MaxBalance: Balance = Balance::max_value(); + // The asset's interior location for the paying account. This is the Fellowship Treasury + // pallet instance (which sits at index 65). + pub FellowshipTreasuryInteriorLocation: InteriorMultiLocation = PalletInstance(65).into(); +} + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + // Benchmark bond. Needed to make `propose_spend` work. + pub const TenPercent: Permill = Permill::from_percent(10); + // Benchmark minimum. Needed to make `propose_spend` work. + pub const BenchmarkProposalBondMinimum: Balance = 1 * DOLLARS; + // Benchmark maximum. Needed to make `propose_spend` work. + pub const BenchmarkProposalBondMaximum: Balance = 10 * DOLLARS; +} + +/// [`PayOverXcm`] setup to pay the Fellowship Treasury. +pub type FellowshipTreasuryPaymaster = PayOverXcm< + FellowshipTreasuryInteriorLocation, + crate::xcm_config::XcmRouter, + crate::PolkadotXcm, + ConstU32<{ 6 * HOURS }>, + VersionedMultiLocation, + VersionedLocatableAsset, + LocatableAssetConverter, + VersionedMultiLocationConverter, +>; + +pub type FellowshipTreasuryInstance = pallet_treasury::Instance1; + +impl pallet_treasury::Config for Runtime { + // The creation of proposals via the treasury pallet is deprecated and should not be utilized. + // Instead, public or fellowship referenda should be used to propose and command the treasury + // spend or spend_local dispatchables. The parameters below have been configured accordingly to + // discourage its use. + // TODO: replace with `NeverEnsure` once polkadot-sdk 1.5 is released. + type ApproveOrigin = NeverEnsureOrigin<()>; + type OnSlash = (); + #[cfg(not(feature = "runtime-benchmarks"))] + type ProposalBond = HundredPercent; + #[cfg(not(feature = "runtime-benchmarks"))] + type ProposalBondMinimum = MaxBalance; + #[cfg(not(feature = "runtime-benchmarks"))] + type ProposalBondMaximum = MaxBalance; + + #[cfg(feature = "runtime-benchmarks")] + type ProposalBond = TenPercent; + #[cfg(feature = "runtime-benchmarks")] + type ProposalBondMinimum = BenchmarkProposalBondMinimum; + #[cfg(feature = "runtime-benchmarks")] + type ProposalBondMaximum = BenchmarkProposalBondMaximum; + // end. 
+ + type WeightInfo = weights::pallet_treasury::WeightInfo; + type PalletId = FellowshipTreasuryPalletId; + type Currency = Balances; + type RejectOrigin = EitherOfDiverse< + EnsureRoot, + EitherOfDiverse>, Fellows>, + >; + type RuntimeEvent = RuntimeEvent; + type SpendPeriod = ConstU32<{ 7 * DAYS }>; + type Burn = Burn; + type BurnDestination = (); + type SpendFunds = (); + type MaxApprovals = ConstU32<100>; + type SpendOrigin = EitherOf< + EitherOf< + EnsureRootWithSuccess, + MapSuccess< + EnsureXcm>, + Replace>, + >, + >, + EitherOf< + MapSuccess>>, + MapSuccess>>, + >, + >; + type AssetKind = VersionedLocatableAsset; + type Beneficiary = VersionedMultiLocation; + type BeneficiaryLookup = IdentityLookup; + #[cfg(not(feature = "runtime-benchmarks"))] + type Paymaster = FellowshipTreasuryPaymaster; + #[cfg(feature = "runtime-benchmarks")] + type Paymaster = PayWithEnsure>>; + type BalanceConverter = AssetRate; + type PayoutPeriod = ConstU32<{ 30 * DAYS }>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments< + sp_core::ConstU8<1>, + ConstU32<1000>, + >; +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/origins.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs similarity index 98% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/tracks.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs index f4ba4e05ec166d9d9b7e5e5ed9b679042067ba13..099bdf4cf7539ed27c2c5af4178113d8cbbd2d1e 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/tracks.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs @@ -124,7 +124,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -146,7 +146,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -168,7 +168,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -190,7 +190,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: 
pallet_referenda::Curve::LinearDecreasing { @@ -212,7 +212,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -234,7 +234,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -256,7 +256,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -278,7 +278,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: pallet_referenda::Curve::LinearDecreasing { @@ -300,7 +300,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { max_deciding: 10, decision_deposit: 5 * DOLLARS, prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, + decision_period: 1 * DAYS, confirm_period: 30 * MINUTES, min_enactment_period: 5 * MINUTES, min_approval: pallet_referenda::Curve::LinearDecreasing { diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/impls.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs similarity index 90% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index b7bfc9f9c6a19f2feeecaf1e1c9c837455c690e0..9074323fe31f8ccab1284e225567bf13dc5c7171 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -15,13 +15,13 @@ //! # Collectives Parachain //! -//! This parachain is for collectives that serve the Polkadot network. +//! This parachain is for collectives that serve the Westend network. //! Each collective is defined by a specialized (possibly instanced) pallet. //! //! ### Governance //! //! As a system parachain, Collectives defers its governance (namely, its `Root` origin), to -//! its Relay Chain parent, Polkadot. +//! its Relay Chain parent, Westend. //! //! ### Collator Selection //! 
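Aside on the `decision_period` changes in the tracks files above (every track drops from `7 * DAYS` to `1 * DAYS`): these constants count blocks, not wall-clock time. A minimal sketch of how such constants are typically derived; the 12-second block time below is an assumption for illustration and is not taken from this diff.

    // Illustrative only; block time is an assumed value, not part of this patch.
    const MILLISECS_PER_BLOCK: u32 = 12_000; // assumed parachain block time
    const MINUTES: u32 = 60_000 / MILLISECS_PER_BLOCK; // blocks per minute
    const HOURS: u32 = 60 * MINUTES;
    const DAYS: u32 = 24 * HOURS;

    fn main() {
        // The diff shortens each track's decision period from 7 days to 1 day of blocks.
        println!("7 * DAYS = {} blocks, 1 * DAYS = {} blocks", 7 * DAYS, DAYS);
    }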
@@ -45,16 +45,13 @@ pub mod fellowship; pub use ambassador::pallet_ambassador_origins; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use fellowship::{ - migration::import_kusama_fellowship, pallet_fellowship_origins, Fellows, - FellowshipCollectiveInstance, -}; +use fellowship::{pallet_fellowship_origins, Fellows}; use impls::{AllianceProposalProvider, EqualOrGreatestRootCmp, ToParentTreasury}; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{AccountIdConversion, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, }; @@ -67,7 +64,7 @@ use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -86,20 +83,22 @@ pub use parachains_common as common; use parachains_common::{ impls::DealWithFees, message_queue::*, - polkadot::{account::*, consensus::*, currency::*, fee::WeightToFee}, + westend::{account::*, consensus::*, currency::*, fee::WeightToFee}, AccountId, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, MINUTES, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; use sp_runtime::RuntimeDebug; -use xcm_config::{GovernanceLocation, XcmOriginToTransactDispatchOrigin}; +use xcm_config::{GovernanceLocation, TreasurerBodyId, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; // Polkadot imports use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use polkadot_runtime_common::{ + impls::VersionedLocatableAsset, BlockHashCount, SlowAdjustingFeeUpdate, +}; use xcm::latest::{prelude::*, BodyId}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -112,10 +111,10 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("collectives"), - impl_name: create_runtime_str!("collectives"), + spec_name: create_runtime_str!("collectives-westend"), + impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -159,25 +158,18 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. 
+#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type AccountId = AccountId; type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = ConstU16<0>; @@ -335,6 +327,7 @@ impl InstanceFilter for ProxyType { RuntimeCall::FellowshipReferenda { .. } | RuntimeCall::FellowshipCore { .. } | RuntimeCall::FellowshipSalary { .. } | + RuntimeCall::FellowshipTreasury { .. } | RuntimeCall::Utility { .. } | RuntimeCall::Multisig { .. } ), @@ -432,7 +425,7 @@ impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! { /// The asset ID for the asset that we use to pay for message delivery fees. - pub FeeAssetId: AssetId = Concrete(xcm_config::DotLocation::get()); + pub FeeAssetId: AssetId = Concrete(xcm_config::WndLocation::get()); /// The base fee for the message delivery fees. pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); } @@ -454,20 +447,13 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ControllerOrigin = EitherOfDiverse, Fellows>; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = - polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -550,11 +536,11 @@ pub const MAX_FELLOWS: u32 = ALLIANCE_MAX_MEMBERS; pub const MAX_ALLIES: u32 = 100; parameter_types! { - pub const AllyDeposit: Balance = 1_000 * UNITS; // 1,000 DOT bond to join as an Ally + pub const AllyDeposit: Balance = 1_000 * UNITS; // 1,000 WND bond to join as an Ally // The Alliance pallet account, used as a temporary place to deposit a slashed imbalance // before the teleport to the Treasury. pub AlliancePalletAccount: AccountId = ALLIANCE_PALLET_ID.into_account_truncating(); - pub PolkadotTreasuryAccount: AccountId = POLKADOT_TREASURY_PALLET_ID.into_account_truncating(); + pub WestendTreasuryAccount: AccountId = WESTEND_TREASURY_PALLET_ID.into_account_truncating(); // The number of blocks a member must wait between giving a retirement notice and retiring. // Supposed to be greater than time required to `kick_member` with alliance motion. 
pub const AllianceRetirementPeriod: BlockNumber = (90 * DAYS) + ALLIANCE_MOTION_DURATION; @@ -567,7 +553,7 @@ impl pallet_alliance::Config for Runtime { type MembershipManager = RootOrAllianceTwoThirdsMajority; type AnnouncementOrigin = RootOrAllianceTwoThirdsMajority; type Currency = Balances; - type Slashed = ToParentTreasury; + type Slashed = ToParentTreasury; type InitializeMembers = AllianceMotion; type MembershipChanged = AllianceMotion; type RetirementPeriod = AllianceRetirementPeriod; @@ -630,6 +616,21 @@ impl pallet_preimage::Config for Runtime { >; } +impl pallet_asset_rate::Config for Runtime { + type WeightInfo = weights::pallet_asset_rate::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type CreateOrigin = EitherOfDiverse< + EnsureRoot, + EitherOfDiverse>, Fellows>, + >; + type RemoveOrigin = Self::CreateOrigin; + type UpdateOrigin = Self::CreateOrigin; + type Currency = Balances; + type AssetKind = VersionedLocatableAsset; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::AssetRateArguments; +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -657,7 +658,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Handy utilities. @@ -666,6 +666,7 @@ construct_runtime!( Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 43, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 44, + AssetRate: pallet_asset_rate::{Pallet, Call, Storage, Event} = 45, // The main stage. @@ -683,6 +684,8 @@ construct_runtime!( FellowshipCore: pallet_core_fellowship::::{Pallet, Call, Storage, Event} = 63, // pub type FellowshipSalaryInstance = pallet_salary::Instance1; FellowshipSalary: pallet_salary::::{Pallet, Call, Storage, Event} = 64, + // pub type FellowshipTreasuryInstance = pallet_treasury::Instance1; + FellowshipTreasury: pallet_treasury::::{Pallet, Call, Storage, Event} = 65, // Ambassador Program. AmbassadorCollective: pallet_ranked_collective::::{Pallet, Call, Storage, Event} = 70, @@ -718,10 +721,10 @@ pub type UncheckedExtrinsic = /// All migrations executed on runtime upgrade as a nested tuple of types implementing /// `OnRuntimeUpgrade`. Included migrations must be idempotent. type Migrations = ( - // v9420 - import_kusama_fellowship::Migration, // unreleased pallet_collator_selection::migration::v1::MigrateToV1, + // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, ); /// Executive: handles dispatch to the various modules. 
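The `Migrations` hunk above swaps the one-off Kusama fellowship import for the collator-selection v1 and XCMP-queue v4 storage migrations; as the doc comment notes, every entry must be idempotent. A minimal sketch of what one entry in that tuple looks like, using a hypothetical migration name that is not part of this diff:

    use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

    /// Hypothetical example of a migration that could sit in the `Migrations` tuple.
    pub struct NoopMigration;

    impl OnRuntimeUpgrade for NoopMigration {
        fn on_runtime_upgrade() -> Weight {
            // Must be idempotent: Executive runs the tuple once per runtime upgrade,
            // but a re-run (e.g. under try-runtime) must not corrupt state.
            Weight::zero()
        }
    }

    // The tuple is then threaded through Executive, roughly:
    // type Executive = frame_executive::Executive<
    //     Runtime, Block, frame_system::ChainContext<Runtime>, Runtime,
    //     AllPalletsWithSystem, (NoopMigration, /* further migrations */),
    // >;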
@@ -748,10 +751,9 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] [pallet_alliance, Alliance] [pallet_collective, AllianceMotion] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_preimage, Preimage] [pallet_scheduler, Scheduler] [pallet_referenda, FellowshipReferenda] @@ -763,6 +765,8 @@ mod benches { [pallet_collective_content, AmbassadorContent] [pallet_core_fellowship, AmbassadorCore] [pallet_salary, AmbassadorSalary] + [pallet_treasury, FellowshipTreasury] + [pallet_asset_rate, AssetRate] ); } @@ -939,6 +943,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -968,6 +973,41 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Collectives and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Collectives. + None + } + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + // Collectives only supports teleports to system parachain. + // Relay/native token can be teleported between Collectives and Relay. 
+ let native_location = Parent.into(); + let dest = Parent.into(); + pallet_xcm::benchmarking::helpers::native_teleport_as_asset_transfer::( + native_location, + dest + ) + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/block_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs similarity index 100% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/block_weights.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs similarity index 100% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/extrinsic_weights.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs similarity index 84% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs index b6f1dc8dc08038a8c614f10914f9fd8c14fb10ca..f43c5e0a40b6356f3caee233ad1af7289a95a7df 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs @@ -151,4 +151,31 @@ impl frame_system::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. 
+ Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs similarity index 95% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs index 1d877fdbd2bbe2b18fbefbc6bd39357c8a0e21a6..a9a298e547edb49738b9a0612d04d4141301d4ba 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -14,12 +14,12 @@ // limitations under the License. pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_alliance; +pub mod pallet_asset_rate; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_collective; @@ -39,6 +39,7 @@ pub mod pallet_salary_fellowship_salary; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; @@ -46,5 +47,4 @@ pub mod rocksdb_weights; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_asset_rate.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_asset_rate.rs new file mode 100644 index 0000000000000000000000000000000000000000..51b0580f8575f643d48f4a094d8a6fb2548acc80 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_asset_rate.rs @@ -0,0 +1,85 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_rate` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-28, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-westend-dev +// --steps=2 +// --repeat=2 +// --pallet=pallet-asset-rate +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_rate`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_rate::WeightInfo for WeightInfo { + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `4703` + // Minimum execution time: 102_000_000 picoseconds. + Weight::from_parts(112_000_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + fn update() -> Weight { + // Proof Size summary in bytes: + // Measured: `74` + // Estimated: `4703` + // Minimum execution time: 101_000_000 picoseconds. + Weight::from_parts(105_000_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + fn remove() -> Weight { + // Proof Size summary in bytes: + // Measured: `74` + // Estimated: `4703` + // Minimum execution time: 112_000_000 picoseconds. 
+ Weight::from_parts(116_000_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs similarity index 91% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs index 2c729e8dc1078e70b1329c3b30ff05480109d121..03f3ff602a5b3e91c4e2ff90a4a3433d513079a1 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs @@ -121,7 +121,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -175,6 +175,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs similarity index 100% rename from 
cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs similarity index 100% rename from 
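A note on the weight modules renamed and added around here: every autogenerated function in them follows the same shape, a benchmark-measured base `Weight` plus `T::DbWeight` charges for the storage reads and writes listed in the doc comments. Below is a minimal, self-contained sketch of that shape only; it uses `RocksDbWeight` as a stand-in for the runtime's configured `T::DbWeight`, and the helper name and constants are illustrative assumptions, not lines from this diff.

// Sketch of the generated weight-function pattern (illustrative only).
use frame_support::weights::{constants::RocksDbWeight, Weight};

/// Hypothetical helper: a benchmark-derived base cost plus `reads` storage
/// reads and `writes` storage writes charged at the database weight.
fn example_weight(reads: u64, writes: u64) -> Weight {
    Weight::from_parts(33_027_000, 0)
        .saturating_add(RocksDbWeight::get().reads(reads))
        .saturating_add(RocksDbWeight::get().writes(writes))
}
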
cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs new file mode 100644 index 0000000000000000000000000000000000000000..58540e646d8c3885bf84da512e2ddc3e42abc80e --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs @@ -0,0 +1,214 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_treasury` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-28, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-westend-dev +// --steps=2 +// --repeat=2 +// --pallet=pallet-treasury +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_treasury`. +pub struct WeightInfo(PhantomData); +impl pallet_treasury::WeightInfo for WeightInfo { + /// Storage: `FellowshipTreasury::ProposalCount` (r:1 w:1) + /// Proof: `FellowshipTreasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FellowshipTreasury::Approvals` (r:1 w:1) + /// Proof: `FellowshipTreasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `FellowshipTreasury::Proposals` (r:0 w:1) + /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + fn spend_local() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `1887` + // Minimum execution time: 117_000_000 picoseconds. + Weight::from_parts(126_000_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipTreasury::ProposalCount` (r:1 w:1) + /// Proof: `FellowshipTreasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FellowshipTreasury::Proposals` (r:0 w:1) + /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + fn propose_spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `143` + // Estimated: `1489` + // Minimum execution time: 264_000_000 picoseconds. + Weight::from_parts(277_000_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipTreasury::Proposals` (r:1 w:1) + /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn reject_proposal() -> Weight { + // Proof Size summary in bytes: + // Measured: `301` + // Estimated: `3593` + // Minimum execution time: 289_000_000 picoseconds. + Weight::from_parts(312_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// The range of component `p` is `[0, 99]`. + fn approve_proposal(_p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 0_000 picoseconds. 
+ Weight::from_parts(0, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `FellowshipTreasury::Approvals` (r:1 w:1) + /// Proof: `FellowshipTreasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + fn remove_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1887` + // Minimum execution time: 62_000_000 picoseconds. + Weight::from_parts(65_000_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:199 w:199) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `FellowshipTreasury::Deactivated` (r:1 w:1) + /// Proof: `FellowshipTreasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:1) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `FellowshipTreasury::Approvals` (r:1 w:1) + /// Proof: `FellowshipTreasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `FellowshipTreasury::Proposals` (r:99 w:99) + /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 99]`. + fn on_initialize_proposals(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145 + p * (250 ±0)` + // Estimated: `256707 + p * (5206 ±0)` + // Minimum execution time: 218_000_000 picoseconds. + Weight::from_parts(221_000_000, 0) + .saturating_add(Weight::from_parts(0, 256707)) + // Standard Error: 154_515 + .saturating_add(Weight::from_parts(399_232_323, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) + } + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + /// Storage: `FellowshipTreasury::SpendCount` (r:1 w:1) + /// Proof: `FellowshipTreasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FellowshipTreasury::Spends` (r:0 w:1) + /// Proof: `FellowshipTreasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + fn spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `4703` + // Minimum execution time: 163_000_000 picoseconds. 
+ Weight::from_parts(171_000_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipTreasury::Spends` (r:1 w:1) + /// Proof: `FellowshipTreasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `629` + // Estimated: `5318` + // Minimum execution time: 472_000_000 picoseconds. + Weight::from_parts(492_000_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `FellowshipTreasury::Spends` (r:1 w:1) + /// Proof: `FellowshipTreasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `5318` + // Minimum execution time: 211_000_000 picoseconds. + Weight::from_parts(215_000_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipTreasury::Spends` (r:1 w:1) + /// Proof: `FellowshipTreasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + fn void_spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `179` + // Estimated: `5318` + // Minimum execution time: 124_000_000 picoseconds. 
+ Weight::from_parts(126_000_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs similarity index 100% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs similarity index 75% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index 57e502841473bcfb0b92a7f8b43fa7b19ed7cfd8..50dfbffde01f21d1138f0fdaa27649f962e245e4 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -33,9 +33,9 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm -// --chain=collectives-polkadot-dev +// --chain=collectives-westend-dev // --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/ +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,22 +64,37 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 25_050_000 picoseconds. - Weight::from_parts(26_382_000, 0) + // Minimum execution time: 24_540_000 picoseconds. 
+ Weight::from_parts(25_439_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `1489` - // Minimum execution time: 21_625_000 picoseconds. - Weight::from_parts(22_076_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `214` + // Estimated: `3679` + // Minimum execution time: 86_614_000 picoseconds. 
+ Weight::from_parts(88_884_000, 0) + .saturating_add(Weight::from_parts(0, 3679)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -91,6 +106,32 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `214` + // Estimated: `3679` + // Minimum execution time: 87_915_000 picoseconds. + Weight::from_parts(90_219_000, 0) + .saturating_add(Weight::from_parts(0, 3679)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { @@ -107,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_076_000 picoseconds. - Weight::from_parts(7_378_000, 0) + // Minimum execution time: 6_872_000 picoseconds. + Weight::from_parts(7_110_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_327_000 picoseconds. - Weight::from_parts(2_454_000, 0) + // Minimum execution time: 2_009_000 picoseconds. + Weight::from_parts(2_163_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -145,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 29_080_000 picoseconds. - Weight::from_parts(29_886_000, 0) + // Minimum execution time: 28_858_000 picoseconds. 
+ Weight::from_parts(29_355_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -171,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_746_000 picoseconds. - Weight::from_parts(31_631_000, 0) + // Minimum execution time: 30_598_000 picoseconds. + Weight::from_parts(31_168_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -183,8 +224,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_208_000 picoseconds. - Weight::from_parts(2_341_000, 0) + // Minimum execution time: 2_090_000 picoseconds. + Weight::from_parts(2_253_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -194,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 16_239_000 picoseconds. - Weight::from_parts(16_881_000, 0) + // Minimum execution time: 16_133_000 picoseconds. + Weight::from_parts(16_433_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -206,8 +247,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 16_711_000 picoseconds. - Weight::from_parts(16_944_000, 0) + // Minimum execution time: 16_012_000 picoseconds. + Weight::from_parts(16_449_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -218,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 18_142_000 picoseconds. - Weight::from_parts(18_470_000, 0) + // Minimum execution time: 17_922_000 picoseconds. + Weight::from_parts(18_426_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } @@ -241,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_687_000 picoseconds. - Weight::from_parts(28_250_000, 0) + // Minimum execution time: 27_280_000 picoseconds. + Weight::from_parts(28_026_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -253,8 +294,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_675_000 picoseconds. - Weight::from_parts(9_992_000, 0) + // Minimum execution time: 9_387_000 picoseconds. + Weight::from_parts(9_644_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -264,8 +305,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 16_597_000 picoseconds. - Weight::from_parts(17_248_000, 0) + // Minimum execution time: 16_649_000 picoseconds. 
+ Weight::from_parts(17_025_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -288,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `11105` - // Minimum execution time: 34_649_000 picoseconds. - Weight::from_parts(35_475_000, 0) + // Minimum execution time: 34_355_000 picoseconds. + Weight::from_parts(35_295_000, 0) .saturating_add(Weight::from_parts(0, 11105)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -302,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_619_000 picoseconds. - Weight::from_parts(4_756_000, 0) + // Minimum execution time: 4_527_000 picoseconds. + Weight::from_parts(4_699_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -314,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_721_000 picoseconds. - Weight::from_parts(27_412_000, 0) + // Minimum execution time: 27_011_000 picoseconds. + Weight::from_parts(27_398_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs similarity index 100% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/paritydb_weights.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs similarity index 100% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/rocksdb_weights.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs similarity index 82% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs rename to cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index c0b3108d2fbfb3750902dc57306459ae5e66f2ce..2e64127d6a1dec7e7e31613b3ed91ca0551ffe15 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -16,50 +16,60 @@ use super::{ AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, Fellows, ParachainInfo, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - TransactionByteFee, WeightToFee, XcmpQueue, + TransactionByteFee, WeightToFee, WestendTreasuryAccount, XcmpQueue, }; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing}, + traits::{ConstU32, Contains, Equals, Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; use 
pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{ + AllSiblingSystemParachains, ConcreteAssetFromSystem, ParentRelayOrSiblingParachains, + RelayOrOtherSystemParachains, + }, +}; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use westend_runtime_constants::xcm as xcm_constants; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, - LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, - RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, + DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, LocatableAssetId, + OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; -const FELLOWSHIP_ADMIN_INDEX: u32 = 1; - parameter_types! { - pub const DotLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Polkadot); + pub const WndLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: Option = Some(NetworkId::Westend); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); + pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); - pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); + pub const FellowshipAdminBodyId: BodyId = BodyId::Index(xcm_constants::body::FELLOWSHIP_ADMIN_INDEX); + pub const TreasurerBodyId: BodyId = BodyId::Index(xcm_constants::body::TREASURER_INDEX); pub AssetHub: MultiLocation = (Parent, Parachain(1000)).into(); pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into(); pub UsdtAssetHub: LocatableAssetId = LocatableAssetId { location: AssetHub::get(), asset_id: AssetHubUsdtId::get(), }; - pub DotAssetHub: LocatableAssetId = LocatableAssetId { + pub WndAssetHub: LocatableAssetId = LocatableAssetId { location: AssetHub::get(), - asset_id: DotLocation::get().into(), + asset_id: WndLocation::get().into(), }; } @@ -76,11 +86,12 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. 
+#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, + IsConcrete, // Convert an XCM MultiLocation into a local account id: LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): @@ -130,10 +141,6 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; } /// A call filter for the XCM Transact instruction. This is a temporary measure until we properly @@ -159,21 +166,18 @@ impl Contains for SafeCallFilter { frame_system::Call::set_heap_pages { .. } | frame_system::Call::set_code { .. } | frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::authorize_upgrade { .. } | + frame_system::Call::authorize_upgrade_without_checks { .. } | frame_system::Call::kill_prefix { .. }, ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::XcmpQueue(..) | + RuntimeCall::CollatorSelection(..) | + RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | RuntimeCall::Alliance( // `init_members` accepts unbounded vecs as arguments, @@ -233,7 +237,7 @@ pub type Barrier = TrailingSetTopicAsId< // Parent and its pluralities (i.e. governance bodies) get free execution. AllowExplicitUnpaidExecutionFrom, // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, + AllowSubscriptionsFrom, ), UniversalLocation, ConstU32<8>, @@ -242,9 +246,17 @@ pub type Barrier = TrailingSetTopicAsId< >, >; +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = ( + RelayOrOtherSystemParachains, + Equals, +); + /// Cases where a remote origin is accepted as trusted Teleporter for a given asset: /// - DOT with the parent Relay Chain and sibling parachains. -pub type TrustedTeleporters = ConcreteAssetFromSystem; +pub type TrustedTeleporters = ConcreteAssetFromSystem; pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -252,7 +264,7 @@ impl xcm_executor::Config for XcmConfig { type XcmSender = XcmRouter; type AssetTransactor = CurrencyTransactor; type OriginConverter = XcmOriginToTransactDispatchOrigin; - // Collectives does not recognize a reserve location for any asset. Users must teleport DOT + // Collectives does not recognize a reserve location for any asset. 
Users must teleport WND // where allowed (e.g. with the Relay Chain). type IsReserve = (); type IsTeleporter = TrustedTeleporters; @@ -260,7 +272,7 @@ impl xcm_executor::Config for XcmConfig { type Barrier = Barrier; type Weigher = FixedWeightBounds; type Trader = - UsingComponents>; + UsingComponents>; type ResponseHandler = PolkadotXcm; type AssetTrap = PolkadotXcm; type AssetClaims = PolkadotXcm; @@ -269,7 +281,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; @@ -325,8 +340,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index eded360436b1d2c02a749d263f8d2d2118368fb9..54af73c3d03dd78bd21affd35bbdcae8d1be5664 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,39 +23,38 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive smallvec = "1.11.0" # Substrate -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features 
= false} -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-insecure-randomness-collective-flip = { path = "../../../../../substrate/frame/insecure-randomness-collective-flip", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} -pallet-contracts = { path = "../../../../../substrate/frame/contracts", default-features = false} -pallet-contracts-primitives = { path = "../../../../../substrate/frame/contracts/primitives", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = 
"../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-insecure-randomness-collective-flip = { path = "../../../../../substrate/frame/insecure-randomness-collective-flip", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } +pallet-contracts = { path = "../../../../../substrate/frame/contracts", default-features = false } # Polkadot pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } @@ -68,7 +70,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -79,7 +81,7 @@ parachain-info = { package = "staging-parachain-info", path = "../../../pallets/ parachains-common = { path = "../../../common", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -102,7 +104,6 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", - "pallet-contracts-primitives/std", "pallet-contracts/std", "pallet-insecure-randomness-collective-flip/std", "pallet-message-queue/std", @@ -198,9 +199,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 1c99393d5e52fccf427b40f232d55a833d082c9e..94f2d34b265a8e66feddaa01d524ab7c02cc5549 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -22,13 +22,11 @@ use frame_support::{ traits::{ConstBool, ConstU32, Nothing}, }; use pallet_contracts::{ - migration::{v12, v13, v14, v15}, - weights::SubstrateWeight, - Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, + weights::SubstrateWeight, Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, }; use sp_runtime::Perbill; -pub use parachains_common::{rococo::currency::deposit, AVERAGE_ON_INITIALIZE_RATIO}; +pub use parachains_common::rococo::currency::deposit; // Prints debug output of the `contracts` pallet to stdout if the node is // started with `-lruntime::contracts=debug`. @@ -70,13 +68,9 @@ impl Config for Runtime { type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; - type Migrations = ( - v12::Migration, - v13::Migration, - v14::Migration, - v15::Migration, - ); + type Migrations = (); type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); + type Xcm = pallet_xcm::Pallet; } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index e41db7d9213362a96b8149daac09cd6f55a65d2d..79b6b6be299be1b40fddfd324b68b1e53b9777d6 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -35,7 +35,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::Block as BlockT, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, }; @@ -46,11 +46,11 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, Everything}, + traits::{ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -100,9 +100,11 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( cumulus_pallet_parachain_system::migration::Migration, - cumulus_pallet_xcmp_queue::migration::MigrationToV3, + cumulus_pallet_xcmp_queue::migration::v2::MigrationToV2, + cumulus_pallet_xcmp_queue::migration::v3::MigrationToV3, pallet_contracts::Migration, // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, ); type EventRecord = frame_system::EventRecord< @@ -131,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -169,25 +171,17 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = frame_system::weights::SubstrateWeight; type SS58Prefix = ConstU16<42>; @@ -405,7 +399,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Smart Contracts. @@ -433,7 +426,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [pallet_contracts, Contracts] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] ); } @@ -589,7 +582,7 @@ impl_runtime_apis! { gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> pallet_contracts_primitives::ContractExecResult { + ) -> pallet_contracts::ContractExecResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_call( origin, @@ -609,10 +602,10 @@ impl_runtime_apis! { value: Balance, gas_limit: Option, storage_deposit_limit: Option, - code: pallet_contracts_primitives::Code, + code: pallet_contracts::Code, data: Vec, salt: Vec, - ) -> pallet_contracts_primitives::ContractInstantiateResult { + ) -> pallet_contracts::ContractInstantiateResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_instantiate( origin, @@ -632,7 +625,7 @@ impl_runtime_apis! { code: Vec, storage_deposit_limit: Option, determinism: pallet_contracts::Determinism, - ) -> pallet_contracts_primitives::CodeUploadResult { + ) -> pallet_contracts::CodeUploadResult { Contracts::bare_upload_code( origin, code, @@ -644,7 +637,7 @@ impl_runtime_apis! 
{ fn get_storage( address: AccountId, key: Vec, - ) -> pallet_contracts_primitives::GetStorageResult { + ) -> pallet_contracts::GetStorageResult { Contracts::get_storage(address, key) } } @@ -678,6 +671,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -707,6 +701,42 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use xcm::latest::prelude::*; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Contracts-System-Para and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Contracts-System-Para. + None + } + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + // Contracts-System-Para only supports teleports to system parachain. + // Relay/native token can be teleported between Contracts-System-Para and Relay. + let native_location = Parent.into(); + let dest = Parent.into(); + pallet_xcm::benchmarking::helpers::native_teleport_as_asset_transfer::( + native_location, + dest + ) + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/weights/mod.rs index 30fa2c4060689ff98cc427c84f81866172845e52..b473d49e20e67329d893e1e565330cbe9290c64f 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/weights/mod.rs @@ -24,5 +24,4 @@ pub mod rocksdb_weights; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 4c9f357e1111233f0815863254dd111d8d507f54..569ca6e587c5778a881634bdab441b7a6b938645 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -27,22 +27,26 @@ use frame_support::{ use frame_system::EnsureRoot; use pallet_xcm::{EnsureXcm, IsMajorityOfBody, XcmPassthrough}; use parachains_common::{ - xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains}, + xcm_config::{ + AllSiblingSystemParachains, ConcreteAssetFromSystem, ParentRelayOrSiblingParachains, + RelayOrOtherSystemParachains, + }, TREASURY_PALLET_ID, }; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use 
rococo_runtime_constants::system_parachain; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, - NativeAsset, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, + DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, NativeAsset, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -75,6 +79,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. +#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, @@ -123,10 +128,6 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; } pub type Barrier = TrailingSetTopicAsId< @@ -149,7 +150,7 @@ pub type Barrier = TrailingSetTopicAsId< Equals, )>, // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, + AllowSubscriptionsFrom, ), UniversalLocation, ConstU32<8>, @@ -158,25 +159,13 @@ pub type Barrier = TrailingSetTopicAsId< >, >; -match_types! { - pub type SystemParachains: impl Contains = { - MultiLocation { - parents: 1, - interior: X1(Parachain( - system_parachain::ASSET_HUB_ID | - system_parachain::BRIDGE_HUB_ID | - system_parachain::CONTRACTS_ID | - system_parachain::ENCOINTER_ID - )), - } - }; -} - /// Locations that will not be charged fees in the executor, /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. -pub type WaivedLocations = - (RelayOrOtherSystemParachains, Equals); +pub type WaivedLocations = ( + RelayOrOtherSystemParachains, + Equals, +); pub type TrustedTeleporter = ConcreteAssetFromSystem; @@ -227,11 +216,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. 
@@ -258,8 +242,6 @@ impl pallet_xcm::Config for Runtime { type MaxLockers = ConstU32<8>; // FIXME: Replace with benchmarked weight info type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -311,9 +293,3 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = cumulus_pallet_dmp_queue::weights::SubstrateWeight; - type RuntimeEvent = crate::RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} diff --git a/cumulus/parachains/runtimes/coretime/README.md b/cumulus/parachains/runtimes/coretime/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3f09e57c7d40d3b5161254df4ae543388947aef0 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/README.md @@ -0,0 +1,4 @@ +# Coretime System Chain + +Also known as the "Broker Chain". Described in +[RFC-0001](https://github.com/polkadot-fellows/RFCs/pull/1). diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml similarity index 76% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml rename to cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 3847a352e0782bda61c26ba6207677ae85e2228d..5f7654fecaeb82cd51ca9a9e2fde3a6a40f0e08f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -1,70 +1,74 @@ [package] -name = "bridge-hub-polkadot-runtime" +name = "coretime-rococo-runtime" version = "0.1.0" authors.workspace = true edition.workspace = true -description = "Polkadot's BridgeHub parachain runtime" +description = "Rococo's Coretime parachain runtime" license = "Apache-2.0" +[lints] +workspace = true + [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1" } +hex-literal = "0.4.1" log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.171", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = 
"../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +pallet-broker = { path = "../../../../../substrate/frame/broker", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } 
+sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } +xcm = { package = "staging-xcm", path = 
"../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -73,15 +77,11 @@ pallet-collator-selection = { path = "../../../../pallets/collator-selection", d parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } -[dev-dependencies] -bridge-hub-test-utils = { path = "../test-utils" } - [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -99,10 +99,12 @@ std = [ "pallet-aura/std", "pallet-authorship/std", "pallet-balances/std", + "pallet-broker/std", "pallet-collator-selection/std", "pallet-message-queue/std", "pallet-multisig/std", "pallet-session/std", + "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", @@ -114,6 +116,7 @@ std = [ "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", + "rococo-runtime-constants/std", "scale-info/std", "serde", "sp-api/std", @@ -122,7 +125,6 @@ std = [ "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", - "sp-io/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", @@ -137,7 +139,6 @@ std = [ ] runtime-benchmarks = [ - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -148,9 +149,11 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-broker/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", @@ -165,7 +168,6 @@ runtime-benchmarks = [ try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - 
"cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -176,10 +178,12 @@ try-runtime = [ "pallet-aura/try-runtime", "pallet-authorship/try-runtime", "pallet-balances/try-runtime", + "pallet-broker/try-runtime", "pallet-collator-selection/try-runtime", "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-session/try-runtime", + "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-utility/try-runtime", @@ -189,4 +193,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/build.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs similarity index 100% rename from cumulus/parachains/runtimes/assets/asset-hub-polkadot/build.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs new file mode 100644 index 0000000000000000000000000000000000000000..a85d67f7b4cb815a3a17b159e1ba85677221f64a --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -0,0 +1,227 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use crate::*; +use codec::{Decode, Encode}; +use cumulus_pallet_parachain_system::RelaychainDataProvider; +use frame_support::{ + parameter_types, + traits::{ + fungible::{Balanced, Credit}, + OnUnbalanced, + }, +}; +use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600, RCBlockNumberOf}; +use parachains_common::{AccountId, Balance, BlockNumber}; +use xcm::latest::prelude::*; + +pub struct CreditToCollatorPot; +impl OnUnbalanced> for CreditToCollatorPot { + fn on_nonzero_unbalanced(credit: Credit) { + let staking_pot = CollatorSelection::account_id(); + let _ = >::resolve(&staking_pot, credit); + } +} + +/// A type containing the encoding of the coretime pallet in the Relay chain runtime. Used to +/// construct any remote calls. The codec index must correspond to the index of `Coretime` in the +/// `construct_runtime` of the Relay chain. +#[derive(Encode, Decode)] +enum RelayRuntimePallets { + #[codec(index = 74)] + Coretime(CoretimeProviderCalls), +} + +/// Call encoding for the calls needed from the relay coretime pallet. +#[derive(Encode, Decode)] +enum CoretimeProviderCalls { + #[codec(index = 1)] + RequestCoreCount(CoreIndex), + #[codec(index = 2)] + RequestRevenueInfoAt(BlockNumber), + #[codec(index = 3)] + CreditAccount(AccountId, Balance), + #[codec(index = 4)] + AssignCore(CoreIndex, BlockNumber, Vec<(CoreAssignment, PartsOf57600)>, Option), +} + +parameter_types! 
{
+	pub const BrokerPalletId: PalletId = PalletId(*b"py/broke");
+}
+
+parameter_types! {
+	pub storage CoreCount: Option<CoreIndex> = None;
+	pub storage CoretimeRevenue: Option<(BlockNumber, Balance)> = None;
+}
+
+/// Type that implements the `CoretimeInterface` for the allocation of Coretime. Meant to operate
+/// from the parachain context. That is, the parachain provides a market (broker) for the sale of
+/// coretime, but assumes a `CoretimeProvider` (i.e. a Relay Chain) to actually provide cores.
+pub struct CoretimeAllocator;
+impl CoretimeInterface for CoretimeAllocator {
+	type AccountId = AccountId;
+	type Balance = Balance;
+	type RealyChainBlockNumberProvider = RelaychainDataProvider<Runtime>;
+
+	fn request_core_count(count: CoreIndex) {
+		use crate::coretime::CoretimeProviderCalls::RequestCoreCount;
+		let request_core_count_call = RelayRuntimePallets::Coretime(RequestCoreCount(count));
+
+		let message = Xcm(vec![
+			Instruction::UnpaidExecution {
+				weight_limit: WeightLimit::Unlimited,
+				check_origin: None,
+			},
+			Instruction::Transact {
+				origin_kind: OriginKind::Native,
+				require_weight_at_most: Weight::from_parts(1000000000, 200000),
+				call: request_core_count_call.encode().into(),
+			},
+		]);
+
+		match PolkadotXcm::send_xcm(Here, MultiLocation::parent(), message.clone()) {
+			Ok(_) => log::info!(
+				target: "runtime::coretime",
+				"Request to update schedulable cores sent successfully."
+			),
+			Err(e) => log::error!(
+				target: "runtime::coretime",
+				"Failed to send request to update schedulable cores: {:?}",
+				e
+			),
+		}
+	}
+
+	fn request_revenue_info_at(when: RCBlockNumberOf<Self>) {
+		use crate::coretime::CoretimeProviderCalls::RequestRevenueInfoAt;
+		let request_revenue_info_at_call =
+			RelayRuntimePallets::Coretime(RequestRevenueInfoAt(when));
+
+		let message = Xcm(vec![
+			Instruction::UnpaidExecution {
+				weight_limit: WeightLimit::Unlimited,
+				check_origin: None,
+			},
+			Instruction::Transact {
+				origin_kind: OriginKind::Native,
+				require_weight_at_most: Weight::from_parts(1000000000, 200000),
+				call: request_revenue_info_at_call.encode().into(),
+			},
+		]);
+
+		match PolkadotXcm::send_xcm(Here, MultiLocation::parent(), message.clone()) {
+			Ok(_) => log::info!(
+				target: "runtime::coretime",
+				"Request for revenue information sent successfully."
+			),
+			Err(e) => log::error!(
+				target: "runtime::coretime",
+				"Request for revenue information failed to send: {:?}",
+				e
+			),
+		}
+	}
+
+	fn credit_account(who: Self::AccountId, amount: Self::Balance) {
+		use crate::coretime::CoretimeProviderCalls::CreditAccount;
+		let credit_account_call = RelayRuntimePallets::Coretime(CreditAccount(who, amount));
+
+		let message = Xcm(vec![
+			Instruction::UnpaidExecution {
+				weight_limit: WeightLimit::Unlimited,
+				check_origin: None,
+			},
+			Instruction::Transact {
+				origin_kind: OriginKind::Native,
+				require_weight_at_most: Weight::from_parts(1000000000, 200000),
+				call: credit_account_call.encode().into(),
+			},
+		]);
+
+		match PolkadotXcm::send_xcm(Here, MultiLocation::parent(), message.clone()) {
+			Ok(_) => log::info!(
+				target: "runtime::coretime",
+				"Instruction to credit account sent successfully."
+ ), + Err(e) => log::error!( + target: "runtime::coretime", + "Instruction to credit account failed to send: {:?}", + e + ), + } + } + + fn assign_core( + core: CoreIndex, + begin: RCBlockNumberOf, + assignment: Vec<(CoreAssignment, PartsOf57600)>, + end_hint: Option>, + ) { + use crate::coretime::CoretimeProviderCalls::AssignCore; + let assign_core_call = + RelayRuntimePallets::Coretime(AssignCore(core, begin, assignment, end_hint)); + + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + Instruction::Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: Weight::from_parts(1000000000, 200000), + call: assign_core_call.encode().into(), + }, + ]); + + match PolkadotXcm::send_xcm(Here, MultiLocation::parent(), message.clone()) { + Ok(_) => log::info!( + target: "runtime::coretime", + "Core assignment sent successfully." + ), + Err(e) => log::error!( + target: "runtime::coretime", + "Core assignment failed to send: {:?}", + e + ), + } + } + + fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { + let revenue = CoretimeRevenue::get(); + CoretimeRevenue::set(&None); + revenue + } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance) { + CoretimeRevenue::set(&Some((when, revenue))); + } +} + +impl pallet_broker::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type OnRevenue = CreditToCollatorPot; + type TimeslicePeriod = ConstU32<80>; + type MaxLeasedCores = ConstU32<50>; + type MaxReservedCores = ConstU32<10>; + type Coretime = CoretimeAllocator; + type ConvertBalance = sp_runtime::traits::Identity; + type WeightInfo = weights::pallet_broker::WeightInfo; + type PalletId = BrokerPalletId; + type AdminOrigin = EnsureRoot; + type PriceAdapter = pallet_broker::Linear; +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs similarity index 82% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index bd95ec3fda733053c4ee56ee08a4655916f246f2..2e7889ca012314905d1f8923e8fbb934a4ec151f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. @@ -22,34 +21,18 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +mod coretime; mod weights; pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, - }, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -58,27 +41,33 @@ use frame_system::{ EnsureRoot, }; use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{FellowshipLocation, GovernanceLocation, XcmOriginToTransactDispatchOrigin}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; - -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - use parachains_common::{ impls::DealWithFees, - kusama::{consensus::*, currency::*, fee::WeightToFee}, message_queue::{NarrowOriginToSibling, ParaIdToSibling}, - AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, - HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, + rococo::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, + AVERAGE_ON_INITIALIZE_RATIO, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, +}; +use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiAddress, Perbill, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; +use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; +use xcm::latest::prelude::*; +use xcm_config::{ + FellowshipLocation, 
GovernanceLocation, RocRelayLocation, XcmOriginToTransactDispatchOrigin, }; - -// XCM Imports -use xcm::latest::prelude::BodyId; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -109,10 +98,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. -pub type Migrations = ( - // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, -); +pub type Migrations = (cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4,); /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -132,13 +118,13 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("bridge-hub-kusama"), - impl_name: create_runtime_str!("bridge-hub-kusama"), + spec_name: create_runtime_str!("coretime-rococo"), + impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_005_002, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 3, + transaction_version: 0, state_version: 1, }; @@ -170,46 +156,28 @@ parameter_types! { }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); - pub const SS58Prefix: u8 = 2; + pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. - +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. + /// The nonce type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; /// Block & extrinsics weights: base values and limits. @@ -240,10 +208,8 @@ parameter_types! { } impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. type Balance = Balance; type DustRemoval = (); - /// The ubiquitous event type. 
type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -276,6 +242,7 @@ impl pallet_transaction_payment::Config for Runtime { parameter_types! { pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_parachain_system::Config for Runtime { @@ -283,8 +250,8 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = XcmpQueue; type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type OutboundXcmpMessageSource = XcmpQueue; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; @@ -297,8 +264,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; } -impl parachain_info::Config for Runtime {} - parameter_types! { pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; } @@ -325,10 +290,12 @@ impl pallet_message_queue::Config for Runtime { type ServiceWeight = MessageQueueServiceWeight; } +impl parachain_info::Config for Runtime {} + impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! { - // Fellows pluralistic body. + /// Fellows pluralistic body. pub const FellowsBodyId: BodyId = BodyId::Technical; } @@ -338,27 +305,30 @@ pub type RootOrFellows = EitherOfDiverse< EnsureXcm>, >; +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(RocRelayLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } pub const PERIOD: u32 = 6 * HOURS; @@ -390,7 +360,7 @@ impl pallet_aura::Config for Runtime { parameter_types! { pub const PotId: PalletId = PalletId(*b"PotStake"); pub const SessionLength: BlockNumber = 6 * HOURS; - // StakingAdmin pluralistic body. + /// StakingAdmin pluralistic body. pub const StakingAdminBodyId: BodyId = BodyId::Defense; } @@ -417,9 +387,9 @@ impl pallet_collator_selection::Config for Runtime { } parameter_types! 
{ - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + /// One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. + /// Additional storage item size of 32 bytes. pub const DepositFactor: Balance = deposit(0, 32); } @@ -440,39 +410,48 @@ impl pallet_utility::Config for Runtime { type WeightInfo = weights::pallet_utility::WeightInfo; } +impl pallet_sudo::Config for Runtime { + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_sudo::weights::SubstrateWeight; +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime { // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 3, + System: frame_system = 0, + ParachainSystem: cumulus_pallet_parachain_system = 1, + Timestamp: pallet_timestamp = 3, + ParachainInfo: parachain_info = 4, // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - - // Collator support. The order of these 4 are important and shall not change. - Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, + Balances: pallet_balances = 10, + TransactionPayment: pallet_transaction_payment = 11, + + // Collator support. The order of these 5 are important and shall not change. + Authorship: pallet_authorship = 20, + CollatorSelection: pallet_collator_selection = 21, + Session: pallet_session = 22, + Aura: pallet_aura = 23, + AuraExt: cumulus_pallet_aura_ext = 24, + + // XCM & related + XcmpQueue: cumulus_pallet_xcmp_queue = 30, + PolkadotXcm: pallet_xcm = 31, + CumulusXcm: cumulus_pallet_xcm = 32, + MessageQueue: pallet_message_queue = 34, // Handy utilities. - Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, + Utility: pallet_utility = 40, + Multisig: pallet_multisig = 41, + + // The main stage. 
+ Broker: pallet_broker = 50, + + // Sudo + Sudo: pallet_sudo = 100, } ); @@ -480,17 +459,17 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [cumulus_pallet_parachain_system, ParachainSystem] + [pallet_timestamp, Timestamp] [pallet_balances, Balances] + [pallet_broker, Broker] + [pallet_collator_selection, CollatorSelection] + [pallet_session, SessionBench::] + [cumulus_pallet_xcmp_queue, XcmpQueue] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] - [pallet_session, SessionBench::] [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - // XCM - [pallet_xcm, PolkadotXcm] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -670,6 +649,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -706,31 +686,54 @@ impl_runtime_apis! { impl cumulus_pallet_session_benchmarking::Config for Runtime {} use xcm::latest::prelude::*; - use xcm_config::KsmRelayLocation; + use xcm_config::RocRelayLocation; + + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled + None + } + } parameter_types! { pub ExistentialDepositMultiAsset: Option = Some(( - xcm_config::KsmRelayLocation::get(), + RocRelayLocation::get(), ExistentialDeposit::get() ).into()); } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; - type AccountIdConverter = xcm_config::LocationToAccountId; type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, + xcm_config::XcmConfig, ExistentialDepositMultiAsset, xcm_config::PriceForParentDelivery, >; + type AccountIdConverter = xcm_config::LocationToAccountId; fn valid_destination() -> Result { - Ok(KsmRelayLocation::get()) + Ok(RocRelayLocation::get()) } fn worst_case_holding(_depositable_count: u32) -> MultiAssets { // just concrete assets according to relay chain. let assets: Vec = vec![ MultiAsset { - id: Concrete(KsmRelayLocation::get()), + id: Concrete(RocRelayLocation::get()), fun: Fungible(1_000_000 * UNITS), } ]; @@ -740,8 +743,8 @@ impl_runtime_apis! { parameter_types! 
{ pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - KsmRelayLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(KsmRelayLocation::get()) }, + RocRelayLocation::get(), + MultiAsset { fun: Fungible(UNITS), id: Concrete(RocRelayLocation::get()) }, )); pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; @@ -756,15 +759,15 @@ impl_runtime_apis! { fn get_multi_asset() -> MultiAsset { MultiAsset { - id: Concrete(KsmRelayLocation::get()), + id: Concrete(RocRelayLocation::get()), fun: Fungible(UNITS), } } } impl pallet_xcm_benchmarks::generic::Config for Runtime { - type TransactAsset = Balances; type RuntimeCall = RuntimeCall; + type TransactAsset = Balances; fn worst_case_response() -> (u64, Response) { (0u64, Response::Version(Default::default())) @@ -779,16 +782,16 @@ impl_runtime_apis! { } fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((KsmRelayLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + Ok((RocRelayLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) } fn subscribe_origin() -> Result { - Ok(KsmRelayLocation::get()) + Ok(RocRelayLocation::get()) } fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = KsmRelayLocation::get(); - let assets: MultiAssets = (Concrete(KsmRelayLocation::get()), 1_000 * UNITS).into(); + let origin = RocRelayLocation::get(); + let assets: MultiAssets = (Concrete(RocRelayLocation::get()), 1_000 * UNITS).into(); let ticket = MultiLocation { parents: 0, interior: Here }; Ok((origin, ticket, assets)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/block_weights.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/block_weights.rs similarity index 96% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/block_weights.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/block_weights.rs index e7fdb2aae2a01ec06076de83d94817e540e205dd..b2092d875c8328210667da4cbb95de0642e60ae3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/block_weights.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/block_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..f7a1486ed58972ffb430578f63a1326852e2f74d --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `dagda.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=coretime-rococo-dev +// --wasm-execution=compiled +// --pallet=cumulus_pallet_parachain_system +// --extrinsic=* +// --steps=2 +// --repeat=1 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::LastDmqMqcHead` (r:1 w:1) + /// Proof: `ParachainSystem::LastDmqMqcHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ProcessedDownwardMessages` (r:0 w:1) + /// Proof: `ParachainSystem::ProcessedDownwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1000) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3517` + // Minimum execution time: 3_000_000 picoseconds. 
+ Weight::from_parts(144_747_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1004)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_xcmp_queue.rs similarity index 72% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_xcmp_queue.rs index ac6ad093faf043b9825ef4b0dd241e4d115def07..f5683f747a3ab1915e0b3fd41b9032db7f13b91a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_xcmp_queue.rs @@ -1,38 +1,40 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `dagda.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// bridge-hub-polkadot-dev -// --output -// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// +// --chain=coretime-rococo-dev +// --wasm-execution=compiled +// --pallet=cumulus_pallet_xcmp_queue +// --extrinsic=* +// --steps=2 +// --repeat=1 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -71,8 +73,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `3517` - // Minimum execution time: 15_000_000 picoseconds. - Weight::from_parts(15_000_000, 0) + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(13_000_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -84,7 +86,7 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Measured: `76` // Estimated: `1561` // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(4_000_000, 0) + Weight::from_parts(3_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -96,7 +98,7 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Measured: `111` // Estimated: `1596` // Minimum execution time: 4_000_000 picoseconds. - Weight::from_parts(5_000_000, 0) + Weight::from_parts(4_000_000, 0) .saturating_add(Weight::from_parts(0, 1596)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -105,14 +107,14 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 44_000_000 picoseconds. - Weight::from_parts(45_000_000, 0) + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(42_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) - /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6bedc49980ba3aa32b0a189290fd036649` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6bedc49980ba3aa32b0a189290fd036649` (r:1 w:1) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) @@ -127,20 +129,22 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65711` // Estimated: `69176` - // Minimum execution time: 61_000_000 picoseconds. - Weight::from_parts(64_000_000, 0) + // Minimum execution time: 86_000_000 picoseconds. 
+ Weight::from_parts(86_000_000, 0) .saturating_add(Weight::from_parts(0, 69176)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - fn on_idle_large_msg() -> Weight { + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6bedc49980ba3aa32b0a189290fd036649` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6bedc49980ba3aa32b0a189290fd036649` (r:1 w:1) + fn on_idle_large_msg() -> Weight { // Proof Size summary in bytes: // Measured: `65710` // Estimated: `69175` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(44_000_000, 0) + // Minimum execution time: 79_000_000 picoseconds. + Weight::from_parts(79_000_000, 0) .saturating_add(Weight::from_parts(0, 69175)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/extrinsic_weights.rs similarity index 96% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/extrinsic_weights.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/extrinsic_weights.rs index 1a4adb968bb7195428ea00d59cd92dcd3b6eea5f..332c3b324bb9c1b386257bf7953d37aba8f5af13 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/extrinsic_weights.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/extrinsic_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system.rs similarity index 50% rename from cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system.rs index 0b988fd0f6fd3d5fb6b2a3703b23dcd99efcaa65..7c41112152f9e03d82ff8254f2f9519bf270a11f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system.rs @@ -1,42 +1,40 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
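// Illustrative sketch (not part of the patch): every generated file below follows the
// same shape -- a `WeightInfo<T>(PhantomData<T>)` struct implementing the pallet's
// `WeightInfo` trait, which a runtime typically selects with something like
// `type WeightInfo = weights::frame_system::WeightInfo<Runtime>;` in the pallet's
// `Config` impl. A self-contained toy mirroring that shape; `ExampleWeightInfo` and
// `ExampleWeights` are illustrative names only, not part of any real pallet.
use core::marker::PhantomData;
use frame_support::weights::Weight;

pub trait ExampleWeightInfo {
	fn remark(b: u32) -> Weight;
}

pub struct ExampleWeights<T>(PhantomData<T>);

impl<T> ExampleWeightInfo for ExampleWeights<T> {
	fn remark(_b: u32) -> Weight {
		// Arbitrary flat ref_time for the toy; the real generated impls combine the
		// measured time, a proof-size estimate and `T::DbWeight` read/write costs.
		Weight::from_parts(3_000_000, 0)
	}
}

fn main() {
	// Any type can stand in for the runtime here; the generated files bound
	// `T: frame_system::Config` so they can reach `T::DbWeight`.
	let w = <ExampleWeights<()> as ExampleWeightInfo>::remark(0);
	assert_eq!(w.ref_time(), 3_000_000);
}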
+// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `frame_system` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `dagda.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-polkadot-dev +// --chain=coretime-rococo-dev // --wasm-execution=compiled // --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=1 // --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,26 +48,22 @@ use core::marker::PhantomData; pub struct WeightInfo(PhantomData); impl frame_system::WeightInfo for WeightInfo { /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { + fn remark(_b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_956_000 picoseconds. - Weight::from_parts(3_441_280, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(775_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(388, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. - fn remark_with_event(b: u32, ) -> Weight { + fn remark_with_event(_b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_267_000 picoseconds. - Weight::from_parts(7_462_000, 0) + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(4_700_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_816, 0).saturating_mul(b.into())) } /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -79,8 +73,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 3_757_000 picoseconds. - Weight::from_parts(4_021_000, 0) + // Minimum execution time: 5_000_000 picoseconds. 
+ Weight::from_parts(5_000_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -99,56 +93,76 @@ impl frame_system::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 97_958_650_000 picoseconds. - Weight::from_parts(102_129_539_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) + // Measured: `164` + // Estimated: `1649` + // Minimum execution time: 79_510_000_000 picoseconds. + Weight::from_parts(79_510_000_000, 0) + .saturating_add(Weight::from_parts(0, 1649)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { + fn set_storage(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_327_000 picoseconds. - Weight::from_parts(2_511_000, 0) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(816_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_186 - .saturating_add(Weight::from_parts(755_085, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes(1000)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { + fn kill_storage(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_114_000 picoseconds. - Weight::from_parts(2_177_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(598_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_174 - .saturating_add(Weight::from_parts(584_644, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes(1000)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { + fn kill_prefix(_p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `55 + p * (69 ±0)` + // Estimated: `69609` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(1_091_000_000, 0) + .saturating_add(Weight::from_parts(0, 69609)) + .saturating_add(T::DbWeight::get().reads(1000)) + .saturating_add(T::DbWeight::get().writes(1000)) + } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. 
+ Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `84 + p * (69 ±0)` - // Estimated: `77 + p * (70 ±0)` - // Minimum execution time: 3_799_000 picoseconds. - Weight::from_parts(3_910_000, 0) - .saturating_add(Weight::from_parts(0, 77)) - // Standard Error: 1_968 - .saturating_add(Weight::from_parts(1_220_745, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs similarity index 94% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs index 36733d6d4a6e8f09cd21bd0f14ae8bde53a720c1..7b17d84ac3458acf4853a9683d27abac01b3ef53 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,12 +18,12 @@ //! Expose the auto generated weight files. pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_balances; +pub mod pallet_broker; pub mod pallet_collator_selection; pub mod pallet_message_queue; pub mod pallet_multisig; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_balances.rs similarity index 61% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_balances.rs index 79c88f305806495de0d55c51986cc4ae27731750..ee12da7c436fb24cfd208a7398b0a018312e5390 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_balances.rs @@ -1,42 +1,40 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. 
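// Illustrative sketch (not part of the patch): per-extrinsic values such as
// `set_code` above (~80 ms of ref_time in this quick run) are ultimately compared
// against the runtime's block limits. The 2-second / 5 MiB limit used here is a
// placeholder for illustration, not this runtime's actual `BlockWeights`
// configuration.
use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight};

fn fits_in_block(extrinsic: Weight) -> bool {
	let placeholder_block_limit =
		Weight::from_parts(2 * WEIGHT_REF_TIME_PER_SECOND, 5 * 1024 * 1024);
	// Both axes must fit: ref_time and proof_size are checked independently.
	extrinsic.all_lte(placeholder_block_limit)
}

fn main() {
	// Figures taken from the `set_code` entry above (ref_time and estimated proof size).
	let set_code = Weight::from_parts(79_510_000_000, 1_649);
	assert!(fits_in_block(set_code));
}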
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_balances` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `dagda.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-kusama-dev +// --chain=coretime-rococo-dev // --wasm-execution=compiled // --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=1 // --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,8 +53,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 55_040_000 picoseconds. - Weight::from_parts(56_106_000, 0) + // Minimum execution time: 55_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -67,8 +65,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 41_342_000 picoseconds. - Weight::from_parts(41_890_000, 0) + // Minimum execution time: 42_000_000 picoseconds. 
+ Weight::from_parts(42_000_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -79,8 +77,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 14_723_000 picoseconds. - Weight::from_parts(15_182_000, 0) + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -91,8 +89,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 22_073_000 picoseconds. - Weight::from_parts(22_638_000, 0) + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -103,8 +101,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 57_265_000 picoseconds. - Weight::from_parts(58_222_000, 0) + // Minimum execution time: 66_000_000 picoseconds. + Weight::from_parts(66_000_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -115,8 +113,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 51_485_000 picoseconds. - Weight::from_parts(52_003_000, 0) + // Minimum execution time: 53_000_000 picoseconds. + Weight::from_parts(53_000_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -127,26 +125,23 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 17_460_000 picoseconds. - Weight::from_parts(17_849_000, 0) + // Minimum execution time: 22_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `System::Account` (r:999 w:999) + /// Storage: `System::Account` (r:1000 w:1000) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { + fn upgrade_accounts(_u: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 17_259_000 picoseconds. - Weight::from_parts(17_478_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 16_756 - .saturating_add(Weight::from_parts(15_291_954, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + // Measured: `0 + u * (135 ±0)` + // Estimated: `2603990` + // Minimum execution time: 20_000_000 picoseconds. 
+ Weight::from_parts(14_684_000_000, 0) + .saturating_add(Weight::from_parts(0, 2603990)) + .saturating_add(T::DbWeight::get().reads(1000)) + .saturating_add(T::DbWeight::get().writes(1000)) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs new file mode 100644 index 0000000000000000000000000000000000000000..3cc51a247f7bb5afbd8d6f1b1a7a86fbcf01d008 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -0,0 +1,487 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_broker` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `dagda.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=coretime-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_broker +// --extrinsic=* +// --steps=2 +// --repeat=1 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_broker`. +pub struct WeightInfo(PhantomData); +impl pallet_broker::WeightInfo for WeightInfo { + /// Storage: `Broker::Configuration` (r:0 w:1) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + fn configure() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Reservations` (r:1 w:1) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + fn reserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `4878` + // Estimated: `7496` + // Minimum execution time: 31_000_000 picoseconds. 
+ Weight::from_parts(31_000_000, 0) + .saturating_add(Weight::from_parts(0, 7496)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Reservations` (r:1 w:1) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + fn unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `6080` + // Estimated: `7496` + // Minimum execution time: 24_000_000 picoseconds. + Weight::from_parts(24_000_000, 0) + .saturating_add(Weight::from_parts(0, 7496)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + fn set_lease() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `1526` + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 1526)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::SaleInfo` (r:0 w:1) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:0 w:1) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:10) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn start_sales(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `6192` + // Estimated: `8499` + // Minimum execution time: 55_000_000 picoseconds. + Weight::from_parts(57_000_000, 0) + .saturating_add(Weight::from_parts(0, 8499)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(16)) + } + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::SaleInfo` (r:1 w:1) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Broker::Regions` (r:0 w:1) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + fn purchase() -> Weight { + // Proof Size summary in bytes: + // Measured: `316` + // Estimated: `3593` + // Minimum execution time: 40_000_000 picoseconds. 
+ Weight::from_parts(40_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::SaleInfo` (r:1 w:1) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::AllowedRenewals` (r:1 w:2) + /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:1) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + fn renew() -> Weight { + // Proof Size summary in bytes: + // Measured: `434` + // Estimated: `4698` + // Minimum execution time: 58_000_000 picoseconds. + Weight::from_parts(58_000_000, 0) + .saturating_add(Weight::from_parts(0, 4698)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Broker::Regions` (r:1 w:1) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `357` + // Estimated: `3550` + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 3550)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Regions` (r:1 w:2) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + fn partition() -> Weight { + // Proof Size summary in bytes: + // Measured: `357` + // Estimated: `3550` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3550)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Broker::Regions` (r:1 w:2) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + fn interlace() -> Weight { + // Proof Size summary in bytes: + // Measured: `357` + // Estimated: `3550` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3550)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Regions` (r:1 w:1) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:1 w:1) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + fn assign() -> Weight { + // Proof Size summary in bytes: + // Measured: `602` + // Estimated: `4681` + // Minimum execution time: 25_000_000 picoseconds. + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(0, 4681)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Regions` (r:1 w:1) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:1 w:1) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolIo` (r:2 w:2) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolContribution` (r:0 w:1) + /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn pool() -> Weight { + // Proof Size summary in bytes: + // Measured: `637` + // Estimated: `5996` + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(38_000_000, 0) + .saturating_add(Weight::from_parts(0, 5996)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Broker::InstaPoolContribution` (r:1 w:1) + /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolHistory` (r:3 w:1) + /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `m` is `[1, 3]`. + fn claim_revenue(_m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `721` + // Estimated: `8550` + // Minimum execution time: 65_000_000 picoseconds. 
+ Weight::from_parts(67_000_000, 0) + .saturating_add(Weight::from_parts(0, 8550)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn purchase_credit() -> Weight { + // Proof Size summary in bytes: + // Measured: `284` + // Estimated: `3749` + // Minimum execution time: 64_000_000 picoseconds. + Weight::from_parts(64_000_000, 0) + .saturating_add(Weight::from_parts(0, 3749)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Regions` (r:1 w:1) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + fn drop_region() -> Weight { + // Proof Size summary in bytes: + // Measured: `465` + // Estimated: `3550` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(34_000_000, 0) + .saturating_add(Weight::from_parts(0, 3550)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolContribution` (r:1 w:1) + /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn drop_contribution() -> Weight { + // Proof Size summary in bytes: + // Measured: `463` + // Estimated: `3533` + // Minimum execution time: 47_000_000 picoseconds. 
+ Weight::from_parts(47_000_000, 0) + .saturating_add(Weight::from_parts(0, 3533)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) + /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn drop_history() -> Weight { + // Proof Size summary in bytes: + // Measured: `692` + // Estimated: `3593` + // Minimum execution time: 40_000_000 picoseconds. + Weight::from_parts(40_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::AllowedRenewals` (r:1 w:1) + /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + fn drop_renewal() -> Weight { + // Proof Size summary in bytes: + // Measured: `387` + // Estimated: `4698` + // Minimum execution time: 24_000_000 picoseconds. + Weight::from_parts(24_000_000, 0) + .saturating_add(Weight::from_parts(0, 4698)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `n` is `[0, 1000]`. + fn request_core_count(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `74` + // Estimated: `3539` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(27_000_000, 0) + .saturating_add(Weight::from_parts(0, 3539)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// The range of component `n` is `[0, 1000]`. + fn process_core_count(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `26` + // Estimated: `3491` + // Minimum execution time: 6_000_000 picoseconds. 
+ Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 3491)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) + /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn process_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `515` + // Estimated: `6196` + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(49_000_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::SaleInfo` (r:0 w:1) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:10) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn rotate_sale(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `6143` + // Estimated: `8499` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(47_000_000, 0) + .saturating_add(Weight::from_parts(0, 8499)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(15)) + } + /// Storage: `Broker::InstaPoolIo` (r:1 w:0) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolHistory` (r:0 w:1) + /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + fn process_pool() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3493` + // Minimum execution time: 7_000_000 picoseconds. 
+ Weight::from_parts(7_000_000, 0) + .saturating_add(Weight::from_parts(0, 3493)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::Workplan` (r:1 w:1) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workload` (r:1 w:1) + /// Proof: `Broker::Workload` (`max_values`: None, `max_size`: Some(1212), added: 3687, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn process_core_schedule() -> Weight { + // Proof Size summary in bytes: + // Measured: `1321` + // Estimated: `4786` + // Minimum execution time: 40_000_000 picoseconds. + Weight::from_parts(40_000_000, 0) + .saturating_add(Weight::from_parts(0, 4786)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn request_revenue_info_at() -> Weight { + // Proof Size summary in bytes: + // Measured: `74` + // Estimated: `3539` + // Minimum execution time: 19_000_000 picoseconds. 
+ Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 3539)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn notify_core_count() -> Weight { + T::DbWeight::get().reads_writes(1, 1) + } + /// Storage: `Broker::Status` (r:1 w:1) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + fn do_tick_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3816` + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 3816)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_collator_selection.rs new file mode 100644 index 0000000000000000000000000000000000000000..ca740bc3550f3761a32489bf59220d5d155b5233 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_collator_selection.rs @@ -0,0 +1,265 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_collator_selection` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `dagda.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=coretime-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_collator_selection +// --extrinsic=* +// --steps=2 +// --repeat=1 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collator_selection`. 
+pub struct WeightInfo(PhantomData); +impl pallet_collator_selection::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:20 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 20]`. + fn set_invulnerables(_b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `162 + b * (79 ±0)` + // Estimated: `52242` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(74_000_000, 0) + .saturating_add(Weight::from_parts(0, 52242)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::CandidateList` (r:1 w:1) + /// Proof: `CollatorSelection::CandidateList` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 19]`. + /// The range of component `c` is `[1, 99]`. + fn add_invulnerable(b: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `796 + b * (32 ±0) + c * (52 ±0)` + // Estimated: `6287 + b * (32 ±0) + c * (53 ±0)` + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(37_903_628, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 96_225 + .saturating_add(Weight::from_parts(55_555, 0).saturating_mul(b.into())) + // Standard Error: 17_673 + .saturating_add(Weight::from_parts(40_816, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::CandidateList` (r:1 w:0) + /// Proof: `CollatorSelection::CandidateList` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[5, 20]`. + fn remove_invulnerable(_b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `82 + b * (32 ±0)` + // Estimated: `6287` + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 6287)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_desired_candidates() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_000_000 picoseconds. 
+ Weight::from_parts(6_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:1) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::CandidateList` (r:1 w:1) + /// Proof: `CollatorSelection::CandidateList` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:100 w:100) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:100) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 100]`. + /// The range of component `k` is `[0, 100]`. + fn set_candidacy_bond(c: u32, k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + c * (179 ±0) + k * (130 ±0)` + // Estimated: `261290` + // Minimum execution time: 11_000_000 picoseconds. + Weight::from_parts(11_000_000, 0) + .saturating_add(Weight::from_parts(0, 261290)) + // Standard Error: 5_514_936 + .saturating_add(Weight::from_parts(6_438_000, 0).saturating_mul(c.into())) + // Standard Error: 5_514_936 + .saturating_add(Weight::from_parts(6_368_000, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::CandidateList` (r:1 w:1) + /// Proof: `CollatorSelection::CandidateList` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// The range of component `c` is `[4, 100]`. + fn update_bond(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `243 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 6287)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidateList` (r:1 w:1) + /// Proof: `CollatorSelection::CandidateList` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 99]`. 
+ fn register_as_candidate(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `441 + c * (54 ±0)` + // Estimated: `9299` + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(40_000_000, 0) + .saturating_add(Weight::from_parts(0, 9299)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::CandidateList` (r:1 w:1) + /// Proof: `CollatorSelection::CandidateList` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:2) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[4, 100]`. + fn take_candidate_slot(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `589 + c * (54 ±0)` + // Estimated: `9521` + // Minimum execution time: 54_000_000 picoseconds. + Weight::from_parts(59_000_000, 0) + .saturating_add(Weight::from_parts(0, 9521)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `CollatorSelection::CandidateList` (r:1 w:1) + /// Proof: `CollatorSelection::CandidateList` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[4, 100]`. + fn leave_intent(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `268 + c * (48 ±0)` + // Estimated: `6287` + // Minimum execution time: 31_000_000 picoseconds. + Weight::from_parts(38_000_000, 0) + .saturating_add(Weight::from_parts(0, 6287)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn note_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 49_000_000 picoseconds. 
+ Weight::from_parts(49_000_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `CollatorSelection::CandidateList` (r:1 w:0) + /// Proof: `CollatorSelection::CandidateList` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 100]`. + /// The range of component `c` is `[1, 100]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `340 + c * (97 ±0)` + // Estimated: `6287 + c * (2519 ±0)` + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(11_136_363, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 323_666 + .saturating_add(Weight::from_parts(35_353, 0).saturating_mul(r.into())) + // Standard Error: 323_666 + .saturating_add(Weight::from_parts(4_328_282, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..e0e8c79ca17fceabbf794832a0f5e0736a754d72 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_message_queue.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `dagda.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=coretime-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_message_queue +// --extrinsic=* +// --steps=2 +// --repeat=1 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_message_queue::WeightInfo for WeightInfo<T> { + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `223` + // Estimated: `6044` + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(13_000_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `6044` + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 7_000_000 picoseconds.
+ Weight::from_parts(7_000_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(7_000_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 65_000_000 picoseconds. + Weight::from_parts(65_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `3517` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(8_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 73_000_000 picoseconds. + Weight::from_parts(73_000_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 74_000_000 picoseconds. 
+ Weight::from_parts(74_000_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 109_000_000 picoseconds. + Weight::from_parts(109_000_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_multisig.rs similarity index 54% rename from cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_multisig.rs index 0bb05511d7a863dd0ba641e8e0e6e625c9498313..421fc033e7c47dd4dd6d3932f8291a0607684f69 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_multisig.rs @@ -1,42 +1,40 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_multisig` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `dagda.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-polkadot-dev +// --chain=coretime-rococo-dev // --wasm-execution=compiled // --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=1 // --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,31 +48,27 @@ use core::marker::PhantomData; pub struct WeightInfo(PhantomData); impl pallet_multisig::WeightInfo for WeightInfo { /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { + fn as_multi_threshold_1(_z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_710_000 picoseconds. - Weight::from_parts(14_702_959, 0) + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(16_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7 - .saturating_add(Weight::from_parts(568, 0).saturating_mul(z.into())) } /// Storage: `Multisig::Multisigs` (r:1 w:1) /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { + fn as_multi_create(_s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `262 + s * (2 ±0)` + // Measured: `172 + s * (3 ±0)` // Estimated: `6811` - // Minimum execution time: 45_518_000 picoseconds. - Weight::from_parts(35_243_068, 0) + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_530_612, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_634 - .saturating_add(Weight::from_parts(116_658, 0).saturating_mul(s.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_444, 0).saturating_mul(z.into())) + // Standard Error: 259 + .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -86,13 +80,13 @@ impl pallet_multisig::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `282` // Estimated: `6811` - // Minimum execution time: 29_590_000 picoseconds. - Weight::from_parts(21_574_604, 0) + // Minimum execution time: 21_000_000 picoseconds. 
+ Weight::from_parts(18_422_680, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_622 - .saturating_add(Weight::from_parts(95_669, 0).saturating_mul(s.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_459, 0).saturating_mul(z.into())) + // Standard Error: 8_928 + .saturating_add(Weight::from_parts(25_773, 0).saturating_mul(s.into())) + // Standard Error: 86 + .saturating_add(Weight::from_parts(1_250, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -102,62 +96,54 @@ impl pallet_multisig::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. - fn as_multi_complete(s: u32, z: u32, ) -> Weight { + fn as_multi_complete(_s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `385 + s * (33 ±0)` + // Measured: `315 + s * (34 ±0)` // Estimated: `6811` - // Minimum execution time: 51_056_000 picoseconds. - Weight::from_parts(35_799_301, 0) + // Minimum execution time: 53_000_000 picoseconds. + Weight::from_parts(56_571_428, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_629 - .saturating_add(Weight::from_parts(183_343, 0).saturating_mul(s.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_686, 0).saturating_mul(z.into())) + // Standard Error: 86 + .saturating_add(Weight::from_parts(150, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Multisig::Multisigs` (r:1 w:1) /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { + fn approve_as_multi_create(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` + // Measured: `172 + s * (3 ±0)` // Estimated: `6811` - // Minimum execution time: 30_910_000 picoseconds. - Weight::from_parts(32_413_023, 0) + // Minimum execution time: 32_000_000 picoseconds. + Weight::from_parts(35_000_000, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_346 - .saturating_add(Weight::from_parts(128_779, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Multisig::Multisigs` (r:1 w:1) /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { + fn approve_as_multi_approve(_s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `282` // Estimated: `6811` - // Minimum execution time: 17_926_000 picoseconds. - Weight::from_parts(18_477_305, 0) + // Minimum execution time: 17_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_367 - .saturating_add(Weight::from_parts(113_018, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Multisig::Multisigs` (r:1 w:1) /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. 
- fn cancel_as_multi(s: u32, ) -> Weight { + fn cancel_as_multi(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `454 + s * (1 ±0)` + // Measured: `379 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 32_232_000 picoseconds. - Weight::from_parts(33_724_753, 0) + // Minimum execution time: 31_000_000 picoseconds. + Weight::from_parts(40_000_000, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_192 - .saturating_add(Weight::from_parts(121_574, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_session.rs similarity index 57% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_session.rs index 73c3c06945d38a6d9b4ae9409002cc70c8e5735e..5151bcaa9e4eb4e9bec6baeee1c51d3d920d1474 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_session.rs @@ -1,42 +1,40 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_session` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `dagda.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-kusama-dev +// --chain=coretime-rococo-dev // --wasm-execution=compiled // --pallet=pallet_session -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=1 // --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,8 +55,8 @@ impl pallet_session::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `270` // Estimated: `3735` - // Minimum execution time: 16_932_000 picoseconds. - Weight::from_parts(17_357_000, 0) + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) .saturating_add(Weight::from_parts(0, 3735)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -71,8 +69,8 @@ impl pallet_session::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `242` // Estimated: `3707` - // Minimum execution time: 12_157_000 picoseconds. - Weight::from_parts(12_770_000, 0) + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(13_000_000, 0) .saturating_add(Weight::from_parts(0, 3707)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_timestamp.rs similarity index 51% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_timestamp.rs index 6162b1d48c5fe60e4501d40b95fcaa5e37ae9be2..c2a23bf2a73b2ff2704b640d827efb60dfad61ca 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_timestamp.rs @@ -1,42 +1,40 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_timestamp` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `dagda.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-kusama-dev +// --chain=coretime-rococo-dev // --wasm-execution=compiled // --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=1 // --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,8 +55,8 @@ impl pallet_timestamp::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `49` // Estimated: `1493` - // Minimum execution time: 7_794_000 picoseconds. - Weight::from_parts(8_075_000, 0) + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(8_000_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -67,8 +65,8 @@ impl pallet_timestamp::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `57` // Estimated: `0` - // Minimum execution time: 3_338_000 picoseconds. - Weight::from_parts(3_471_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000000000000000000000000000000..cf3cc98b593f8457b9d392c132e14a738f392a71 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_utility.rs @@ -0,0 +1,93 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_utility` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `dagda.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=coretime-rococo-dev +// --wasm-execution=compiled +// --pallet=pallet_utility +// --extrinsic=* +// --steps=2 +// --repeat=1 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_utility`. +pub struct WeightInfo(PhantomData); +impl pallet_utility::WeightInfo for WeightInfo { + /// The range of component `c` is `[0, 1000]`. + fn batch(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(4_117_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(7_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(4_519_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(4_114_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs similarity index 76% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index 7f4c2026f2bd8ce762dbd54ba94f55ded5b38de1..538401ef2c577c5b82de14acc7d58ceffd6a1668 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -17,25 +17,24 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `dagda.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* +// --chain=coretime-rococo-dev // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm -// --chain=bridge-hub-kusama-dev +// --extrinsic=* +// --steps=2 +// --repeat=1 +// --json // --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,8 +47,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -62,24 +59,35 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 22_520_000 picoseconds. - Weight::from_parts(23_167_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `74` + // Estimated: `3539` + // Minimum execution time: 37_000_000 picoseconds. + Weight::from_parts(37_000_000, 0) + .saturating_add(Weight::from_parts(0, 3539)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 19_639_000 picoseconds. 
- Weight::from_parts(20_230_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `106` + // Estimated: `3571` + // Minimum execution time: 86_000_000 picoseconds. + Weight::from_parts(86_000_000, 0) + .saturating_add(Weight::from_parts(0, 3571)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -93,6 +101,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -107,8 +125,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_175_000 picoseconds. - Weight::from_parts(7_496_000, 0) + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(13_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +136,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_126_000 picoseconds. - Weight::from_parts(2_359_000, 0) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,8 +145,6 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -143,18 +159,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 27_229_000 picoseconds. - Weight::from_parts(27_673_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `74` + // Estimated: `3539` + // Minimum execution time: 37_000_000 picoseconds. 
+ Weight::from_parts(37_000_000, 0) + .saturating_add(Weight::from_parts(0, 3539)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -169,12 +183,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `255` - // Estimated: `3720` - // Minimum execution time: 29_812_000 picoseconds. - Weight::from_parts(30_649_000, 0) - .saturating_add(Weight::from_parts(0, 3720)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `292` + // Estimated: `3757` + // Minimum execution time: 72_000_000 picoseconds. + Weight::from_parts(72_000_000, 0) + .saturating_add(Weight::from_parts(0, 3757)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -183,8 +197,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_212_000 picoseconds. - Weight::from_parts(2_367_000, 0) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -194,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `95` // Estimated: `10985` - // Minimum execution time: 14_768_000 picoseconds. - Weight::from_parts(15_036_000, 0) + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) .saturating_add(Weight::from_parts(0, 10985)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -206,8 +220,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `99` // Estimated: `10989` - // Minimum execution time: 14_662_000 picoseconds. - Weight::from_parts(15_155_000, 0) + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) .saturating_add(Weight::from_parts(0, 10989)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -218,15 +232,13 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 16_198_000 picoseconds. - Weight::from_parts(16_456_000, 0) + // Minimum execution time: 22_000_000 picoseconds. 
+ Weight::from_parts(22_000_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -239,12 +251,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `6046` - // Minimum execution time: 25_825_000 picoseconds. - Weight::from_parts(26_744_000, 0) - .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `142` + // Estimated: `6082` + // Minimum execution time: 32_000_000 picoseconds. + Weight::from_parts(32_000_000, 0) + .saturating_add(Weight::from_parts(0, 6082)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -253,8 +265,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `8551` - // Minimum execution time: 8_622_000 picoseconds. - Weight::from_parts(8_931_000, 0) + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(12_000_000, 0) .saturating_add(Weight::from_parts(0, 8551)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -264,16 +276,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `10996` - // Minimum execution time: 15_397_000 picoseconds. - Weight::from_parts(15_650_000, 0) + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) .saturating_add(Weight::from_parts(0, 10996)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -286,12 +296,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 32_330_000 picoseconds. - Weight::from_parts(33_255_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `148` + // Estimated: `11038` + // Minimum execution time: 39_000_000 picoseconds. 
+ Weight::from_parts(39_000_000, 0) + .saturating_add(Weight::from_parts(0, 11038)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -302,8 +312,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_142_000 picoseconds. - Weight::from_parts(4_308_000, 0) + // Minimum execution time: 5_000_000 picoseconds. + Weight::from_parts(5_000_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -314,8 +324,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_814_000 picoseconds. - Weight::from_parts(26_213_000, 0) + // Minimum execution time: 31_000_000 picoseconds. + Weight::from_parts(31_000_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/paritydb_weights.rs similarity index 97% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/paritydb_weights.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/paritydb_weights.rs index 25679703831a13b8d1bb7fb7dd4d92fa84b1f255..4338d928d807a41cc60ec91d86e91c81bb253631 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/paritydb_weights.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/paritydb_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/rocksdb_weights.rs similarity index 97% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/rocksdb_weights.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/rocksdb_weights.rs index 3dd817aa6f137085b0e5fdf2b11b7f50e5c8b002..1d115d963facb39fe29d6258918fda3bc8d94900 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/rocksdb_weights.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/rocksdb_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs similarity index 89% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs index 405d7c72e55792fd49a39c57baf031046977761d..2319c2e3a5b2ec60ebb77065106991a093e20f08 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs @@ -1,17 +1,18 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; @@ -55,8 +56,8 @@ impl WeighMultiAssets for MultiAssets { } } -pub struct AssetHubKusamaXcmWeight(core::marker::PhantomData); -impl XcmWeightInfo for AssetHubKusamaXcmWeight { +pub struct CoretimeRococoXcmWeight(core::marker::PhantomData); +impl XcmWeightInfo for CoretimeRococoXcmWeight { fn withdraw_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) } @@ -116,7 +117,6 @@ impl XcmWeightInfo for AssetHubKusamaXcmWeight { fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { XcmGeneric::::report_error() } - fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) } @@ -208,7 +208,7 @@ impl XcmWeightInfo for AssetHubKusamaXcmWeight { XcmGeneric::::clear_transact_status() } fn universal_origin(_: &Junction) -> Weight { - Weight::MAX + XcmGeneric::::universal_origin() } fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { Weight::MAX diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs similarity index 66% rename from cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 1036d4cbf00fb2261ef2a12a7b264c92dee8e103..7fab35842509deceba14e89e5bbf6bebe2240528 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,25 +1,26 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -32,10 +33,10 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::fungible -// --chain=asset-hub-polkadot-dev +// --chain=asset-hub-rococo-dev // --header=./cumulus/file_header.txt // --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/ +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 25_903_000 picoseconds. - Weight::from_parts(26_768_000, 3593) + // Minimum execution time: 21_643_000 picoseconds. + Weight::from_parts(22_410_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -64,15 +65,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 51_042_000 picoseconds. - Weight::from_parts(51_939_000, 6196) + // Minimum execution time: 43_758_000 picoseconds. + Weight::from_parts(44_654_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: `System::Account` (r:2 w:2) + // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -85,49 +88,54 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `6196` - // Minimum execution time: 74_626_000 picoseconds. - Weight::from_parts(75_963_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `246` + // Estimated: `8799` + // Minimum execution time: 87_978_000 picoseconds. 
+ Weight::from_parts(88_517_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn reserve_asset_deposited() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) + // Estimated: `1489` + // Minimum execution time: 6_883_000 picoseconds. + Weight::from_parts(6_979_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 480_030_000 picoseconds. - Weight::from_parts(486_039_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 198_882_000 picoseconds. + Weight::from_parts(199_930_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_936_000 picoseconds. - Weight::from_parts(4_033_000, 0) + // Minimum execution time: 3_343_000 picoseconds. + Weight::from_parts(3_487_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -135,15 +143,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 26_274_000 picoseconds. - Weight::from_parts(26_609_000, 3593) + // Minimum execution time: 19_399_000 picoseconds. 
+ Weight::from_parts(19_659_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: `System::Account` (r:1 w:1) + // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -156,32 +166,36 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3593` - // Minimum execution time: 52_888_000 picoseconds. - Weight::from_parts(53_835_000, 3593) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `145` + // Estimated: `6196` + // Minimum execution time: 59_017_000 picoseconds. + Weight::from_parts(60_543_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 33_395_000 picoseconds. - Weight::from_parts(33_827_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 45_409_000 picoseconds. 
+ Weight::from_parts(47_041_000, 3610) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs similarity index 59% rename from cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 12ef504727effc5008a70b0ee00506297b3ce77a..4454494badcbfe9b4f429312e24b63786b83ef75 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,43 +1,42 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=asset-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -51,120 +50,128 @@ pub struct WeightInfo(PhantomData); impl WeightInfo { // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 425_235_000 picoseconds. - Weight::from_parts(432_935_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 440_298_000 picoseconds. + Weight::from_parts(446_508_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_070_000 picoseconds. - Weight::from_parts(4_329_000, 0) + // Minimum execution time: 3_313_000 picoseconds. 
+ Weight::from_parts(3_422_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `3534` - // Minimum execution time: 11_464_000 picoseconds. - Weight::from_parts(11_829_000, 3534) + // Measured: `103` + // Estimated: `3568` + // Minimum execution time: 9_691_000 picoseconds. + Weight::from_parts(9_948_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_574_000 picoseconds. - Weight::from_parts(14_021_000, 0) + // Minimum execution time: 10_384_000 picoseconds. + Weight::from_parts(11_085_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_276_000 picoseconds. - Weight::from_parts(4_479_000, 0) + // Minimum execution time: 3_438_000 picoseconds. + Weight::from_parts(3_577_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_833_000 picoseconds. - Weight::from_parts(2_939_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_243_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_797_000 picoseconds. - Weight::from_parts(2_901_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_207_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_855_000 picoseconds. - Weight::from_parts(2_961_000, 0) + // Minimum execution time: 2_105_000 picoseconds. + Weight::from_parts(2_193_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_589_000 picoseconds. - Weight::from_parts(3_720_000, 0) + // Minimum execution time: 2_999_000 picoseconds. + Weight::from_parts(3_056_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_786_000 picoseconds. - Weight::from_parts(2_889_000, 0) + // Minimum execution time: 2_091_000 picoseconds. 
+ Weight::from_parts(2_176_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 25_740_000 picoseconds. - Weight::from_parts(26_355_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 55_728_000 picoseconds. + Weight::from_parts(56_704_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `126` - // Estimated: `3591` - // Minimum execution time: 16_206_000 picoseconds. - Weight::from_parts(16_651_000, 3591) + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 12_839_000 picoseconds. + Weight::from_parts(13_457_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -172,11 +179,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_819_000 picoseconds. - Weight::from_parts(2_944_000, 0) + // Minimum execution time: 2_116_000 picoseconds. 
+ Weight::from_parts(2_219_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -189,11 +198,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 28_216_000 picoseconds. - Weight::from_parts(28_878_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 24_891_000 picoseconds. + Weight::from_parts(25_583_000, 3610) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) @@ -202,127 +211,145 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_795_000 picoseconds. - Weight::from_parts(5_008_000, 0) + // Minimum execution time: 3_968_000 picoseconds. + Weight::from_parts(4_122_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 135_205_000 picoseconds. - Weight::from_parts(140_623_000, 0) + // Minimum execution time: 136_220_000 picoseconds. + Weight::from_parts(137_194_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_791_000 picoseconds. - Weight::from_parts(13_114_000, 0) + // Minimum execution time: 12_343_000 picoseconds. + Weight::from_parts(12_635_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(3_091_000, 0) + // Minimum execution time: 2_237_000 picoseconds. + Weight::from_parts(2_315_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_828_000 picoseconds. - Weight::from_parts(2_947_000, 0) + // Minimum execution time: 2_094_000 picoseconds. + Weight::from_parts(2_231_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_980_000 picoseconds. - Weight::from_parts(3_123_000, 0) + // Minimum execution time: 2_379_000 picoseconds. 
+ Weight::from_parts(2_455_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 29_672_000 picoseconds. - Weight::from_parts(30_318_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 60_734_000 picoseconds. + Weight::from_parts(61_964_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_421_000 picoseconds. - Weight::from_parts(5_614_000, 0) + // Minimum execution time: 5_500_000 picoseconds. 
+ Weight::from_parts(5_720_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 25_621_000 picoseconds. - Weight::from_parts(26_486_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 55_767_000 picoseconds. + Weight::from_parts(56_790_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_873_000 picoseconds. - Weight::from_parts(2_973_000, 0) + // Minimum execution time: 2_201_000 picoseconds. + Weight::from_parts(2_291_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_861_000 picoseconds. - Weight::from_parts(2_923_000, 0) + // Minimum execution time: 2_164_000 picoseconds. + Weight::from_parts(2_241_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_845_000 picoseconds. - Weight::from_parts(2_970_000, 0) + // Minimum execution time: 2_127_000 picoseconds. + Weight::from_parts(2_236_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + pub fn universal_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 4_275_000 picoseconds. + Weight::from_parts(4_381_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_773_000 picoseconds. - Weight::from_parts(2_922_000, 0) + // Minimum execution time: 2_132_000 picoseconds. 
+ Weight::from_parts(2_216_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_980_000 picoseconds. - Weight::from_parts(3_095_000, 0) + // Minimum execution time: 2_265_000 picoseconds. + Weight::from_parts(2_332_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs similarity index 69% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs rename to cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 727735c928550bbceab0e93656e0b1f6e9b2c219..00bbe5b5037f9ebfcb1f519c79725f91c80b7e03 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -1,4 +1,4 @@ -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright 2023 Parity Technologies (UK) Ltd. // This file is part of Cumulus. // Cumulus is free software: you can redistribute it and/or modify @@ -15,33 +15,44 @@ // along with Cumulus. If not, see . use super::{ - AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, - CENTS, + AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, ParachainInfo, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + TransactionByteFee, WeightToFee, XcmpQueue, }; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing}, + traits::{ConstU32, Contains, Equals, Everything, Nothing}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{ + AllSiblingSystemParachains, ConcreteAssetFromSystem, ParentRelayOrSiblingParachains, + RelayOrOtherSystemParachains, + }, + TREASURY_PALLET_ID, +}; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, + DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; parameter_types! 
{ - pub const KsmRelayLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Kusama); + pub const RocRelayLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: Option = Some(NetworkId::Rococo); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); @@ -64,28 +75,30 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. +#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Do a simple punn to convert an AccountId32 MultiLocation into a native chain account ID: + IsConcrete, + // Do a simple punn to convert an `AccountId32` `MultiLocation` into a native chain + // `AccountId`: LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): + // Our chain's `AccountId` type (we can't get away without mentioning it explicitly): AccountId, // We don't track any teleports of `Balances`. (), >; /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can -/// biases the kind of local `Origin` it will become. +/// ready for dispatching a transaction with XCM's `Transact`. There is an `OriginKind` that can +/// bias the kind of local `Origin` it will become. pub type XcmOriginToTransactDispatchOrigin = ( // Sovereign account converter; this attempts to derive an `AccountId` from the origin location // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. + // foreign chains who want to have a local sovereign account on this chain that they control. SovereignSignedViaLocation, - // Native converter for Relay-chain (Parent) location; will converts to a `Relay` origin when + // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when // recognized. RelayChainAsNative, // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when @@ -97,7 +110,7 @@ pub type XcmOriginToTransactDispatchOrigin = ( // Native signed account converter; this just converts an `AccountId32` origin into a normal // `RuntimeOrigin::Signed` origin of the same 32-byte value. SignedAccountId32AsNative, - // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + // XCM origins can be represented natively under the XCM pallet's `Xcm` origin. XcmPassthrough, ); @@ -106,11 +119,8 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; } + /// A call filter for the XCM Transact instruction. This is a temporary measure until we properly /// account for proof size weights. /// @@ -130,26 +140,26 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. 
} | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | frame_system::Call::set_code { .. } | frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + frame_system::Call::authorize_upgrade { .. } | + frame_system::Call::authorize_upgrade_without_checks { .. } | + frame_system::Call::kill_prefix { .. } | + // Should not be in Polkadot/Kusama. Here in order to speed up testing. + frame_system::Call::set_storage { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::Sudo(..) | + RuntimeCall::CollatorSelection(..) | + RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) + RuntimeCall::Broker(..) ) } } @@ -164,13 +174,13 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attempts to pay for execution, then + // If the message is one that immediately attemps to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent and its pluralities (i.e. governance bodies) get free execution. AllowExplicitUnpaidExecutionFrom, // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, + AllowSubscriptionsFrom, ), UniversalLocation, ConstU32<8>, @@ -179,9 +189,17 @@ pub type Barrier = TrailingSetTopicAsId< >, >; -/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: -/// - KSM with the parent Relay Chain and sibling parachains. -pub type TrustedTeleporters = ConcreteAssetFromSystem; +parameter_types! { + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); + pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); +} + +/// Locations that will not be charged fees in the executor, neither for execution nor delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = ( + RelayOrOtherSystemParachains, + Equals, +); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -189,19 +207,20 @@ impl xcm_executor::Config for XcmConfig { type XcmSender = XcmRouter; type AssetTransactor = CurrencyTransactor; type OriginConverter = XcmOriginToTransactDispatchOrigin; - // BridgeHub does not recognize a reserve location for any asset. Users must teleport KSM + // Coretime chain does not recognize a reserve location for any asset. Users must teleport ROC // where allowed (e.g. with the Relay Chain). type IsReserve = (); - type IsTeleporter = TrustedTeleporters; + /// Only allow teleportation of ROC. 
+ type IsTeleporter = ConcreteAssetFromSystem; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = WeightInfoBounds< - crate::weights::xcm::BridgeHubKusamaXcmWeight, + crate::weights::xcm::CoretimeRococoXcmWeight, RuntimeCall, MaxInstructions, >; type Trader = - UsingComponents>; + UsingComponents>; type ResponseHandler = PolkadotXcm; type AssetTrap = PolkadotXcm; type AssetClaims = PolkadotXcm; @@ -210,7 +229,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; @@ -218,17 +240,10 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -/// Converts a local signed origin into an XCM multilocation. -/// Forms the basis for local origins sending/executing XCMs. +/// Converts a local signed origin into an XCM multilocation. Forms the basis for local origins +/// sending/executing XCMs. pub type LocalOriginToLocation = SignedToAccountId32; -parameter_types! { - /// The asset ID for the asset that we use to pay for message delivery fees. - pub FeeAssetId: AssetId = Concrete(KsmRelayLocation::get()); - /// The base fee for the message delivery fees. - pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); -} - pub type PriceForParentDelivery = ExponentialPrice; @@ -236,19 +251,14 @@ pub type PriceForParentDelivery = /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - // We want to disallow users sending (arbitrary) XCMs from this chain. + // We want to disallow users sending (arbitrary) XCM programs from this chain. type SendXcmOrigin = EnsureXcmOrigin; type XcmRouter = XcmRouter; // We support local origins dispatching XCM executions in principle... @@ -259,7 +269,7 @@ impl pallet_xcm::Config for Runtime { type XcmTeleportFilter = Everything; type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. 
type Weigher = WeightInfoBounds< - crate::weights::xcm::BridgeHubKusamaXcmWeight, + crate::weights::xcm::CoretimeRococoXcmWeight, RuntimeCall, MaxInstructions, >; @@ -274,8 +284,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml similarity index 77% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml rename to cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 2cd002b1c6013fe470c9be756cdfff34f01a0a45..d68a98790f79e96ec5badc74b870931dcff7862c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -1,70 +1,73 @@ [package] -name = "bridge-hub-kusama-runtime" +name = "coretime-westend-runtime" version = "0.1.0" authors.workspace = true edition.workspace = true -description = "Kusama's BridgeHub parachain runtime" +description = "Westend's Coretime parachain runtime" license = "Apache-2.0" +[lints] +workspace = true + [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1" } +hex-literal = "0.4.1" log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.171", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = 
"../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = 
false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } 
-cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -73,15 +76,11 @@ pallet-collator-selection = { path = "../../../../pallets/collator-selection", d parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } -[dev-dependencies] -bridge-hub-test-utils = { path = "../test-utils" } - [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -103,6 +102,7 @@ std = [ "pallet-message-queue/std", "pallet-multisig/std", "pallet-session/std", + "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", @@ -122,7 +122,6 @@ std = [ "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", - "sp-io/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", @@ -131,13 +130,13 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", + "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", ] runtime-benchmarks = [ - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -151,6 +150,7 @@ runtime-benchmarks = [ "pallet-collator-selection/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", @@ -165,7 +165,6 @@ runtime-benchmarks = [ try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -180,6 +179,7 @@ try-runtime = [ "pallet-message-queue/try-runtime", "pallet-multisig/try-runtime", "pallet-session/try-runtime", + "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-utility/try-runtime", @@ -189,4 +189,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/build.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs similarity index 100% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/build.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/build.rs diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs 
b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
similarity index 81%
rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs
rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index 4744dc08e8ef255543d5b7fa1d168ae48cf7f192..742b3a29275cadcf51d8fd9a2d9af710dd6e6707 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -1,18 +1,17 @@
// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Cumulus.
-
-// Cumulus is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Cumulus is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
#![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
@@ -26,30 +25,13 @@ mod weights; pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, - }, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -58,27 +40,33 @@ use frame_system::{ EnsureRoot, }; use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{FellowshipLocation, GovernanceLocation, XcmOriginToTransactDispatchOrigin}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -// Polkadot imports -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; - -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - use parachains_common::{ impls::DealWithFees, message_queue::{NarrowOriginToSibling, ParaIdToSibling}, - polkadot::{consensus::*, currency::*, fee::WeightToFee}, - AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, - HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, + westend::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, + AVERAGE_ON_INITIALIZE_RATIO, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, +}; +use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiAddress, Perbill, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; +use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; +use xcm::latest::prelude::*; +use xcm_config::{ + FellowshipLocation, GovernanceLocation, WndRelayLocation, XcmOriginToTransactDispatchOrigin, }; -// XCM Imports -use xcm::latest::prelude::BodyId; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -109,10 +97,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. -pub type Migrations = ( - // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, -); +pub type Migrations = (); /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -132,13 +117,13 @@ impl_opaque_keys! 
{ #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("bridge-hub-polkadot"), - impl_name: create_runtime_str!("bridge-hub-polkadot"), + spec_name: create_runtime_str!("coretime-westend"), + impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 2, + transaction_version: 0, state_version: 1, }; @@ -170,46 +155,28 @@ parameter_types! { }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); - pub const SS58Prefix: u8 = 0; + pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. - +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. + /// The nonce type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; /// Block & extrinsics weights: base values and limits. @@ -240,10 +207,8 @@ parameter_types! { } impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. type Balance = Balance; type DustRemoval = (); - /// The ubiquitous event type. type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -276,6 +241,7 @@ impl pallet_transaction_payment::Config for Runtime { parameter_types! 
{ pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_parachain_system::Config for Runtime { @@ -283,8 +249,8 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = XcmpQueue; type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type OutboundXcmpMessageSource = XcmpQueue; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; @@ -297,6 +263,8 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; } +impl parachain_info::Config for Runtime {} + parameter_types! { pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; } @@ -323,42 +291,43 @@ impl pallet_message_queue::Config for Runtime { type ServiceWeight = MessageQueueServiceWeight; } -impl parachain_info::Config for Runtime {} - impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! { - // Fellows pluralistic body. + /// Fellows pluralistic body. pub const FellowsBodyId: BodyId = BodyId::Technical; } -/// Privileged origin that represents Root or Fellows. +/// Privileged origin that represents Root or Fellows pluralistic body. pub type RootOrFellows = EitherOfDiverse< EnsureRoot, EnsureXcm>, >; +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(WndRelayLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } pub const PERIOD: u32 = 6 * HOURS; @@ -390,11 +359,11 @@ impl pallet_aura::Config for Runtime { parameter_types! { pub const PotId: PalletId = PalletId(*b"PotStake"); pub const SessionLength: BlockNumber = 6 * HOURS; - // StakingAdmin pluralistic body. + /// StakingAdmin pluralistic body. pub const StakingAdminBodyId: BodyId = BodyId::Defense; } -/// We allow root, the StakingAdmin to execute privileged collator selection operations. +/// We allow Root and the `StakingAdmin` to execute privileged collator selection operations. 
pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< EnsureRoot, EnsureXcm>, @@ -417,9 +386,9 @@ impl pallet_collator_selection::Config for Runtime { } parameter_types! { - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + /// One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. + /// Additional storage item size of 32 bytes. pub const DepositFactor: Balance = deposit(0, 32); } @@ -440,58 +409,66 @@ impl pallet_utility::Config for Runtime { type WeightInfo = weights::pallet_utility::WeightInfo; } +impl pallet_sudo::Config for Runtime { + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_sudo::weights::SubstrateWeight; +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime { // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 3, + System: frame_system = 0, + ParachainSystem: cumulus_pallet_parachain_system = 1, + Timestamp: pallet_timestamp = 3, + ParachainInfo: parachain_info = 4, // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - - // Collator support. The order of these 4 are important and shall not change. - Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, + Balances: pallet_balances = 10, + TransactionPayment: pallet_transaction_payment = 11, + + // Collator support. The order of these 5 are important and shall not change. + Authorship: pallet_authorship = 20, + CollatorSelection: pallet_collator_selection = 21, + Session: pallet_session = 22, + Aura: pallet_aura = 23, + AuraExt: cumulus_pallet_aura_ext = 24, + + // XCM & related + XcmpQueue: cumulus_pallet_xcmp_queue = 30, + PolkadotXcm: pallet_xcm = 31, + CumulusXcm: cumulus_pallet_xcm = 32, + MessageQueue: pallet_message_queue = 34, // Handy utilities. 
- Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, + Utility: pallet_utility = 40, + Multisig: pallet_multisig = 41, + + // Sudo + Sudo: pallet_sudo = 100, } ); +#[cfg(feature = "runtime-benchmarks")] +#[macro_use] +extern crate frame_benchmarking; + #[cfg(feature = "runtime-benchmarks")] mod benches { - frame_benchmarking::define_benchmarks!( + define_benchmarks!( [frame_system, SystemBench::] + [cumulus_pallet_parachain_system, ParachainSystem] + [pallet_timestamp, Timestamp] [pallet_balances, Balances] + [pallet_collator_selection, CollatorSelection] + [pallet_session, SessionBench::] + [cumulus_pallet_xcmp_queue, XcmpQueue] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] - [pallet_session, SessionBench::] [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_parachain_system, ParachainSystem] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - // XCM - [pallet_xcm, PolkadotXcm] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -671,6 +648,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -707,31 +685,54 @@ impl_runtime_apis! { impl cumulus_pallet_session_benchmarking::Config for Runtime {} use xcm::latest::prelude::*; - use xcm_config::DotRelayLocation; + use xcm_config::WndRelayLocation; + + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled + None + } + } parameter_types! { pub ExistentialDepositMultiAsset: Option = Some(( - xcm_config::DotRelayLocation::get(), + WndRelayLocation::get(), ExistentialDeposit::get() ).into()); } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; - type AccountIdConverter = xcm_config::LocationToAccountId; type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, + xcm_config::XcmConfig, ExistentialDepositMultiAsset, xcm_config::PriceForParentDelivery, >; + type AccountIdConverter = xcm_config::LocationToAccountId; fn valid_destination() -> Result { - Ok(DotRelayLocation::get()) + Ok(WndRelayLocation::get()) } fn worst_case_holding(_depositable_count: u32) -> MultiAssets { // just concrete assets according to relay chain. let assets: Vec = vec![ MultiAsset { - id: Concrete(DotRelayLocation::get()), + id: Concrete(WndRelayLocation::get()), fun: Fungible(1_000_000 * UNITS), } ]; @@ -741,8 +742,8 @@ impl_runtime_apis! { parameter_types! 
{ pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - DotRelayLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(DotRelayLocation::get()) }, + WndRelayLocation::get(), + MultiAsset { fun: Fungible(UNITS), id: Concrete(WndRelayLocation::get()) }, )); pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; @@ -757,15 +758,15 @@ impl_runtime_apis! { fn get_multi_asset() -> MultiAsset { MultiAsset { - id: Concrete(DotRelayLocation::get()), + id: Concrete(WndRelayLocation::get()), fun: Fungible(UNITS), } } } impl pallet_xcm_benchmarks::generic::Config for Runtime { - type TransactAsset = Balances; type RuntimeCall = RuntimeCall; + type TransactAsset = Balances; fn worst_case_response() -> (u64, Response) { (0u64, Response::Version(Default::default())) @@ -780,16 +781,16 @@ impl_runtime_apis! { } fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((DotRelayLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + Ok((WndRelayLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) } fn subscribe_origin() -> Result { - Ok(DotRelayLocation::get()) + Ok(WndRelayLocation::get()) } fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = DotRelayLocation::get(); - let assets: MultiAssets = (Concrete(DotRelayLocation::get()), 1_000 * UNITS).into(); + let origin = WndRelayLocation::get(); + let assets: MultiAssets = (Concrete(WndRelayLocation::get()), 1_000 * UNITS).into(); let ticket = MultiLocation { parents: 0, interior: Here }; Ok((origin, ticket, assets)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/block_weights.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/block_weights.rs similarity index 96% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/block_weights.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/block_weights.rs index e7fdb2aae2a01ec06076de83d94817e540e205dd..2bd7975bf98c36996520716c9dc11822d8287234 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/block_weights.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/block_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2023 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..0303151d7f83dfc5957e7346b1c4ef2950b6dc01 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,53 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_parachain_system`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_parachain_system::WeightInfo for WeightInfo<T> {
+	/// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1)
+	/// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0)
+	/// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: MessageQueue BookStateFor (r:1 w:1)
+	/// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+	/// Storage: MessageQueue ServiceHead (r:1 w:1)
+	/// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen)
+	/// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1)
+	/// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: MessageQueue Pages (r:0 w:16)
+	/// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen)
+	/// The range of component `n` is `[0, 1000]`.
+	fn enqueue_inbound_downward_messages(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `12`
+		// Estimated: `8013`
+		// Minimum execution time: 1_645_000 picoseconds.
+		Weight::from_parts(1_717_000, 0)
+			.saturating_add(Weight::from_parts(0, 8013))
+			// Standard Error: 12_258
+			.saturating_add(Weight::from_parts(24_890_934, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(4))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+}
\ No newline at end of file
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_xcmp_queue.rs
similarity index 75%
rename from cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs
rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_xcmp_queue.rs
index 89c80d0be62862b9b4c0488d12c9e7ed7e122a81..124571118aa129e1489aaaf1ebeabbde41ed13c4 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_xcmp_queue.rs
@@ -1,38 +1,41 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot-parachain +// ./artifacts/westend-parachain // benchmark // pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// asset-hub-polkadot-dev -// --output -// cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=cumulus_pallet_xcmp_queue +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_xcmp_queue.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -45,14 +48,14 @@ use core::marker::PhantomData; /// Weight functions for `cumulus_pallet_xcmp_queue`. pub struct WeightInfo(PhantomData); impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: XcmpQueue QueueConfig (r:1 w:1) + /// Proof Skipped: XcmpQueue QueueConfig (max_values: Some(1), max_size: None, mode: Measured) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 6_000_000 picoseconds. - Weight::from_parts(6_000_000, 0) + // Minimum execution time: 5_621_000 picoseconds. + Weight::from_parts(5_845_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -84,7 +87,7 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Measured: `76` // Estimated: `1561` // Minimum execution time: 3_000_000 picoseconds. 
- Weight::from_parts(4_000_000, 0) + Weight::from_parts(3_000_000, 0) .saturating_add(Weight::from_parts(0, 1561)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -96,7 +99,7 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Measured: `111` // Estimated: `1596` // Minimum execution time: 4_000_000 picoseconds. - Weight::from_parts(5_000_000, 0) + Weight::from_parts(4_000_000, 0) .saturating_add(Weight::from_parts(0, 1596)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -106,7 +109,7 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Measured: `0` // Estimated: `0` // Minimum execution time: 44_000_000 picoseconds. - Weight::from_parts(46_000_000, 0) + Weight::from_parts(45_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -127,8 +130,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65711` // Estimated: `69176` - // Minimum execution time: 62_000_000 picoseconds. - Weight::from_parts(68_000_000, 0) + // Minimum execution time: 67_000_000 picoseconds. + Weight::from_parts(73_000_000, 0) .saturating_add(Weight::from_parts(0, 69176)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -139,8 +142,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65710` // Estimated: `69175` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(45_000_000, 0) + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) .saturating_add(Weight::from_parts(0, 69175)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/extrinsic_weights.rs similarity index 96% rename from cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/extrinsic_weights.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/extrinsic_weights.rs index 1a4adb968bb7195428ea00d59cd92dcd3b6eea5f..898d72ec5b19519a77ec0b75bb65d757213b35d4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/extrinsic_weights.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/extrinsic_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2023 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..46f8113939e4d4fa3f26ff03d665eec6b4120a6b --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system.rs @@ -0,0 +1,161 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./artifacts/westend-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=frame_system +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_432_000 picoseconds. + Weight::from_parts(2_458_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(367, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. + fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_911_000 picoseconds. + Weight::from_parts(8_031_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_405, 0).saturating_mul(b.into())) + } + /// Storage: System Digest (r:1 w:1) + /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) + /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 4_304_000 picoseconds. + Weight::from_parts(4_553_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn set_code() -> Weight { + Weight::from_parts(1_000_000, 0) + } + /// Storage: Skipped Metadata (r:0 w:0) + /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_493_000 picoseconds. 
+ Weight::from_parts(2_523_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_594 + .saturating_add(Weight::from_parts(663_439, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: Skipped Metadata (r:0 w:0) + /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_492_000 picoseconds. + Weight::from_parts(2_526_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 784 + .saturating_add(Weight::from_parts(493_844, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: Skipped Metadata (r:0 w:0) + /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `68 + p * (69 ±0)` + // Estimated: `66 + p * (70 ±0)` + // Minimum execution time: 4_200_000 picoseconds. + Weight::from_parts(4_288_000, 0) + .saturating_add(Weight::from_parts(0, 66)) + // Standard Error: 1_195 + .saturating_add(Weight::from_parts(1_021_563, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. 
+ Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs similarity index 94% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs index 36733d6d4a6e8f09cd21bd0f14ae8bde53a720c1..6bc733844e69bba8a532edea643f99fe8b7397b6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2023 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,6 @@ //! Expose the auto generated weight files. pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_balances.rs new file mode 100644 index 0000000000000000000000000000000000000000..65d5a55c72eab716b3688bdd50fae38c67587287 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_balances.rs @@ -0,0 +1,151 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_balances` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./artifacts/westend-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_balances +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_balances.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_balances`. 
+pub struct WeightInfo(PhantomData); +impl pallet_balances::WeightInfo for WeightInfo { + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn transfer_allow_death() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 59_580_000 picoseconds. + Weight::from_parts(60_317_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 45_490_000 picoseconds. + Weight::from_parts(45_910_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn force_set_balance_creating() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 17_353_000 picoseconds. + Weight::from_parts(17_676_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn force_set_balance_killing() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 25_017_000 picoseconds. + Weight::from_parts(25_542_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 61_161_000 picoseconds. + Weight::from_parts(61_665_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 55_422_000 picoseconds. + Weight::from_parts(55_880_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn force_unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 20_477_000 picoseconds. 
+ Weight::from_parts(20_871_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: System Account (r:999 w:999) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (136 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 19_501_000 picoseconds. + Weight::from_parts(19_726_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 9_495 + .saturating_add(Weight::from_parts(15_658_957, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_collator_selection.rs new file mode 100644 index 0000000000000000000000000000000000000000..2adddecab264945884d8b4620b9dff9868bfc4f0 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_collator_selection.rs @@ -0,0 +1,243 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_collator_selection` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./artifacts/westend-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_collator_selection +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_collator_selection.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collator_selection`. 
+pub struct WeightInfo(PhantomData); +impl pallet_collator_selection::WeightInfo for WeightInfo { + /// Storage: Session NextKeys (r:100 w:0) + /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + /// Storage: CollatorSelection Invulnerables (r:0 w:1) + /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) + /// The range of component `b` is `[1, 100]`. + fn set_invulnerables(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `214 + b * (78 ±0)` + // Estimated: `1203 + b * (2554 ±0)` + // Minimum execution time: 14_426_000 picoseconds. + Weight::from_parts(14_971_974, 0) + .saturating_add(Weight::from_parts(0, 1203)) + // Standard Error: 2_914 + .saturating_add(Weight::from_parts(2_604_699, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 2554).saturating_mul(b.into())) + } + /// Storage: CollatorSelection DesiredCandidates (r:0 w:1) + /// Proof: CollatorSelection DesiredCandidates (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn set_desired_candidates() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_977_000 picoseconds. + Weight::from_parts(7_246_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_937_000 picoseconds. + Weight::from_parts(8_161_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: CollatorSelection Candidates (r:1 w:1) + /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(48002), added: 48497, mode: MaxEncodedLen) + /// Storage: CollatorSelection DesiredCandidates (r:1 w:0) + /// Proof: CollatorSelection DesiredCandidates (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: CollatorSelection Invulnerables (r:1 w:0) + /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) + /// Storage: Session NextKeys (r:1 w:0) + /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + /// Storage: CollatorSelection CandidacyBond (r:1 w:0) + /// Proof: CollatorSelection CandidacyBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// The range of component `c` is `[1, 999]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1104 + c * (48 ±0)` + // Estimated: `49487 + c * (49 ±0)` + // Minimum execution time: 42_275_000 picoseconds. 
+ Weight::from_parts(33_742_215, 0) + .saturating_add(Weight::from_parts(0, 49487)) + // Standard Error: 1_291 + .saturating_add(Weight::from_parts(103_381, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(c.into())) + } + /// Storage: CollatorSelection Candidates (r:1 w:1) + /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(48002), added: 48497, mode: MaxEncodedLen) + /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// The range of component `c` is `[6, 1000]`. + fn leave_intent(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `428 + c * (48 ±0)` + // Estimated: `49487` + // Minimum execution time: 33_404_000 picoseconds. + Weight::from_parts(22_612_617, 0) + .saturating_add(Weight::from_parts(0, 49487)) + // Standard Error: 1_341 + .saturating_add(Weight::from_parts(105_669, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: System BlockWeight (r:1 w:1) + /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) + /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + fn note_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `155` + // Estimated: `6196` + // Minimum execution time: 44_415_000 picoseconds. 
+ Weight::from_parts(44_732_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Session NextKeys (r:1 w:0) + /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + /// Storage: CollatorSelection Invulnerables (r:1 w:1) + /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(641), added: 1136, mode: MaxEncodedLen) + /// Storage: CollatorSelection Candidates (r:1 w:1) + /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(4802), added: 5297, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `b` is `[1, 19]`. + /// The range of component `c` is `[1, 99]`. + fn add_invulnerable(b: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `757 + b * (32 ±0) + c * (53 ±0)` + // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` + // Minimum execution time: 52_720_000 picoseconds. + Weight::from_parts(56_102_459, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 12_957 + .saturating_add(Weight::from_parts(26_422, 0).saturating_mul(b.into())) + // Standard Error: 2_456 + .saturating_add(Weight::from_parts(128_528, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) + } + /// Storage: CollatorSelection Invulnerables (r:1 w:1) + /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) + /// The range of component `b` is `[1, 100]`. + fn remove_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119 + b * (32 ±0)` + // Estimated: `4687` + // Minimum execution time: 183_054_000 picoseconds. + Weight::from_parts(197_205_427, 0) + .saturating_add(Weight::from_parts(0, 4687)) + // Standard Error: 13_533 + .saturating_add(Weight::from_parts(376_231, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: CollatorSelection Candidates (r:1 w:0) + /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(48002), added: 48497, mode: MaxEncodedLen) + /// Storage: CollatorSelection LastAuthoredBlock (r:999 w:0) + /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: CollatorSelection Invulnerables (r:1 w:0) + /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) + /// Storage: System BlockWeight (r:1 w:1) + /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) + /// Storage: System Account (r:995 w:995) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `r` is `[1, 1000]`. + /// The range of component `c` is `[1, 1000]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `22815 + c * (97 ±0) + r * (116 ±0)` + // Estimated: `49487 + c * (2519 ±0) + r * (2602 ±0)` + // Minimum execution time: 16_765_000 picoseconds. 
+ Weight::from_parts(16_997_000, 0) + .saturating_add(Weight::from_parts(0, 49487)) + // Standard Error: 860_677 + .saturating_add(Weight::from_parts(30_463_094, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2602).saturating_mul(r.into())) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_message_queue.rs similarity index 69% rename from cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_message_queue.rs index a9f0cb07cfe1385d62df0f1e81cd1b03cc963f7e..651f27e10e5c7b5d941d2bec5197fc06ed035fda 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_message_queue.rs @@ -1,51 +1,27 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// westmint-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/westmint/src/weights +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; -/// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { /// Storage: MessageQueue ServiceHead (r:1 w:0) @@ -56,8 +32,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `189` // Estimated: `7534` - // Minimum execution time: 12_192_000 picoseconds. - Weight::from_parts(12_192_000, 0) + // Minimum execution time: 11_446_000 picoseconds. + Weight::from_parts(11_446_000, 0) .saturating_add(Weight::from_parts(0, 7534)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -70,8 +46,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `184` // Estimated: `7534` - // Minimum execution time: 10_447_000 picoseconds. - Weight::from_parts(10_447_000, 0) + // Minimum execution time: 10_613_000 picoseconds. + Weight::from_parts(10_613_000, 0) .saturating_add(Weight::from_parts(0, 7534)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -82,8 +58,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `6` // Estimated: `3517` - // Minimum execution time: 4_851_000 picoseconds. - Weight::from_parts(4_851_000, 0) + // Minimum execution time: 4_854_000 picoseconds. + Weight::from_parts(4_854_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -94,8 +70,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `72` // Estimated: `69050` - // Minimum execution time: 6_342_000 picoseconds. - Weight::from_parts(6_342_000, 0) + // Minimum execution time: 5_748_000 picoseconds. + Weight::from_parts(5_748_000, 0) .saturating_add(Weight::from_parts(0, 69050)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -106,8 +82,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `72` // Estimated: `69050` - // Minimum execution time: 6_199_000 picoseconds. - Weight::from_parts(6_199_000, 0) + // Minimum execution time: 6_136_000 picoseconds. + Weight::from_parts(6_136_000, 0) .saturating_add(Weight::from_parts(0, 69050)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +92,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 58_612_000 picoseconds. - Weight::from_parts(58_612_000, 0) + // Minimum execution time: 59_505_000 picoseconds. + Weight::from_parts(59_505_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: MessageQueue ServiceHead (r:1 w:1) @@ -128,8 +104,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `99` // Estimated: `5007` - // Minimum execution time: 7_296_000 picoseconds. - Weight::from_parts(7_296_000, 0) + // Minimum execution time: 6_506_000 picoseconds. 
+ Weight::from_parts(6_506_000, 0) .saturating_add(Weight::from_parts(0, 5007)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -142,8 +118,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `65667` // Estimated: `72567` - // Minimum execution time: 48_345_000 picoseconds. - Weight::from_parts(48_345_000, 0) + // Minimum execution time: 40_646_000 picoseconds. + Weight::from_parts(40_646_000, 0) .saturating_add(Weight::from_parts(0, 72567)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -156,8 +132,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `65667` // Estimated: `72567` - // Minimum execution time: 56_441_000 picoseconds. - Weight::from_parts(56_441_000, 0) + // Minimum execution time: 51_424_000 picoseconds. + Weight::from_parts(51_424_000, 0) .saturating_add(Weight::from_parts(0, 72567)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -170,10 +146,10 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `65667` // Estimated: `72567` - // Minimum execution time: 70_858_000 picoseconds. - Weight::from_parts(70_858_000, 0) + // Minimum execution time: 81_153_000 picoseconds. + Weight::from_parts(81_153_000, 0) .saturating_add(Weight::from_parts(0, 72567)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } -} +} \ No newline at end of file diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_multisig.rs new file mode 100644 index 0000000000000000000000000000000000000000..4130e05bf7c4233b5dfdd6fcf0df1295ce77db61 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_multisig.rs @@ -0,0 +1,163 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_multisig` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./artifacts/westend-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_multisig +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_multisig.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_multisig`. +pub struct WeightInfo(PhantomData); +impl pallet_multisig::WeightInfo for WeightInfo { + /// The range of component `z` is `[0, 10000]`. + fn as_multi_threshold_1(z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_337_000 picoseconds. + Weight::from_parts(11_960_522, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 9 + .saturating_add(Weight::from_parts(504, 0).saturating_mul(z.into())) + } + /// Storage: Multisig Multisigs (r:1 w:1) + /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_create(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `263 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 41_128_000 picoseconds. + Weight::from_parts(35_215_592, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 429 + .saturating_add(Weight::from_parts(65_959, 0).saturating_mul(s.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_230, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Multisig Multisigs (r:1 w:1) + /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `6811` + // Minimum execution time: 26_878_000 picoseconds. + Weight::from_parts(21_448_577, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 354 + .saturating_add(Weight::from_parts(60_286, 0).saturating_mul(s.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_236, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Multisig Multisigs (r:1 w:1) + /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `388 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 45_716_000 picoseconds. 
+ Weight::from_parts(38_332_947, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 554 + .saturating_add(Weight::from_parts(81_026, 0).saturating_mul(s.into())) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_265, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Multisig Multisigs (r:1 w:1) + /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_create(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `263 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 32_089_000 picoseconds. + Weight::from_parts(33_664_508, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 487 + .saturating_add(Weight::from_parts(67_443, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Multisig Multisigs (r:1 w:1) + /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_approve(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `6811` + // Minimum execution time: 18_631_000 picoseconds. + Weight::from_parts(19_909_964, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 434 + .saturating_add(Weight::from_parts(62_989, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Multisig Multisigs (r:1 w:1) + /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// The range of component `s` is `[2, 100]`. + fn cancel_as_multi(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 32_486_000 picoseconds. + Weight::from_parts(34_303_784, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 585 + .saturating_add(Weight::from_parts(69_979, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_session.rs new file mode 100644 index 0000000000000000000000000000000000000000..d132ef17bbdb2295dcd4ee812ab62ae51fd6ff3a --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_session.rs @@ -0,0 +1,79 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_session` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./artifacts/westend-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_session +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_session.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_session`. +pub struct WeightInfo(PhantomData); +impl pallet_session::WeightInfo for WeightInfo { + /// Storage: Session NextKeys (r:1 w:1) + /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + /// Storage: Session KeyOwner (r:1 w:1) + /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + fn set_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3762` + // Minimum execution time: 17_353_000 picoseconds. + Weight::from_parts(18_005_000, 0) + .saturating_add(Weight::from_parts(0, 3762)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Session NextKeys (r:1 w:1) + /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + /// Storage: Session KeyOwner (r:0 w:1) + /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + fn purge_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `3744` + // Minimum execution time: 13_039_000 picoseconds. + Weight::from_parts(13_341_000, 0) + .saturating_add(Weight::from_parts(0, 3744)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000000000000000000000000000000..722858a3a4655881cdbedbe8e6cae419baefc190 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,73 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_timestamp` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./artifacts/westend-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_timestamp.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: Timestamp Now (r:1 w:1) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: Aura CurrentSlot (r:1 w:0) + /// Proof: Aura CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `49` + // Estimated: `1493` + // Minimum execution time: 7_986_000 picoseconds. + Weight::from_parts(8_134_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 3_257_000 picoseconds. + Weight::from_parts(3_366_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000000000000000000000000000000..dacd469ebb7ab62eb7fcd7740bf6bf230aace7ee --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_utility.rs @@ -0,0 +1,100 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_utility` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./artifacts/westend-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_utility +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_utility.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_utility`. +pub struct WeightInfo(PhantomData); +impl pallet_utility::WeightInfo for WeightInfo { + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_697_000 picoseconds. + Weight::from_parts(11_859_145, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 3_146 + .saturating_add(Weight::from_parts(4_300_555, 0).saturating_mul(c.into())) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_979_000 picoseconds. + Weight::from_parts(5_066_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_741_000 picoseconds. + Weight::from_parts(15_928_547, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 3_310 + .saturating_add(Weight::from_parts(4_527_996, 0).saturating_mul(c.into())) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_717_000 picoseconds. + Weight::from_parts(8_909_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_814_000 picoseconds. + Weight::from_parts(13_920_831, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 7_605 + .saturating_add(Weight::from_parts(4_306_193, 0).saturating_mul(c.into())) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs new file mode 100644 index 0000000000000000000000000000000000000000..d96ee43463a316d3b8a0c5c8351045d7c340cd13 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -0,0 +1,323 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./artifacts/westend-parachain +// benchmark +// pallet +// --chain=coretime-westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_xcm +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem HostConfiguration (r:1 w:0) + /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + fn send() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 25_783_000 picoseconds. + Weight::from_parts(26_398_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 25_511_000 picoseconds. + Weight::from_parts(26_120_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: Benchmark Override (r:0 w:0) + /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `496` + // Estimated: `6208` + // Minimum execution time: 146_932_000 picoseconds. 
+ Weight::from_parts(153_200_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: Benchmark Override (r:0 w:0) + /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + fn execute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) + /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + fn force_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_707_000 picoseconds. + Weight::from_parts(9_874_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) + /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + fn force_default_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_073_000 picoseconds. + Weight::from_parts(3_183_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) + /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm QueryCounter (r:1 w:1) + /// Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem HostConfiguration (r:1 w:0) + /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: PolkadotXcm Queries (r:0 w:1) + /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + fn force_subscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 30_999_000 picoseconds. 
+ Weight::from_parts(31_641_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) + /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem HostConfiguration (r:1 w:0) + /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: PolkadotXcm Queries (r:0 w:1) + /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + fn force_unsubscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `220` + // Estimated: `3685` + // Minimum execution time: 33_036_000 picoseconds. + Weight::from_parts(33_596_000, 0) + .saturating_add(Weight::from_parts(0, 3685)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) + /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_035_000 picoseconds. + Weight::from_parts(3_154_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) + /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + fn migrate_supported_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `95` + // Estimated: `10985` + // Minimum execution time: 14_805_000 picoseconds. + Weight::from_parts(15_120_000, 0) + .saturating_add(Weight::from_parts(0, 10985)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: PolkadotXcm VersionNotifiers (r:4 w:2) + /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + fn migrate_version_notifiers() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `10989` + // Minimum execution time: 14_572_000 picoseconds. + Weight::from_parts(14_909_000, 0) + .saturating_add(Weight::from_parts(0, 10989)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) + /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn already_notified_target() -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 15_341_000 picoseconds. 
+ Weight::from_parts(15_708_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(5)) + } + /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) + /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem HostConfiguration (r:1 w:0) + /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + fn notify_current_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `6046` + // Minimum execution time: 27_840_000 picoseconds. + Weight::from_parts(28_248_000, 0) + .saturating_add(Weight::from_parts(0, 6046)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) + /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn notify_target_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `136` + // Estimated: `8551` + // Minimum execution time: 8_245_000 picoseconds. + Weight::from_parts(8_523_000, 0) + .saturating_add(Weight::from_parts(0, 8551)) + .saturating_add(T::DbWeight::get().reads(3)) + } + /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) + /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn migrate_version_notify_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `10996` + // Minimum execution time: 14_780_000 picoseconds. 
+ Weight::from_parts(15_173_000, 0) + .saturating_add(Weight::from_parts(0, 10996)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) + /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem HostConfiguration (r:1 w:0) + /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + fn migrate_and_notify_old_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `112` + // Estimated: `11002` + // Minimum execution time: 33_422_000 picoseconds. + Weight::from_parts(34_076_000, 0) + .saturating_add(Weight::from_parts(0, 11002)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1554` + // Minimum execution time: 4_512_000 picoseconds. + Weight::from_parts(4_671_000, 0) + .saturating_add(Weight::from_parts(0, 1554)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7706` + // Estimated: `11171` + // Minimum execution time: 26_473_000 picoseconds. + Weight::from_parts(26_960_000, 0) + .saturating_add(Weight::from_parts(0, 11171)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/paritydb_weights.rs similarity index 97% rename from cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/paritydb_weights.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/paritydb_weights.rs index 25679703831a13b8d1bb7fb7dd4d92fa84b1f255..1c6d2ebe568cc81e91167ec723102eebde49259c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/paritydb_weights.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/paritydb_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2023 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/rocksdb_weights.rs similarity index 97% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/rocksdb_weights.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/rocksdb_weights.rs index 3dd817aa6f137085b0e5fdf2b11b7f50e5c8b002..aa0cb2b4bc377bae5bce9b18cbb78820c91f344d 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/rocksdb_weights.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/rocksdb_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) 2023 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs similarity index 97% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs index 71732961d3de12bde9058665c4937426b4ed9f88..3dc7b82efc2dbf8bc0704791974d0759e6ae5328 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs @@ -55,8 +55,8 @@ impl WeighMultiAssets for MultiAssets { } } -pub struct BridgeHubKusamaXcmWeight<Call>(core::marker::PhantomData<Call>); -impl<Call> XcmWeightInfo<Call> for BridgeHubKusamaXcmWeight<Call> { +pub struct CoretimeWestendXcmWeight<Call>(core::marker::PhantomData<Call>); +impl<Call> XcmWeightInfo<Call> for CoretimeWestendXcmWeight<Call> { fn withdraw_asset(assets: &MultiAssets) -> Weight { assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset()) } @@ -208,7 +208,7 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubKusamaXcmWeight<Call> { XcmGeneric::<Runtime>::clear_transact_status() } fn universal_origin(_: &Junction) -> Weight { - Weight::MAX + XcmGeneric::<Runtime>::universal_origin() } fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { Weight::MAX diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs similarity index 73% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index e680c2d5c1193b25f8408d959ae50d5c8ab0ae00..eaf07aac52cefa88f524e6f3a2180ab9faf2b088 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -32,10 +32,10 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::fungible -// --chain=asset-hub-kusama-dev +// --chain=asset-hub-westend-dev // --header=./cumulus/file_header.txt // --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/ +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,8 +53,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 25_602_000 picoseconds. - Weight::from_parts(26_312_000, 3593) + // Minimum execution time: 20_295_000 picoseconds. + Weight::from_parts(21_142_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -64,15 +64,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 51_173_000 picoseconds. - Weight::from_parts(52_221_000, 6196) + // Minimum execution time: 42_356_000 picoseconds. + Weight::from_parts(43_552_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: `System::Account` (r:2 w:2) + // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -86,48 +88,53 @@ impl WeightInfo { pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: // Measured: `246` - // Estimated: `6196` - // Minimum execution time: 74_651_000 picoseconds. - Weight::from_parts(76_500_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Estimated: `8799` + // Minimum execution time: 85_553_000 picoseconds. 
+ Weight::from_parts(87_177_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn reserve_asset_deposited() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) + // Estimated: `1489` + // Minimum execution time: 6_166_000 picoseconds. + Weight::from_parts(6_352_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 458_666_000 picoseconds. - Weight::from_parts(470_470_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 184_462_000 picoseconds. + Weight::from_parts(189_593_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_701_000 picoseconds. - Weight::from_parts(3_887_000, 0) + // Minimum execution time: 3_018_000 picoseconds. + Weight::from_parts(3_098_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -135,15 +142,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 25_709_000 picoseconds. - Weight::from_parts(26_320_000, 3593) + // Minimum execution time: 18_583_000 picoseconds. 
+ Weight::from_parts(19_057_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: `System::Account` (r:1 w:1) + // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -157,20 +166,24 @@ impl WeightInfo { pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 51_663_000 picoseconds. - Weight::from_parts(52_538_000, 3610) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) + // Estimated: `6196` + // Minimum execution time: 56_666_000 picoseconds. + Weight::from_parts(58_152_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) @@ -179,9 +192,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 31_972_000 picoseconds. - Weight::from_parts(32_834_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Minimum execution time: 44_197_000 picoseconds. 
+ Weight::from_parts(45_573_000, 3610) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs similarity index 64% rename from cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 9e8f3bfe75c1a1bdf94e701d5bbbdd0bb623dd90..fc196abea0f5e61d746760e2b2bf5a7d8d0a476b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,28 +16,26 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=asset-hub-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -51,31 +49,35 @@ pub struct WeightInfo(PhantomData); impl WeightInfo { // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), 
`max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 432_196_000 picoseconds. - Weight::from_parts(438_017_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 415_033_000 picoseconds. + Weight::from_parts(429_573_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_223_000 picoseconds. - Weight::from_parts(4_412_000, 0) + // Minimum execution time: 3_193_000 picoseconds. + Weight::from_parts(3_620_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -83,79 +85,83 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 11_582_000 picoseconds. - Weight::from_parts(11_830_000, 3568) + // Minimum execution time: 8_045_000 picoseconds. + Weight::from_parts(8_402_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_955_000 picoseconds. - Weight::from_parts(14_320_000, 0) + // Minimum execution time: 9_827_000 picoseconds. + Weight::from_parts(10_454_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_423_000 picoseconds. - Weight::from_parts(4_709_000, 0) + // Minimum execution time: 3_330_000 picoseconds. + Weight::from_parts(3_677_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_028_000 picoseconds. - Weight::from_parts(3_151_000, 0) + // Minimum execution time: 1_947_000 picoseconds. + Weight::from_parts(2_083_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_966_000 picoseconds. - Weight::from_parts(3_076_000, 0) + // Minimum execution time: 1_915_000 picoseconds. + Weight::from_parts(1_993_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_971_000 picoseconds. - Weight::from_parts(3_119_000, 0) + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_048_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_772_000 picoseconds. - Weight::from_parts(3_853_000, 0) + // Minimum execution time: 2_683_000 picoseconds. 
+ Weight::from_parts(3_064_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_940_000 picoseconds. - Weight::from_parts(3_050_000, 0) + // Minimum execution time: 1_893_000 picoseconds. + Weight::from_parts(2_159_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 27_734_000 picoseconds. - Weight::from_parts(28_351_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 53_116_000 picoseconds. + Weight::from_parts(54_154_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -163,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 16_456_000 picoseconds. - Weight::from_parts(16_846_000, 3625) + // Minimum execution time: 12_381_000 picoseconds. + Weight::from_parts(12_693_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -172,11 +178,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_974_000 picoseconds. - Weight::from_parts(3_108_000, 0) + // Minimum execution time: 1_933_000 picoseconds. 
+ Weight::from_parts(1_983_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -189,11 +197,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 29_823_000 picoseconds. - Weight::from_parts(30_776_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 24_251_000 picoseconds. + Weight::from_parts(24_890_000, 3610) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) @@ -202,127 +210,145 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_966_000 picoseconds. - Weight::from_parts(5_157_000, 0) + // Minimum execution time: 3_850_000 picoseconds. + Weight::from_parts(4_082_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 141_875_000 picoseconds. - Weight::from_parts(144_925_000, 0) + // Minimum execution time: 112_248_000 picoseconds. + Weight::from_parts(124_454_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_147_000 picoseconds. - Weight::from_parts(13_420_000, 0) + // Minimum execution time: 11_457_000 picoseconds. + Weight::from_parts(12_060_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_050_000 picoseconds. - Weight::from_parts(3_161_000, 0) + // Minimum execution time: 1_959_000 picoseconds. + Weight::from_parts(2_076_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_930_000 picoseconds. - Weight::from_parts(3_077_000, 0) + // Minimum execution time: 1_920_000 picoseconds. + Weight::from_parts(1_994_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_188_000 picoseconds. - Weight::from_parts(3_299_000, 0) + // Minimum execution time: 2_149_000 picoseconds. 
+ Weight::from_parts(2_394_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 31_678_000 picoseconds. - Weight::from_parts(32_462_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 58_011_000 picoseconds. + Weight::from_parts(59_306_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_638_000 picoseconds. - Weight::from_parts(5_756_000, 0) + // Minimum execution time: 5_031_000 picoseconds. 
+ Weight::from_parts(5_243_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 27_556_000 picoseconds. - Weight::from_parts(28_240_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 53_078_000 picoseconds. + Weight::from_parts(54_345_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_932_000 picoseconds. - Weight::from_parts(3_097_000, 0) + // Minimum execution time: 1_936_000 picoseconds. + Weight::from_parts(2_002_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_860_000 picoseconds. - Weight::from_parts(2_957_000, 0) + // Minimum execution time: 1_855_000 picoseconds. + Weight::from_parts(1_950_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_886_000 picoseconds. - Weight::from_parts(3_015_000, 0) + // Minimum execution time: 1_882_000 picoseconds. + Weight::from_parts(1_977_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + pub fn universal_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 3_912_000 picoseconds. + Weight::from_parts(4_167_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_874_000 picoseconds. - Weight::from_parts(3_060_000, 0) + // Minimum execution time: 1_911_000 picoseconds. 
+ Weight::from_parts(1_971_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_029_000 picoseconds. - Weight::from_parts(3_158_000, 0) + // Minimum execution time: 1_990_000 picoseconds. + Weight::from_parts(2_076_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs similarity index 69% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs rename to cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index ac7e00fc4274f3c3d26e5120f2436260e1063f9a..59d76d10d902306a7c5c95f8cac032034d38f8b4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -1,4 +1,4 @@ -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright 2023 Parity Technologies (UK) Ltd. // This file is part of Cumulus. // Cumulus is free software: you can redistribute it and/or modify @@ -15,33 +15,44 @@ // along with Cumulus. If not, see . use super::{ - AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, - CENTS, + AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, ParachainInfo, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + TransactionByteFee, WeightToFee, XcmpQueue, }; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing}, + traits::{ConstU32, Contains, Equals, Everything, Nothing}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{ + AllSiblingSystemParachains, ConcreteAssetFromSystem, ParentRelayOrSiblingParachains, + RelayOrOtherSystemParachains, + }, + TREASURY_PALLET_ID, +}; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, + DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; parameter_types! 
{ - pub const DotRelayLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Polkadot); + pub const WndRelayLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: Option = Some(NetworkId::Westend); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); @@ -64,28 +75,30 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. +#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Do a simple punn to convert an AccountId32 MultiLocation into a native chain account ID: + IsConcrete, + // Do a simple pun to convert an `AccountId32` `MultiLocation` into a native chain + // `AccountId`: LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): + // Our chain's `AccountId` type (we can't get away without mentioning it explicitly): AccountId, // We don't track any teleports of `Balances`. (), >; /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can -/// biases the kind of local `Origin` it will become. +/// ready for dispatching a transaction with XCM's `Transact`. There is an `OriginKind` that can +/// bias the kind of local `Origin` it will become. pub type XcmOriginToTransactDispatchOrigin = ( // Sovereign account converter; this attempts to derive an `AccountId` from the origin location // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. + // foreign chains who want to have a local sovereign account on this chain that they control. SovereignSignedViaLocation, - // Native converter for Relay-chain (Parent) location; will converts to a `Relay` origin when + // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when // recognized. RelayChainAsNative, // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when @@ -97,7 +110,7 @@ pub type XcmOriginToTransactDispatchOrigin = ( // Native signed account converter; this just converts an `AccountId32` origin into a normal // `RuntimeOrigin::Signed` origin of the same 32-byte value. SignedAccountId32AsNative, - // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + // XCM origins can be represented natively under the XCM pallet's `Xcm` origin. XcmPassthrough, ); @@ -106,14 +119,11 @@ match_types! { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(Plurality { .. }) } }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; pub type FellowsPlurality: impl Contains = { MultiLocation { parents: 1, interior: X2(Parachain(1001), Plurality { id: BodyId::Technical, ..}) } }; } + /// A call filter for the XCM Transact instruction. This is a temporary measure until we properly /// account for proof size weights.
/// @@ -133,26 +143,23 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::authorize_upgrade { .. } | + frame_system::Call::authorize_upgrade_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) + RuntimeCall::CollatorSelection(..) | + RuntimeCall::Sudo(..) | + RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::XcmpQueue(..) ) } } @@ -167,14 +174,14 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attempts to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent, its pluralities (i.e. governance bodies), and the Fellows plurality // get free execution. AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, FellowsPlurality)>, // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, + AllowSubscriptionsFrom, ), UniversalLocation, ConstU32<8>, @@ -183,9 +190,17 @@ pub type Barrier = TrailingSetTopicAsId< >, >; -/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: -/// - DOT with the parent Relay Chain and sibling parachains. -pub type TrustedTeleporters = ConcreteAssetFromSystem; +parameter_types! { + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); + pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into(); +} + +/// Locations that will not be charged fees in the executor, neither for execution nor delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = ( + RelayOrOtherSystemParachains, + Equals, +); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -193,19 +208,20 @@ impl xcm_executor::Config for XcmConfig { type XcmSender = XcmRouter; type AssetTransactor = CurrencyTransactor; type OriginConverter = XcmOriginToTransactDispatchOrigin; - // BridgeHub does not recognize a reserve location for any asset. Users must teleport DOT
Users must teleport WND // where allowed (e.g. with the Relay Chain). type IsReserve = (); - type IsTeleporter = TrustedTeleporters; + /// Only allow teleportation of WND. + type IsTeleporter = ConcreteAssetFromSystem; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = WeightInfoBounds< - crate::weights::xcm::BridgeHubPolkadotXcmWeight, + crate::weights::xcm::CoretimeWestendXcmWeight, RuntimeCall, MaxInstructions, >; type Trader = - UsingComponents>; + UsingComponents>; type ResponseHandler = PolkadotXcm; type AssetTrap = PolkadotXcm; type AssetClaims = PolkadotXcm; @@ -214,7 +230,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; @@ -222,17 +241,10 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -/// Converts a local signed origin into an XCM multilocation. -/// Forms the basis for local origins sending/executing XCMs. +/// Converts a local signed origin into an XCM multilocation. Forms the basis for local origins +/// sending/executing XCMs. pub type LocalOriginToLocation = SignedToAccountId32; -parameter_types! { - /// The asset ID for the asset that we use to pay for message delivery fees. - pub FeeAssetId: AssetId = Concrete(DotRelayLocation::get()); - /// The base fee for the message delivery fees. - pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); -} - pub type PriceForParentDelivery = ExponentialPrice; @@ -240,19 +252,14 @@ pub type PriceForParentDelivery = /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - // We want to disallow users sending (arbitrary) XCMs from this chain. + // We want to disallow users sending (arbitrary) XCM programs from this chain. type SendXcmOrigin = EnsureXcmOrigin; type XcmRouter = XcmRouter; // We support local origins dispatching XCM executions in principle... @@ -263,7 +270,7 @@ impl pallet_xcm::Config for Runtime { type XcmTeleportFilter = Everything; type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. 
type Weigher = WeightInfoBounds< - crate::weights::xcm::BridgeHubPolkadotXcmWeight, + crate::weights::xcm::CoretimeWestendXcmWeight, RuntimeCall, MaxInstructions, >; @@ -278,8 +285,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index f787aa3270118b87202bc78b58dcb8084d5f5a5b..0000000000000000000000000000000000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// statemint-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/statemint/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_660_000 picoseconds. - Weight::from_parts(1_720_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 28_418 - .saturating_add(Weight::from_parts(24_636_963, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml similarity index 83% rename from cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml rename to cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index 7f5feb1c880f82cdb32086e6c149b7c75f7ff8ea..831e3242766418fca9c6ed4d9a97e6ae037c4193 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -1,50 +1,53 @@ [package] -name = "glutton-runtime" +name = "glutton-westend-runtime" version = "1.0.0" -description = "Glutton parachain runtime." authors.workspace = true edition.workspace = true license = "Apache-2.0" +description = "Glutton parachain runtime." 
+ +[lints] +workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } 
pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -56,7 +59,7 @@ parachains-common = { path = "../../../common", default-features = false } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", @@ -130,9 +133,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. 
This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/build.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs similarity index 100% rename from cumulus/parachains/runtimes/glutton/glutton-kusama/build.rs rename to cumulus/parachains/runtimes/glutton/glutton-westend/build.rs diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs similarity index 94% rename from cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs rename to cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 60a5d004e6c1c3d2e99d590423f58d357fe95289..2c51791c0740553ea798c58c5157d3b7ab9f73c7 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -13,13 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Glutton Runtime +//! # Glutton Westend Runtime //! //! The purpose of the Glutton parachain is to do stress testing on the Kusama -//! network. +//! network. This runtime targets the Westend runtime to allow development +//! separate to the Kusama runtime. //! //! There may be multiple instances of the Glutton parachain deployed and -//! connected to Kusama. +//! connected to its parent relay chain. //! //! These parachains are not holding any real value. Their purpose is to stress //! test the network. @@ -27,14 +28,14 @@ //! ### Governance //! //! Glutton defers its governance (namely, its `Root` origin), to its Relay -//! Chain parent, Kusama. +//! Chain parent, Kusama (or Westend for development purposes). //! //! ### XCM //! //! Since the main goal of Glutton is solely stress testing, the parachain will -//! only be able receive XCM messages from Kusama via DMP. This way the Glutton -//! parachains will be able to listen for upgrades that are coming from the -//! Relay chain. +//! only be able receive XCM messages from the Relay Chain via DMP. This way the +//! Glutton parachains will be able to listen for upgrades that are coming from +//! the Relay chain. #![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit = "256"] @@ -52,7 +53,7 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -63,7 +64,7 @@ use sp_version::RuntimeVersion; use cumulus_primitives_core::AggregateMessageOrigin; pub use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -95,10 +96,10 @@ impl_opaque_keys! 
{ #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("glutton"), - impl_name: create_runtime_str!("glutton"), + spec_name: create_runtime_str!("glutton-westend"), + impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -164,28 +165,17 @@ parameter_types! { }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); - pub const SS58Prefix: u8 = 2; + pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type Version = Version; - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type SS58Prefix = SS58Prefix; @@ -352,8 +342,11 @@ extern crate frame_benchmarking; #[cfg(feature = "runtime-benchmarks")] mod benches { define_benchmarks!( + [cumulus_pallet_parachain_system, ParachainSystem] [frame_system, SystemBench::] [pallet_glutton, Glutton] + [pallet_message_queue, MessageQueue] + [pallet_timestamp, Timestamp] ); } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..bc8299ab1bd678e58a7909c1de4b0b55f4e1bedf --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_parachain_system +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::LastDmqMqcHead` (r:1 w:1) + /// Proof: `ParachainSystem::LastDmqMqcHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ProcessedDownwardMessages` (r:0 w:1) + /// Proof: `ParachainSystem::ProcessedDownwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1000) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `3517` + // Minimum execution time: 1_745_000 picoseconds. + Weight::from_parts(1_859_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + // Standard Error: 53_384 + .saturating_add(Weight::from_parts(196_309_089, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs similarity index 54% rename from cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs rename to cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs index cf7ef948fd630f892122807250e6973a089955d2..b68f16c9865894170b4e8aba3e524e4771fde2fc 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `frame_system` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-kusama-dev-1300")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=glutton-kusama-dev-1300 -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/glutton/glutton-kusama/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=frame_system +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,8 +53,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_717_000 picoseconds. - Weight::from_parts(1_782_325, 0) + // Minimum execution time: 1_570_000 picoseconds. + Weight::from_parts(1_626_000, 0) .saturating_add(Weight::from_parts(0, 0)) // Standard Error: 0 .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) @@ -65,11 +64,11 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_089_000 picoseconds. - Weight::from_parts(6_353_000, 0) + // Minimum execution time: 4_200_000 picoseconds. 
+ Weight::from_parts(4_262_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_788, 0).saturating_mul(b.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_791, 0).saturating_mul(b.into())) } /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -79,8 +78,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 3_389_000 picoseconds. - Weight::from_parts(3_605_000, 0) + // Minimum execution time: 2_680_000 picoseconds. + Weight::from_parts(2_936_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -99,11 +98,11 @@ impl frame_system::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 97_701_839_000 picoseconds. - Weight::from_parts(100_104_315_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 119_097_302_000 picoseconds. + Weight::from_parts(120_914_576_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -114,11 +113,11 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_638_000 picoseconds. - Weight::from_parts(1_726_000, 0) + // Minimum execution time: 1_606_000 picoseconds. + Weight::from_parts(1_704_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_886 - .saturating_add(Weight::from_parts(809_561, 0).saturating_mul(i.into())) + // Standard Error: 2_090 + .saturating_add(Weight::from_parts(765_829, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -128,11 +127,11 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_569_000 picoseconds. - Weight::from_parts(1_690_000, 0) + // Minimum execution time: 1_646_000 picoseconds. + Weight::from_parts(1_719_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 963 - .saturating_add(Weight::from_parts(580_145, 0).saturating_mul(i.into())) + // Standard Error: 1_067 + .saturating_add(Weight::from_parts(578_598, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -140,15 +139,42 @@ impl frame_system::WeightInfo for WeightInfo { /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `52 + p * (69 ±0)` - // Estimated: `46 + p * (70 ±0)` - // Minimum execution time: 3_039_000 picoseconds. - Weight::from_parts(3_090_000, 0) - .saturating_add(Weight::from_parts(0, 46)) - // Standard Error: 2_007 - .saturating_add(Weight::from_parts(1_269_045, 0).saturating_mul(p.into())) + // Measured: `58 + p * (69 ±0)` + // Estimated: `53 + p * (70 ±0)` + // Minimum execution time: 2_933_000 picoseconds. 
+ Weight::from_parts(3_069_000, 0) + .saturating_add(Weight::from_parts(0, 53)) + // Standard Error: 1_844 + .saturating_add(Weight::from_parts(1_214_377, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs similarity index 100% rename from cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs rename to cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs similarity index 71% rename from cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs rename to cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs index e1b0c5bf232e5ca8b2a4b534095f7b30917ca382..9345458a704af3dd86b77b0030ae6861b8b2ed23 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_glutton` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-kusama-dev-1300")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=glutton-kusama-dev-1300 -// --wasm-execution=compiled -// --pallet=pallet_glutton -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/glutton/glutton-kusama/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_glutton +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,11 +57,11 @@ impl pallet_glutton::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `87` // Estimated: `1489` - // Minimum execution time: 8_925_000 picoseconds. - Weight::from_parts(9_186_000, 0) + // Minimum execution time: 6_453_000 picoseconds. + Weight::from_parts(6_629_000, 0) .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 3_091 - .saturating_add(Weight::from_parts(9_666_196, 0).saturating_mul(n.into())) + // Standard Error: 3_416 + .saturating_add(Weight::from_parts(9_938_610, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -76,11 +75,11 @@ impl pallet_glutton::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `120` // Estimated: `1489` - // Minimum execution time: 8_924_000 picoseconds. - Weight::from_parts(8_963_000, 0) + // Minimum execution time: 6_456_000 picoseconds. 
+ Weight::from_parts(6_564_000, 0) .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 1_202 - .saturating_add(Weight::from_parts(1_139_080, 0).saturating_mul(n.into())) + // Standard Error: 1_336 + .saturating_add(Weight::from_parts(1_141_705, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -90,11 +89,11 @@ impl pallet_glutton::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 708_000 picoseconds. - Weight::from_parts(1_698_031, 0) + // Minimum execution time: 679_000 picoseconds. + Weight::from_parts(3_310_101, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 12 - .saturating_add(Weight::from_parts(106_500, 0).saturating_mul(i.into())) + // Standard Error: 10 + .saturating_add(Weight::from_parts(103_703, 0).saturating_mul(i.into())) } /// Storage: `Glutton::TrashData` (r:5000 w:0) /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) @@ -103,11 +102,11 @@ impl pallet_glutton::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `119115 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 698_000 picoseconds. - Weight::from_parts(970_000, 0) + // Minimum execution time: 765_000 picoseconds. + Weight::from_parts(1_004_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 4_022 - .saturating_add(Weight::from_parts(6_320_519, 0).saturating_mul(i.into())) + // Standard Error: 4_008 + .saturating_add(Weight::from_parts(6_130_770, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } @@ -121,8 +120,8 @@ impl pallet_glutton::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1900498` // Estimated: `5239782` - // Minimum execution time: 100_079_897_000 picoseconds. - Weight::from_parts(100_515_306_000, 0) + // Minimum execution time: 97_248_614_000 picoseconds. + Weight::from_parts(97_728_420_000, 0) .saturating_add(Weight::from_parts(0, 5239782)) .saturating_add(T::DbWeight::get().reads(1739)) } @@ -136,8 +135,8 @@ impl pallet_glutton::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `9548` // Estimated: `16070` - // Minimum execution time: 100_237_009_000 picoseconds. - Weight::from_parts(100_472_213_000, 0) + // Minimum execution time: 97_305_112_000 picoseconds. + Weight::from_parts(97_427_728_000, 0) .saturating_add(Weight::from_parts(0, 16070)) .saturating_add(T::DbWeight::get().reads(7)) } @@ -149,8 +148,8 @@ impl pallet_glutton::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `87` // Estimated: `1493` - // Minimum execution time: 5_120_000 picoseconds. - Weight::from_parts(5_262_000, 0) + // Minimum execution time: 4_125_000 picoseconds. + Weight::from_parts(4_339_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) } @@ -160,8 +159,8 @@ impl pallet_glutton::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_947_000 picoseconds. - Weight::from_parts(6_171_000, 0) + // Minimum execution time: 3_879_000 picoseconds. 
+ Weight::from_parts(4_211_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -171,8 +170,8 @@ impl<T: frame_system::Config> pallet_glutton::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 5_964_000 picoseconds.
- Weight::from_parts(6_166_000, 0)
+ // Minimum execution time: 3_920_000 picoseconds.
+ Weight::from_parts(4_081_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs
new file mode 100644
index 0000000000000000000000000000000000000000..eab6c15a40d28019967dd3112b1b2bc037b93106
--- /dev/null
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs
@@ -0,0 +1,179 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_message_queue`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_message_queue
+// --chain=glutton-westend-dev-1300
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_message_queue`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_message_queue::WeightInfo for WeightInfo<T> {
+ /// Storage: `MessageQueue::ServiceHead` (r:1 w:0)
+ /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+ /// Storage: `MessageQueue::BookStateFor` (r:2 w:2)
+ /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
+ fn ready_ring_knit() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `223`
+ // Estimated: `6044`
+ // Minimum execution time: 10_833_000 picoseconds.
+ Weight::from_parts(11_237_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `6044` + // Minimum execution time: 9_399_000 picoseconds. + Weight::from_parts(9_773_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 3_277_000 picoseconds. + Weight::from_parts(3_358_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_429_000 picoseconds. + Weight::from_parts(5_667_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_538_000 picoseconds. + Weight::from_parts(5_803_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 89_888_000 picoseconds. + Weight::from_parts(90_929_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `3517` + // Minimum execution time: 6_129_000 picoseconds. 
+ Weight::from_parts(6_414_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 52_366_000 picoseconds. + Weight::from_parts(53_500_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 67_848_000 picoseconds. + Weight::from_parts(68_910_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 107_564_000 picoseconds. + Weight::from_parts(109_377_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs similarity index 74% rename from cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs rename to cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs index 8edae065f1b9173a3767a037c167f05420b95a70..4218dcc73f4e608e8b40ae5c4fcdd927747a5084 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs @@ -1,4 +1,4 @@ -// Copyright Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Cumulus. // Cumulus is free software: you can redistribute it and/or modify @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_timestamp` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024
+//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --chain=asset-hub-kusama-dev
-// --wasm-execution=compiled
-// --pallet=pallet_timestamp
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_timestamp
+// --chain=glutton-westend-dev-1300
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -58,8 +56,8 @@ impl<T: frame_system::Config> pallet_timestamp::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `86`
 // Estimated: `1493`
- // Minimum execution time: 9_313_000 picoseconds.
- Weight::from_parts(9_775_000, 0)
+ // Minimum execution time: 6_306_000 picoseconds.
+ Weight::from_parts(6_592_000, 0)
 .saturating_add(Weight::from_parts(0, 1493))
 .saturating_add(T::DbWeight::get().reads(2))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -68,8 +66,8 @@ impl<T: frame_system::Config> pallet_timestamp::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `57`
 // Estimated: `0`
- // Minimum execution time: 3_322_000 picoseconds.
- Weight::from_parts(3_577_000, 0)
+ // Minimum execution time: 2_900_000 picoseconds.
+ Weight::from_parts(3_030_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 }
 }
diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs
similarity index 96%
rename from cumulus/parachains/runtimes/glutton/glutton-kusama/src/xcm_config.rs
rename to cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs
index fb7b78b79d2a808021c5d0a8494e760c66417057..5ebb0ade123175bc17303c19812b78377fab1153 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs
@@ -29,8 +29,8 @@ use xcm_builder::{
 };
 parameter_types! {
- pub const KusamaLocation: MultiLocation = MultiLocation::parent();
- pub const KusamaNetwork: Option<NetworkId> = Some(NetworkId::Kusama);
+ pub const WestendLocation: MultiLocation = MultiLocation::parent();
+ pub const WestendNetwork: Option<NetworkId> = Some(NetworkId::Westend);
 pub UniversalLocation: InteriorMultiLocation = X1(Parachain(ParachainInfo::parachain_id().into()));
 }
diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
index 65ca58ac8b39ebc06ac6475b9818561fde2ae9ee..37a3bb4ca26ff6362e064503de8c331b8e973629 100644
--- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
+++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
@@ -6,34 +6,37 @@ authors.workspace = true
 edition.workspace = true
 license = "Apache-2.0"
+[lints]
+workspace = true
+
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
 # Substrate
-frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false}
-frame-support = { path = "../../../../../substrate/frame/support", default-features = false}
-frame-system = { path = "../../../../../substrate/frame/system", default-features = false}
-pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false}
-pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false}
-pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false}
+frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false }
+frame-support = { path = "../../../../../substrate/frame/support", default-features = false }
+frame-system = { path = "../../../../../substrate/frame/system", default-features = false }
+pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false }
+pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false }
+pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false }
 pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false }
-sp-api = { path = "../../../../../substrate/primitives/api", default-features = false}
-sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false}
+sp-api = { path = "../../../../../substrate/primitives/api", default-features = false }
+sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false }
 sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false }
-sp-core = { path = "../../../../../substrate/primitives/core", default-features = false}
+sp-core = { path = "../../../../../substrate/primitives/core", default-features = false }
 sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false }
-sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false}
-sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false}
-sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false}
-sp-session = { path = "../../../../../substrate/primitives/session", default-features = false}
-sp-std = { path = "../../../../../substrate/primitives/std", default-features =
false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } @@ -44,7 +47,7 @@ parachains-common = { path = "../../../common", default-features = false } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -77,4 +80,4 @@ std = [ "substrate-wasm-builder", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 43c8f1488a6c139d3adaafbf35ede5f7cca7d9e8..cb868627e799efad56c39110a8dc2b266d0c8600 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -44,7 +44,7 @@ use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. pub use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -75,7 +75,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("seedling"), impl_name: create_runtime_str!("seedling"), authoring_version: 1, - spec_version: 10000, + spec_version: 1, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, @@ -134,6 +134,8 @@ parameter_types! { .build_or_panic(); pub const SS58Prefix: u8 = 42; } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. 
type AccountId = AccountId; diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 77449f977bb298a11a21f45fd5576422ef83bad7..3d7042ecd49fb0d3e046e136b70f069770848bbc 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -6,39 +6,42 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false 
} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -48,7 +51,7 @@ parachains-common = { path = "../../../common", default-features = false } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -97,4 +100,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index f67c0c19ec640bb8e131fea2536943315cc6686a..de95969f71d10c508c75d4d6dc5ef054cccd9e2d 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -52,7 +52,7 @@ use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. pub use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -143,6 +143,7 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. 
type AccountId = AccountId; diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index 62bce02bd3581064259443769cea476d992fb686..cd100c472ce58d2dc5bfeb874ffeebef25c91753 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -6,38 +6,41 @@ edition.workspace = true description = "Utils for Runtimes testing" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } # Substrate -frame-support = { path = "../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../substrate/frame/system", default-features = false} -pallet-assets = { path = "../../../../substrate/frame/assets", default-features = false} -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../substrate/frame/session", default-features = false} -sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura", default-features = false} -sp-io = { path = "../../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } +pallet-assets = { path = "../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../substrate/frame/session", default-features = false } +sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura", default-features = false } +sp-io = { path = "../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../substrate/primitives/std", default-features = false } sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false} +sp-core = { path = "../../../../substrate/primitives/core", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcmp-queue = { path = "../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../common", default-features = false } -parachain-info = {package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } assets-common = { path = "../assets/common", default-features = false } cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } 
cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent", default-features = false } cumulus-test-relay-sproof-builder = { path = "../../../test/relay-sproof-builder", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false} -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../polkadot/parachain", default-features = false } [dev-dependencies] hex-literal = "0.4.1" @@ -46,7 +49,7 @@ hex-literal = "0.4.1" substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "assets-common/std", "codec/std", diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index e2a6fb45aec33acb6e2e5d729674b9939ce682aa..6d43875a8868502a4bfc7afc5d72972c0651653f 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -47,6 +47,7 @@ pub mod test_cases; pub type BalanceOf = ::Balance; pub type AccountIdOf = ::AccountId; +pub type RuntimeCallOf = ::RuntimeCall; pub type ValidatorIdOf = ::ValidatorId; pub type SessionKeysOf = ::Keys; @@ -114,35 +115,48 @@ impl BasicParachainRuntime for T +where + T: frame_system::Config + pallet_balances::Config + pallet_session::Config + pallet_xcm::Config - + parachain_info::Config, -> { + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config, + ValidatorIdOf: From>, +{ +} + +/// Basic builder based on balances, collators and pallet_session. 
+pub struct ExtBuilder { // endowed accounts with balances balances: Vec<(AccountIdOf, BalanceOf)>, // collators to test block prod collators: Vec>, // keys added to pallet session keys: Vec<(AccountIdOf, ValidatorIdOf, SessionKeysOf)>, - // safe xcm version for pallet_xcm + // safe XCM version for pallet_xcm safe_xcm_version: Option, // para id para_id: Option, _runtime: PhantomData, } -impl< - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config, - > Default for ExtBuilder -{ +impl Default for ExtBuilder { fn default() -> ExtBuilder { ExtBuilder { balances: vec![], @@ -155,14 +169,7 @@ impl< } } -impl< - Runtime: frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config, - > ExtBuilder -{ +impl ExtBuilder { pub fn with_balances( mut self, balances: Vec<(AccountIdOf, BalanceOf)>, @@ -198,12 +205,7 @@ impl< self } - pub fn build(self) -> sp_io::TestExternalities - where - Runtime: - pallet_collator_selection::Config + pallet_balances::Config + pallet_session::Config, - ValidatorIdOf: From>, - { + pub fn build(self) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_xcm::GenesisConfig:: { diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index fb66275b025af8c782bd450793de8c47ae4154ed..a21023a933137448bcc84e6e6151f8a932c7f998 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true edition.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -22,52 +25,52 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = 
"../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", 
default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", 
default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -75,10 +78,13 @@ cumulus-primitives-utility = { path = "../../../../primitives/utility", default- pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } +assets-common = { path = "../../assets/common", default-features = false } +snowbridge-rococo-common = { path = "../../../../../bridges/snowbridge/parachain/runtime/rococo-common", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ + "assets-common/std", "codec/std", "cumulus-pallet-aura-ext/std", "cumulus-pallet-dmp-queue/std", @@ -115,6 +121,7 @@ std = [ "polkadot-primitives/std", "polkadot-runtime-common/std", "scale-info/std", + "snowbridge-rococo-common/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", @@ -135,6 +142,7 @@ std = [ ] runtime-benchmarks = [ + "assets-common/runtime-benchmarks", "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", @@ -158,6 +166,7 @@ runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", + "snowbridge-rococo-common/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", @@ -190,4 +199,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 5ef9af7c7127bc637a3f70627803ee3362f82072..541bcd05644f58474b2f8bd51f6fb7ac0444823f 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -32,10 +32,11 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); mod weights; pub mod xcm_config; +use assets_common::MultiLocationForAssetId; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, pallet_prelude::Weight, @@ -230,7 +231,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("penpal-parachain"), impl_name: create_runtime_str!("penpal-parachain"), authoring_version: 1, - spec_version: 10000, + spec_version: 1, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -323,6 +324,7 @@ parameter_types! { // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; @@ -434,7 +436,7 @@ parameter_types! 
{ // pub type AssetsForceOrigin = // EnsureOneOf, EnsureXcm>>; -impl pallet_assets::Config for Runtime { +impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = AssetId; @@ -457,6 +459,41 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +parameter_types! { + // we just reuse the same deposits + pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); + pub const ForeignAssetsAssetAccountDeposit: Balance = AssetAccountDeposit::get(); + pub const ForeignAssetsApprovalDeposit: Balance = ApprovalDeposit::get(); + pub const ForeignAssetsAssetsStringLimit: u32 = AssetsStringLimit::get(); + pub const ForeignAssetsMetadataDepositBase: Balance = MetadataDepositBase::get(); + pub const ForeignAssetsMetadataDepositPerByte: Balance = MetadataDepositPerByte::get(); +} + +/// Another pallet assets instance to store foreign assets from bridgehub. +pub type ForeignAssetsInstance = pallet_assets::Instance2; +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = MultiLocationForAssetId; + type AssetIdParameter = MultiLocationForAssetId; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = ForeignAssetsAssetDeposit; + type MetadataDepositBase = ForeignAssetsMetadataDepositBase; + type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; + type ApprovalDeposit = ForeignAssetsApprovalDeposit; + type StringLimit = ForeignAssetsAssetsStringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = pallet_assets::weights::SubstrateWeight; + type CallbackHandle = (); + type AssetAccountDeposit = ForeignAssetsAssetAccountDeposit; + type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; +} + parameter_types! { pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); @@ -577,7 +614,12 @@ impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< - pallet_assets::BalanceToAssetBalance, + pallet_assets::BalanceToAssetBalance< + Balances, + Runtime, + ConvertInto, + pallet_assets::Instance1, + >, AssetsToBlockAuthor, >; } @@ -619,7 +661,8 @@ construct_runtime!( MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // The main stage. 
- Assets: pallet_assets::{Pallet, Call, Storage, Event} = 50, + Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50, + ForeignAssets: pallet_assets::::{Pallet, Call, Storage, Event} = 51, Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/weights/mod.rs b/cumulus/parachains/runtimes/testing/penpal/src/weights/mod.rs index 30fa2c4060689ff98cc427c84f81866172845e52..b473d49e20e67329d893e1e565330cbe9290c64f 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/weights/mod.rs @@ -24,5 +24,4 @@ pub mod rocksdb_weights; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 710dfd79877cb1e26e50a228584791d3caa21165..ed405aeddb38a04377660c185cdcb445000fce90 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -24,8 +24,8 @@ //! soon. use super::{ AccountId, AllPalletsWithSystem, AssetId as AssetIdPalletAssets, Assets, Balance, Balances, - ParachainInfo, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - WeightToFee, XcmpQueue, + ForeignAssets, ParachainInfo, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, WeightToFee, XcmpQueue, }; use core::marker::PhantomData; use frame_support::{ @@ -38,19 +38,23 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_asset_tx_payment::HandleCredit; +use pallet_assets::Instance1; use pallet_xcm::XcmPassthrough; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::impls::ToAuthor; +use snowbridge_rococo_common::EthereumNetwork; use sp_runtime::traits::Zero; use xcm::latest::prelude::*; +#[allow(deprecated)] use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, ConvertedConcreteId, CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, FungiblesAdapter, IsConcrete, LocalMint, NativeAsset, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -74,6 +78,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting assets on this chain. 
+#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, @@ -92,12 +97,24 @@ pub type FungiblesTransactor = FungiblesAdapter< // Use this fungibles implementation: Assets, // Use this currency when it is a fungible asset matching the given location or name: - ConvertedConcreteId< - AssetIdPalletAssets, - Balance, - AsPrefixedGeneralIndex, - JustTry, - >, + ( + ConvertedConcreteId< + AssetIdPalletAssets, + Balance, + AsPrefixedGeneralIndex, + JustTry, + >, + ConvertedConcreteId< + AssetIdPalletAssets, + Balance, + AsPrefixedGeneralIndex< + SystemAssetHubAssetsPalletLocation, + AssetIdPalletAssets, + JustTry, + >, + JustTry, + >, + ), // Convert an XCM MultiLocation into a local account id: LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): @@ -109,8 +126,28 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; +/// `AssetId/Balance` converter for `TrustBackedAssets` +pub type ForeignAssetsConvertedConcreteId = + assets_common::ForeignAssetsConvertedConcreteId, Balance>; + +/// Means for transacting foreign assets from different global consensus. +pub type ForeignFungiblesTransactor = FungiblesAdapter< + // Use this fungibles implementation: + ForeignAssets, + // Use this currency when it is a fungible asset matching the given location or name: + ForeignAssetsConvertedConcreteId, + // Convert an XCM MultiLocation into a local account id: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We dont need to check teleports here. + NoChecking, + // The account to use for tracking teleports. + CheckingAccount, +>; + /// Means for transacting assets on this chain. -pub type AssetTransactors = (CurrencyTransactor, FungiblesTransactor); +pub type AssetTransactors = (CurrencyTransactor, ForeignFungiblesTransactor, FungiblesTransactor); /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, /// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can @@ -126,6 +163,9 @@ pub type XcmOriginToTransactDispatchOrigin = ( // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when // recognized. SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, // Native signed account converter; this just converts an `AccountId32` origin into a normal // `RuntimeOrigin::Signed` origin of the same 32-byte value. SignedAccountId32AsNative, @@ -182,14 +222,31 @@ pub type Barrier = TrailingSetTopicAsId< /// Type alias to conveniently refer to `frame_system`'s `Config::AccountId`. pub type AccountIdOf = ::AccountId; -/// Asset filter that allows all assets from a certain location. -pub struct AssetsFrom(PhantomData); -impl> ContainsPair for AssetsFrom { +/// Asset filter that allows all assets from a certain location matching asset id. 
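`AssetTransactors` above is now a tuple of three transactors; the executor tries them in order and the first one that accepts the asset handles it, and the nested `ConvertedConcreteId` pair inside `FungiblesTransactor` resolves asset ids the same way. A minimal sketch of that first-match-wins tuple pattern, assuming purely illustrative types rather than the real `TransactAsset`/`MatchesFungibles` traits:

#![allow(dead_code)]

/// Illustrative asset representation (the real code uses `MultiAsset`).
#[derive(Clone, Debug, PartialEq)]
enum Asset {
    Native(u128),
    Local { index: u32, amount: u128 },
    Foreign { para: u32, amount: u128 },
}

/// Simplified stand-in for a transactor: it may or may not know how to
/// deposit a given asset.
trait DepositAsset {
    fn deposit(asset: &Asset) -> Result<(), ()>;
}

struct CurrencyTransactor;
impl DepositAsset for CurrencyTransactor {
    fn deposit(asset: &Asset) -> Result<(), ()> {
        match asset { Asset::Native(_) => Ok(()), _ => Err(()) }
    }
}

struct ForeignFungiblesTransactor;
impl DepositAsset for ForeignFungiblesTransactor {
    fn deposit(asset: &Asset) -> Result<(), ()> {
        match asset { Asset::Foreign { .. } => Ok(()), _ => Err(()) }
    }
}

struct FungiblesTransactor;
impl DepositAsset for FungiblesTransactor {
    fn deposit(asset: &Asset) -> Result<(), ()> {
        match asset { Asset::Local { .. } => Ok(()), _ => Err(()) }
    }
}

// The tuple implementation tries each member in order and returns the first success.
impl<A: DepositAsset, B: DepositAsset, C: DepositAsset> DepositAsset for (A, B, C) {
    fn deposit(asset: &Asset) -> Result<(), ()> {
        A::deposit(asset)
            .or_else(|_| B::deposit(asset))
            .or_else(|_| C::deposit(asset))
    }
}

type AssetTransactors = (CurrencyTransactor, ForeignFungiblesTransactor, FungiblesTransactor);

fn main() {
    assert!(<AssetTransactors>::deposit(&Asset::Foreign { para: 1000, amount: 1 }).is_ok());
    assert!(<AssetTransactors>::deposit(&Asset::Native(1)).is_ok());
}

The `AssetPrefixFrom` filter introduced by the doc comment above is defined directly below.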
+pub struct AssetPrefixFrom(PhantomData<(Prefix, Origin)>); +impl ContainsPair for AssetPrefixFrom +where + Prefix: Get, + Origin: Get, +{ + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + let loc = Origin::get(); + &loc == origin && + matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + if asset_loc.starts_with(&Prefix::get())) + } +} + +type AssetsFrom = AssetPrefixFrom; + +/// Asset filter that allows native/relay asset if coming from a certain location. +pub struct NativeAssetFrom(PhantomData); +impl> ContainsPair for NativeAssetFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { let loc = T::get(); &loc == origin && matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } - if asset_loc.match_and_split(&loc).is_some()) + if *asset_loc == MultiLocation::from(Parent)) } } @@ -208,56 +265,21 @@ where /// A `HandleCredit` implementation that naively transfers the fees to the block author. /// Will drop and burn the assets in case the transfer fails. pub struct AssetsToBlockAuthor(PhantomData); -impl HandleCredit, pallet_assets::Pallet> for AssetsToBlockAuthor +impl HandleCredit, pallet_assets::Pallet> for AssetsToBlockAuthor where - R: pallet_authorship::Config + pallet_assets::Config, + R: pallet_authorship::Config + pallet_assets::Config, AccountIdOf: From + Into, { - fn handle_credit(credit: Credit, pallet_assets::Pallet>) { + fn handle_credit(credit: Credit, pallet_assets::Pallet>) { if let Some(author) = pallet_authorship::Pallet::::author() { // In case of error: Will drop the result triggering the `OnDrop` of the imbalance. - let _ = pallet_assets::Pallet::::resolve(&author, credit); + let _ = pallet_assets::Pallet::::resolve(&author, credit); } } } -pub trait Reserve { - /// Returns assets reserve location. - fn reserve(&self) -> Option; -} - -// Takes the chain part of a MultiAsset -impl Reserve for MultiAsset { - fn reserve(&self) -> Option { - if let AssetId::Concrete(location) = self.id { - let first_interior = location.first_interior(); - let parents = location.parent_count(); - match (parents, first_interior) { - (0, Some(Parachain(id))) => Some(MultiLocation::new(0, X1(Parachain(*id)))), - (1, Some(Parachain(id))) => Some(MultiLocation::new(1, X1(Parachain(*id)))), - (1, _) => Some(MultiLocation::parent()), - _ => None, - } - } else { - None - } - } -} - -/// A `FilterAssetLocation` implementation. Filters multi native assets whose -/// reserve is same with `origin`. -pub struct MultiNativeAsset; -impl ContainsPair for MultiNativeAsset { - fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { - if let Some(ref reserve) = asset.reserve() { - if reserve == origin { - return true - } - } - false - } -} - +// This asset can be added to AH as ForeignAsset and teleported between Penpal and AH +pub const TELEPORTABLE_ASSET_ID: u32 = 2; parameter_types! { /// The location that this chain recognizes as the Relay network's Asset Hub. pub SystemAssetHubLocation: MultiLocation = MultiLocation::new(1, X1(Parachain(1000))); @@ -265,10 +287,35 @@ parameter_types! 
{ // the Relay Chain's Asset Hub's Assets pallet index pub SystemAssetHubAssetsPalletLocation: MultiLocation = MultiLocation::new(1, X2(Parachain(1000), PalletInstance(50))); + pub AssetsPalletLocation: MultiLocation = + MultiLocation::new(0, X1(PalletInstance(50))); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub LocalTeleportableToAssetHub: MultiLocation = MultiLocation::new( + 0, + X2(PalletInstance(50), GeneralIndex(TELEPORTABLE_ASSET_ID.into())) + ); + pub EthereumLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(EthereumNetwork::get()))); } -pub type Reserves = (NativeAsset, AssetsFrom); +/// Accepts asset with ID `AssetLocation` and is coming from `Origin` chain. +pub struct AssetFromChain(PhantomData<(AssetLocation, Origin)>); +impl, Origin: Get> + ContainsPair for AssetFromChain +{ + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + log::trace!(target: "xcm::contains", "AssetFromChain asset: {:?}, origin: {:?}", asset, origin); + *origin == Origin::get() && matches!(asset.id, Concrete(id) if id == AssetLocation::get()) + } +} + +pub type Reserves = ( + NativeAsset, + AssetsFrom, + NativeAssetFrom, + AssetPrefixFrom, +); +pub type TrustedTeleporters = + (AssetFromChain,); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -277,8 +324,9 @@ impl xcm_executor::Config for XcmConfig { // How to withdraw and deposit an asset. type AssetTransactor = AssetTransactors; type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = MultiNativeAsset; // TODO: maybe needed to be replaced by Reserves - type IsTeleporter = NativeAsset; + type IsReserve = Reserves; + // no teleport trust established with other chains + type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = FixedWeightBounds; @@ -312,11 +360,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -342,8 +385,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -353,3 +394,12 @@ impl cumulus_pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmExecutor = XcmExecutor; } + +/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
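The `Reserves` and `TrustedTeleporters` tuples above are `ContainsPair` filters over an asset and the origin it arrived from: each one answers whether that asset may be treated as reserve-backed or teleported when sent by that origin. A simplified, value-based sketch of such a filter, using toy junction ids instead of `MultiLocation` and plain fields instead of `Get`-style type parameters:

#![allow(dead_code)]

/// Very small stand-in for an XCM location: just a path of junction ids.
#[derive(Clone, Debug, PartialEq)]
struct Location(Vec<u32>);

impl Location {
    fn starts_with(&self, prefix: &Location) -> bool {
        self.0.len() >= prefix.0.len() && self.0[..prefix.0.len()] == prefix.0[..]
    }
}

/// Illustrative asset: a concrete location id plus a fungible amount.
struct Asset {
    id: Location,
    amount: u128,
}

/// Simplified, instance-based version of the filters the executor consults
/// for `IsReserve` / `IsTeleporter`.
trait AssetFilter {
    fn contains(&self, asset: &Asset, origin: &Location) -> bool;
}

/// Accepts assets whose id starts with `prefix`, but only when sent from
/// `origin` (compare `AssetPrefixFrom<Prefix, Origin>` in the diff).
struct PrefixFrom {
    prefix: Location,
    origin: Location,
}

impl AssetFilter for PrefixFrom {
    fn contains(&self, asset: &Asset, origin: &Location) -> bool {
        origin == &self.origin && asset.id.starts_with(&self.prefix)
    }
}

fn main() {
    // Pretend junction ids: [1, 1000] = a sibling Asset Hub, [2, 7, ..] = an
    // Ethereum-prefixed asset id. Purely illustrative values.
    let filter = PrefixFrom { prefix: Location(vec![2, 7]), origin: Location(vec![1, 1000]) };
    let bridged = Asset { id: Location(vec![2, 7, 42]), amount: 10 };
    assert!(filter.contains(&bridged, &Location(vec![1, 1000])));
    assert!(!filter.contains(&bridged, &Location(vec![1, 2000])));
}

The `XcmBenchmarkHelper` announced by the doc comment above follows directly below.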
+pub struct XcmBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> MultiLocation { + MultiLocation { parents: 1, interior: X1(Parachain(id)) } + } +} diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 5e9d347a25deb4c0ab7d9bdb5543aff69de86036..a23b7558bcec00a1eda4c4435e0545e42844a389 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -6,52 +6,56 @@ edition.workspace = true description = "Simple runtime used by the rococo parachain(s)" license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-sudo = { path = 
"../../../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } # 
Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-ping = { path = "../../../pallets/ping", default-features = false } +cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -61,7 +65,7 @@ parachain-info = { package = "staging-parachain-info", path = "../../../pallets/ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -70,6 +74,7 @@ std = [ "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-ping/std", + "cumulus-primitives-aura/std", "cumulus-primitives-core/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", @@ -131,4 +136,4 @@ runtime-benchmarks = [ "xcm-executor/runtime-benchmarks", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 4cb83ccf8201a88ff6473dd01ab5d4d53503fb67..206e4970bae9092931333f194174a3a92fdd6130 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -22,13 +22,13 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; use sp_core::OpaqueMetadata; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, Hash as HashT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -39,7 +39,7 @@ use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. 
pub use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, match_types, parameter_types, @@ -83,12 +83,14 @@ use xcm_executor::traits::JustTry; use pallet_xcm::{EnsureXcm, IsMajorityOfBody, XcmPassthrough}; use polkadot_parachain_primitives::primitives::Sibling; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, - CurrencyAdapter, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, NativeAsset, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, UsingComponents, + EnsureXcmOrigin, FixedWeightBounds, IsConcrete, NativeAsset, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + UsingComponents, }; use xcm_executor::XcmExecutor; @@ -106,14 +108,14 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, state_version: 0, }; -pub const MILLISECS_PER_BLOCK: u64 = 12000; +pub const MILLISECS_PER_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; @@ -143,18 +145,18 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for .5 seconds of compute with a 12 second average block time. +/// We allow for 2 seconds of compute with a 6 second average block time. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, ); /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included /// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; /// How many parachain blocks are processed by the relay chain per parent. Limits the /// number of blocks authored per slot. -const BLOCK_PROCESSING_VELOCITY: u32 = 1; +const BLOCK_PROCESSING_VELOCITY: u32 = 2; /// Relay chain slot duration, in milliseconds. const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; @@ -184,6 +186,7 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; @@ -277,6 +280,13 @@ parameter_types! 
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } +type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, +>; + impl cumulus_pallet_parachain_system::Config for Runtime { type WeightInfo = (); type RuntimeEvent = RuntimeEvent; @@ -287,13 +297,8 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = ConsensusHook; } impl parachain_info::Config for Runtime {} @@ -342,6 +347,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting assets on this chain. +#[allow(deprecated)] pub type CurrencyTransactor = CurrencyAdapter< // Use this currency: Balances, @@ -492,11 +498,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -518,8 +519,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); @@ -591,9 +590,9 @@ impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; + type AllowMultipleBlocksPerSlot = ConstBool; #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; + type SlotDuration = ConstU64; } construct_runtime! { @@ -631,7 +630,7 @@ pub type Balance = u128; /// Index of a transaction in the chain. pub type Nonce = u32; /// A hash of some data used by the chain. -pub type Hash = sp_core::H256; +pub type Hash = ::Output; /// An index to a block. pub type BlockNumber = u32; /// The address format for describing accounts. @@ -758,7 +757,7 @@ impl_runtime_apis! { impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) } fn authorities() -> Vec { @@ -831,6 +830,15 @@ impl_runtime_apis! { build_config::(config) } } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
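The constants changed in this test runtime hang together: 6-second blocks (`MILLISECS_PER_BLOCK = 6000`) against 6-second relay slots, a processing velocity of 2 (up to two parachain blocks authored per relay parent), an unincluded segment of up to 3 blocks awaiting inclusion, and a per-block budget of 2 seconds of reference time. A small self-check of that arithmetic, assuming the usual picosecond scale for `ref_time` (the constant is written out here rather than imported):

fn main() {
    // Values taken from the hunks above.
    const MILLISECS_PER_BLOCK: u64 = 6_000;
    const RELAY_CHAIN_SLOT_DURATION_MILLIS: u64 = 6_000;
    const BLOCK_PROCESSING_VELOCITY: u32 = 2;
    const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3;

    // In weight v2, `ref_time` is measured in picoseconds, so one second of
    // reference time is 10^12.
    const WEIGHT_REF_TIME_PER_SECOND: u64 = 1_000_000_000_000;
    let max_block_ref_time = WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2);

    // "2 seconds of compute with a 6 second average block time":
    assert_eq!(max_block_ref_time / WEIGHT_REF_TIME_PER_SECOND, 2);
    assert_eq!(MILLISECS_PER_BLOCK, RELAY_CHAIN_SLOT_DURATION_MILLIS);

    // The unincluded segment gives headroom beyond one block per relay parent:
    // up to `velocity` blocks may be authored per parent, and up to `capacity`
    // of them may still be waiting for inclusion on the relay chain.
    assert!(UNINCLUDED_SEGMENT_CAPACITY > BLOCK_PROCESSING_VELOCITY);
    println!("max ref_time per block: {max_block_ref_time} ps");
}

The `register_validate_block!` invocation continues directly below.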
{ diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index f7252a39a38aa485e591d0cd27c46f06654504af..1c055f6b2dd26043c86c01acae92f4101c4e7249 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -1,40 +1,41 @@ [package] name = "polkadot-parachain-bin" -version = "1.1.0" +version = "1.5.0" authors.workspace = true build = "build.rs" edition.workspace = true description = "Runs a polkadot parachain node which could be a collator." license = "Apache-2.0" +[lints] +workspace = true + [[bin]] name = "polkadot-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.73" -clap = { version = "4.4.6", features = ["derive"] } +async-trait = "0.1.74" +clap = { version = "4.4.11", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.28" hex-literal = "0.4.1" log = "0.4.20" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" # Local rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" } shell-runtime = { path = "../parachains/runtimes/starters/shell" } -glutton-runtime = { path = "../parachains/runtimes/glutton/glutton-kusama" } +glutton-westend-runtime = { path = "../parachains/runtimes/glutton/glutton-westend" } seedling-runtime = { path = "../parachains/runtimes/starters/seedling" } -asset-hub-polkadot-runtime = { path = "../parachains/runtimes/assets/asset-hub-polkadot" } -asset-hub-kusama-runtime = { path = "../parachains/runtimes/assets/asset-hub-kusama" } asset-hub-rococo-runtime = { path = "../parachains/runtimes/assets/asset-hub-rococo" } asset-hub-westend-runtime = { path = "../parachains/runtimes/assets/asset-hub-westend" } -collectives-polkadot-runtime = { path = "../parachains/runtimes/collectives/collectives-polkadot" } +collectives-westend-runtime = { path = "../parachains/runtimes/collectives/collectives-westend" } contracts-rococo-runtime = { path = "../parachains/runtimes/contracts/contracts-rococo" } bridge-hub-rococo-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-rococo" } -bridge-hub-kusama-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-kusama" } -bridge-hub-polkadot-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-polkadot" } +coretime-rococo-runtime = { path = "../parachains/runtimes/coretime/coretime-rococo" } +coretime-westend-runtime = { path = "../parachains/runtimes/coretime/coretime-westend" } bridge-hub-westend-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-westend" } penpal-runtime = { path = "../parachains/runtimes/testing/penpal" } jsonrpsee = { version = "0.16.2", features = ["server"] } @@ -43,12 +44,14 @@ parachains-common = { path = "../parachains/common" } # Substrate frame-benchmarking = { path = "../../substrate/frame/benchmarking" } frame-benchmarking-cli = { path = "../../substrate/utils/frame/benchmarking-cli" } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false} +sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } sp-io = { path = "../../substrate/primitives/io" } sp-core = { path = "../../substrate/primitives/core" } sp-session = { path = "../../substrate/primitives/session" } +frame-try-runtime = { path = "../../substrate/frame/try-runtime", optional = true } sc-consensus = { path = "../../substrate/client/consensus/common" } sp-tracing = { path = 
"../../substrate/primitives/tracing" } +frame-support = { path = "../../substrate/frame/support" } sc-cli = { path = "../../substrate/client/cli" } sc-client-api = { path = "../../substrate/client/api" } sc-executor = { path = "../../substrate/client/executor" } @@ -61,12 +64,19 @@ sc-network-sync = { path = "../../substrate/client/network/sync" } sc-basic-authorship = { path = "../../substrate/client/basic-authorship" } sp-timestamp = { path = "../../substrate/primitives/timestamp" } sp-blockchain = { path = "../../substrate/primitives/blockchain" } +sp-genesis-builder = { path = "../../substrate/primitives/genesis-builder", default-features = false } sp-block-builder = { path = "../../substrate/primitives/block-builder" } sp-keystore = { path = "../../substrate/primitives/keystore" } sc-chain-spec = { path = "../../substrate/client/chain-spec" } sc-rpc = { path = "../../substrate/client/rpc" } +sp-version = { path = "../../substrate/primitives/version" } sc-tracing = { path = "../../substrate/client/tracing" } sp-offchain = { path = "../../substrate/primitives/offchain" } +frame-system-rpc-runtime-api = { path = "../../substrate/frame/system/rpc/runtime-api" } +pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { path = "../../substrate/frame/transaction-payment/rpc/runtime-api" } +sp-std = { path = "../../substrate/primitives/std" } +sp-inherents = { path = "../../substrate/primitives/inherents" } sp-api = { path = "../../substrate/primitives/api" } sp-consensus-aura = { path = "../../substrate/primitives/consensus/aura" } sc-sysinfo = { path = "../../substrate/client/sysinfo" } @@ -104,26 +114,25 @@ substrate-build-script-utils = { path = "../../substrate/utils/build-script-util assert_cmd = "2.0" nix = { version = "0.26.1", features = ["signal"] } tempfile = "3.8.0" -tokio = { version = "1.32.0", features = ["macros", "time", "parking_lot"] } +tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } wait-timeout = "0.2" [features] default = [] runtime-benchmarks = [ - "asset-hub-kusama-runtime/runtime-benchmarks", - "asset-hub-polkadot-runtime/runtime-benchmarks", "asset-hub-rococo-runtime/runtime-benchmarks", "asset-hub-westend-runtime/runtime-benchmarks", - "bridge-hub-kusama-runtime/runtime-benchmarks", - "bridge-hub-polkadot-runtime/runtime-benchmarks", "bridge-hub-rococo-runtime/runtime-benchmarks", "bridge-hub-westend-runtime/runtime-benchmarks", - "collectives-polkadot-runtime/runtime-benchmarks", + "collectives-westend-runtime/runtime-benchmarks", "contracts-rococo-runtime/runtime-benchmarks", + "coretime-rococo-runtime/runtime-benchmarks", + "coretime-westend-runtime/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", - "glutton-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "glutton-westend-runtime/runtime-benchmarks", "parachains-common/runtime-benchmarks", "penpal-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", @@ -134,17 +143,18 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "asset-hub-kusama-runtime/try-runtime", - "asset-hub-polkadot-runtime/try-runtime", "asset-hub-rococo-runtime/try-runtime", "asset-hub-westend-runtime/try-runtime", - "bridge-hub-kusama-runtime/try-runtime", - "bridge-hub-polkadot-runtime/try-runtime", "bridge-hub-rococo-runtime/try-runtime", "bridge-hub-westend-runtime/try-runtime", - 
"collectives-polkadot-runtime/try-runtime", + "collectives-westend-runtime/try-runtime", "contracts-rococo-runtime/try-runtime", - "glutton-runtime/try-runtime", + "coretime-rococo-runtime/try-runtime", + "coretime-westend-runtime/try-runtime", + "frame-support/try-runtime", + "frame-try-runtime/try-runtime", + "glutton-westend-runtime/try-runtime", + "pallet-transaction-payment/try-runtime", "penpal-runtime/try-runtime", "polkadot-cli/try-runtime", "polkadot-service/try-runtime", diff --git a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs index b4a73ff8aaa8a0a523db4cd18dd03aa7fc30db82..f889e05a1661a82e4e21aa94a52a16d474d9e86e 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs @@ -15,50 +15,20 @@ // along with Cumulus. If not, see . use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use cumulus_primitives_core::ParaId; use hex_literal::hex; -use parachains_common::{AccountId, AssetHubPolkadotAuraId, AuraId, Balance as AssetHubBalance}; +use parachains_common::{AccountId, AuraId, Balance as AssetHubBalance}; use sc_service::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -/// Specialized `ChainSpec` for the normal parachain runtime. -pub type AssetHubPolkadotChainSpec = - sc_service::GenericChainSpec; -pub type AssetHubKusamaChainSpec = - sc_service::GenericChainSpec; -pub type AssetHubWestendChainSpec = - sc_service::GenericChainSpec; -pub type AssetHubRococoChainSpec = - sc_service::GenericChainSpec; -pub type AssetHubWococoChainSpec = AssetHubRococoChainSpec; - -const ASSET_HUB_POLKADOT_ED: AssetHubBalance = - parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; -const ASSET_HUB_KUSAMA_ED: AssetHubBalance = - parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; const ASSET_HUB_WESTEND_ED: AssetHubBalance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; const ASSET_HUB_ROCOCO_ED: AssetHubBalance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; -/// Generate the session keys from individual elements. -/// -/// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn asset_hub_polkadot_session_keys( - keys: AssetHubPolkadotAuraId, -) -> asset_hub_polkadot_runtime::SessionKeys { - asset_hub_polkadot_runtime::SessionKeys { aura: keys } -} - -/// Generate the session keys from individual elements. -/// -/// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn asset_hub_kusama_session_keys(keys: AuraId) -> asset_hub_kusama_runtime::SessionKeys { - asset_hub_kusama_runtime::SessionKeys { aura: keys } -} - /// Generate the session keys from individual elements. /// /// The input must be a tuple of individual keys (a single arg for now since we have just one key). 
@@ -73,347 +43,12 @@ pub fn asset_hub_westend_session_keys(keys: AuraId) -> asset_hub_westend_runtime asset_hub_westend_runtime::SessionKeys { aura: keys } } -pub fn asset_hub_polkadot_development_config() -> AssetHubPolkadotChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - AssetHubPolkadotChainSpec::builder( - asset_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot-dev".into(), para_id: 1000 }, - ) - .with_name("Polkadot Asset Hub Development") - .with_id("asset-hub-polkadot-dev") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_polkadot_genesis( - // initial collators. - vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - 1000.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_polkadot_local_config() -> AssetHubPolkadotChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - AssetHubPolkadotChainSpec::builder( - asset_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot-local".into(), para_id: 1000 }, - ) - .with_name("Polkadot Asset Hub Local") - .with_id("asset-hub-polkadot-local") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_polkadot_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1000.into(), - )) - .with_boot_nodes(Vec::new()) - .with_properties(properties) - .build() -} - -// Not used for syncing, but just to determine the genesis values set for the upgrade from shell. -pub fn asset_hub_polkadot_config() -> AssetHubPolkadotChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - AssetHubPolkadotChainSpec::builder( - asset_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot".into(), para_id: 1000 }, - ) - .with_name("Polkadot Asset Hub") - .with_id("asset-hub-polkadot") - .with_chain_type(ChainType::Live) - .with_genesis_config_patch(asset_hub_polkadot_genesis( - // initial collators. 
- vec![ - ( - hex!("4c3d674d2a01060f0ded218e5dcc6f90c1726f43df79885eb3e22d97a20d5421").into(), - hex!("4c3d674d2a01060f0ded218e5dcc6f90c1726f43df79885eb3e22d97a20d5421") - .unchecked_into(), - ), - ( - hex!("c7d7d38d16bc23c6321152c50306212dc22c0efc04a2e52b5cccfc31ab3d7811").into(), - hex!("c7d7d38d16bc23c6321152c50306212dc22c0efc04a2e52b5cccfc31ab3d7811") - .unchecked_into(), - ), - ( - hex!("c5c07ba203d7375675f5c1ebe70f0a5bb729ae57b48bcc877fcc2ab21309b762").into(), - hex!("c5c07ba203d7375675f5c1ebe70f0a5bb729ae57b48bcc877fcc2ab21309b762") - .unchecked_into(), - ), - ( - hex!("0b2d0013fb974794bd7aa452465b567d48ef70373fe231a637c1fb7c547e85b3").into(), - hex!("0b2d0013fb974794bd7aa452465b567d48ef70373fe231a637c1fb7c547e85b3") - .unchecked_into(), - ), - ], - vec![], - 1000u32.into(), - )) - .with_boot_nodes(vec![ - "/ip4/34.65.251.121/tcp/30334/p2p/12D3KooWG3GrM6XKMM4gp3cvemdwUvu96ziYoJmqmetLZBXE8bSa" - .parse() - .unwrap(), - "/ip4/34.65.35.228/tcp/30334/p2p/12D3KooWMRyTLrCEPcAQD6c4EnudL3vVzg9zji3whvsMYPUYevpq" - .parse() - .unwrap(), - "/ip4/34.83.247.146/tcp/30334/p2p/12D3KooWE4jFh5FpJDkWVZhnWtFnbSqRhdjvC7Dp9b8b3FTuubQC" - .parse() - .unwrap(), - "/ip4/104.199.117.230/tcp/30334/p2p/12D3KooWG9R8pVXKumVo2rdkeVD4j5PVhRTqmYgLHY3a4yPYgLqM" - .parse() - .unwrap(), - ]) - .with_properties(properties) - .build() -} - -fn asset_hub_polkadot_genesis( - invulnerables: Vec<(AccountId, AssetHubPolkadotAuraId)>, - endowed_accounts: Vec, - id: ParaId, -) -> serde_json::Value { - serde_json::json!( { - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, ASSET_HUB_POLKADOT_ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": ASSET_HUB_POLKADOT_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_polkadot_session_keys(aura), // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - } - }) -} - -pub fn asset_hub_kusama_development_config() -> AssetHubKusamaChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - properties.insert("tokenSymbol".into(), "KSM".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - AssetHubKusamaChainSpec::builder( - asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama-dev".into(), para_id: 1000 }, - ) - .with_name("Kusama Asset Hub Development") - .with_id("asset-hub-kusama-dev") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_kusama_genesis( - // initial collators. 
- vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - 1000.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_kusama_local_config() -> AssetHubKusamaChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - properties.insert("tokenSymbol".into(), "KSM".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - AssetHubKusamaChainSpec::builder( - asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama-local".into(), para_id: 1000 }, - ) - .with_name("Kusama Asset Hub Local") - .with_id("asset-hub-kusama-local") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_kusama_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1000.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_kusama_config() -> AssetHubKusamaChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - properties.insert("tokenSymbol".into(), "KSM".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - AssetHubKusamaChainSpec::builder( - asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama".into(), para_id: 1000 }, - ) - .with_name("Kusama Asset Hub") - .with_id("asset-hub-kusama") - .with_chain_type(ChainType::Live) - .with_genesis_config_patch(asset_hub_kusama_genesis( - // initial collators. 
- vec![ - ( - hex!("50673d59020488a4ffc9d8c6de3062a65977046e6990915617f85fef6d349730").into(), - hex!("50673d59020488a4ffc9d8c6de3062a65977046e6990915617f85fef6d349730") - .unchecked_into(), - ), - ( - hex!("fe8102dbc244e7ea2babd9f53236d67403b046154370da5c3ea99def0bd0747a").into(), - hex!("fe8102dbc244e7ea2babd9f53236d67403b046154370da5c3ea99def0bd0747a") - .unchecked_into(), - ), - ( - hex!("38144b5398e5d0da5ec936a3af23f5a96e782f676ab19d45f29075ee92eca76a").into(), - hex!("38144b5398e5d0da5ec936a3af23f5a96e782f676ab19d45f29075ee92eca76a") - .unchecked_into(), - ), - ( - hex!("3253947640e309120ae70fa458dcacb915e2ddd78f930f52bd3679ec63fc4415").into(), - hex!("3253947640e309120ae70fa458dcacb915e2ddd78f930f52bd3679ec63fc4415") - .unchecked_into(), - ), - ], - Vec::new(), - 1000.into(), - )) - .with_properties(properties) - .build() -} - -fn asset_hub_kusama_genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, -) -> serde_json::Value { - serde_json::json!( { - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, ASSET_HUB_KUSAMA_ED * 524_288)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": ASSET_HUB_KUSAMA_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_kusama_session_keys(aura), // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - }) -} - -pub fn asset_hub_westend_development_config() -> AssetHubWestendChainSpec { +pub fn asset_hub_westend_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubWestendChainSpec::builder( + GenericChainSpec::builder( asset_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 1000 }, @@ -433,18 +68,19 @@ pub fn asset_hub_westend_development_config() -> AssetHubWestendChainSpec { get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], + parachains_common::westend::currency::UNITS * 1_000_000, 1000.into(), )) .with_properties(properties) .build() } -pub fn asset_hub_westend_local_config() -> AssetHubWestendChainSpec { +pub fn asset_hub_westend_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubWestendChainSpec::builder( + GenericChainSpec::builder( asset_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend-local".into(), para_id: 1000 }, @@ -478,18 +114,19 @@ pub fn asset_hub_westend_local_config() -> AssetHubWestendChainSpec { get_account_id_from_seed::("Eve//stash"), get_account_id_from_seed::("Ferdie//stash"), ], + parachains_common::westend::currency::UNITS * 1_000_000, 1000.into(), )) .with_properties(properties) .build() } -pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { +pub fn asset_hub_westend_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); 
properties.insert("tokenDecimals".into(), 12.into()); - AssetHubWestendChainSpec::builder( + GenericChainSpec::builder( asset_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 1000 }, @@ -522,6 +159,7 @@ pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { ), ], Vec::new(), + ASSET_HUB_WESTEND_ED * 4096, 1000.into(), )) .with_properties(properties) @@ -531,6 +169,7 @@ pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { fn asset_hub_westend_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, + endowment: AssetHubBalance, id: ParaId, ) -> serde_json::Value { serde_json::json!({ @@ -538,7 +177,7 @@ fn asset_hub_westend_genesis( "balances": endowed_accounts .iter() .cloned() - .map(|k| (k, ASSET_HUB_WESTEND_ED * 4096)) + .map(|k| (k, endowment)) .collect::>(), }, "parachainInfo": { @@ -566,7 +205,7 @@ fn asset_hub_westend_genesis( }) } -pub fn asset_hub_rococo_development_config() -> AssetHubRococoChainSpec { +pub fn asset_hub_rococo_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenSymbol".into(), "ROC".into()); @@ -579,26 +218,13 @@ pub fn asset_hub_rococo_development_config() -> AssetHubRococoChainSpec { ) } -pub fn asset_hub_wococo_development_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - properties.insert("tokenDecimals".into(), 12.into()); - asset_hub_rococo_like_development_config( - properties, - "Wococo Asset Hub Development", - "asset-hub-wococo-dev", - 1000, - ) -} - fn asset_hub_rococo_like_development_config( properties: sc_chain_spec::Properties, name: &str, chain_id: &str, para_id: u32, -) -> AssetHubRococoChainSpec { - AssetHubRococoChainSpec::builder( +) -> GenericChainSpec { + GenericChainSpec::builder( asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-dev".into(), para_id }, ) @@ -617,13 +243,14 @@ fn asset_hub_rococo_like_development_config( get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], + parachains_common::rococo::currency::UNITS * 1_000_000, para_id.into(), )) .with_properties(properties) .build() } -pub fn asset_hub_rococo_local_config() -> AssetHubRococoChainSpec { +pub fn asset_hub_rococo_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenSymbol".into(), "ROC".into()); @@ -636,26 +263,13 @@ pub fn asset_hub_rococo_local_config() -> AssetHubRococoChainSpec { ) } -pub fn asset_hub_wococo_local_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - properties.insert("tokenDecimals".into(), 12.into()); - asset_hub_rococo_like_local_config( - properties, - "Wococo Asset Hub Local", - "asset-hub-wococo-local", - 1000, - ) -} - fn asset_hub_rococo_like_local_config( properties: sc_chain_spec::Properties, name: &str, chain_id: &str, para_id: u32, -) -> AssetHubRococoChainSpec { - AssetHubRococoChainSpec::builder( +) -> GenericChainSpec { + GenericChainSpec::builder( 
asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), para_id }, ) @@ -688,18 +302,19 @@ fn asset_hub_rococo_like_local_config( get_account_id_from_seed::("Eve//stash"), get_account_id_from_seed::("Ferdie//stash"), ], + parachains_common::rococo::currency::UNITS * 1_000_000, para_id.into(), )) .with_properties(properties) .build() } -pub fn asset_hub_rococo_genesis_config() -> AssetHubRococoChainSpec { +pub fn asset_hub_rococo_genesis_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); let para_id = 1000; - AssetHubRococoChainSpec::builder( + GenericChainSpec::builder( asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo".into(), para_id }, ) @@ -735,54 +350,7 @@ pub fn asset_hub_rococo_genesis_config() -> AssetHubRococoChainSpec { ), ], Vec::new(), - para_id.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_wococo_genesis_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - properties.insert("tokenDecimals".into(), 12.into()); - let para_id = 1000; - AssetHubRococoChainSpec::builder( - asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "wococo".into(), para_id }, - ) - .with_name("Wococo Asset Hub") - .with_id("asset-hub-wococo") - .with_chain_type(ChainType::Live) - .with_genesis_config_patch(asset_hub_rococo_genesis( - // initial collators. 
- vec![ - // 5C8RGkS8t5K93fB2hkgKbvSYs5iG6AknJMuQmbBDeazon9Lj - ( - hex!("02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534").into(), - hex!("02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534") - .unchecked_into(), - ), - // 5GePeDZQeBagXH7kH5QPKnQKi39Z5hoYFB5FmUtEvc4yxKej - ( - hex!("caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814").into(), - hex!("caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814") - .unchecked_into(), - ), - // 5CfnTTb9NMJDNKDntA83mHKoedZ7wjDC8ypLCTDd4NwUx3zv - ( - hex!("1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965").into(), - hex!("1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965") - .unchecked_into(), - ), - // 5EqheiwiG22gvGpN7cvrbeaQzhg7rzsYYVkYK4yj5vRrTQRQ - ( - hex!("7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66").into(), - hex!("7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66") - .unchecked_into(), - ), - ], - Vec::new(), + ASSET_HUB_ROCOCO_ED * 524_288, para_id.into(), )) .with_properties(properties) @@ -792,6 +360,7 @@ pub fn asset_hub_wococo_genesis_config() -> AssetHubWococoChainSpec { fn asset_hub_rococo_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, + endowment: AssetHubBalance, id: ParaId, ) -> serde_json::Value { serde_json::json!({ @@ -799,7 +368,7 @@ fn asset_hub_rococo_genesis( balances: endowed_accounts .iter() .cloned() - .map(|k| (k, ASSET_HUB_ROCOCO_ED * 524_288)) + .map(|k| (k, endowment)) .collect(), }, "parachainInfo": asset_hub_rococo_runtime::ParachainInfoConfig { diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 71fb9e5b1404bca0c155b13242e2b67e251ab229..1f43edf2243c0436bb9564c1724a1860ce404624 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -14,34 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
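A recurring simplification in the chain-spec changes above is dropping the per-runtime `ChainSpec` aliases in favour of one shared `GenericChainSpec`; judging from the removed aliases, the runtime-genesis type parameter was already `()` once genesis moved to JSON patches, so the aliases only differed in name. A toy sketch of that consolidation (stand-in types only, not `sc_service`):

#![allow(dead_code)]
use core::marker::PhantomData;

/// Toy stand-in for a chain-spec type: the first parameter used to name a
/// runtime genesis type, the second carries the client-side extensions.
struct ChainSpecOf<G, E>(PhantomData<(G, E)>);

struct Extensions {
    relay_chain: String,
    para_id: u32,
}

// Before: one alias per runtime, differing only in `G`.
//   type AssetHubWestendChainSpec = ChainSpecOf<AssetHubWestendGenesis, Extensions>;
//   type BridgeHubChainSpec       = ChainSpecOf<BridgeHubGenesis, Extensions>;

// After: genesis is a JSON patch, so `G` carries no information and a single
// shared alias is enough.
type GenericChainSpec = ChainSpecOf<(), Extensions>;

fn main() {
    let _spec: GenericChainSpec = ChainSpecOf(PhantomData);
    let _ext = Extensions { relay_chain: "westend-local".into(), para_id: 1000 };
}

The `bridge_hubs.rs` changes continue directly below.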
-use crate::chain_spec::{get_account_id_from_seed, get_collator_keys_from_seed}; +use crate::chain_spec::{get_account_id_from_seed, get_collator_keys_from_seed, GenericChainSpec}; use cumulus_primitives_core::ParaId; use parachains_common::Balance as BridgeHubBalance; use sc_chain_spec::ChainSpec; use sp_core::sr25519; -use std::{path::PathBuf, str::FromStr}; +use std::str::FromStr; /// Collects all supported BridgeHub configurations #[derive(Debug, PartialEq)] pub enum BridgeHubRuntimeType { + Kusama, + Polkadot, + Rococo, RococoLocal, // used by benchmarks RococoDevelopment, - Wococo, - WococoLocal, - - Kusama, - KusamaLocal, - // used by benchmarks - KusamaDevelopment, - - Polkadot, - PolkadotLocal, - // used by benchmarks - PolkadotDevelopment, - Westend, WestendLocal, // used by benchmarks @@ -54,20 +44,13 @@ impl FromStr for BridgeHubRuntimeType { fn from_str(value: &str) -> Result { match value { polkadot::BRIDGE_HUB_POLKADOT => Ok(BridgeHubRuntimeType::Polkadot), - polkadot::BRIDGE_HUB_POLKADOT_LOCAL => Ok(BridgeHubRuntimeType::PolkadotLocal), - polkadot::BRIDGE_HUB_POLKADOT_DEVELOPMENT => - Ok(BridgeHubRuntimeType::PolkadotDevelopment), kusama::BRIDGE_HUB_KUSAMA => Ok(BridgeHubRuntimeType::Kusama), - kusama::BRIDGE_HUB_KUSAMA_LOCAL => Ok(BridgeHubRuntimeType::KusamaLocal), - kusama::BRIDGE_HUB_KUSAMA_DEVELOPMENT => Ok(BridgeHubRuntimeType::KusamaDevelopment), westend::BRIDGE_HUB_WESTEND => Ok(BridgeHubRuntimeType::Westend), westend::BRIDGE_HUB_WESTEND_LOCAL => Ok(BridgeHubRuntimeType::WestendLocal), westend::BRIDGE_HUB_WESTEND_DEVELOPMENT => Ok(BridgeHubRuntimeType::WestendDevelopment), rococo::BRIDGE_HUB_ROCOCO => Ok(BridgeHubRuntimeType::Rococo), rococo::BRIDGE_HUB_ROCOCO_LOCAL => Ok(BridgeHubRuntimeType::RococoLocal), rococo::BRIDGE_HUB_ROCOCO_DEVELOPMENT => Ok(BridgeHubRuntimeType::RococoDevelopment), - wococo::BRIDGE_HUB_WOCOCO => Ok(BridgeHubRuntimeType::Wococo), - wococo::BRIDGE_HUB_WOCOCO_LOCAL => Ok(BridgeHubRuntimeType::WococoLocal), _ => Err(format!("Value '{}' is not configured yet", value)), } } @@ -76,67 +59,17 @@ impl FromStr for BridgeHubRuntimeType { impl BridgeHubRuntimeType { pub const ID_PREFIX: &'static str = "bridge-hub"; - pub fn chain_spec_from_json_file(&self, path: PathBuf) -> Result, String> { - match self { - BridgeHubRuntimeType::Polkadot | - BridgeHubRuntimeType::PolkadotLocal | - BridgeHubRuntimeType::PolkadotDevelopment => - Ok(Box::new(polkadot::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Kusama | - BridgeHubRuntimeType::KusamaLocal | - BridgeHubRuntimeType::KusamaDevelopment => - Ok(Box::new(kusama::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Westend | - BridgeHubRuntimeType::WestendLocal | - BridgeHubRuntimeType::WestendDevelopment => - Ok(Box::new(westend::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Rococo | - BridgeHubRuntimeType::RococoLocal | - BridgeHubRuntimeType::RococoDevelopment => - Ok(Box::new(rococo::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Wococo | BridgeHubRuntimeType::WococoLocal => - Ok(Box::new(wococo::BridgeHubChainSpec::from_json_file(path)?)), - } - } - pub fn load_config(&self) -> Result, String> { match self { - BridgeHubRuntimeType::Polkadot => - Ok(Box::new(polkadot::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-polkadot.json")[..], - )?)), - BridgeHubRuntimeType::PolkadotLocal => Ok(Box::new(polkadot::local_config( - polkadot::BRIDGE_HUB_POLKADOT_LOCAL, - "Polkadot BridgeHub Local", 
- "polkadot-local", - ParaId::new(1002), - ))), - BridgeHubRuntimeType::PolkadotDevelopment => Ok(Box::new(polkadot::local_config( - polkadot::BRIDGE_HUB_POLKADOT_DEVELOPMENT, - "Polkadot BridgeHub Development", - "polkadot-dev", - ParaId::new(1002), - ))), - BridgeHubRuntimeType::Kusama => - Ok(Box::new(kusama::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-kusama.json")[..], - )?)), - BridgeHubRuntimeType::KusamaLocal => Ok(Box::new(kusama::local_config( - kusama::BRIDGE_HUB_KUSAMA_LOCAL, - "Kusama BridgeHub Local", - "kusama-local", - ParaId::new(1003), - ))), - BridgeHubRuntimeType::KusamaDevelopment => Ok(Box::new(kusama::local_config( - kusama::BRIDGE_HUB_KUSAMA_DEVELOPMENT, - "Kusama BridgeHub Development", - "kusama-dev", - ParaId::new(1003), - ))), - BridgeHubRuntimeType::Westend => - Ok(Box::new(westend::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-westend.json")[..], - )?)), + BridgeHubRuntimeType::Polkadot => Ok(Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../../chain-specs/bridge-hub-polkadot.json")[..], + )?)), + BridgeHubRuntimeType::Kusama => Ok(Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../../chain-specs/bridge-hub-kusama.json")[..], + )?)), + BridgeHubRuntimeType::Westend => Ok(Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../../chain-specs/bridge-hub-westend.json")[..], + )?)), BridgeHubRuntimeType::WestendLocal => Ok(Box::new(westend::local_config( westend::BRIDGE_HUB_WESTEND_LOCAL, "Westend BridgeHub Local", @@ -151,10 +84,9 @@ impl BridgeHubRuntimeType { ParaId::new(1002), Some("Bob".to_string()), ))), - BridgeHubRuntimeType::Rococo => - Ok(Box::new(rococo::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-rococo.json")[..], - )?)), + BridgeHubRuntimeType::Rococo => Ok(Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../../chain-specs/bridge-hub-rococo.json")[..], + )?)), BridgeHubRuntimeType::RococoLocal => Ok(Box::new(rococo::local_config( rococo::BRIDGE_HUB_ROCOCO_LOCAL, "Rococo BridgeHub Local", @@ -171,17 +103,6 @@ impl BridgeHubRuntimeType { Some("Bob".to_string()), |_| (), ))), - BridgeHubRuntimeType::Wococo => - Ok(Box::new(wococo::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-wococo.json")[..], - )?)), - BridgeHubRuntimeType::WococoLocal => Ok(Box::new(wococo::local_config( - wococo::BRIDGE_HUB_WOCOCO_LOCAL, - "Wococo BridgeHub Local", - "wococo-local", - ParaId::new(1014), - Some("Bob".to_string()), - ))), } } } @@ -202,7 +123,7 @@ fn ensure_id(id: &str) -> Result<&str, String> { /// Sub-module for Rococo setup pub mod rococo { use super::{get_account_id_from_seed, get_collator_keys_from_seed, sr25519, ParaId}; - use crate::chain_spec::{Extensions, SAFE_XCM_VERSION}; + use crate::chain_spec::{Extensions, GenericChainSpec, SAFE_XCM_VERSION}; use parachains_common::{AccountId, AuraId}; use sc_chain_spec::ChainType; @@ -214,11 +135,6 @@ pub mod rococo { const BRIDGE_HUB_ROCOCO_ED: BridgeHubBalance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; - /// Specialized `ChainSpec` for the normal parachain runtime. 
- pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - - pub type RuntimeApi = bridge_hub_rococo_runtime::RuntimeApi; - pub fn local_config( id: &str, chain_name: &str, @@ -226,7 +142,7 @@ pub mod rococo { para_id: ParaId, bridges_pallet_owner_seed: Option, modify_props: ModifyProperties, - ) -> BridgeHubChainSpec { + ) -> GenericChainSpec { // Rococo defaults let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); @@ -234,7 +150,7 @@ pub mod rococo { properties.insert("tokenDecimals".into(), 12.into()); modify_props(&mut properties); - BridgeHubChainSpec::builder( + GenericChainSpec::builder( bridge_hub_rococo_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, @@ -309,174 +225,29 @@ pub mod rococo { "polkadotXcm": { "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - - "bridgeWococoGrandpa": { - "owner": bridges_pallet_owner.clone(), - }, "bridgeWestendGrandpa": { "owner": bridges_pallet_owner.clone(), }, - "bridgeRococoGrandpa": { - "owner": bridges_pallet_owner.clone(), - }, - "bridgeRococoMessages": { - "owner": bridges_pallet_owner.clone(), - }, - "bridgeWococoMessages": { - "owner": bridges_pallet_owner.clone(), - }, "bridgeWestendMessages": { "owner": bridges_pallet_owner.clone(), }, + "ethereumSystem": { + "paraId": id, + "assetHubParaId": 1000 + } }) } } -/// Sub-module for Wococo setup (reuses stuff from Rococo) -pub mod wococo { - use super::ParaId; - use crate::chain_spec::bridge_hubs::rococo; - - pub(crate) const BRIDGE_HUB_WOCOCO: &str = "bridge-hub-wococo"; - pub(crate) const BRIDGE_HUB_WOCOCO_LOCAL: &str = "bridge-hub-wococo-local"; - - pub type BridgeHubChainSpec = rococo::BridgeHubChainSpec; - pub type RuntimeApi = rococo::RuntimeApi; - - pub fn local_config( - id: &str, - chain_name: &str, - relay_chain: &str, - para_id: ParaId, - bridges_pallet_owner_seed: Option, - ) -> BridgeHubChainSpec { - rococo::local_config( - id, - chain_name, - relay_chain, - para_id, - bridges_pallet_owner_seed, - |properties| { - properties.insert("tokenSymbol".into(), "WOOK".into()); - }, - ) - } -} - /// Sub-module for Kusama setup pub mod kusama { - use super::{BridgeHubBalance, ParaId}; - use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, - }; - use parachains_common::{AccountId, AuraId}; - use sc_chain_spec::ChainType; - use sp_core::sr25519; - pub(crate) const BRIDGE_HUB_KUSAMA: &str = "bridge-hub-kusama"; - pub(crate) const BRIDGE_HUB_KUSAMA_LOCAL: &str = "bridge-hub-kusama-local"; - pub(crate) const BRIDGE_HUB_KUSAMA_DEVELOPMENT: &str = "bridge-hub-kusama-dev"; - const BRIDGE_HUB_KUSAMA_ED: BridgeHubBalance = - parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; - - /// Specialized `ChainSpec` for the normal parachain runtime. 
- pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - pub type RuntimeApi = bridge_hub_kusama_runtime::RuntimeApi; - - pub fn local_config( - id: &str, - chain_name: &str, - relay_chain: &str, - para_id: ParaId, - ) -> BridgeHubChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - properties.insert("tokenSymbol".into(), "KSM".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - BridgeHubChainSpec::builder( - bridge_hub_kusama_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, - ) - .with_name(chain_name) - .with_id(super::ensure_id(id).expect("invalid id")) - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - )) - .with_properties(properties) - .build() - } - - fn genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, - ) -> serde_json::Value { - serde_json::json!({ - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, BRIDGE_HUB_KUSAMA_ED * 524_288)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": BRIDGE_HUB_KUSAMA_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_kusama_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - } - }) - } } /// Sub-module for Westend setup. pub mod westend { use super::{get_account_id_from_seed, get_collator_keys_from_seed, sr25519, ParaId}; - use crate::chain_spec::{Extensions, SAFE_XCM_VERSION}; + use crate::chain_spec::{Extensions, GenericChainSpec, SAFE_XCM_VERSION}; use parachains_common::{AccountId, AuraId}; use sc_chain_spec::ChainType; @@ -488,23 +259,18 @@ pub mod westend { const BRIDGE_HUB_WESTEND_ED: BridgeHubBalance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; - /// Specialized `ChainSpec` for the normal parachain runtime. 
- pub type BridgeHubChainSpec = - sc_service::GenericChainSpec; - pub type RuntimeApi = bridge_hub_westend_runtime::RuntimeApi; - pub fn local_config( id: &str, chain_name: &str, relay_chain: &str, para_id: ParaId, bridges_pallet_owner_seed: Option, - ) -> BridgeHubChainSpec { + ) -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - BridgeHubChainSpec::builder( + GenericChainSpec::builder( bridge_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not build, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, @@ -591,110 +357,5 @@ pub mod westend { /// Sub-module for Polkadot setup pub mod polkadot { - use super::{BridgeHubBalance, ParaId}; - use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, - }; - use parachains_common::{AccountId, AuraId}; - use sc_chain_spec::ChainType; - use sp_core::sr25519; - pub(crate) const BRIDGE_HUB_POLKADOT: &str = "bridge-hub-polkadot"; - pub(crate) const BRIDGE_HUB_POLKADOT_LOCAL: &str = "bridge-hub-polkadot-local"; - pub(crate) const BRIDGE_HUB_POLKADOT_DEVELOPMENT: &str = "bridge-hub-polkadot-dev"; - const BRIDGE_HUB_POLKADOT_ED: BridgeHubBalance = - parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; - - /// Specialized `ChainSpec` for the normal parachain runtime. - pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - pub type RuntimeApi = bridge_hub_polkadot_runtime::RuntimeApi; - - pub fn local_config( - id: &str, - chain_name: &str, - relay_chain: &str, - para_id: ParaId, - ) -> BridgeHubChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - BridgeHubChainSpec::builder( - bridge_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, - ) - .with_name(chain_name) - .with_id(super::ensure_id(id).expect("invalid id")) - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - )) - .with_properties(properties) - .build() - } - - fn genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, - ) -> serde_json::Value { - serde_json::json!({ - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, BRIDGE_HUB_POLKADOT_ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": BRIDGE_HUB_POLKADOT_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_polkadot_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - } - }) - } } diff --git a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs index 0a8064f50ca589a2bccea79ef95fc913ce54c3f2..dd67bf975f773e8933430c36229334a45de8454b 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs @@ -15,42 +15,39 @@ // along with Cumulus. If not, see . use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use cumulus_primitives_core::ParaId; use parachains_common::{AccountId, AuraId, Balance as CollectivesBalance}; use sc_service::ChainType; use sp_core::sr25519; -pub type CollectivesPolkadotChainSpec = sc_service::GenericChainSpec<(), Extensions>; - -const COLLECTIVES_POLKADOT_ED: CollectivesBalance = - parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; +const COLLECTIVES_WESTEND_ED: CollectivesBalance = + parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; /// Generate the session keys from individual elements. /// /// The input must be a tuple of individual keys (a single arg for now since we have just one key). 
-pub fn collectives_polkadot_session_keys( - keys: AuraId, -) -> collectives_polkadot_runtime::SessionKeys { - collectives_polkadot_runtime::SessionKeys { aura: keys } +pub fn collectives_westend_session_keys(keys: AuraId) -> collectives_westend_runtime::SessionKeys { + collectives_westend_runtime::SessionKeys { aura: keys } } -pub fn collectives_polkadot_development_config() -> CollectivesPolkadotChainSpec { +pub fn collectives_westend_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "WND".into()); + properties.insert("tokenDecimals".into(), 12.into()); - CollectivesPolkadotChainSpec::builder( - collectives_polkadot_runtime::WASM_BINARY + GenericChainSpec::builder( + collectives_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot-dev".into(), para_id: 1002 }, + Extensions { relay_chain: "westend-dev".into(), para_id: 1002 }, ) - .with_name("Polkadot Collectives Development") - .with_id("collectives_polkadot_dev") + .with_name("Westend Collectives Development") + .with_id("collectives_westend_dev") .with_chain_type(ChainType::Local) - .with_genesis_config_patch(collectives_polkadot_genesis( + .with_genesis_config_patch(collectives_westend_genesis( // initial collators. vec![( get_account_id_from_seed::("Alice"), @@ -71,22 +68,22 @@ pub fn collectives_polkadot_development_config() -> CollectivesPolkadotChainSpec .build() } -/// Collectives Polkadot Local Config. -pub fn collectives_polkadot_local_config() -> CollectivesPolkadotChainSpec { +/// Collectives Westend Local Config. +pub fn collectives_westend_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "WND".into()); + properties.insert("tokenDecimals".into(), 12.into()); - CollectivesPolkadotChainSpec::builder( - collectives_polkadot_runtime::WASM_BINARY + GenericChainSpec::builder( + collectives_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot-local".into(), para_id: 1002 }, + Extensions { relay_chain: "westend-local".into(), para_id: 1002 }, ) - .with_name("Polkadot Collectives Local") - .with_id("collectives_polkadot_local") + .with_name("Westend Collectives Local") + .with_id("collectives_westend_local") .with_chain_type(ChainType::Local) - .with_genesis_config_patch(collectives_polkadot_genesis( + .with_genesis_config_patch(collectives_westend_genesis( // initial collators. 
vec![ ( @@ -119,7 +116,7 @@ pub fn collectives_polkadot_local_config() -> CollectivesPolkadotChainSpec { .build() } -fn collectives_polkadot_genesis( +fn collectives_westend_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, id: ParaId, @@ -129,7 +126,7 @@ fn collectives_polkadot_genesis( "balances": endowed_accounts .iter() .cloned() - .map(|k| (k, COLLECTIVES_POLKADOT_ED * 4096)) + .map(|k| (k, COLLECTIVES_WESTEND_ED * 4096)) .collect::>(), }, "parachainInfo": { @@ -137,7 +134,7 @@ fn collectives_polkadot_genesis( }, "collatorSelection": { "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": COLLECTIVES_POLKADOT_ED * 16, + "candidacyBond": COLLECTIVES_WESTEND_ED * 16, }, "session": { "keys": invulnerables @@ -146,7 +143,7 @@ fn collectives_polkadot_genesis( ( acc.clone(), // account id acc, // validator id - collectives_polkadot_session_keys(aura), // session keys + collectives_westend_session_keys(aura), // session keys ) }) .collect::>(), diff --git a/cumulus/polkadot-parachain/src/chain_spec/contracts.rs b/cumulus/polkadot-parachain/src/chain_spec/contracts.rs index 7ca66354fbfde220b550d033dda39d2be6fa4836..87ac1ed2fa18970fbe54cc6bd70dcf0305e98620 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/contracts.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/contracts.rs @@ -15,7 +15,8 @@ // along with Cumulus. If not, see . use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use cumulus_primitives_core::ParaId; use hex_literal::hex; @@ -23,8 +24,6 @@ use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -pub type ContractsRococoChainSpec = sc_service::GenericChainSpec<(), Extensions>; - /// No relay chain suffix because the id is the same over all relay chains. const CONTRACTS_PARACHAIN_ID: u32 = 1002; @@ -32,12 +31,12 @@ const CONTRACTS_PARACHAIN_ID: u32 = 1002; const CONTRACTS_ROCOCO_ED: contracts_rococo_runtime::Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; -pub fn contracts_rococo_development_config() -> ContractsRococoChainSpec { +pub fn contracts_rococo_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::builder( + GenericChainSpec::builder( contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! @@ -79,12 +78,12 @@ pub fn contracts_rococo_development_config() -> ContractsRococoChainSpec { .build() } -pub fn contracts_rococo_local_config() -> ContractsRococoChainSpec { +pub fn contracts_rococo_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::builder( + GenericChainSpec::builder( contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! 
@@ -126,13 +125,13 @@ pub fn contracts_rococo_local_config() -> ContractsRococoChainSpec { .build() } -pub fn contracts_rococo_config() -> ContractsRococoChainSpec { +pub fn contracts_rococo_config() -> GenericChainSpec { // Give your base currency a unit name and decimal places let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::builder( + GenericChainSpec::builder( contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo".into(), para_id: CONTRACTS_PARACHAIN_ID } ) diff --git a/cumulus/polkadot-parachain/src/chain_spec/coretime.rs b/cumulus/polkadot-parachain/src/chain_spec/coretime.rs new file mode 100644 index 0000000000000000000000000000000000000000..958336c03b5694876d48a75b2bd961200667e25f --- /dev/null +++ b/cumulus/polkadot-parachain/src/chain_spec/coretime.rs @@ -0,0 +1,282 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use crate::chain_spec::GenericChainSpec; +use cumulus_primitives_core::ParaId; +use sc_chain_spec::{ChainSpec, ChainType}; +use std::{borrow::Cow, str::FromStr}; + +/// Collects all supported Coretime configurations. 
+#[derive(Debug, PartialEq, Clone)]
+pub enum CoretimeRuntimeType {
+	// Live
+	Rococo,
+	// Local
+	RococoLocal,
+	// Benchmarks
+	RococoDevelopment,
+
+	// Local
+	WestendLocal,
+	// Benchmarks
+	WestendDevelopment,
+}
+
+impl FromStr for CoretimeRuntimeType {
+	type Err = String;
+
+	fn from_str(value: &str) -> Result<Self, Self::Err> {
+		match value {
+			rococo::CORETIME_ROCOCO => Ok(CoretimeRuntimeType::Rococo),
+			rococo::CORETIME_ROCOCO_LOCAL => Ok(CoretimeRuntimeType::RococoLocal),
+			rococo::CORETIME_ROCOCO_DEVELOPMENT => Ok(CoretimeRuntimeType::RococoDevelopment),
+			westend::CORETIME_WESTEND_LOCAL => Ok(CoretimeRuntimeType::WestendLocal),
+			westend::CORETIME_WESTEND_DEVELOPMENT => Ok(CoretimeRuntimeType::WestendDevelopment),
+			_ => Err(format!("Value '{}' is not configured yet", value)),
+		}
+	}
+}
+
+impl From<CoretimeRuntimeType> for &str {
+	fn from(runtime_type: CoretimeRuntimeType) -> Self {
+		match runtime_type {
+			CoretimeRuntimeType::Rococo => rococo::CORETIME_ROCOCO,
+			CoretimeRuntimeType::RococoLocal => rococo::CORETIME_ROCOCO_LOCAL,
+			CoretimeRuntimeType::RococoDevelopment => rococo::CORETIME_ROCOCO_DEVELOPMENT,
+			CoretimeRuntimeType::WestendLocal => westend::CORETIME_WESTEND_LOCAL,
+			CoretimeRuntimeType::WestendDevelopment => westend::CORETIME_WESTEND_DEVELOPMENT,
+		}
+	}
+}
+
+impl From<CoretimeRuntimeType> for ChainType {
+	fn from(runtime_type: CoretimeRuntimeType) -> Self {
+		match runtime_type {
+			CoretimeRuntimeType::Rococo => ChainType::Live,
+			CoretimeRuntimeType::RococoLocal | CoretimeRuntimeType::WestendLocal =>
+				ChainType::Local,
+			CoretimeRuntimeType::RococoDevelopment | CoretimeRuntimeType::WestendDevelopment =>
+				ChainType::Development,
+		}
+	}
+}
+
+pub const CORETIME_PARA_ID: ParaId = ParaId::new(1005);
+
+impl CoretimeRuntimeType {
+	pub const ID_PREFIX: &'static str = "coretime";
+
+	pub fn load_config(&self) -> Result<Box<dyn ChainSpec>, String> {
+		match self {
+			CoretimeRuntimeType::Rococo => Ok(Box::new(GenericChainSpec::from_json_bytes(
+				&include_bytes!("../../../parachains/chain-specs/coretime-rococo.json")[..],
+			)?)),
+			CoretimeRuntimeType::RococoLocal =>
+				Ok(Box::new(rococo::local_config(self, "rococo-local"))),
+			CoretimeRuntimeType::RococoDevelopment =>
+				Ok(Box::new(rococo::local_config(self, "rococo-dev"))),
+			CoretimeRuntimeType::WestendLocal =>
+				Ok(Box::new(westend::local_config(self, "westend-local"))),
+			CoretimeRuntimeType::WestendDevelopment =>
+				Ok(Box::new(westend::local_config(self, "westend-dev"))),
+		}
+	}
+}
+
+/// Generate the name directly from the ChainType
+pub fn chain_type_name(chain_type: &ChainType) -> Cow<str> {
+	match chain_type {
+		ChainType::Development => "Development",
+		ChainType::Local => "Local",
+		ChainType::Live => "Live",
+		ChainType::Custom(name) => name,
+	}
+	.into()
+}
+
+/// Sub-module for Rococo setup.
+pub mod rococo {
+	use super::{chain_type_name, CoretimeRuntimeType, GenericChainSpec, ParaId};
+	use crate::chain_spec::{
+		get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION,
+	};
+	use parachains_common::{AccountId, AuraId, Balance};
+	use sp_core::sr25519;
+
+	pub(crate) const CORETIME_ROCOCO: &str = "coretime-rococo";
+	pub(crate) const CORETIME_ROCOCO_LOCAL: &str = "coretime-rococo-local";
+	pub(crate) const CORETIME_ROCOCO_DEVELOPMENT: &str = "coretime-rococo-dev";
+	const CORETIME_ROCOCO_ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT;
+
+	pub fn local_config(runtime_type: &CoretimeRuntimeType, relay_chain: &str) -> GenericChainSpec {
+		// Rococo defaults
+		let mut properties = sc_chain_spec::Properties::new();
+		properties.insert("ss58Format".into(), 42.into());
+		properties.insert("tokenSymbol".into(), "ROC".into());
+		properties.insert("tokenDecimals".into(), 12.into());
+
+		let chain_type = runtime_type.clone().into();
+		let chain_name = format!("Coretime Rococo {}", chain_type_name(&chain_type));
+		let para_id = super::CORETIME_PARA_ID;
+
+		GenericChainSpec::builder(
+			coretime_rococo_runtime::WASM_BINARY
+				.expect("WASM binary was not built, please build it!"),
+			Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() },
+		)
+		.with_name(&chain_name)
+		.with_id(runtime_type.clone().into())
+		.with_chain_type(chain_type)
+		.with_genesis_config_patch(genesis(
+			// initial collators.
+			vec![(
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_collator_keys_from_seed::<AuraId>("Alice"),
+			)],
+			vec![
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_account_id_from_seed::<sr25519::Public>("Bob"),
+				get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
+				get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
+			],
+			para_id,
+		))
+		.with_properties(properties)
+		.build()
+	}
+
+	fn genesis(
+		invulnerables: Vec<(AccountId, AuraId)>,
+		endowed_accounts: Vec<AccountId>,
+		id: ParaId,
+	) -> serde_json::Value {
+		serde_json::json!({
+			"balances": {
+				"balances": endowed_accounts.iter().cloned().map(|k| (k, CORETIME_ROCOCO_ED * 4096)).collect::<Vec<_>>(),
+			},
+			"parachainInfo": {
+				"parachainId": id,
+			},
+			"collatorSelection": {
+				"invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::<Vec<_>>(),
+				"candidacyBond": CORETIME_ROCOCO_ED * 16,
+			},
+			"session": {
+				"keys": invulnerables
+					.into_iter()
+					.map(|(acc, aura)| {
+						(
+							acc.clone(),                                   // account id
+							acc,                                           // validator id
+							coretime_rococo_runtime::SessionKeys { aura }, // session keys
+						)
+					})
+					.collect::<Vec<_>>(),
+			},
+			"polkadotXcm": {
+				"safeXcmVersion": Some(SAFE_XCM_VERSION),
+			},
+			"sudo": {
+				"key": Some(get_account_id_from_seed::<sr25519::Public>("Alice")),
+			},
+		})
+	}
+}
+
+/// Sub-module for Westend setup.
+pub mod westend {
+	use super::{chain_type_name, CoretimeRuntimeType, GenericChainSpec, ParaId};
+	use crate::chain_spec::{
+		get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION,
+	};
+	use parachains_common::{AccountId, AuraId, Balance};
+	use sp_core::sr25519;
+
+	pub(crate) const CORETIME_WESTEND_LOCAL: &str = "coretime-westend-local";
+	pub(crate) const CORETIME_WESTEND_DEVELOPMENT: &str = "coretime-westend-dev";
+	const CORETIME_WESTEND_ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT;
+
+	pub fn local_config(runtime_type: &CoretimeRuntimeType, relay_chain: &str) -> GenericChainSpec {
+		// westend defaults
+		let mut properties = sc_chain_spec::Properties::new();
+		properties.insert("ss58Format".into(), 42.into());
+		properties.insert("tokenSymbol".into(), "WND".into());
+		properties.insert("tokenDecimals".into(), 12.into());
+
+		let chain_type = runtime_type.clone().into();
+		let chain_name = format!("Coretime Westend {}", chain_type_name(&chain_type));
+		let para_id = super::CORETIME_PARA_ID;
+
+		GenericChainSpec::builder(
+			coretime_westend_runtime::WASM_BINARY
+				.expect("WASM binary was not built, please build it!"),
+			Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() },
+		)
+		.with_name(&chain_name)
+		.with_id(runtime_type.clone().into())
+		.with_chain_type(chain_type)
+		.with_genesis_config_patch(genesis(
+			// initial collators.
+			vec![(
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_collator_keys_from_seed::<AuraId>("Alice"),
+			)],
+			vec![
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_account_id_from_seed::<sr25519::Public>("Bob"),
+				get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
+				get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
+			],
+			para_id,
+		))
+		.with_properties(properties)
+		.build()
+	}
+
+	fn genesis(
+		invulnerables: Vec<(AccountId, AuraId)>,
+		endowed_accounts: Vec<AccountId>,
+		id: ParaId,
+	) -> serde_json::Value {
+		serde_json::json!({
+			"balances": {
+				"balances": endowed_accounts.iter().cloned().map(|k| (k, CORETIME_WESTEND_ED * 4096)).collect::<Vec<_>>(),
+			},
+			"parachainInfo": {
+				"parachainId": id,
+			},
+			"collatorSelection": {
+				"invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::<Vec<_>>(),
+				"candidacyBond": CORETIME_WESTEND_ED * 16,
+			},
+			"session": {
+				"keys": invulnerables
+					.into_iter()
+					.map(|(acc, aura)| {
+						(
+							acc.clone(),                                    // account id
+							acc,                                            // validator id
+							coretime_westend_runtime::SessionKeys { aura }, // session keys
+						)
+					})
+					.collect::<Vec<_>>(),
+			},
+			"polkadotXcm": {
+				"safeXcmVersion": Some(SAFE_XCM_VERSION),
+			}
+		})
+	}
+}
diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs
index 1a0a06404c58213316bbd2987b56d80a7349bd57..77a4123b13ee11b0f158332c26a1695cfe37a668 100644
--- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs
+++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
 
-use crate::chain_spec::{get_account_id_from_seed, Extensions};
+use crate::chain_spec::{get_account_id_from_seed, Extensions, GenericChainSpec};
 use cumulus_primitives_core::ParaId;
 use parachains_common::AuraId;
 use sc_service::ChainType;
@@ -22,16 +22,25 @@ use sp_core::sr25519;
 
 use super::get_collator_keys_from_seed;
 
-/// Specialized `ChainSpec` for the Glutton parachain runtime.
-pub type GluttonChainSpec = sc_service::GenericChainSpec<(), Extensions>;
+fn glutton_genesis(parachain_id: ParaId, collators: Vec<AuraId>) -> serde_json::Value {
+	serde_json::json!( {
+		"parachainInfo": {
+			"parachainId": parachain_id
+		},
+		"sudo": {
+			"key": Some(get_account_id_from_seed::<sr25519::Public>("Alice")),
+		},
+		"aura": { "authorities": collators },
+	})
+}
 
-pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec {
-	GluttonChainSpec::builder(
-		glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
-		Extensions { relay_chain: "kusama-dev".into(), para_id: para_id.into() },
+pub fn glutton_westend_development_config(para_id: ParaId) -> GenericChainSpec {
+	GenericChainSpec::builder(
+		glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
+		Extensions { relay_chain: "westend-dev".into(), para_id: para_id.into() },
 	)
 	.with_name("Glutton Development")
-	.with_id("glutton_dev")
+	.with_id("glutton_westend_dev")
 	.with_chain_type(ChainType::Local)
 	.with_genesis_config_patch(glutton_genesis(
 		para_id,
@@ -40,13 +49,13 @@ pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec {
 	.build()
 }
 
-pub fn glutton_local_config(para_id: ParaId) -> GluttonChainSpec {
-	GluttonChainSpec::builder(
-		glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
-		Extensions { relay_chain: "kusama-local".into(), para_id: para_id.into() },
+pub fn glutton_westend_local_config(para_id: ParaId) -> GenericChainSpec {
+	GenericChainSpec::builder(
+		glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
+		Extensions { relay_chain: "westend-local".into(), para_id: para_id.into() },
 	)
 	.with_name("Glutton Local")
-	.with_id("glutton_local")
+	.with_id("glutton_westend_local")
 	.with_chain_type(ChainType::Local)
 	.with_genesis_config_patch(glutton_genesis(
 		para_id,
@@ -58,30 +67,30 @@ pub fn glutton_local_config(para_id: ParaId) -> GluttonChainSpec {
 	.build()
 }
 
-pub fn glutton_config(para_id: ParaId) -> GluttonChainSpec {
+pub fn glutton_westend_config(para_id: ParaId) -> GenericChainSpec {
 	let mut properties = sc_chain_spec::Properties::new();
-	properties.insert("ss58Format".into(), 2.into());
+	properties.insert("ss58Format".into(), 42.into());
 
-	GluttonChainSpec::builder(
-		glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
-		Extensions { relay_chain: "kusama".into(), para_id: para_id.into() },
+	GenericChainSpec::builder(
+		glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
+		Extensions { relay_chain: "westend".into(), para_id: para_id.into() },
 	)
 	.with_name(format!("Glutton {}", para_id).as_str())
-	.with_id(format!("glutton-kusama-{}", para_id).as_str())
+	.with_id(format!("glutton-westend-{}", para_id).as_str())
 	.with_chain_type(ChainType::Live)
-	.with_genesis_config_patch(glutton_genesis(
+	.with_genesis_config_patch(glutton_westend_genesis(
 		para_id,
 		vec![
 			get_collator_keys_from_seed::<AuraId>("Alice"),
 			get_collator_keys_from_seed::<AuraId>("Bob"),
 		],
 	))
-	.with_protocol_id(format!("glutton-kusama-{}", para_id).as_str())
+	.with_protocol_id(format!("glutton-westend-{}", para_id).as_str())
 	.with_properties(properties)
 	.build()
 }
 
-fn glutton_genesis(parachain_id: ParaId, collators: Vec<AuraId>) -> serde_json::Value {
+fn glutton_westend_genesis(parachain_id: ParaId, collators: Vec<AuraId>) -> serde_json::Value {
 	serde_json::json!( {
 		"parachainInfo": {
 			"parachainId": parachain_id
diff --git
a/cumulus/polkadot-parachain/src/chain_spec/mod.rs b/cumulus/polkadot-parachain/src/chain_spec/mod.rs index 9cd0a37ad633ba069cdf8beae71d3324af4485c5..6c0670d24b83ceeda75635567f01e5ad0905f8c5 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/mod.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/mod.rs @@ -24,6 +24,7 @@ pub mod asset_hubs; pub mod bridge_hubs; pub mod collectives; pub mod contracts; +pub mod coretime; pub mod glutton; pub mod penpal; pub mod rococo_parachain; @@ -50,6 +51,9 @@ impl Extensions { } } +/// Generic chain spec for all polkadot-parachain runtimes +pub type GenericChainSpec = sc_service::GenericChainSpec<(), Extensions>; + /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) diff --git a/cumulus/polkadot-parachain/src/chain_spec/penpal.rs b/cumulus/polkadot-parachain/src/chain_spec/penpal.rs index 2e35ee231dfff086452162c68212fe76483705bb..cb1cb632d63843681a121261be4b83bf5fb88e32 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/penpal.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/penpal.rs @@ -15,23 +15,22 @@ // along with Cumulus. If not, see . use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use cumulus_primitives_core::ParaId; use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; use sp_core::sr25519; -/// Specialized `ChainSpec` for the normal parachain runtime. -pub type PenpalChainSpec = sc_service::GenericChainSpec<(), Extensions>; -pub fn get_penpal_chain_spec(id: ParaId, relay_chain: &str) -> PenpalChainSpec { +pub fn get_penpal_chain_spec(id: ParaId, relay_chain: &str) -> GenericChainSpec { // Give your base currency a unit name and decimal places let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "UNIT".into()); properties.insert("tokenDecimals".into(), 12u32.into()); properties.insert("ss58Format".into(), 42u32.into()); - PenpalChainSpec::builder( + GenericChainSpec::builder( penpal_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.into(), // You MUST set this to the correct network! diff --git a/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs b/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs index c2ba443145683ac8002f0f60c4e205c66ddb21c0..0434e5f7be8fb3ffb5f93cbb639f5c90469cf763 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs @@ -16,7 +16,7 @@ //! 
ChainSpecs dedicated to Rococo parachain setups (for testing and example purposes) -use crate::chain_spec::{get_from_seed, Extensions, SAFE_XCM_VERSION}; +use crate::chain_spec::{get_from_seed, Extensions, GenericChainSpec, SAFE_XCM_VERSION}; use cumulus_primitives_core::ParaId; use hex_literal::hex; use parachains_common::AccountId; @@ -25,10 +25,8 @@ use rococo_parachain_runtime::AuraId; use sc_chain_spec::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -pub type RococoParachainChainSpec = sc_service::GenericChainSpec<(), Extensions>; - -pub fn rococo_parachain_local_config() -> RococoParachainChainSpec { - RococoParachainChainSpec::builder( +pub fn rococo_parachain_local_config() -> GenericChainSpec { + GenericChainSpec::builder( rococo_parachain_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), para_id: 1000 }, ) @@ -57,9 +55,9 @@ pub fn rococo_parachain_local_config() -> RococoParachainChainSpec { .build() } -pub fn staging_rococo_parachain_local_config() -> RococoParachainChainSpec { +pub fn staging_rococo_parachain_local_config() -> GenericChainSpec { #[allow(deprecated)] - RococoParachainChainSpec::builder( + GenericChainSpec::builder( rococo_parachain_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), para_id: 1000 }, ) diff --git a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs index b034588e14c08b8e2b04025e3b4878acc773ea05..32d51622054575d103cb5c3684a216a9dbde6556 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::chain_spec::{get_account_id_from_seed, Extensions}; +use crate::chain_spec::{get_account_id_from_seed, Extensions, GenericChainSpec}; use cumulus_primitives_core::ParaId; use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; @@ -22,11 +22,8 @@ use sp_core::sr25519; use super::get_collator_keys_from_seed; -/// Specialized `ChainSpec` for the seedling parachain runtime. -pub type SeedlingChainSpec = sc_service::GenericChainSpec<(), Extensions>; - -pub fn get_seedling_chain_spec() -> SeedlingChainSpec { - SeedlingChainSpec::builder( +pub fn get_seedling_chain_spec() -> GenericChainSpec { + GenericChainSpec::builder( seedling_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 2000 }, ) diff --git a/cumulus/polkadot-parachain/src/chain_spec/shell.rs b/cumulus/polkadot-parachain/src/chain_spec/shell.rs index 02c65e809a6c9153fa4dccc6e9191dd31198061f..e0a9875fb96f28870a726073d3c8b8cb249628a9 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/shell.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/shell.rs @@ -14,18 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::chain_spec::Extensions; +use crate::chain_spec::{Extensions, GenericChainSpec}; use cumulus_primitives_core::ParaId; use parachains_common::AuraId; use sc_service::ChainType; use super::get_collator_keys_from_seed; -/// Specialized `ChainSpec` for the shell parachain runtime. 
-pub type ShellChainSpec = sc_service::GenericChainSpec<(), Extensions>;
-
-pub fn get_shell_chain_spec() -> ShellChainSpec {
-	ShellChainSpec::builder(
+pub fn get_shell_chain_spec() -> GenericChainSpec {
+	GenericChainSpec::builder(
 		shell_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
 		Extensions { relay_chain: "westend".into(), para_id: 1000 },
 	)
diff --git a/cumulus/polkadot-parachain/src/cli.rs b/cumulus/polkadot-parachain/src/cli.rs
index 63e4baf27aeb24bbf0d0647c5b2fda5ff34f0fa8..fec6e144e40f16a0f82ad06a0cb4d870d27948cb 100644
--- a/cumulus/polkadot-parachain/src/cli.rs
+++ b/cumulus/polkadot-parachain/src/cli.rs
@@ -45,7 +45,8 @@ pub enum Subcommand {
 	PurgeChain(cumulus_client_cli::PurgeChainCmd),
 
 	/// Export the genesis state of the parachain.
-	ExportGenesisState(cumulus_client_cli::ExportGenesisStateCommand),
+	#[command(alias = "export-genesis-state")]
+	ExportGenesisHead(cumulus_client_cli::ExportGenesisHeadCommand),
 
 	/// Export the genesis wasm of the parachain.
 	ExportGenesisWasm(cumulus_client_cli::ExportGenesisWasmCommand),
diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs
index 3f93c90558a3e978ceaa005111b4b8be2ee5f9a1..ea56e277112b839f4a68c10860d95c1ac804fe6c 100644
--- a/cumulus/polkadot-parachain/src/command.rs
+++ b/cumulus/polkadot-parachain/src/command.rs
@@ -16,7 +16,11 @@
 
 use crate::{
 	chain_spec,
+	chain_spec::GenericChainSpec,
 	cli::{Cli, RelayChainCli, Subcommand},
+	fake_runtime_api::{
+		asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, aura::RuntimeApi,
+	},
 	service::{new_partial, Block},
 };
 use cumulus_primitives_core::ParaId;
@@ -43,14 +47,15 @@ enum Runtime {
 	AssetHubPolkadot,
 	AssetHubKusama,
 	AssetHubRococo,
-	AssetHubWococo,
 	AssetHubWestend,
 	Penpal(ParaId),
 	ContractsRococo,
 	CollectivesPolkadot,
 	CollectivesWestend,
 	Glutton,
+	GluttonWestend,
 	BridgeHub(chain_spec::bridge_hubs::BridgeHubRuntimeType),
+	Coretime(chain_spec::coretime::CoretimeRuntimeType),
 }
 
 trait RuntimeResolver {
@@ -94,8 +99,6 @@ fn runtime(id: &str) -> Runtime {
 		Runtime::AssetHubKusama
 	} else if id.starts_with("asset-hub-rococo") {
 		Runtime::AssetHubRococo
-	} else if id.starts_with("asset-hub-wococo") {
-		Runtime::AssetHubWococo
 	} else if id.starts_with("asset-hub-westend") | id.starts_with("westmint") {
 		Runtime::AssetHubWestend
 	} else if id.starts_with("penpal") {
@@ -111,6 +114,12 @@
 			id.parse::<chain_spec::bridge_hubs::BridgeHubRuntimeType>()
 				.expect("Invalid value"),
 		)
+	} else if id.starts_with(chain_spec::coretime::CoretimeRuntimeType::ID_PREFIX) {
+		Runtime::Coretime(
+			id.parse::<chain_spec::coretime::CoretimeRuntimeType>().expect("Invalid value"),
+		)
+	} else if id.starts_with("glutton-westend") {
+		Runtime::GluttonWestend
 	} else if id.starts_with("glutton") {
 		Runtime::Glutton
 	} else {
@@ -125,50 +134,29 @@ fn load_spec(id: &str) -> std::result::Result<Box<dyn ChainSpec>, String> {
 		// - Defaul-like
 		"staging" =>
 			Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()),
-		"tick" =>
-			Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes(
-				&include_bytes!("../chain-specs/tick.json")[..],
-			)?),
-		"trick" =>
-			Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes(
-				&include_bytes!("../chain-specs/trick.json")[..],
-			)?),
-		"track" =>
-			Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes(
-				&include_bytes!("../chain-specs/track.json")[..],
-			)?),
+		"tick" => Box::new(GenericChainSpec::from_json_bytes(
+			&include_bytes!("../chain-specs/tick.json")[..],
+		)?),
+		"trick"
=> Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/trick.json")[..], + )?), + "track" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/track.json")[..], + )?), // -- Starters "shell" => Box::new(chain_spec::shell::get_shell_chain_spec()), "seedling" => Box::new(chain_spec::seedling::get_seedling_chain_spec()), // -- Asset Hub Polkadot - "asset-hub-polkadot-dev" | "statemint-dev" => - Box::new(chain_spec::asset_hubs::asset_hub_polkadot_development_config()), - "asset-hub-polkadot-local" | "statemint-local" => - Box::new(chain_spec::asset_hubs::asset_hub_polkadot_local_config()), - // the chain spec as used for generating the upgrade genesis values - "asset-hub-polkadot-genesis" | "statemint-genesis" => - Box::new(chain_spec::asset_hubs::asset_hub_polkadot_config()), - // the shell-based chain spec as used for syncing - "asset-hub-polkadot" | "statemint" => - Box::new(chain_spec::asset_hubs::AssetHubPolkadotChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-polkadot.json")[..], - )?), + "asset-hub-polkadot" | "statemint" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-polkadot.json")[..], + )?), // -- Asset Hub Kusama - "asset-hub-kusama-dev" | "statemine-dev" => - Box::new(chain_spec::asset_hubs::asset_hub_kusama_development_config()), - "asset-hub-kusama-local" | "statemine-local" => - Box::new(chain_spec::asset_hubs::asset_hub_kusama_local_config()), - // the chain spec as used for generating the upgrade genesis values - "asset-hub-kusama-genesis" | "statemine-genesis" => - Box::new(chain_spec::asset_hubs::asset_hub_kusama_config()), - // the shell-based chain spec as used for syncing - "asset-hub-kusama" | "statemine" => - Box::new(chain_spec::asset_hubs::AssetHubKusamaChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-kusama.json")[..], - )?), + "asset-hub-kusama" | "statemine" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-kusama.json")[..], + )?), // -- Asset Hub Rococo "asset-hub-rococo-dev" => @@ -178,23 +166,9 @@ fn load_spec(id: &str) -> std::result::Result, String> { // the chain spec as used for generating the upgrade genesis values "asset-hub-rococo-genesis" => Box::new(chain_spec::asset_hubs::asset_hub_rococo_genesis_config()), - "asset-hub-rococo" => - Box::new(chain_spec::asset_hubs::AssetHubRococoChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-rococo.json")[..], - )?), - - // -- Asset Hub Wococo - "asset-hub-wococo-dev" => - Box::new(chain_spec::asset_hubs::asset_hub_wococo_development_config()), - "asset-hub-wococo-local" => - Box::new(chain_spec::asset_hubs::asset_hub_wococo_local_config()), - // the chain spec as used for generating the upgrade genesis values - "asset-hub-wococo-genesis" => - Box::new(chain_spec::asset_hubs::asset_hub_wococo_genesis_config()), - "asset-hub-wococo" => - Box::new(chain_spec::asset_hubs::AssetHubWococoChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-wococo.json")[..], - )?), + "asset-hub-rococo" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-rococo.json")[..], + )?), // -- Asset Hub Westend "asset-hub-westend-dev" | "westmint-dev" => @@ -205,24 +179,23 @@ fn load_spec(id: &str) -> std::result::Result, String> { "asset-hub-westend-genesis" | "westmint-genesis" => Box::new(chain_spec::asset_hubs::asset_hub_westend_config()), // the shell-based chain spec as used for 
syncing - "asset-hub-westend" | "westmint" => - Box::new(chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-westend.json")[..], - )?), + "asset-hub-westend" | "westmint" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-westend.json")[..], + )?), // -- Polkadot Collectives - "collectives-polkadot-dev" => - Box::new(chain_spec::collectives::collectives_polkadot_development_config()), - "collectives-polkadot-local" => - Box::new(chain_spec::collectives::collectives_polkadot_local_config()), - "collectives-polkadot" => - Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/collectives-polkadot.json")[..], - )?), - "collectives-westend" => - Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/collectives-westend.json")[..], - )?), + "collectives-polkadot" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/collectives-polkadot.json")[..], + )?), + + // -- Westend Collectives + "collectives-westend-dev" => + Box::new(chain_spec::collectives::collectives_westend_development_config()), + "collectives-westend-local" => + Box::new(chain_spec::collectives::collectives_westend_local_config()), + "collectives-westend" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/collectives-westend.json")[..], + )?), // -- Contracts on Rococo "contracts-rococo-dev" => @@ -230,10 +203,9 @@ fn load_spec(id: &str) -> std::result::Result, String> { "contracts-rococo-local" => Box::new(chain_spec::contracts::contracts_rococo_local_config()), "contracts-rococo-genesis" => Box::new(chain_spec::contracts::contracts_rococo_config()), - "contracts-rococo" => - Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/contracts-rococo.json")[..], - )?), + "contracts-rococo" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/contracts-rococo.json")[..], + )?), // -- BridgeHub bridge_like_id @@ -244,25 +216,34 @@ fn load_spec(id: &str) -> std::result::Result, String> { .expect("invalid value") .load_config()?, - // -- Penpall - "penpal-kusama" => Box::new(chain_spec::penpal::get_penpal_chain_spec( + // -- Coretime + coretime_like_id + if coretime_like_id + .starts_with(chain_spec::coretime::CoretimeRuntimeType::ID_PREFIX) => + coretime_like_id + .parse::() + .expect("invalid value") + .load_config()?, + + // -- Penpal + "penpal-rococo" => Box::new(chain_spec::penpal::get_penpal_chain_spec( para_id.expect("Must specify parachain id"), - "kusama-local", + "rococo-local", )), - "penpal-polkadot" => Box::new(chain_spec::penpal::get_penpal_chain_spec( + "penpal-westend" => Box::new(chain_spec::penpal::get_penpal_chain_spec( para_id.expect("Must specify parachain id"), - "polkadot-local", + "westend-local", )), - // -- Glutton - "glutton-kusama-dev" => Box::new(chain_spec::glutton::glutton_development_config( + // -- Glutton Westend + "glutton-westend-dev" => Box::new(chain_spec::glutton::glutton_westend_development_config( para_id.expect("Must specify parachain id"), )), - "glutton-kusama-local" => Box::new(chain_spec::glutton::glutton_local_config( + "glutton-westend-local" => Box::new(chain_spec::glutton::glutton_westend_local_config( para_id.expect("Must specify parachain id"), )), // the chain spec as used for generating the upgrade genesis values - 
"glutton-kusama-genesis" => Box::new(chain_spec::glutton::glutton_config( + "glutton-westend-genesis" => Box::new(chain_spec::glutton::glutton_westend_config( para_id.expect("Must specify parachain id"), )), @@ -273,41 +254,7 @@ fn load_spec(id: &str) -> std::result::Result, String> { }, // -- Loading a specific spec from disk - path => { - let path: PathBuf = path.into(); - match path.runtime() { - Runtime::AssetHubPolkadot => Box::new( - chain_spec::asset_hubs::AssetHubPolkadotChainSpec::from_json_file(path)?, - ), - Runtime::AssetHubKusama => - Box::new(chain_spec::asset_hubs::AssetHubKusamaChainSpec::from_json_file(path)?), - Runtime::AssetHubRococo => - Box::new(chain_spec::asset_hubs::AssetHubRococoChainSpec::from_json_file(path)?), - Runtime::AssetHubWococo => - Box::new(chain_spec::asset_hubs::AssetHubWococoChainSpec::from_json_file(path)?), - Runtime::AssetHubWestend => Box::new( - chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_file(path)?, - ), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => Box::new( - chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_file(path)?, - ), - Runtime::Shell => - Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?), - Runtime::Seedling => - Box::new(chain_spec::seedling::SeedlingChainSpec::from_json_file(path)?), - Runtime::ContractsRococo => - Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_file(path)?), - Runtime::BridgeHub(bridge_hub_runtime_type) => - bridge_hub_runtime_type.chain_spec_from_json_file(path)?, - Runtime::Penpal(_para_id) => - Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?), - Runtime::Glutton => - Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), - Runtime::Default => Box::new( - chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_file(path)?, - ), - } - }, + path => Box::new(GenericChainSpec::from_json_file(path.into())?), }) } @@ -315,6 +262,7 @@ fn load_spec(id: &str) -> std::result::Result, String> { /// (H/T to Phala for the idea) /// E.g. 
"penpal-kusama-2004" yields ("penpal-kusama", Some(2004)) fn extract_parachain_id(id: &str) -> (&str, &str, Option) { + const ROCOCO_TEST_PARA_PREFIX: &str = "penpal-rococo-"; const KUSAMA_TEST_PARA_PREFIX: &str = "penpal-kusama-"; const POLKADOT_TEST_PARA_PREFIX: &str = "penpal-polkadot-"; @@ -322,7 +270,14 @@ fn extract_parachain_id(id: &str) -> (&str, &str, Option) { const GLUTTON_PARA_LOCAL_PREFIX: &str = "glutton-kusama-local-"; const GLUTTON_PARA_GENESIS_PREFIX: &str = "glutton-kusama-genesis-"; - let (norm_id, orig_id, para) = if let Some(suffix) = id.strip_prefix(KUSAMA_TEST_PARA_PREFIX) { + const GLUTTON_WESTEND_PARA_DEV_PREFIX: &str = "glutton-westend-dev-"; + const GLUTTON_WESTEND_PARA_LOCAL_PREFIX: &str = "glutton-westend-local-"; + const GLUTTON_WESTEND_PARA_GENESIS_PREFIX: &str = "glutton-westend-genesis-"; + + let (norm_id, orig_id, para) = if let Some(suffix) = id.strip_prefix(ROCOCO_TEST_PARA_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..ROCOCO_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(KUSAMA_TEST_PARA_PREFIX) { let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); (&id[..KUSAMA_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) } else if let Some(suffix) = id.strip_prefix(POLKADOT_TEST_PARA_PREFIX) { @@ -337,6 +292,15 @@ fn extract_parachain_id(id: &str) -> (&str, &str, Option) { } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_GENESIS_PREFIX) { let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); (&id[..GLUTTON_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_DEV_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_DEV_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_LOCAL_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_LOCAL_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_GENESIS_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) } else { (id, id, None) }; @@ -420,122 +384,47 @@ impl SubstrateCli for RelayChainCli { macro_rules! 
construct_partials { ($config:expr, |$partials:ident| $code:expr) => { match $config.chain_spec.runtime() { - Runtime::AssetHubKusama => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - Runtime::AssetHubRococo | Runtime::AssetHubWococo => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - Runtime::AssetHubWestend => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, Runtime::AssetHubPolkadot => { - let $partials = new_partial::( + let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AssetHubPolkadotAuraId>, )?; $code }, - Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaDevelopment => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { - let $partials = new_partial::( + Runtime::AssetHubKusama | + Runtime::AssetHubRococo | + Runtime::AssetHubWestend | + Runtime::BridgeHub(_) | + Runtime::CollectivesPolkadot | + Runtime::CollectivesWestend | + Runtime::Coretime(_) => { + let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, )?; $code }, - Runtime::Shell => { - let $partials = new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - $code - }, - Runtime::Seedling => { - let $partials = new_partial::( + Runtime::GluttonWestend | Runtime::Glutton | Runtime::Shell | Runtime::Seedling => { + let $partials = new_partial::( &$config, crate::service::shell_build_import_queue, )?; $code }, Runtime::ContractsRococo => { - let $partials = new_partial::( + let $partials = new_partial::( &$config, crate::service::contracts_rococo_build_import_queue, )?; $code }, Runtime::Penpal(_) | Runtime::Default => { - let $partials = new_partial::( + let $partials = new_partial::( &$config, crate::service::rococo_parachain_build_import_queue, )?; $code }, - Runtime::Glutton => { - let $partials = 
new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - $code - }, } }; } @@ -544,39 +433,9 @@ macro_rules! construct_async_run { (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ let runner = $cli.create_runner($cmd)?; match runner.config().chain_spec.runtime() { - Runtime::AssetHubWestend => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::AssetHubRococo | Runtime::AssetHubWococo => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::AssetHubKusama => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, Runtime::AssetHubPolkadot => { runner.async_run(|$config| { - let $components = new_partial::( + let $components = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AssetHubPolkadotAuraId>, )?; @@ -584,9 +443,15 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + Runtime::AssetHubKusama | + Runtime::AssetHubRococo | + Runtime::AssetHubWestend | + Runtime::BridgeHub(_) | + Runtime::CollectivesPolkadot | + Runtime::CollectivesWestend | + Runtime::Coretime(_) => { runner.async_run(|$config| { - let $components = new_partial::( + let $components = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, )?; @@ -594,29 +459,22 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::Shell => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Seedling => { + Runtime::Shell | + Runtime::Seedling | + Runtime::GluttonWestend | + Runtime::Glutton => { runner.async_run(|$config| { - let $components = new_partial::( + let $components = new_partial::( &$config, crate::service::shell_build_import_queue, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) }) - }, + } Runtime::ContractsRococo => { runner.async_run(|$config| { - let $components = new_partial::( + let $components = new_partial::( &$config, crate::service::contracts_rococo_build_import_queue, )?; @@ -624,78 +482,10 @@ macro_rules! 
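The arm consolidation in `construct_partials` (and in `construct_async_run` below) works because every Aura-based chain in this binary now shares a single fake `RuntimeApi`; only Asset Hub Polkadot keeps its own arm for its distinct Aura key type. A minimal, self-contained sketch of that dispatch shape — all names below are illustrative stand-ins, not the real `polkadot-parachain` items:

```rust
// Illustrative stand-ins only; the real macro builds sc-service partial
// components instead of returning a String.
#[allow(dead_code)]
enum Runtime {
    AssetHubPolkadot,
    AssetHubKusama,
    BridgeHub,
    Coretime,
    Shell,
}

#[allow(dead_code)]
struct AssetHubPolkadotAuraId;
#[allow(dead_code)]
struct AuraId;

fn new_partial<AuraIdT>(label: &str) -> String {
    // The real `new_partial` wires up the client, backend and import queue.
    format!("partial components ({label})")
}

macro_rules! construct_partials {
    ($runtime:expr, |$partials:ident| $code:expr) => {
        match $runtime {
            // Asset Hub Polkadot keeps its distinct Aura key type ...
            Runtime::AssetHubPolkadot => {
                let $partials = new_partial::<AssetHubPolkadotAuraId>("asset-hub-polkadot");
                $code
            },
            // ... while all the other Aura chains now share one arm.
            Runtime::AssetHubKusama | Runtime::BridgeHub | Runtime::Coretime => {
                let $partials = new_partial::<AuraId>("generic aura");
                $code
            },
            Runtime::Shell => {
                let $partials = new_partial::<AuraId>("shell import queue");
                $code
            },
        }
    };
}

fn main() {
    construct_partials!(Runtime::BridgeHub, |partials| println!("{partials}"));
}
```

The real macro returns the service's partial components rather than a `String`, but the grouping of match arms is the same.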
construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::BridgeHub(bridge_hub_runtime_type) => { - match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaDevelopment => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } - } - }, Runtime::Penpal(_) | Runtime::Default => { runner.async_run(|$config| { let $components = new_partial::< - rococo_parachain_runtime::RuntimeApi, + RuntimeApi, _, >( &$config, @@ -705,16 +495,6 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::Glutton => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } } }} } @@ -770,10 +550,10 @@ pub fn run() -> Result<()> { cmd.run(config, polkadot_config) }) }, - Some(Subcommand::ExportGenesisState(cmd)) => { + Some(Subcommand::ExportGenesisHead(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| { - construct_partials!(config, |partials| cmd.run(&*config.chain_spec, &*partials.client)) + construct_partials!(config, |partials| cmd.run(partials.client)) }) }, Some(Subcommand::ExportGenesisWasm(cmd)) => { @@ -836,31 +616,31 @@ pub fn run() -> Result<()> { // that both file paths exist, the node will exit, as the user must decide (by // deleting one path) the information that they want to use as their DB. 
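The `ExportGenesisState` to `ExportGenesisHead` rename also changes the call shape: the command now takes only the client, since the genesis head is simply the header of block zero, which the client can produce without consulting the chain spec. A hedged, standalone sketch of that idea (the `Header`/`Client` types are simplified stand-ins, and the real command SCALE-encodes the header rather than printing it):

```rust
// Simplified stand-ins; the real `ExportGenesisHead` command works against a
// header backend and SCALE-encodes the genesis header.
#[derive(Debug)]
struct Header {
    number: u32,
    state_root: [u8; 4],
}

struct Client {
    genesis_header: Header,
}

impl Client {
    fn genesis_head(&self) -> &Header {
        &self.genesis_header
    }
}

fn export_genesis_head(client: &Client) {
    // Only the client is needed; no chain-spec argument any more.
    println!("genesis head: {:?}", client.genesis_head());
}

fn main() {
    let client = Client {
        genesis_header: Header { number: 0, state_root: [0xde, 0xad, 0xbe, 0xef] },
    };
    export_genesis_head(&client);
}
```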
let old_name = match config.chain_spec.id() { - "asset-hub-polkadot" => Some("statemint"), - "asset-hub-kusama" => Some("statemine"), - "asset-hub-westend" => Some("westmint"), - "asset-hub-rococo" => Some("rockmine"), - _ => None, + "asset-hub-polkadot" => Some("statemint"), + "asset-hub-kusama" => Some("statemine"), + "asset-hub-westend" => Some("westmint"), + "asset-hub-rococo" => Some("rockmine"), + _ => None, }; if let Some(old_name) = old_name { - let new_path = config.base_path.config_dir(config.chain_spec.id()); - let old_path = config.base_path.config_dir(old_name); + let new_path = config.base_path.config_dir(config.chain_spec.id()); + let old_path = config.base_path.config_dir(old_name); - if old_path.exists() && new_path.exists() { - return Err(format!( + if old_path.exists() && new_path.exists() { + return Err(format!( "Found legacy {} path {} and new asset-hub path {}. Delete one path such that only one exists.", old_name, old_path.display(), new_path.display() ).into()) - } + } - if old_path.exists() { - std::fs::rename(old_path.clone(), new_path.clone())?; + if old_path.exists() { + std::fs::rename(old_path.clone(), new_path.clone())?; info!( "Statemint renamed to Asset Hub. The filepath with associated data on disk has been renamed from {} to {}.", old_path.display(), new_path.display() ); - } + } } let hwbench = (!cli.no_hardware_benchmarks).then_some( @@ -894,43 +674,51 @@ pub fn run() -> Result<()> { match config.chain_spec.runtime() { Runtime::AssetHubPolkadot => crate::service::start_asset_hub_node::< - asset_hub_polkadot_runtime::RuntimeApi, + AssetHubPolkadotRuntimeApi, AssetHubPolkadotAuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Runtime::AssetHubKusama => crate::service::start_asset_hub_node::< - asset_hub_kusama_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), - Runtime::AssetHubRococo | Runtime::AssetHubWococo => crate::service::start_asset_hub_node::< - asset_hub_rococo_runtime::RuntimeApi, + Runtime::AssetHubRococo => crate::service::start_asset_hub_node::< + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Runtime::AssetHubWestend => crate::service::start_asset_hub_node::< - asset_hub_westend_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => + Runtime::CollectivesPolkadot => crate::service::start_generic_aura_node::< - collectives_polkadot_runtime::RuntimeApi, + RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), + Runtime::CollectivesWestend => + crate::service::start_generic_aura_node::< + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Runtime::Shell => - crate::service::start_shell_node::( + crate::service::start_shell_node::( config, polkadot_config, collator_options, @@ -941,7 +729,7 @@ pub fn run() -> Result<()> { .map(|r| r.0) .map_err(Into::into), Runtime::Seedling => - crate::service::start_shell_node::( + crate::service::start_shell_node::( config, polkadot_config, collator_options, @@ -961,21 +749,18 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), + 
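The legacy-path handling above boils down to: map the chain id to its pre-rename directory name, refuse to start if both the old and the new directory exist, and otherwise rename the old directory in place. A self-contained, std-only sketch of that logic (the temp-directory setup in `main` is purely illustrative):

```rust
// std-only sketch; `base` would be the node's configured base path.
use std::{fs, path::Path};

fn legacy_name(chain_id: &str) -> Option<&'static str> {
    match chain_id {
        "asset-hub-polkadot" => Some("statemint"),
        "asset-hub-kusama" => Some("statemine"),
        "asset-hub-westend" => Some("westmint"),
        "asset-hub-rococo" => Some("rockmine"),
        _ => None,
    }
}

fn migrate_base_path(base: &Path, chain_id: &str) -> Result<(), String> {
    let Some(old_name) = legacy_name(chain_id) else { return Ok(()) };

    let new_path = base.join(chain_id);
    let old_path = base.join(old_name);

    if old_path.exists() && new_path.exists() {
        return Err(format!(
            "Found legacy {} path {} and new asset-hub path {}. Delete one path such that only one exists.",
            old_name,
            old_path.display(),
            new_path.display()
        ));
    }

    if old_path.exists() {
        fs::rename(&old_path, &new_path).map_err(|e| e.to_string())?;
        println!("renamed {} to {}", old_path.display(), new_path.display());
    }

    Ok(())
}

fn main() -> Result<(), String> {
    // Purely illustrative setup under a temp directory.
    let base = std::env::temp_dir().join("legacy-db-rename-demo");
    let _ = fs::remove_dir_all(&base);
    fs::create_dir_all(base.join("statemint")).map_err(|e| e.to_string())?;

    migrate_base_path(&base, "asset-hub-polkadot")
}
```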
Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => + chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot => crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::polkadot::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaDevelopment => + chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama => crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::kusama::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -984,7 +769,7 @@ pub fn run() -> Result<()> { chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::westend::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -993,21 +778,29 @@ pub fn run() -> Result<()> { chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::rococo::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => + } + .map_err(Into::into), + + Runtime::Coretime(coretime_runtime_type) => match coretime_runtime_type { + chain_spec::coretime::CoretimeRuntimeType::Rococo | + chain_spec::coretime::CoretimeRuntimeType::RococoLocal | + chain_spec::coretime::CoretimeRuntimeType::RococoDevelopment | + chain_spec::coretime::CoretimeRuntimeType::WestendLocal | + chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment => crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::wococo::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), } .map_err(Into::into), + Runtime::Penpal(_) | Runtime::Default => crate::service::start_rococo_parachain_node( config, @@ -1019,9 +812,17 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), + Runtime::GluttonWestend => + crate::service::start_basic_lookahead_node::< + RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), Runtime::Glutton => crate::service::start_basic_lookahead_node::< - glutton_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -1254,12 +1055,6 @@ mod tests { ); assert_eq!(Runtime::Default, path.runtime()); - let path = store_configuration( - &temp_dir, - Box::new(crate::chain_spec::asset_hubs::asset_hub_kusama_local_config()), - ); - assert_eq!(Runtime::AssetHubKusama, path.runtime()); - let path = store_configuration( &temp_dir, Box::new(crate::chain_spec::contracts::contracts_rococo_local_config()), diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs 
b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs new file mode 100644 index 0000000000000000000000000000000000000000..76dd7347ccbc35127747e144af7c8705a15022e4 --- /dev/null +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs @@ -0,0 +1,200 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! These are used to provide a type that implements these runtime APIs without requiring to import +//! the native runtimes. + +use frame_support::weights::Weight; +use parachains_common::{AccountId, AssetHubPolkadotAuraId, Balance, Nonce}; +use polkadot_primitives::Block; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; + +pub struct Runtime; + +sp_api::impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + + fn execute_block(_: Block) { + unimplemented!() + } + + fn initialize_block(_: &::Header) { + unimplemented!() + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + + fn metadata_at_version(_: u32) -> Option { + unimplemented!() + } + + fn metadata_versions() -> sp_std::vec::Vec { + unimplemented!() + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + unimplemented!() + } + + fn authorities() -> Vec { + unimplemented!() + } + } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + _: ::Hash, + _: cumulus_primitives_aura::Slot, + ) -> bool { + unimplemented!() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(_: ::Extrinsic) -> ApplyExtrinsicResult { + unimplemented!() + } + + fn finalize_block() -> ::Header { + unimplemented!() + } + + fn inherent_extrinsics(_: sp_inherents::InherentData) -> Vec<::Extrinsic> { + unimplemented!() + } + + fn check_inherents(_: Block, _: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { + unimplemented!() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + _: TransactionSource, + _: ::Extrinsic, + _: ::Hash, + ) -> TransactionValidity { + unimplemented!() + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(_: &::Header) { + unimplemented!() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(_: Option>) -> Vec { + unimplemented!() + } + + fn decode_session_keys( + _: Vec, + ) -> Option, KeyTypeId)>> { + unimplemented!() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + _: ::Extrinsic, + _: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + 
unimplemented!() + } + fn query_fee_details( + _: ::Extrinsic, + _: u32, + ) -> pallet_transaction_payment::FeeDetails { + unimplemented!() + } + fn query_weight_to_fee(_: Weight) -> Balance { + unimplemented!() + } + fn query_length_to_fee(_: u32) -> Balance { + unimplemented!() + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(_: &::Header) -> cumulus_primitives_core::CollationInfo { + unimplemented!() + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(_: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + unimplemented!() + } + + fn execute_block( + _: Block, + _: bool, + _: bool, + _: frame_try_runtime::TryStateSelect, + ) -> Weight { + unimplemented!() + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(_: AccountId) -> Nonce { + unimplemented!() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(_: bool) -> ( + Vec, + Vec, + ) { + unimplemented!() + } + + fn dispatch_benchmark( + _: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + unimplemented!() + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + unimplemented!() + } + + fn build_config(_: Vec) -> sp_genesis_builder::Result { + unimplemented!() + } + } +} diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs new file mode 100644 index 0000000000000000000000000000000000000000..0f01b85ebcf6fa63aabd9115c2ef553c18badea8 --- /dev/null +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs @@ -0,0 +1,200 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! These are used to provide a type that implements these runtime APIs without requiring to import +//! the native runtimes. + +use frame_support::weights::Weight; +use parachains_common::{AccountId, AuraId, Balance, Nonce}; +use polkadot_primitives::Block; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; + +pub struct Runtime; + +sp_api::impl_runtime_apis! 
{ + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + + fn execute_block(_: Block) { + unimplemented!() + } + + fn initialize_block(_: &::Header) { + unimplemented!() + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + + fn metadata_at_version(_: u32) -> Option { + unimplemented!() + } + + fn metadata_versions() -> sp_std::vec::Vec { + unimplemented!() + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + unimplemented!() + } + + fn authorities() -> Vec { + unimplemented!() + } + } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + _: ::Hash, + _: cumulus_primitives_aura::Slot, + ) -> bool { + unimplemented!() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(_: ::Extrinsic) -> ApplyExtrinsicResult { + unimplemented!() + } + + fn finalize_block() -> ::Header { + unimplemented!() + } + + fn inherent_extrinsics(_: sp_inherents::InherentData) -> Vec<::Extrinsic> { + unimplemented!() + } + + fn check_inherents(_: Block, _: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { + unimplemented!() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + _: TransactionSource, + _: ::Extrinsic, + _: ::Hash, + ) -> TransactionValidity { + unimplemented!() + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(_: &::Header) { + unimplemented!() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(_: Option>) -> Vec { + unimplemented!() + } + + fn decode_session_keys( + _: Vec, + ) -> Option, KeyTypeId)>> { + unimplemented!() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + _: ::Extrinsic, + _: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + unimplemented!() + } + fn query_fee_details( + _: ::Extrinsic, + _: u32, + ) -> pallet_transaction_payment::FeeDetails { + unimplemented!() + } + fn query_weight_to_fee(_: Weight) -> Balance { + unimplemented!() + } + fn query_length_to_fee(_: u32) -> Balance { + unimplemented!() + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(_: &::Header) -> cumulus_primitives_core::CollationInfo { + unimplemented!() + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(_: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + unimplemented!() + } + + fn execute_block( + _: Block, + _: bool, + _: bool, + _: frame_try_runtime::TryStateSelect, + ) -> Weight { + unimplemented!() + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(_: AccountId) -> Nonce { + unimplemented!() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(_: bool) -> ( + Vec, + Vec, + ) { + unimplemented!() + } + + fn dispatch_benchmark( + _: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + unimplemented!() + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + unimplemented!() + } + + fn build_config(_: Vec) -> sp_genesis_builder::Result { + unimplemented!() + } + } +} diff --git 
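Both fake runtime files follow the same idea: the node only needs a type that implements the runtime-API traits so that generic service code type-checks, while every actual call is dispatched into the wasm runtime, so the native bodies can all be `unimplemented!()`. A toy, self-contained sketch of that pattern (the trait and types below are stand-ins, not `sp-api` items):

```rust
// Stand-ins only; in the node this role is played by `sp_api::impl_runtime_apis!`
// generating a `Runtime` type whose native method bodies are `unimplemented!()`.
#[allow(dead_code)]
trait CoreApi {
    fn version() -> u32;
}

/// Fake runtime: exists purely so generic client code can be instantiated.
struct FakeRuntime;

impl CoreApi for FakeRuntime {
    fn version() -> u32 {
        // Never called natively; the node executes the wasm runtime instead.
        unimplemented!()
    }
}

/// Generic service code that only needs `R` at the type level.
struct Client<R> {
    wasm_reported_version: u32,
    _marker: std::marker::PhantomData<R>,
}

impl<R: CoreApi> Client<R> {
    fn new(wasm_reported_version: u32) -> Self {
        Self { wasm_reported_version, _marker: std::marker::PhantomData }
    }

    fn runtime_version(&self) -> u32 {
        // The real client would run `Core_version` inside the wasm blob,
        // so the native `R::version()` stub is never touched.
        self.wasm_reported_version
    }
}

fn main() {
    let client = Client::<FakeRuntime>::new(1_009_000);
    println!("runtime version (from wasm): {}", client.runtime_version());
}
```

Since the native stubs are never executed, one such type per Aura key flavour is enough to drive all the Aura-based runtimes in this binary.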
a/cumulus/polkadot-parachain/src/fake_runtime_api/mod.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..29e2736b06ff3a1092211224abd73092bd7ee46c --- /dev/null +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/mod.rs @@ -0,0 +1,21 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! In an ideal world this would be one runtime which would simplify the code massively. +//! This is not an ideal world - Polkadot Asset Hub has a different key type. + +pub mod asset_hub_polkadot_aura; +pub mod aura; diff --git a/cumulus/polkadot-parachain/src/main.rs b/cumulus/polkadot-parachain/src/main.rs index e40af8128f7166697a7e4f7571dd541a11d626a8..0757bea84aae83b64ec24982874c28f095057e75 100644 --- a/cumulus/polkadot-parachain/src/main.rs +++ b/cumulus/polkadot-parachain/src/main.rs @@ -20,11 +20,11 @@ #![warn(unused_extern_crates)] mod chain_spec; -#[macro_use] -mod service; mod cli; mod command; +mod fake_runtime_api; mod rpc; +mod service; fn main() -> sc_cli::Result<()> { command::run() diff --git a/cumulus/polkadot-parachain/src/rpc.rs b/cumulus/polkadot-parachain/src/rpc.rs index d106c52a364290dad54d3d278ce76dad8ef55c26..caee14e555220fc0e704b5edbe93a5ad76762eff 100644 --- a/cumulus/polkadot-parachain/src/rpc.rs +++ b/cumulus/polkadot-parachain/src/rpc.rs @@ -22,7 +22,7 @@ use std::sync::Arc; use parachains_common::{AccountId, Balance, Block, Nonce}; use sc_client_api::AuxStore; -pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; +pub use sc_rpc::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 3bcc9b7f60d33847a66e661dd5438a2df3ac04e5..dff5881108c64b65a4cdfa0f6d87b00798592566 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -40,8 +40,8 @@ use sp_core::Pair; use jsonrpsee::RpcModule; -use crate::rpc; -pub use parachains_common::{AccountId, Balance, Block, BlockNumber, Hash, Header, Nonce}; +use crate::{fake_runtime_api::aura::RuntimeApi, rpc}; +pub use parachains_common::{AccountId, Balance, Block, Hash, Header, Nonce}; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; use futures::{lock::Mutex, prelude::*}; @@ -96,36 +96,6 @@ impl sc_executor::NativeExecutionDispatch for ShellRuntimeExecutor { } } -/// Native Asset Hub Polkadot (Statemint) executor instance. 
-pub struct AssetHubPolkadotRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for AssetHubPolkadotRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - asset_hub_polkadot_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - asset_hub_polkadot_runtime::native_version() - } -} - -/// Native Asset Hub Kusama (Statemine) executor instance. -pub struct AssetHubKusamaExecutor; - -impl sc_executor::NativeExecutionDispatch for AssetHubKusamaExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - asset_hub_kusama_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - asset_hub_kusama_runtime::native_version() - } -} - /// Native Asset Hub Westend (Westmint) executor instance. pub struct AssetHubWestendExecutor; @@ -141,63 +111,57 @@ impl sc_executor::NativeExecutionDispatch for AssetHubWestendExecutor { } } -/// Native Polkadot Collectives executor instance. -pub struct CollectivesPolkadotRuntimeExecutor; +/// Native Westend Collectives executor instance. +pub struct CollectivesWestendRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for CollectivesPolkadotRuntimeExecutor { +impl sc_executor::NativeExecutionDispatch for CollectivesWestendRuntimeExecutor { type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { - collectives_polkadot_runtime::api::dispatch(method, data) + collectives_westend_runtime::api::dispatch(method, data) } fn native_version() -> sc_executor::NativeVersion { - collectives_polkadot_runtime::native_version() + collectives_westend_runtime::native_version() } } -/// Native BridgeHubPolkadot executor instance. -pub struct BridgeHubPolkadotRuntimeExecutor; +/// Native BridgeHubRococo executor instance. +pub struct BridgeHubRococoRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for BridgeHubPolkadotRuntimeExecutor { +impl sc_executor::NativeExecutionDispatch for BridgeHubRococoRuntimeExecutor { type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { - bridge_hub_polkadot_runtime::api::dispatch(method, data) + bridge_hub_rococo_runtime::api::dispatch(method, data) } fn native_version() -> sc_executor::NativeVersion { - bridge_hub_polkadot_runtime::native_version() + bridge_hub_rococo_runtime::native_version() } } -/// Native BridgeHubKusama executor instance. -pub struct BridgeHubKusamaRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for BridgeHubKusamaRuntimeExecutor { +/// Native `CoretimeRococo` executor instance. +pub struct CoretimeRococoRuntimeExecutor; +impl sc_executor::NativeExecutionDispatch for CoretimeRococoRuntimeExecutor { type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - fn dispatch(method: &str, data: &[u8]) -> Option> { - bridge_hub_kusama_runtime::api::dispatch(method, data) + coretime_rococo_runtime::api::dispatch(method, data) } - fn native_version() -> sc_executor::NativeVersion { - bridge_hub_kusama_runtime::native_version() + coretime_rococo_runtime::native_version() } } -/// Native BridgeHubRococo executor instance. 
-pub struct BridgeHubRococoRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for BridgeHubRococoRuntimeExecutor { +/// Native `CoretimeWestend` executor instance. +pub struct CoretimeWestendRuntimeExecutor; +impl sc_executor::NativeExecutionDispatch for CoretimeWestendRuntimeExecutor { type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - fn dispatch(method: &str, data: &[u8]) -> Option> { - bridge_hub_rococo_runtime::api::dispatch(method, data) + coretime_westend_runtime::api::dispatch(method, data) } - fn native_version() -> sc_executor::NativeVersion { - bridge_hub_rococo_runtime::native_version() + coretime_westend_runtime::native_version() } } @@ -216,18 +180,18 @@ impl sc_executor::NativeExecutionDispatch for ContractsRococoRuntimeExecutor { } } -/// Native Glutton executor instance. -pub struct GluttonRuntimeExecutor; +/// Native Westend Glutton executor instance. +pub struct GluttonWestendRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for GluttonRuntimeExecutor { +impl sc_executor::NativeExecutionDispatch for GluttonWestendRuntimeExecutor { type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { - shell_runtime::api::dispatch(method, data) + glutton_westend_runtime::api::dispatch(method, data) } fn native_version() -> sc_executor::NativeVersion { - shell_runtime::native_version() + glutton_westend_runtime::native_version() } } @@ -560,6 +524,7 @@ where CollatorPair, OverseerHandle, Arc>) + Send + Sync>, + Arc, ) -> Result<(), sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); @@ -693,6 +658,7 @@ where collator_key.expect("Command line arguments do not allow this. qed"), overseer_handle, announce_block, + backend.clone(), )?; } @@ -885,8 +851,8 @@ where /// Build the import queue for the rococo parachain runtime. pub fn rococo_parachain_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, + client: Arc>, + block_import: ParachainBlockImport, config: &Configuration, telemetry: Option, task_manager: &TaskManager, @@ -928,11 +894,8 @@ pub async fn start_rococo_parachain_node( collator_options: CollatorOptions, para_id: ParaId, hwbench: Option, -) -> sc_service::error::Result<( - TaskManager, - Arc>, -)> { - start_node_impl::( +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( parachain_config, polkadot_config, collator_options, @@ -953,7 +916,8 @@ pub async fn start_rococo_parachain_node( para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -972,11 +936,15 @@ pub async fn start_rococo_parachain_node( client.clone(), ); - let params = BasicAuraParams { + let params = AuraParams { create_inherent_data_providers: move |_, ()| async move { Ok(()) }, block_import, - para_client: client, + para_client: client.clone(), + para_backend: backend.clone(), relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, sync_oracle, keystore, collator_key, @@ -986,12 +954,10 @@ pub async fn start_rococo_parachain_node( relay_chain_slot_duration, proposer, collator_service, - // Very limited proposal time. 
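One signature change worth calling out: `start_node_impl` now also hands the backend to the `start_consensus` closure, because the lookahead Aura collator needs a `para_backend` handle next to `para_client`. A simplified sketch of that closure shape (the types are stand-ins, not the real `sc-service`/`sc-client-api` ones):

```rust
// Stand-in types; the real function builds the network, RPC and task manager
// before invoking `start_consensus`.
use std::sync::Arc;

struct Client;
struct Backend;

fn start_node_impl<F>(start_consensus: F) -> Result<(), String>
where
    F: FnOnce(Arc<Client>, Arc<Backend>) -> Result<(), String>,
{
    let client = Arc::new(Client);
    let backend = Arc::new(Backend);
    start_consensus(client, backend)
}

fn main() -> Result<(), String> {
    start_node_impl(|_client, _backend| {
        // The real closure feeds `para_client` / `para_backend` into `AuraParams`.
        println!("start_consensus received both the client and the backend");
        Ok(())
    })
}
```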
- authoring_duration: Duration::from_millis(500), - collation_request_receiver: None, + authoring_duration: Duration::from_millis(1500), }; - let fut = basic_aura::run::< + let fut = aura::run::< Block, sp_consensus_aura::sr25519::AuthorityPair, _, @@ -1001,6 +967,8 @@ pub async fn start_rococo_parachain_node( _, _, _, + _, + _, >(params); task_manager.spawn_essential_handle().spawn("aura", None, fut); @@ -1346,7 +1314,8 @@ where para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + _backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -1441,7 +1410,8 @@ where para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + _backend| { let relay_chain_interface2 = relay_chain_interface.clone(); let collator_service = CollatorService::new( @@ -1612,7 +1582,7 @@ where para_backend: backend.clone(), relay_client: relay_chain_interface, code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(ValidationCode).map(|c| c.hash()) + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) }, sync_oracle, keystore, @@ -1683,6 +1653,7 @@ where CollatorPair, OverseerHandle, Arc>) + Send + Sync>, + Arc, ) -> Result<(), sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); @@ -1815,6 +1786,7 @@ where collator_key.expect("Command line arguments do not allow this. qed"), overseer_handle, announce_block, + backend.clone(), )?; } @@ -1825,8 +1797,8 @@ where #[allow(clippy::type_complexity)] pub fn contracts_rococo_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, + client: Arc>, + block_import: ParachainBlockImport, config: &Configuration, telemetry: Option, task_manager: &TaskManager, @@ -1868,11 +1840,8 @@ pub async fn start_contracts_rococo_node( collator_options: CollatorOptions, para_id: ParaId, hwbench: Option, -) -> sc_service::error::Result<( - TaskManager, - Arc>, -)> { - start_contracts_rococo_node_impl::( +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_contracts_rococo_node_impl::( parachain_config, polkadot_config, collator_options, @@ -1893,7 +1862,8 @@ pub async fn start_contracts_rococo_node( para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + _backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( diff --git a/cumulus/polkadot-parachain/tests/benchmark_storage_works.rs b/cumulus/polkadot-parachain/tests/benchmark_storage_works.rs index c2850b64e458adca9bb0918667cf912eb16796df..c554b5b3d6be5602d84aa33c5d89020af26be74b 100644 --- a/cumulus/polkadot-parachain/tests/benchmark_storage_works.rs +++ b/cumulus/polkadot-parachain/tests/benchmark_storage_works.rs @@ -24,7 +24,7 @@ use std::{ use tempfile::tempdir; /// The runtimes that this command supports. -static RUNTIMES: [&str; 3] = ["asset-hub-westend", "asset-hub-kusama", "asset-hub-polkadot"]; +static RUNTIMES: [&str; 1] = ["asset-hub-westend"]; /// The `benchmark storage` command works for the dev runtimes. 
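The `code_hash_provider` closure used by the lookahead collator maps a block hash to the hash of the runtime code the client holds for that block; the diff writes it as `ValidationCode::from(c).hash()` in both nodes that use it. A self-contained toy version of that shape (the hashing and client lookup below are stand-ins, not the real blake2-256 code hash or `sc-client-api` call):

```rust
// Toy stand-ins; the real closure captures an Arc to the full client and
// returns the blake2-256 hash of the on-chain validation code.
use std::{collections::HashMap, sync::Arc};

struct ValidationCode(Vec<u8>);

impl From<Vec<u8>> for ValidationCode {
    fn from(code: Vec<u8>) -> Self {
        Self(code)
    }
}

impl ValidationCode {
    /// Toy stand-in for the real blake2-256 code hash.
    fn hash(&self) -> u64 {
        self.0.iter().fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(*b as u64))
    }
}

struct Client {
    code_by_block: HashMap<u64, Vec<u8>>,
}

impl Client {
    fn code_at(&self, block_hash: u64) -> Result<Vec<u8>, String> {
        self.code_by_block
            .get(&block_hash)
            .cloned()
            .ok_or_else(|| "unknown block".to_string())
    }
}

fn main() {
    let client = Arc::new(Client {
        code_by_block: HashMap::from([(1u64, vec![0x00, 0x61, 0x73, 0x6d])]),
    });

    // Same shape as the closure in the diff: `Option<code hash>` per block.
    let code_hash_provider = move |block_hash: u64| {
        client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
    };

    println!("{:?}", code_hash_provider(1)); // Some(<hash>)
    println!("{:?}", code_hash_provider(2)); // None
}
```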
#[test] diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 19607eb7c18c2335adf8cc461cc7dc1bb6f82331..6d917eea270ec2fac273e349ad4fa9521ecfe0fc 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -6,21 +6,24 @@ edition.workspace = true license = "Apache-2.0" description = "Core primitives for Aura in Cumulus" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } # Substrate -sp-api = { path = "../../../substrate/primitives/api", default-features = false} -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "polkadot-core-primitives/std", diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 23839a10e46b3f28723f49396c442579ff159543..98c3e8ab5672e87f9d63407c058290739e065472 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -6,24 +6,27 @@ edition.workspace = true license = "Apache-2.0" description = "Cumulus related core primitive types and traits" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -sp-api = { path = "../../../substrate/primitives/api", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false} +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } # Polkadot -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false} 
-polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false } +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "polkadot-core-primitives/std", diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 46b5da57f3837a0f840b516db7460f2112ec8494..f914af1175145cc556bc38bb66bcff7f0ab1536d 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -6,22 +6,25 @@ edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." license = "Apache-2.0" +[lints] +workspace = true + [dependencies] -async-trait = { version = "0.1.73", optional = true } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +async-trait = { version = "0.1.74", optional = true } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } tracing = { version = "0.1.37", optional = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api", optional = true} -sp-api = { path = "../../../substrate/primitives/api", optional = true} -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true} -sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../substrate/primitives/storage", optional = true} -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false} +sc-client-api = { path = "../../../substrate/client/api", optional = true } +sp-api = { path = "../../../substrate/primitives/api", optional = true } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true } +sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../substrate/primitives/storage", optional = true } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } # Cumulus cumulus-primitives-core = { path = "../core", default-features = false } @@ -29,7 +32,7 @@ cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface", o cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ 
"async-trait", "codec/std", diff --git a/cumulus/primitives/parachain-inherent/src/lib.rs b/cumulus/primitives/parachain-inherent/src/lib.rs index 08407023bb4604933ff4de97a6669e45b85d6bc5..f98c748e82fa057651b4ff2b9e0ed0f56184dba7 100644 --- a/cumulus/primitives/parachain-inherent/src/lib.rs +++ b/cumulus/primitives/parachain-inherent/src/lib.rs @@ -39,8 +39,6 @@ use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; #[cfg(feature = "std")] mod client_side; #[cfg(feature = "std")] -pub use client_side::*; -#[cfg(feature = "std")] mod mock; #[cfg(feature = "std")] pub use mock::{MockValidationDataInherentDataProvider, MockXcmConfig}; diff --git a/cumulus/primitives/parachain-inherent/src/mock.rs b/cumulus/primitives/parachain-inherent/src/mock.rs index 5168b46a14d03def1ccfe72c9244e71bfb202d84..e40cb49acddd1f7cd9d01e37c4f1c869a3043c6a 100644 --- a/cumulus/primitives/parachain-inherent/src/mock.rs +++ b/cumulus/primitives/parachain-inherent/src/mock.rs @@ -61,6 +61,8 @@ pub struct MockValidationDataInherentDataProvider { pub raw_downward_messages: Vec>, // Inbound Horizontal messages sorted by channel pub raw_horizontal_messages: Vec<(ParaId, Vec)>, + // Additional key-value pairs that should be injected. + pub additional_key_values: Option, Vec)>>, } pub trait GenerateRandomness { @@ -210,6 +212,10 @@ impl> InherentDataProvider sproof_builder.randomness = self.relay_randomness_config.generate_randomness(self.current_para_block.into()); + if let Some(key_values) = &self.additional_key_values { + sproof_builder.additional_key_values = key_values.clone() + } + let (relay_parent_storage_root, proof) = sproof_builder.into_state_root_and_proof(); inherent_data.put_data( diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..06797f86863265797f59d3f44504168f1549ecb5 --- /dev/null +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "cumulus-primitives-proof-size-hostfunction" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +description = "Hostfunction exposing storage proof size to the runtime." +license = "Apache-2.0" + +[lints] +workspace = true + +[dependencies] +sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } +sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } + +[dev-dependencies] +sp-state-machine = { path = "../../../substrate/primitives/state-machine" } +sp-core = { path = "../../../substrate/primitives/core" } +sp-io = { path = "../../../substrate/primitives/io" } + +[features] +default = ["std"] +std = ["sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std"] diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..6da6235e585a343887f87931e375b21bec48c20d --- /dev/null +++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs @@ -0,0 +1,107 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Tools for reclaiming PoV weight in parachain runtimes. + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_externalities::ExternalitiesExt; + +use sp_runtime_interface::runtime_interface; + +#[cfg(feature = "std")] +use sp_trie::proof_size_extension::ProofSizeExt; + +pub const PROOF_RECORDING_DISABLED: u64 = u64::MAX; + +/// Interface that provides access to the current storage proof size. +/// +/// Should return the current storage proof size if [`ProofSizeExt`] is registered. Otherwise, needs +/// to return u64::MAX. +#[runtime_interface] +pub trait StorageProofSize { + /// Returns the current storage proof size. + fn storage_proof_size(&mut self) -> u64 { + self.extension::().map_or(u64::MAX, |e| e.storage_proof_size()) + } +} + +#[cfg(test)] +mod tests { + use sp_core::Blake2Hasher; + use sp_state_machine::TestExternalities; + use sp_trie::{ + proof_size_extension::ProofSizeExt, recorder::Recorder, LayoutV1, PrefixedMemoryDB, + TrieDBMutBuilder, TrieMut, + }; + + use crate::{storage_proof_size, PROOF_RECORDING_DISABLED}; + + const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64])]; + + type TestLayout = LayoutV1; + + fn get_prepared_test_externalities() -> (TestExternalities, Recorder) + { + let mut db = PrefixedMemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + for (k, v) in TEST_DATA { + trie.insert(k, v).expect("Inserts data"); + } + } + + let recorder: sp_trie::recorder::Recorder = Default::default(); + let trie_backend = sp_state_machine::TrieBackendBuilder::new(db, root) + .with_recorder(recorder.clone()) + .build(); + + let mut ext: TestExternalities = TestExternalities::default(); + ext.backend = trie_backend; + (ext, recorder) + } + + #[test] + fn host_function_returns_size_from_recorder() { + let (mut ext, recorder) = get_prepared_test_externalities(); + ext.register_extension(ProofSizeExt::new(recorder)); + + ext.execute_with(|| { + assert_eq!(storage_proof_size::storage_proof_size(), 0); + sp_io::storage::get(b"key1"); + assert_eq!(storage_proof_size::storage_proof_size(), 175); + sp_io::storage::get(b"key2"); + assert_eq!(storage_proof_size::storage_proof_size(), 275); + sp_io::storage::get(b"key2"); + assert_eq!(storage_proof_size::storage_proof_size(), 275); + }); + } + + #[test] + fn host_function_returns_max_without_extension() { + let (mut ext, _) = get_prepared_test_externalities(); + + ext.execute_with(|| { + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); + sp_io::storage::get(b"key1"); + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); + sp_io::storage::get(b"key2"); + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); + }); + } +} diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index 
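The `storage_proof_size` host function reports the proof size recorded so far, or `u64::MAX` when recording is disabled, which is what would let a runtime reclaim unused PoV weight by sampling the value before and after storage work. A hedged, self-contained sketch of that usage pattern, with a toy recorder standing in for the client-side `ProofSizeExt`:

```rust
// Toy recorder standing in for `ProofSizeExt`; in a real runtime the values
// would come from the `storage_proof_size` host function instead.
const PROOF_RECORDING_DISABLED: u64 = u64::MAX;

struct ProofSizeRecorder {
    size: u64,
}

impl ProofSizeRecorder {
    /// Stand-in for the `storage_proof_size` host call.
    fn storage_proof_size(&self) -> u64 {
        self.size
    }

    /// Pretend some storage reads added `n` bytes to the proof.
    fn record(&mut self, n: u64) {
        self.size += n;
    }
}

fn main() {
    let mut recorder = ProofSizeRecorder { size: 0 };

    let before = recorder.storage_proof_size();
    if before == PROOF_RECORDING_DISABLED {
        println!("proof recording disabled, nothing to reclaim");
        return;
    }

    // ... runtime executes, touching storage ...
    recorder.record(275);

    let consumed = recorder.storage_proof_size().saturating_sub(before);
    println!("proof size actually consumed: {consumed} bytes");
}
```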
a0fea51f8db1ff43682e1cb9093d5a1b2e1fd703..b07a907154dfab36a63f900852fc4d044ccee341 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -6,20 +6,23 @@ edition.workspace = true description = "Provides timestamp related functionality for parachains." license = "Apache-2.0" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures = "0.3.28" # Substrate -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-timestamp = { path = "../../../substrate/primitives/timestamp", default-features = false} +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-timestamp = { path = "../../../substrate/primitives/timestamp", default-features = false } # Cumulus cumulus-primitives-core = { path = "../core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 45ce6701988682ed963d13e19f56b5f7c9b08e09..56b6b9284176ef13624ef3d45b2483a4861b246d 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -6,29 +6,32 @@ edition.workspace = true license = "Apache-2.0" description = "Helper datatypes for Cumulus" +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { version = "0.4.20", default-features = false } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } 
pallet-xcm-benchmarks = { path = "../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false } # Cumulus cumulus-primitives-core = { path = "../core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/scripts/benchmarks.sh b/cumulus/scripts/benchmarks.sh index 29d0690592583512e7923f92b9d81d46913d04e9..58b8419bf4ae7ef1db1de785ed8ebac137063556 100755 --- a/cumulus/scripts/benchmarks.sh +++ b/cumulus/scripts/benchmarks.sh @@ -6,14 +6,7 @@ repeat=${3:-20} __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -${__dir}/benchmarks-ci.sh collectives collectives-polkadot target/$target $steps $repeat - -${__dir}/benchmarks-ci.sh assets asset-hub-kusama target/$target $steps $repeat -${__dir}/benchmarks-ci.sh assets asset-hub-polkadot target/$target $steps $repeat +${__dir}/benchmarks-ci.sh collectives collectives-westend target/$target $steps $repeat ${__dir}/benchmarks-ci.sh assets asset-hub-westend target/$target $steps $repeat - -${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-polkadot target/$target $steps $repeat -${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-kusama target/$target $steps $repeat ${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-rococo target/$target $steps $repeat - -${__dir}/benchmarks-ci.sh glutton glutton-kusama target/$target $steps $repeat +${__dir}/benchmarks-ci.sh glutton glutton-westend target/$target $steps $repeat diff --git a/cumulus/scripts/bridges_common.sh b/cumulus/scripts/bridges_common.sh index 8d64c5ede52a25eaa4f0332addf1b39969ac478d..97ef8aa1259535dde929108fb7d6797b238148b9 100755 --- a/cumulus/scripts/bridges_common.sh +++ b/cumulus/scripts/bridges_common.sh @@ -187,23 +187,25 @@ function open_hrmp_channels() { ${max_message_size} } -function set_storage() { +function force_xcm_version() { local relay_url=$1 local relay_chain_seed=$2 local runtime_para_id=$3 local runtime_para_endpoint=$4 - local items=$5 - echo " calling set_storage:" + local dest=$5 + local xcm_version=$6 + echo " calling force_xcm_version:" echo " relay_url: ${relay_url}" echo " relay_chain_seed: ${relay_chain_seed}" echo " runtime_para_id: ${runtime_para_id}" echo " runtime_para_endpoint: ${runtime_para_endpoint}" - echo " items: ${items}" + echo " dest: ${dest}" + echo " xcm_version: ${xcm_version}" echo " params:" - # 1. generate data for Transact (System::set_storage) + # 1. generate data for Transact (PolkadotXcm::force_xcm_version) local tmp_output_file=$(mktemp) - generate_hex_encoded_call_data "set-storage" "${runtime_para_endpoint}" "${tmp_output_file}" "$items" + generate_hex_encoded_call_data "force-xcm-version" "${runtime_para_endpoint}" "${tmp_output_file}" "$dest" "$xcm_version" local hex_encoded_data=$(cat $tmp_output_file) # 2. 
trigger governance call diff --git a/cumulus/scripts/bridges_rococo_westend.sh b/cumulus/scripts/bridges_rococo_westend.sh index ce8480685aada05fa6af606b6e6d74d4b906d17b..c52b72e51fc518a730e3c919cc7f2b73cf99a705 100755 --- a/cumulus/scripts/bridges_rococo_westend.sh +++ b/cumulus/scripts/bridges_rococo_westend.sh @@ -129,6 +129,7 @@ ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain="5EHnXa ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain="5EHnXaT5BhiSGP5h9RgQci1txJ2BDbp7KBRE9k8xty3BMUSi" LANE_ID="00000002" +XCM_VERSION=3 function init_ro_wnd() { ensure_relayer @@ -170,8 +171,6 @@ function run_relay() { --bridge-hub-rococo-port 8943 \ --bridge-hub-rococo-version-mode Auto \ --bridge-hub-rococo-signer //Charlie \ - --westend-headers-to-bridge-hub-rococo-signer //Bob \ - --westend-parachains-to-bridge-hub-rococo-signer //Bob \ --bridge-hub-rococo-transactions-mortality 4 \ --westend-host localhost \ --westend-port 9945 \ @@ -180,8 +179,6 @@ function run_relay() { --bridge-hub-westend-port 8945 \ --bridge-hub-westend-version-mode Auto \ --bridge-hub-westend-signer //Charlie \ - --rococo-headers-to-bridge-hub-westend-signer //Bob \ - --rococo-parachains-to-bridge-hub-westend-signer //Bob \ --bridge-hub-westend-transactions-mortality 4 \ --lane "${LANE_ID}" } @@ -209,7 +206,7 @@ case "$1" in "ws://127.0.0.1:9910" \ "//Alice" \ "$GLOBAL_CONSENSUS_WESTEND_ASSET_HUB_WESTEND_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000 + 50000000000 * 20)) # HRMP open_hrmp_channels \ "ws://127.0.0.1:9942" \ @@ -219,6 +216,14 @@ case "$1" in "ws://127.0.0.1:9942" \ "//Alice" \ 1013 1000 4 524288 + # set XCM version of remote AssetHubWestend + force_xcm_version \ + "ws://127.0.0.1:9942" \ + "//Alice" \ + 1000 \ + "ws://127.0.0.1:9910" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } }')" \ + $XCM_VERSION ;; init-bridge-hub-rococo-local) ensure_polkadot_js_api @@ -227,19 +232,27 @@ case "$1" in "ws://127.0.0.1:8943" \ "//Alice" \ "$ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000 + 50000000000 * 20)) # drip SA of lane dedicated to asset hub for paying rewards for delivery transfer_balance \ "ws://127.0.0.1:8943" \ "//Alice" \ "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000 + 2000000000000)) # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation transfer_balance \ "ws://127.0.0.1:8943" \ "//Alice" \ "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000 + 2000000000000)) + # set XCM version of remote BridgeHubWestend + force_xcm_version \ + "ws://127.0.0.1:9942" \ + "//Alice" \ + 1013 \ + "ws://127.0.0.1:8943" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1002 } ] } }')" \ + $XCM_VERSION ;; init-asset-hub-westend-local) ensure_polkadot_js_api @@ -258,7 +271,7 @@ case "$1" in "ws://127.0.0.1:9010" \ "//Alice" \ "$GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000000 + 50000000000 * 20)) # HRMP open_hrmp_channels \ "ws://127.0.0.1:9945" \ @@ -268,6 +281,14 @@ case "$1" in "ws://127.0.0.1:9945" \ "//Alice" \ 1002 1000 4 524288 + # set XCM version of remote AssetHubRococo + 
force_xcm_version \ + "ws://127.0.0.1:9945" \ + "//Alice" \ + 1000 \ + "ws://127.0.0.1:9010" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } }')" \ + $XCM_VERSION ;; init-bridge-hub-westend-local) # SA of sibling asset hub pays for the execution @@ -275,19 +296,27 @@ case "$1" in "ws://127.0.0.1:8945" \ "//Alice" \ "$ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000000 + 50000000000 * 20)) # drip SA of lane dedicated to asset hub for paying rewards for delivery transfer_balance \ "ws://127.0.0.1:8945" \ "//Alice" \ "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000000 + 2000000000000)) # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation transfer_balance \ "ws://127.0.0.1:8945" \ "//Alice" \ "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000000 + 2000000000000)) + # set XCM version of remote BridgeHubRococo + force_xcm_version \ + "ws://127.0.0.1:9945" \ + "//Alice" \ + 1002 \ + "ws://127.0.0.1:8945" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1013 } ] } }')" \ + $XCM_VERSION ;; reserve-transfer-assets-from-asset-hub-rococo-local) ensure_polkadot_js_api @@ -297,19 +326,43 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 200000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; + withdraw-reserve-assets-from-asset-hub-rococo-local) + ensure_polkadot_js_api + # send back only 100000000000 wrappedWNDs to Alice account on AHW + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9910" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ 0 \ "Unlimited" ;; reserve-transfer-assets-from-asset-hub-westend-local) ensure_polkadot_js_api - # send WOCs to Alice account on AHR + # send WNDs to Alice account on AHR + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9010" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 
214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; + withdraw-reserve-assets-from-asset-hub-westend-local) + ensure_polkadot_js_api + # send back only 100000000000 wrappedROCs to Alice account on AHR limited_reserve_transfer_assets \ "ws://127.0.0.1:9010" \ "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 150000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ 0 \ "Unlimited" ;; @@ -360,7 +413,9 @@ case "$1" in - init-asset-hub-westend-local - init-bridge-hub-westend-local - reserve-transfer-assets-from-asset-hub-rococo-local + - withdraw-reserve-assets-from-asset-hub-rococo-local - reserve-transfer-assets-from-asset-hub-westend-local + - withdraw-reserve-assets-from-asset-hub-westend-local - claim-rewards-bridge-hub-rococo-local - claim-rewards-bridge-hub-westend-local"; exit 1 diff --git a/cumulus/scripts/bridges_rococo_wococo.sh b/cumulus/scripts/bridges_rococo_wococo.sh deleted file mode 100755 index dd7c7062a3b39684f46be376f20a520a0585b9fb..0000000000000000000000000000000000000000 --- a/cumulus/scripts/bridges_rococo_wococo.sh +++ /dev/null @@ -1,386 +0,0 @@ -#!/bin/bash - -# import common functions -source "$(dirname "$0")"/bridges_common.sh - -# Expected sovereign accounts. -# -# Generated by: -# -# #[test] -# fn generate_sovereign_accounts() { -# use sp_core::crypto::Ss58Codec; -# use polkadot_parachain_primitives::primitives::Sibling; -# -# parameter_types! 
{ -# pub UniversalLocationAHR: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(1000)); -# pub UniversalLocationAHW: InteriorMultiLocation = X2(GlobalConsensus(Wococo), Parachain(1000)); -# } -# -# // SS58=42 -# println!("GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusParachainConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X2(GlobalConsensus(Rococo), Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# -# // SS58=42 -# println!("GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Wococo)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusParachainConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X2(GlobalConsensus(Wococo), Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# } -GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT="5GxRGwT8bU1JeBPTUXc7LEjZMxNrK8MyL2NJnkWFQJTQ4sii" -GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT="5CfNu7eH3SJvqqPt3aJh38T8dcFvhGzEohp9tsd41ANhXDnQ" -ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" -GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT="5EWw2NzfPr2DCahourp33cya6bGWEJViTnJN6Z2ruFevpJML" -GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT="5EJX8L4dwGyYnCsjZ91LfWAsm3rCN8vY2AYvT4mauMEjsrQz" -ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" - -# Expected sovereign accounts for rewards on BridgeHubs. 
-# -# Generated by: -#[test] -#fn generate_sovereign_accounts_for_rewards() { -# use bp_messages::LaneId; -# use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; -# use sp_core::crypto::Ss58Codec; -# -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhwo", -# RewardsAccountOwner::ThisChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhwo", -# RewardsAccountOwner::BridgedChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhro", -# RewardsAccountOwner::ThisChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhro", -# RewardsAccountOwner::BridgedChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -#} -ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain="5EHnXaT5BhiS8YRPMeHi97YHofTtNx4pLNb8wR8TwjVq1gzU" -ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain="5EHnXaT5BhiS8YRPMeHyt95svA95qWAh53XeVMpJQZNZHAzj" -ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain="5EHnXaT5BhiS8YRNuCukWXTQdAqARjjXmpjehjx1YZNE5keZ" -ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain="5EHnXaT5BhiS8YRNuCv2FYzzjfWMtHqQWVgAFgdr1PExMN94" - -LANE_ID="00000001" - -function init_ro_wo() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge rococo-to-bridge-hub-wococo \ - --source-host localhost \ - --source-port 9942 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8945 \ - --target-version-mode Auto \ - --target-signer //Bob -} - -function init_wo_ro() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge wococo-to-bridge-hub-rococo \ - --source-host localhost \ - --source-port 9945 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8943 \ - --target-version-mode Auto \ - --target-signer //Bob -} - -function run_relay() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay relay-headers-and-messages bridge-hub-rococo-bridge-hub-wococo \ - --rococo-host localhost \ - --rococo-port 9942 \ - --rococo-version-mode Auto \ - --bridge-hub-rococo-host localhost \ - --bridge-hub-rococo-port 8943 \ - --bridge-hub-rococo-version-mode Auto \ - --bridge-hub-rococo-signer //Charlie \ - --wococo-headers-to-bridge-hub-rococo-signer //Bob 
\ - --wococo-parachains-to-bridge-hub-rococo-signer //Bob \ - --bridge-hub-rococo-transactions-mortality 4 \ - --wococo-host localhost \ - --wococo-port 9945 \ - --wococo-version-mode Auto \ - --bridge-hub-wococo-host localhost \ - --bridge-hub-wococo-port 8945 \ - --bridge-hub-wococo-version-mode Auto \ - --bridge-hub-wococo-signer //Charlie \ - --rococo-headers-to-bridge-hub-wococo-signer //Bob \ - --rococo-parachains-to-bridge-hub-wococo-signer //Bob \ - --bridge-hub-wococo-transactions-mortality 4 \ - --lane "${LANE_ID}" -} - -case "$1" in - run-relay) - init_ro_wo - init_wo_ro - run_relay - ;; - init-asset-hub-rococo-local) - ensure_polkadot_js_api - # create foreign assets for native Wococo token (governance call on Rococo) - force_create_foreign_asset \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9910" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Wococo" } } }')" \ - "$GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT" \ - 10000000000 \ - true - # drip SA which holds reserves - transfer_balance \ - "ws://127.0.0.1:9910" \ - "//Alice" \ - "$GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) - # HRMP - open_hrmp_channels \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1000 1013 4 524288 - open_hrmp_channels \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1013 1000 4 524288 - ;; - init-bridge-hub-rococo-local) - ensure_polkadot_js_api - # SA of sibling asset hub pays for the execution - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO" \ - $((1000000000 + 50000000000 * 20)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain" \ - $((1000000000 + 2000000000000)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain" \ - $((1000000000 + 2000000000000)) - ;; - init-asset-hub-wococo-local) - ensure_polkadot_js_api - # set Wococo flavor - set_storage with: - # - `key` is `HexDisplay::from(&asset_hub_rococo_runtime::xcm_config::Flavor::key())` - # - `value` is `HexDisplay::from(&asset_hub_rococo_runtime::RuntimeFlavor::Wococo.encode())` - set_storage \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9010" \ - "$(jq --null-input '[["0x48297505634037ef48c848c99c0b1f1b", "0x01"]]')" - # create foreign assets for native Rococo token (governance call on Wococo) - force_create_foreign_asset \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9010" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } }')" \ - "$GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT" \ - 10000000000 \ - true - # drip SA which holds reserves - transfer_balance \ - "ws://127.0.0.1:9010" \ - "//Alice" \ - "$GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) - # HRMP - open_hrmp_channels \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 1014 4 524288 - open_hrmp_channels \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1014 1000 4 524288 - ;; - init-bridge-hub-wococo-local) - # set Wococo flavor - set_storage with: - # - `key` is `HexDisplay::from(&bridge_hub_rococo_runtime::xcm_config::Flavor::key())` - # - `value` is 
`HexDisplay::from(&bridge_hub_rococo_runtime::RuntimeFlavor::Wococo.encode())` - set_storage \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1014 \ - "ws://127.0.0.1:8945" \ - "$(jq --null-input '[["0x48297505634037ef48c848c99c0b1f1b", "0x01"]]')" - # SA of sibling asset hub pays for the execution - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO" \ - $((1000000000 + 50000000000 * 20)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain" \ - $((1000000000 + 2000000000000)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain" \ - $((1000000000 + 2000000000000)) - ;; - reserve-transfer-assets-from-asset-hub-rococo-local) - ensure_polkadot_js_api - # send ROCs to Alice account on AHW - limited_reserve_transfer_assets \ - "ws://127.0.0.1:9910" \ - "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Wococo" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 200000000000 } } ] }')" \ - 0 \ - "Unlimited" - ;; - reserve-transfer-assets-from-asset-hub-wococo-local) - ensure_polkadot_js_api - # send WOCs to Alice account on AHR - limited_reserve_transfer_assets \ - "ws://127.0.0.1:9010" \ - "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 150000000000 } } ] }')" \ - 0 \ - "Unlimited" - ;; - claim-rewards-bridge-hub-rococo-local) - ensure_polkadot_js_api - # bhwo -> [62, 68, 77, 6f] -> 0x6268776f - claim_rewards \ - "ws://127.0.0.1:8943" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268776f" \ - "ThisChain" - claim_rewards \ - "ws://127.0.0.1:8943" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268776f" \ - "BridgedChain" - ;; - claim-rewards-bridge-hub-wococo-local) - # bhro -> [62, 68, 72, 6f] -> 0x6268726f - claim_rewards \ - "ws://127.0.0.1:8945" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268726f" \ - "ThisChain" - claim_rewards \ - "ws://127.0.0.1:8945" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268726f" \ - "BridgedChain" - ;; - stop) - pkill -f polkadot - pkill -f parachain - ;; - import) - # to avoid trigger anything here - ;; - *) - echo "A command is require. 
Supported commands for: - Local (zombienet) run: - - run-relay - - init-asset-hub-rococo-local - - init-bridge-hub-rococo-local - - init-asset-hub-wococo-local - - init-bridge-hub-wococo-local - - reserve-transfer-assets-from-asset-hub-rococo-local - - reserve-transfer-assets-from-asset-hub-wococo-local - - claim-rewards-bridge-hub-rococo-local - - claim-rewards-bridge-hub-wococo-local"; - exit 1 - ;; -esac diff --git a/cumulus/scripts/create_bridge_hub_kusama_spec.sh b/cumulus/scripts/create_bridge_hub_kusama_spec.sh deleted file mode 100755 index 813921b079a8c64f8b12646570ffca587b0fa718..0000000000000000000000000000000000000000 --- a/cumulus/scripts/create_bridge_hub_kusama_spec.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bash - -usage() { - echo Usage: - echo "$1 " - echo "$2 " - echo "e.g.: ./scripts/create_bridge_hub_kusama_spec.sh ./target/release/wbuild/bridge-hub-kusama-runtime/bridge_hub_kusama_runtime.compact.compressed.wasm 1002" - exit 1 -} - -if [ -z "$1" ]; then - usage -fi - -if [ -z "$2" ]; then - usage -fi - -set -e - -rt_path=$1 -para_id=$2 - -echo "Generating chain spec for runtime: $rt_path and para_id: $para_id" - -binary="./target/release/polkadot-parachain" - -# build the chain spec we'll manipulate -$binary build-spec --chain bridge-hub-kusama-dev > chain-spec-plain.json - -# convert runtime to hex -cat $rt_path | od -A n -v -t x1 | tr -d ' \n' > rt-hex.txt - -# replace the runtime in the spec with the given runtime and set some values to production -cat chain-spec-plain.json | jq --rawfile code rt-hex.txt '.genesis.runtime.system.code = ("0x" + $code)' \ - | jq '.name = "Kusama BridgeHub"' \ - | jq '.id = "bridge-hub-kusama"' \ - | jq '.chainType = "Live"' \ - | jq '.bootNodes = [ - "/dns/kusama-bridge-hub-collator-ew1-0.polkadot.io/tcp/30334/p2p/12D3KooWP2Gngt4tt2sz5BgDaAbMTxasPWk3V2Z99bQTmFcAorqa", - "/dns/kusama-bridge-hub-collator-ew1-1.polkadot.io/tcp/30334/p2p/12D3KooWMmL3FQuYmruBui1sbY4MwNmvicinrePi1Yq4QMRSYHoR", - "/dns/kusama-bridge-hub-collator-ue4-0.polkadot.io/tcp/30334/p2p/12D3KooWQpTocTck1tNBzMNTHJ3kSv4vzv8Yf9FpVkfGnungbez4", - "/dns/kusama-bridge-hub-collator-ue4-1.polkadot.io/tcp/30334/p2p/12D3KooWRgtJqKEaMi7hkU4VMiGhpHTJeL8N7JgL7d9gwooPv4eW", - - "/dns/kusama-bridge-hub-connect-ew1-0.polkadot.io/tcp/30334/p2p/12D3KooWPQQPivrqQ51kRTDc2R1mtqwKT4GGtk2rapkY4FrwHrEp", - "/dns/kusama-bridge-hub-connect-ew1-1.polkadot.io/tcp/30334/p2p/12D3KooWPcF9Yk4gYrMju9CyWCV69hAFXbYsnxCLogwLGu9QFTRn", - "/dns/kusama-bridge-hub-connect-ue4-0.polkadot.io/tcp/30334/p2p/12D3KooWMf1sVnJDTkKWtaThqvrgcSPLbfGXttSqbwhM2DJp9BUG", - "/dns/kusama-bridge-hub-connect-ue4-1.polkadot.io/tcp/30334/p2p/12D3KooWQaV7wMfNVKy2aMz4Lds3TTxgSDyZAUEnbAZMfD8rW3ow", - - "/dns/kusama-bridge-hub-connect-ew1-0.polkadot.io/tcp/443/wss/p2p/12D3KooWPQQPivrqQ51kRTDc2R1mtqwKT4GGtk2rapkY4FrwHrEp", - "/dns/kusama-bridge-hub-connect-ew1-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPcF9Yk4gYrMju9CyWCV69hAFXbYsnxCLogwLGu9QFTRn", - "/dns/kusama-bridge-hub-connect-ue4-0.polkadot.io/tcp/443/wss/p2p/12D3KooWMf1sVnJDTkKWtaThqvrgcSPLbfGXttSqbwhM2DJp9BUG", - "/dns/kusama-bridge-hub-connect-ue4-1.polkadot.io/tcp/443/wss/p2p/12D3KooWQaV7wMfNVKy2aMz4Lds3TTxgSDyZAUEnbAZMfD8rW3ow" - - ]' \ - | jq '.relay_chain = "kusama"' \ - | jq --argjson para_id $para_id '.para_id = $para_id' \ - | jq --argjson para_id $para_id '.genesis.runtime.parachainInfo.parachainId = $para_id' \ - | jq '.genesis.runtime.balances.balances = []' \ - | jq '.genesis.runtime.collatorSelection.invulnerables = [ - 
"DQkekNBt8g6D7bPUEqhgfujADxzzfivr1qQZJkeGzAqnEzF", - "HbUc5qrLtKAZvasioiTSf1CunaN2SyEwvfsgMuYQjXA5sfk", - "JEe4NcVyuWFEwZe4WLfRtynDswyKgvLS8H8r4Wo9d3t61g1", - "FAe4DGhQHKTm35n5MgBFNBZvyEJcm7QAwgnVNQU8KXP2ixn" - ]' \ - | jq '.genesis.runtime.session.keys = [ - [ - "DQkekNBt8g6D7bPUEqhgfujADxzzfivr1qQZJkeGzAqnEzF", - "DQkekNBt8g6D7bPUEqhgfujADxzzfivr1qQZJkeGzAqnEzF", - { - "aura": "5E7AiV9ygGUcfdK3XVoJsew7fsu18uvKQHYhksE5PXDNfRL9" - } - ], - [ - "HbUc5qrLtKAZvasioiTSf1CunaN2SyEwvfsgMuYQjXA5sfk", - "HbUc5qrLtKAZvasioiTSf1CunaN2SyEwvfsgMuYQjXA5sfk", - { - "aura": "5CyXoMh8cA2MSk55JASpCfhCg44iSG5fBwmhvSfXUUS3uhPR" - } - ], - [ - "JEe4NcVyuWFEwZe4WLfRtynDswyKgvLS8H8r4Wo9d3t61g1", - "JEe4NcVyuWFEwZe4WLfRtynDswyKgvLS8H8r4Wo9d3t61g1", - { - "aura": "5Grj5pN52kKU61qK9qP5cf9ADuyowe2WVvYWxMNK1QqAM8qf" - } - ], - [ - "FAe4DGhQHKTm35n5MgBFNBZvyEJcm7QAwgnVNQU8KXP2ixn", - "FAe4DGhQHKTm35n5MgBFNBZvyEJcm7QAwgnVNQU8KXP2ixn", - { - "aura": "5EHTyftGjcHfe71VVuZqCeLbHNf4ptYzgdAMMyqpTNbs5Rrp" - } - ] - ]' \ - > edited-chain-spec-plain.json - -# build a raw spec -$binary build-spec --chain edited-chain-spec-plain.json --raw > chain-spec-raw.json -cp edited-chain-spec-plain.json bridge-hub-kusama-spec.json -cp chain-spec-raw.json ./parachains/chain-specs/bridge-hub-kusama.json -cp chain-spec-raw.json bridge-hub-kusama-spec-raw.json - -# build genesis data -$binary export-genesis-state --chain chain-spec-raw.json > bridge-hub-kusama-genesis-head-data - -# build genesis wasm -$binary export-genesis-wasm --chain chain-spec-raw.json > bridge-hub-kusama-wasm diff --git a/cumulus/scripts/create_bridge_hub_polkadot_spec.sh b/cumulus/scripts/create_bridge_hub_polkadot_spec.sh deleted file mode 100755 index 49bc9cee692bc077cdef4d61991e33c2b3db5d9c..0000000000000000000000000000000000000000 --- a/cumulus/scripts/create_bridge_hub_polkadot_spec.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bash - -usage() { - echo Usage: - echo "$1 " - echo "$2 " - echo "e.g.: ./scripts/create_bridge_hub_polkadot_spec.sh ./target/release/wbuild/bridge-hub-polkadot-runtime/bridge_hub_polkadot_runtime.compact.compressed.wasm 1002" - exit 1 -} - -if [ -z "$1" ]; then - usage -fi - -if [ -z "$2" ]; then - usage -fi - -set -e - -rt_path=$1 -para_id=$2 - -echo "Generating chain spec for runtime: $rt_path and para_id: $para_id" - -binary="./target/release/polkadot-parachain" - -# build the chain spec we'll manipulate -$binary build-spec --chain bridge-hub-polkadot-dev > chain-spec-plain.json - -# convert runtime to hex -cat $rt_path | od -A n -v -t x1 | tr -d ' \n' > rt-hex.txt - -# replace the runtime in the spec with the given runtime and set some values to production -cat chain-spec-plain.json | jq --rawfile code rt-hex.txt '.genesis.runtime.system.code = ("0x" + $code)' \ - | jq '.name = "Polkadot BridgeHub"' \ - | jq '.id = "bridge-hub-polkadot"' \ - | jq '.chainType = "Live"' \ - | jq '.bootNodes = [ - "/dns/polkadot-bridge-hub-connect-a-0.polkadot.io/tcp/30334/p2p/12D3KooWAVQMhkXmc5ueSYasdsRWQbKus2YGZ6HDZUB4ViJMCxXy", - "/dns/polkadot-bridge-hub-connect-a-1.polkadot.io/tcp/30334/p2p/12D3KooWG4ypDHLKGCv4BZ6PuaGUwQHKAH6p2D6arR2uQ1eiR1T3", - "/dns/polkadot-bridge-hub-connect-b-0.polkadot.io/tcp/30334/p2p/12D3KooWCwGKxjpJXnx1mwXKvaxGQm769EM3b6Pg5vbU33wbhsNw", - "/dns/polkadot-bridge-hub-connect-b-1.polkadot.io/tcp/30334/p2p/12D3KooWLiSEdhriJUPdZKFtAjZrQncxN2ssEoDKVrt5mGM4Qu4J", - - "/dns/polkadot-bridge-hub-connect-a-0.polkadot.io/tcp/443/wss/p2p/12D3KooWAVQMhkXmc5ueSYasdsRWQbKus2YGZ6HDZUB4ViJMCxXy", - 
"/dns/polkadot-bridge-hub-connect-a-1.polkadot.io/tcp/443/wss/p2p/12D3KooWG4ypDHLKGCv4BZ6PuaGUwQHKAH6p2D6arR2uQ1eiR1T3", - "/dns/polkadot-bridge-hub-connect-b-0.polkadot.io/tcp/443/wss/p2p/12D3KooWCwGKxjpJXnx1mwXKvaxGQm769EM3b6Pg5vbU33wbhsNw", - "/dns/polkadot-bridge-hub-connect-b-1.polkadot.io/tcp/443/wss/p2p/12D3KooWLiSEdhriJUPdZKFtAjZrQncxN2ssEoDKVrt5mGM4Qu4J" - ]' \ - | jq '.relay_chain = "polkadot"' \ - | jq --argjson para_id $para_id '.para_id = $para_id' \ - | jq --argjson para_id $para_id '.genesis.runtime.parachainInfo.parachainId = $para_id' \ - | jq '.genesis.runtime.balances.balances = []' \ - | jq '.genesis.runtime.collatorSelection.invulnerables = [ - "134AK3RiMA97Fx9dLj1CvuLJUa8Yo93EeLA1TkP6CCGnWMSd", - "15dU8Tt7kde2diuHzijGbKGPU5K8BPzrFJfYFozvrS1DdE21", - "1vXMKM8SctM28AQw1wSpd7p9yCUWn1uhbbKSVTuznsw8Q2x", - "15mCQcaj3QP1UdxBF82JRd9v3riZJcVNVEmx8xkFp7DSYR4Y" - ]' \ - | jq '.genesis.runtime.session.keys = [ - [ - "134AK3RiMA97Fx9dLj1CvuLJUa8Yo93EeLA1TkP6CCGnWMSd", - "134AK3RiMA97Fx9dLj1CvuLJUa8Yo93EeLA1TkP6CCGnWMSd", - { - "aura": "5EX6AnyuSPEFQ7HAPjRgzqk1sxgh8cyacGimwJ16y1nJ2w7g" - } - ], - [ - "15dU8Tt7kde2diuHzijGbKGPU5K8BPzrFJfYFozvrS1DdE21", - "15dU8Tt7kde2diuHzijGbKGPU5K8BPzrFJfYFozvrS1DdE21", - { - "aura": "5DZN8UhaJftvKhMMARmJBwrwzuEDpoUzzBvvWMbFXYsJ4CmK" - } - ], - [ - "1vXMKM8SctM28AQw1wSpd7p9yCUWn1uhbbKSVTuznsw8Q2x", - "1vXMKM8SctM28AQw1wSpd7p9yCUWn1uhbbKSVTuznsw8Q2x", - { - "aura": "5FKsn83rXQQiw7HwoeYoLMoYS5GP9YVNHZiCHwA4DSwDcPVa" - } - ], - [ - "15mCQcaj3QP1UdxBF82JRd9v3riZJcVNVEmx8xkFp7DSYR4Y", - "15mCQcaj3QP1UdxBF82JRd9v3riZJcVNVEmx8xkFp7DSYR4Y", - { - "aura": "5DCg19ckcJz4m52Th4o1LcSRK3H7NsUcQsRbu7pTDM3mZ26v" - } - ] - ]' \ - > edited-chain-spec-plain.json - -# build a raw spec -$binary build-spec --chain edited-chain-spec-plain.json --raw > chain-spec-raw.json -cp edited-chain-spec-plain.json bridge-hub-polkadot-spec.json -cp chain-spec-raw.json ./parachains/chain-specs/bridge-hub-polkadot.json -cp chain-spec-raw.json bridge-hub-polkadot-spec-raw.json - -# build genesis data -$binary export-genesis-state --chain chain-spec-raw.json > bridge-hub-polkadot-genesis-head-data - -# build genesis wasm -$binary export-genesis-wasm --chain chain-spec-raw.json > bridge-hub-polkadot-wasm - -# cleanup -rm -f rt-hex.txt -rm -f chain-spec-plain.json -rm -f chain-spec-raw.json -rm -f edited-chain-spec-plain.json diff --git a/cumulus/scripts/create_coretime_rococo_spec.sh b/cumulus/scripts/create_coretime_rococo_spec.sh new file mode 100755 index 0000000000000000000000000000000000000000..877e8ee36c7d9b8a3c2b380e2796571018ef2179 --- /dev/null +++ b/cumulus/scripts/create_coretime_rococo_spec.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +usage() { + echo Usage: + echo "$1 " + echo "$2 " + echo "e.g.: ./cumulus/scripts/create_coretime_rococo_spec.sh ./target/release/wbuild/coretime-rococo-runtime/coretime_rococo_runtime.compact.compressed.wasm 1005" + exit 1 +} + +if [ -z "$1" ]; then + usage +fi + +if [ -z "$2" ]; then + usage +fi + +set -e + +rt_path=$1 +para_id=$2 + +echo "Generating chain spec for runtime: $rt_path and para_id: $para_id" + +binary="./target/release/polkadot-parachain" + +# build the chain spec we'll manipulate +$binary build-spec --chain coretime-rococo-dev > chain-spec-plain.json + +# convert runtime to hex +cat $rt_path | od -A n -v -t x1 | tr -d ' \n' > rt-hex.txt + +# replace the runtime in the spec with the given runtime and set some values to production +# Related issue for bootNodes, invulnerables, and session keys: 
https://github.com/paritytech/devops/issues/2725 +cat chain-spec-plain.json | jq --rawfile code rt-hex.txt '.genesis.runtimeGenesis.code = ("0x" + $code)' \ + | jq '.name = "Rococo Coretime"' \ + | jq '.id = "coretime-rococo"' \ + | jq '.chainType = "Live"' \ + | jq '.bootNodes = [ + "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30333/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy", + "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX" + ]' \ + | jq '.relay_chain = "rococo"' \ + | jq --argjson para_id $para_id '.para_id = $para_id' \ + | jq --argjson para_id $para_id '.genesis.runtimeGenesis.patch.parachainInfo.parachainId = $para_id' \ + | jq '.genesis.runtimeGenesis.patch.balances.balances = []' \ + | jq '.genesis.runtimeGenesis.patch.collatorSelection.invulnerables = [ + "5G6Zua7Sowmt6ziddwUyueQs7HXDUVvDLaqqJDXXFyKvQ6Y6", + "5C8aSedh7ShpWEPW8aTNEErbKkMbiibdwP8cRzVRNqLmzAWF" + ]' \ + | jq '.genesis.runtimeGenesis.patch.session.keys = [ + [ + "5G6Zua7Sowmt6ziddwUyueQs7HXDUVvDLaqqJDXXFyKvQ6Y6", + "5G6Zua7Sowmt6ziddwUyueQs7HXDUVvDLaqqJDXXFyKvQ6Y6", + { + "aura": "5G6Zua7Sowmt6ziddwUyueQs7HXDUVvDLaqqJDXXFyKvQ6Y6" + } + ], + [ + "5C8aSedh7ShpWEPW8aTNEErbKkMbiibdwP8cRzVRNqLmzAWF", + "5C8aSedh7ShpWEPW8aTNEErbKkMbiibdwP8cRzVRNqLmzAWF", + { + "aura": "5C8aSedh7ShpWEPW8aTNEErbKkMbiibdwP8cRzVRNqLmzAWF" + } + ] + ]' \ + > edited-chain-spec-plain.json + +# build a raw spec +$binary build-spec --chain edited-chain-spec-plain.json --raw > chain-spec-raw.json +cp edited-chain-spec-plain.json coretime-rococo-spec.json +cp chain-spec-raw.json ./cumulus/parachains/chain-specs/coretime-rococo.json +cp chain-spec-raw.json coretime-rococo-spec-raw.json + +# build genesis data +$binary export-genesis-state --chain chain-spec-raw.json > coretime-rococo-genesis-head-data + +# build genesis wasm +$binary export-genesis-wasm --chain chain-spec-raw.json > coretime-rococo-wasm + +# cleanup +rm -f rt-hex.txt +rm -f chain-spec-plain.json +rm -f chain-spec-raw.json +rm -f edited-chain-spec-plain.json diff --git a/cumulus/scripts/create_coretime_westend_spec.sh b/cumulus/scripts/create_coretime_westend_spec.sh new file mode 100755 index 0000000000000000000000000000000000000000..90996f4a74f47f9783a29ff2ce358920be810641 --- /dev/null +++ b/cumulus/scripts/create_coretime_westend_spec.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +usage() { + echo Usage: + echo "$1 " + echo "$2 " + echo "e.g.: ./cumulus/scripts/create_coretime_westend_spec.sh ./target/release/wbuild/coretime-westend-runtime/coretime_westend_runtime.compact.compressed.wasm 1005" + exit 1 +} + +if [ -z "$1" ]; then + usage +fi + +if [ -z "$2" ]; then + usage +fi + +set -e + +rt_path=$1 +para_id=$2 + +echo "Generating chain spec for runtime: $rt_path and para_id: $para_id" + +binary="./target/release/polkadot-parachain" + +# build the chain spec we'll manipulate +$binary build-spec --chain coretime-westend-dev > chain-spec-plain.json + +# convert runtime to hex +cat $rt_path | od -A n -v -t x1 | tr -d ' \n' > rt-hex.txt + +# replace the runtime in the spec with the given runtime and set some values to production +# Related issue for bootNodes, invulnerables, and session keys: https://github.com/paritytech/devops/issues/2725 +cat chain-spec-plain.json | jq --rawfile code rt-hex.txt '.genesis.runtimeGenesis.code = ("0x" + $code)' \ + | jq '.name = "Westend Coretime"' \ + | jq '.id = "coretime-westend"' \ + | jq '.chainType = "Live"' \ + | jq '.bootNodes = [ + 
"/dns/westend-coretime-collator-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", + "/dns/westend-coretime-collator-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + "/dns/westend-coretime-collator-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWAys2hVpF7AN8hYGnu1T6XYFRGKeBFqD8q5LUcvWXRLg8", + "/dns/westend-coretime-collator-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWSGgmiRryoi7A3qAmeYWgmVeGQkk66PrhDjJ6ZPP555as", + "/dns/westend-coretime-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", + "/dns/westend-coretime-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + "/dns/westend-coretime-connect-2.polkadot.io/tcp/443/wss/p2p/12D3KooWAys2hVpF7AN8hYGnu1T6XYFRGKeBFqD8q5LUcvWXRLg8", + "/dns/westend-coretime-connect-3.polkadot.io/tcp/443/wss/p2p/12D3KooWSGgmiRryoi7A3qAmeYWgmVeGQkk66PrhDjJ6ZPP555as", + ]' \ + | jq '.relay_chain = "westend"' \ + | jq --argjson para_id $para_id '.para_id = $para_id' \ + | jq --argjson para_id $para_id '.genesis.runtimeGenesis.patch.parachainInfo.parachainId = $para_id' \ + | jq '.genesis.runtimeGenesis.patch.balances.balances = []' \ + | jq '.genesis.runtimeGenesis.patch.collatorSelection.invulnerables = [ + "5GKXTtB7RG3mLJ2kT4AkDXoxvKCFDVUdwyRmeMEbX3gBwcGi", + "5DknBCD1h49nc8eqnm6XtHz3bMQm5hfMuGYcLenRfCmpnBJG", + "5D52g9Mt9jQnZn6hwYhv649QYqGwhjygxkpb6rm3FYzYHEs3", + "5Egx2B41PYj8uvuhkNJeucA54h6Xmi7ZH9wqrZLwj3CuvQKA" + ]' \ + | jq '.genesis.runtimeGenesis.patch.session.keys = [ + [ + "5GKXTtB7RG3mLJ2kT4AkDXoxvKCFDVUdwyRmeMEbX3gBwcGi", + "5GKXTtB7RG3mLJ2kT4AkDXoxvKCFDVUdwyRmeMEbX3gBwcGi", + { + "aura": "0xbc3ea120d2991b75447b0b53cd8623970a0f6d98fa2701036c74d94e6b79252c" + } + ], + [ + "5DknBCD1h49nc8eqnm6XtHz3bMQm5hfMuGYcLenRfCmpnBJG", + "5DknBCD1h49nc8eqnm6XtHz3bMQm5hfMuGYcLenRfCmpnBJG", + { + "aura": "0x4acc970c28713ec93bf925352d3023418fdf89933227e1e2fdae8481103dfe28" + } + ], + [ + "5D52g9Mt9jQnZn6hwYhv649QYqGwhjygxkpb6rm3FYzYHEs3", + "5D52g9Mt9jQnZn6hwYhv649QYqGwhjygxkpb6rm3FYzYHEs3", + { + "aura": "0x2c7b95155708c10616b6f1a77a84f3d92c9a0272609ed24dbb7e6bdb81b53e76" + } + ], + [ + "5Egx2B41PYj8uvuhkNJeucA54h6Xmi7ZH9wqrZLwj3CuvQKA", + "5Egx2B41PYj8uvuhkNJeucA54h6Xmi7ZH9wqrZLwj3CuvQKA", + { + "aura": "0x741cfb39ec61bc76824ccec62d61670a80a890e0e21d58817f84040d3ec54474" + } + ] + ]' \ + > edited-chain-spec-plain.json + +# build a raw spec +$binary build-spec --chain edited-chain-spec-plain.json --raw > chain-spec-raw.json +cp edited-chain-spec-plain.json coretime-westend-spec.json +cp chain-spec-raw.json ./cumulus/parachains/chain-specs/coretime-westend.json +cp chain-spec-raw.json coretime-westend-spec-raw.json + +# build genesis data +$binary export-genesis-state --chain chain-spec-raw.json > coretime-westend-genesis-head-data + +# build genesis wasm +$binary export-genesis-wasm --chain chain-spec-raw.json > coretime-westend-wasm + +# cleanup +rm -f rt-hex.txt +rm -f chain-spec-plain.json +rm -f chain-spec-raw.json +rm -f edited-chain-spec-plain.json diff --git a/cumulus/scripts/create_glutton_spec.sh b/cumulus/scripts/create_glutton_spec.sh index c5158392f529eddf462e25e7754ec413748fdb92..78aafda3bd08ac226ce3561241599389c3503f7b 100755 --- a/cumulus/scripts/create_glutton_spec.sh +++ b/cumulus/scripts/create_glutton_spec.sh @@ -55,7 +55,7 @@ for (( para_id=$from_para_id; para_id<=$to_para_id; para_id++ )); do fi # build the chain spec we'll manipulate - $binary_path build-spec 
--disable-default-bootnode --chain "glutton-kusama-genesis-$para_id" > "$output_para_dir/plain-glutton-$relay_chain-$para_id-spec.json" + $binary_path build-spec --disable-default-bootnode --chain "glutton-westend-genesis-$para_id" > "$output_para_dir/plain-glutton-$relay_chain-$para_id-spec.json" id="glutton-$relay_chain-$para_id" protocol_id="glutton-$relay_chain-$para_id" diff --git a/cumulus/scripts/generate_hex_encoded_call/index.js b/cumulus/scripts/generate_hex_encoded_call/index.js index 09f0e6aaf619a44642bfa4f065f3bf69f0bb7a33..30f89d754ceb7de1b24bc31413e09c862a461256 100644 --- a/cumulus/scripts/generate_hex_encoded_call/index.js +++ b/cumulus/scripts/generate_hex_encoded_call/index.js @@ -106,11 +106,11 @@ function forceCreateAsset(endpoint, outputFile, assetId, assetOwnerAccountId, is }); } -function setStorage(endpoint, outputFile, items) { - console.log(`Generating setStorage from RPC endpoint: ${endpoint} to outputFile: ${outputFile}, items: ${items}`); +function forceXcmVersion(endpoint, outputFile, dest, xcm_version) { + console.log(`Generating forceXcmVersion from RPC endpoint: ${endpoint} to outputFile: ${outputFile}, dest: ${dest}, xcm_version: ${xcm_version}`); connect(endpoint) .then((api) => { - const call = api.tx.system.setStorage(JSON.parse(items)); + const call = api.tx.polkadotXcm.forceXcmVersion(JSON.parse(dest), xcm_version); writeHexEncodedBytesToOutput(call.method, outputFile); exit(0); }) @@ -154,8 +154,8 @@ switch (type) { case 'force-create-asset': forceCreateAsset(rpcEnpoint, output, inputArgs[0], inputArgs[1], inputArgs[2], inputArgs[3]); break; - case 'set-storage': - setStorage(rpcEnpoint, output, inputArgs[0]); + case 'force-xcm-version': + forceXcmVersion(rpcEnpoint, output, inputArgs[0], inputArgs[1]); break; case 'check': console.log(`Checking nodejs installation, if you see this everything is ready!`); diff --git a/cumulus/scripts/parachains_integration_tests.sh b/cumulus/scripts/parachains_integration_tests.sh deleted file mode 100755 index 2a06b930e22ff0fef6fbc5769ead258e78ac8864..0000000000000000000000000000000000000000 --- a/cumulus/scripts/parachains_integration_tests.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -tests=( - asset-hub-kusama - asset-hub-polkadot -) - -rm -R logs &> /dev/null - -for t in ${tests[@]} -do - printf "\n🔍 Running $t tests...\n\n" - - mkdir -p logs/$t - - parachains-integration-tests \ - -m zombienet \ - -c ./parachains/integration-tests/$t/config.toml \ - -cl ./logs/$t/chains.log 2> /dev/null & - - parachains-integration-tests \ - -m test \ - -t ./parachains/integration-tests/$t \ - -tl ./logs/$t/tests.log & tests=$! - - wait $tests - - pkill -f polkadot - pkill -f parachain - - printf "\n🎉 $t integration tests finished! 
\n\n" -done diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index b760b796ec9a8cdd0ffe9c7f6436e0df6effd53c..7190172101cb509f7dd7c19ad25dc6d4d54036e7 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -5,8 +5,11 @@ authors.workspace = true edition.workspace = true publish = false +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } # Substrate sc-service = { path = "../../../substrate/client/service" } @@ -36,6 +39,7 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-test-service = { path = "../service" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } [features] diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index a3c79158f492b5671109e6f6d960c840c634a2c6..df63f683de6b4312a953bbbf9f03862eac7ce451 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -44,7 +44,8 @@ mod local_executor { pub struct LocalExecutor; impl sc_executor::NativeExecutionDispatch for LocalExecutor { - type ExtendHostFunctions = (); + type ExtendHostFunctions = + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index b24ac30849562409ead63a59e7514ec1436d55ac..02a9750d78ec09d674f37833a61a03bf9dc6daf0 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -6,23 +6,26 @@ edition.workspace = true license = "Apache-2.0" description = "Mocked relay state proof builder for testing Cumulus." 
+[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } # Substrate -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false} -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } # Cumulus cumulus-primitives-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 3ea51c1973f4366f666eb4032ca9ca0d2555ea91..5902a62512bed772318145ccdb954ff2dfef4c92 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -5,43 +5,46 @@ authors.workspace = true edition.workspace = true publish = false +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-executive = { path = "../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false} -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false} +frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false} -pallet-glutton = { path = "../../../substrate/frame/glutton", default-features = false} -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false} -sp-api = { path = "../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = 
"../../../substrate/primitives/block-builder", default-features = false} -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false} -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../substrate/primitives/version", default-features = false} +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-glutton = { path = "../../../substrate/frame/glutton", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../substrate/primitives/version", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-parachain-system/std", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 19fd6d5f02dbf0ddda7c1fa9f452297d81402287..3de77cb1e58116838ff474d9b7b7e7d549675a66 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -29,7 +29,7 @@ pub mod wasm_spec_version_incremented { mod test_pallet; -use frame_support::traits::OnRuntimeUpgrade; +use frame_support::{derive_impl, traits::OnRuntimeUpgrade}; use 
sp_api::{decl_runtime_apis, impl_runtime_apis}; use sp_core::{ConstU32, OpaqueMetadata}; use sp_runtime::{ @@ -177,36 +177,23 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = IdentityLookup; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type SS58Prefix = SS58Prefix; diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index ed8c8748cc803ea5407a9128040146ce23667431..c6d82191a9eda96b769dd6c82f55d44783d25653 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -5,18 +5,21 @@ authors.workspace = true edition.workspace = true publish = false +[lints] +workspace = true + [[bin]] name = "test-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.73" -clap = { version = "4.4.6", features = ["derive"] } +async-trait = "0.1.74" +clap = { version = "4.4.11", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } -criterion = { version = "0.5.1", features = [ "async_tokio" ] } +criterion = { version = "0.5.1", features = ["async_tokio"] } jsonrpsee = { version = "0.16.2", features = ["server"] } rand = "0.8.5" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" tokio = { version = "1.32.0", features = ["macros"] } tracing = "0.1.37" @@ -44,7 +47,7 @@ sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } sp-api = { path = "../../../substrate/primitives/api" } sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sp-tracing = { path = "../../../substrate/primitives/tracing" } sp-timestamp = { path = "../../../substrate/primitives/timestamp" } @@ -77,7 +80,7 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-node" } cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } 
-cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } pallet-timestamp = { path = "../../../substrate/frame/timestamp" } [dev-dependencies] diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs index 254e03b9263a8f30286c50930d2faaa2cb27d4a3..9d6485d74c5948079102556ee6b56d40b75d5cc1 100644 --- a/cumulus/test/service/benches/block_import.rs +++ b/cumulus/test/service/benches/block_import.rs @@ -24,7 +24,7 @@ use core::time::Duration; use cumulus_primitives_core::ParaId; use sp_api::{Core, ProvideRuntimeApi}; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::Sr25519Keyring::{Alice, Bob}; use cumulus_test_service::bench_utils as utils; @@ -32,51 +32,69 @@ fn benchmark_block_import(c: &mut Criterion) { sp_tracing::try_init_simple(); let runtime = tokio::runtime::Runtime::new().expect("creating tokio runtime doesn't fail; qed"); - let para_id = ParaId::from(100); + + let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let tokio_handle = runtime.handle(); // Create enough accounts to fill the block with transactions. // Each account should only be included in one transfer. let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts(); - let alice = runtime.block_on( - cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice) + for bench_parameters in &[(true, Alice), (false, Bob)] { + let node = runtime.block_on( + cumulus_test_service::TestNodeBuilder::new( + para_id, + tokio_handle.clone(), + bench_parameters.1, + ) // Preload all accounts with funds for the transfers - .endowed_accounts(account_ids) + .endowed_accounts(account_ids.clone()) + .import_proof_recording(bench_parameters.0) .build(), - ); - - let client = alice.client; - - let (max_transfer_count, extrinsics) = - utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); - - let parent_hash = client.usage_info().chain.best_hash; - let mut block_builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(parent_hash) - .fetch_parent_block_number(&*client) - .unwrap() - .build() - .unwrap(); - for extrinsic in extrinsics { - block_builder.push(extrinsic).unwrap(); - } - let benchmark_block = block_builder.build().unwrap(); - - let mut group = c.benchmark_group("Block import"); - group.sample_size(20); - group.measurement_time(Duration::from_secs(120)); - group.throughput(Throughput::Elements(max_transfer_count as u64)); - - group.bench_function(format!("(transfers = {}) block import", max_transfer_count), |b| { - b.iter_batched( - || benchmark_block.block.clone(), - |block| { - client.runtime_api().execute_block(parent_hash, block).unwrap(); + ); + + let client = node.client; + let backend = node.backend; + + let (max_transfer_count, extrinsics) = + utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); + + let parent_hash = client.usage_info().chain.best_hash; + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .fetch_parent_block_number(&*client) + .unwrap() + .build() + .unwrap(); + for extrinsic in extrinsics { + block_builder.push(extrinsic).unwrap(); + } + let benchmark_block = block_builder.build().unwrap(); + + let mut group = c.benchmark_group("Block import"); + 
group.sample_size(20); + group.measurement_time(Duration::from_secs(120)); + group.throughput(Throughput::Elements(max_transfer_count as u64)); + + group.bench_function( + format!( + "(transfers = {max_transfer_count}, proof_recording = {}) block import", + bench_parameters.0 + ), + |b| { + b.iter_batched( + || { + backend.reset_trie_cache(); + benchmark_block.block.clone() + }, + |block| { + client.runtime_api().execute_block(parent_hash, block).unwrap(); + }, + BatchSize::SmallInput, + ) }, - BatchSize::SmallInput, - ) - }); + ); + } } criterion_group!(benches, benchmark_block_import); diff --git a/cumulus/test/service/benches/block_import_glutton.rs b/cumulus/test/service/benches/block_import_glutton.rs index aeaf0722e724eaea8f7b5829809a79e2d8f2e71a..6295fd68286bd8d4f4b48f305cf95cc544cce019 100644 --- a/cumulus/test/service/benches/block_import_glutton.rs +++ b/cumulus/test/service/benches/block_import_glutton.rs @@ -27,7 +27,7 @@ use core::time::Duration; use cumulus_primitives_core::ParaId; use sc_block_builder::BlockBuilderBuilder; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::Sr25519Keyring::{Alice, Bob, Charlie, Ferdie}; use cumulus_test_service::bench_utils as utils; @@ -38,17 +38,29 @@ fn benchmark_block_import(c: &mut Criterion) { let para_id = ParaId::from(100); let tokio_handle = runtime.handle(); - let alice = runtime.block_on( - cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice).build(), - ); - let client = alice.client; + let mut initialize_glutton_pallet = true; + for (compute_ratio, storage_ratio, proof_on_import, keyring_identity) in &[ + (One::one(), Zero::zero(), true, Alice), + (One::one(), One::one(), true, Bob), + (One::one(), Zero::zero(), false, Charlie), + (One::one(), One::one(), false, Ferdie), + ] { + let node = runtime.block_on( + cumulus_test_service::TestNodeBuilder::new( + para_id, + tokio_handle.clone(), + *keyring_identity, + ) + .import_proof_recording(*proof_on_import) + .build(), + ); + let client = node.client; + let backend = node.backend; - let mut group = c.benchmark_group("Block import"); - group.sample_size(20); - group.measurement_time(Duration::from_secs(120)); + let mut group = c.benchmark_group("Block import"); + group.sample_size(20); + group.measurement_time(Duration::from_secs(120)); - let mut initialize_glutton_pallet = true; - for (compute_ratio, storage_ratio) in &[(One::one(), Zero::zero()), (One::one(), One::one())] { let block = utils::set_glutton_parameters( &client, initialize_glutton_pallet, @@ -82,7 +94,10 @@ fn benchmark_block_import(c: &mut Criterion) { ), |b| { b.iter_batched( - || benchmark_block.block.clone(), + || { + backend.reset_trie_cache(); + benchmark_block.block.clone() + }, |block| { client.runtime_api().execute_block(parent_hash, block).unwrap(); }, diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index 11a7c4376d4c81156c067566dd20e2f8172b6529..a614863803e09e89ff9671dd6916ccbfeb657278 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -18,7 +18,9 @@ use codec::{Decode, Encode}; use core::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; -use cumulus_primitives_core::{relay_chain::AccountId, PersistedValidationData, ValidationParams}; +use cumulus_primitives_core::{ + relay_chain::AccountId, ParaId, PersistedValidationData, ValidationParams, +}; use cumulus_test_client::{ generate_extrinsic_with_pair, 
BuildParachainBlockData, InitBlockBuilder, TestClientBuilder, ValidationResult, @@ -83,6 +85,7 @@ fn benchmark_block_validation(c: &mut Criterion) { // Each account should only be included in one transfer. let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts(); + let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let mut test_client_builder = TestClientBuilder::with_default_backend(); let genesis_init = test_client_builder.genesis_init_mut(); *genesis_init = cumulus_test_client::GenesisParameters { endowed_accounts: account_ids }; @@ -98,7 +101,14 @@ fn benchmark_block_validation(c: &mut Criterion) { ..Default::default() }; - let mut block_builder = client.init_block_builder(Some(validation_data), Default::default()); + let sproof_builder = RelayStateSproofBuilder { + included_para_head: Some(parent_header.clone().encode().into()), + para_id, + ..Default::default() + }; + + let mut block_builder = + client.init_block_builder(Some(validation_data), sproof_builder.clone()); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } @@ -108,7 +118,6 @@ fn benchmark_block_validation(c: &mut Criterion) { let proof_size_in_kb = parachain_block.storage_proof().encode().len() as f64 / 1024f64; let runtime = utils::get_wasm_module(); - let sproof_builder: RelayStateSproofBuilder = Default::default(); let (relay_parent_storage_root, _) = sproof_builder.into_state_root_and_proof(); let encoded_params = ValidationParams { block_data: cumulus_test_client::BlockData(parachain_block.encode()), diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 82142f21695ff21e33966813eccc20fa79cd6988..1894835caec81e3176b3c9c037d70d5770a47f3f 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ b/cumulus/test/service/src/bench_utils.rs @@ -81,8 +81,13 @@ pub fn extrinsic_set_time(client: &TestClient) -> OpaqueExtrinsic { pub fn extrinsic_set_validation_data( parent_header: cumulus_test_runtime::Header, ) -> OpaqueExtrinsic { - let sproof_builder = RelayStateSproofBuilder { para_id: 100.into(), ..Default::default() }; let parent_head = HeadData(parent_header.encode()); + let sproof_builder = RelayStateSproofBuilder { + para_id: cumulus_test_runtime::PARACHAIN_ID.into(), + included_para_head: parent_head.clone().into(), + ..Default::default() + }; + let (relay_parent_storage_root, relay_chain_state) = sproof_builder.into_state_root_and_proof(); let data = ParachainInherentData { validation_data: PersistedValidationData { diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index ef1159a3c1f8534a0dbfa5ae09859c8e1dadc626..3dc5b8e31016bf792f47003c03089a136c32c711 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -16,6 +16,7 @@ use std::{net::SocketAddr, path::PathBuf}; +use cumulus_client_cli::{ExportGenesisHeadCommand, ExportGenesisWasmCommand}; use polkadot_service::{ChainSpec, ParaId, PrometheusConfig}; use sc_cli::{ CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, NetworkParams, @@ -60,45 +61,13 @@ pub enum Subcommand { BuildSpec(sc_cli::BuildSpecCmd), /// Export the genesis state of the parachain. - ExportGenesisState(ExportGenesisStateCommand), + #[command(alias = "export-genesis-state")] + ExportGenesisHead(ExportGenesisHeadCommand), /// Export the genesis wasm of the parachain. 
ExportGenesisWasm(ExportGenesisWasmCommand), } -#[derive(Debug, clap::Parser)] -#[group(skip)] -pub struct ExportGenesisStateCommand { - #[arg(default_value_t = 2000u32)] - pub parachain_id: u32, - - #[command(flatten)] - pub base: cumulus_client_cli::ExportGenesisStateCommand, -} - -impl CliConfiguration for ExportGenesisStateCommand { - fn shared_params(&self) -> &SharedParams { - &self.base.shared_params - } -} - -/// Command for exporting the genesis wasm file. -#[derive(Debug, clap::Parser)] -#[group(skip)] -pub struct ExportGenesisWasmCommand { - #[arg(default_value_t = 2000u32)] - pub parachain_id: u32, - - #[command(flatten)] - pub base: cumulus_client_cli::ExportGenesisWasmCommand, -} - -impl CliConfiguration for ExportGenesisWasmCommand { - fn shared_params(&self) -> &SharedParams { - &self.base.shared_params - } -} - #[derive(Debug)] pub struct RelayChainCli { /// The actual relay chain cli object. diff --git a/cumulus/test/service/src/genesis.rs b/cumulus/test/service/src/genesis.rs index d4a9a22562646d09cc793fa367396f5ab33cf118..be4b0427b2ee50ed4db16340571dd8d46df28ed2 100644 --- a/cumulus/test/service/src/genesis.rs +++ b/cumulus/test/service/src/genesis.rs @@ -15,11 +15,50 @@ // along with Cumulus. If not, see . use codec::Encode; -use cumulus_client_cli::generate_genesis_block; use cumulus_primitives_core::ParaId; use cumulus_test_runtime::Block; use polkadot_primitives::HeadData; -use sp_runtime::traits::Block as BlockT; +use sc_chain_spec::ChainSpec; +use sp_runtime::{ + traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Zero}, + StateVersion, +}; + +/// Generate a simple test genesis block from a given ChainSpec. +pub fn generate_genesis_block( + chain_spec: &dyn ChainSpec, + genesis_state_version: StateVersion, +) -> Result { + let storage = chain_spec.build_storage()?; + + let child_roots = storage.children_default.iter().map(|(sk, child_content)| { + let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( + child_content.data.clone().into_iter().collect(), + genesis_state_version, + ); + (sk.clone(), state_root.encode()) + }); + let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( + storage.top.clone().into_iter().chain(child_roots).collect(), + genesis_state_version, + ); + + let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( + Vec::new(), + genesis_state_version, + ); + + Ok(Block::new( + <::Header as HeaderT>::new( + Zero::zero(), + extrinsics_root, + state_root, + Default::default(), + Default::default(), + ), + Default::default(), + )) +} /// Returns the initial head data for a parachain ID. 
pub fn initial_head_data(para_id: ParaId) -> HeadData { diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 6fd3e4d43d75e8e3116148d0a8b64e1576f9f867..586c4603c76a2a28673a0eb1bfced7d92d1827ca 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -22,11 +22,14 @@ pub mod bench_utils; pub mod chain_spec; -mod genesis; + +/// Utilities for creating test genesis block and head data +pub mod genesis; use runtime::AccountId; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use std::{ + collections::HashSet, future::Future, net::{IpAddr, Ipv4Addr, SocketAddr}, time::Duration, @@ -57,7 +60,7 @@ use cumulus_test_runtime::{Hash, Header, NodeBlock as Block, RuntimeApi}; use frame_system_rpc_runtime_api::AccountNonceApi; use polkadot_node_subsystem::{errors::RecoveryError, messages::AvailabilityRecoveryMessage}; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Hash as PHash, PersistedValidationData}; +use polkadot_primitives::{CandidateHash, CollatorPair, Hash as PHash, PersistedValidationData}; use polkadot_service::ProvideRuntimeApi; use sc_consensus::ImportQueue; use sc_network::{ @@ -144,12 +147,13 @@ pub type TransactionPool = Arc>; pub struct FailingRecoveryHandle { overseer_handle: OverseerHandle, counter: u32, + failed_hashes: HashSet, } impl FailingRecoveryHandle { /// Create a new FailingRecoveryHandle pub fn new(overseer_handle: OverseerHandle) -> Self { - Self { overseer_handle, counter: 0 } + Self { overseer_handle, counter: 0, failed_hashes: Default::default() } } } @@ -160,11 +164,15 @@ impl RecoveryHandle for FailingRecoveryHandle { message: AvailabilityRecoveryMessage, origin: &'static str, ) { - // For every 5th block we immediately signal unavailability to trigger - // a retry. - if self.counter % 5 == 0 { + let AvailabilityRecoveryMessage::RecoverAvailableData(ref receipt, _, _, _) = message; + let candidate_hash = receipt.hash(); + + // For every 3rd block we immediately signal unavailability to trigger + // a retry. The same candidate is never failed multiple times to ensure progress. + if self.counter % 3 == 0 && self.failed_hashes.insert(candidate_hash) { + tracing::info!(target: LOG_TARGET, ?candidate_hash, "Failing pov recovery."); + let AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, back_sender) = message; - tracing::info!(target: LOG_TARGET, "Failing pov recovery."); back_sender .send(Err(RecoveryError::Unavailable)) .expect("Return channel should work here."); @@ -181,6 +189,7 @@ impl RecoveryHandle for FailingRecoveryHandle { /// be able to perform chain operations. 
pub fn new_partial( config: &mut Configuration, + enable_import_proof_record: bool, ) -> Result< PartialComponents< Client, @@ -208,10 +217,16 @@ pub fn new_partial( sc_executor::NativeElseWasmExecutor::::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(config, None, executor)?; + sc_service::new_full_parts_record_import::( + config, + None, + executor, + enable_import_proof_record, + )?; let client = Arc::new(client); - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let block_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let registry = config.prometheus_registry(); @@ -302,19 +317,21 @@ pub async fn start_node_impl( rpc_ext_builder: RB, consensus: Consensus, collator_options: CollatorOptions, + proof_recording_during_import: bool, ) -> sc_service::error::Result<( TaskManager, Arc, Arc>, RpcHandlers, TransactionPool, + Arc, )> where RB: Fn(Arc) -> Result, sc_service::Error> + Send + 'static, { let mut parachain_config = prepare_node_config(parachain_config); - let params = new_partial(&mut parachain_config)?; + let params = new_partial(&mut parachain_config, proof_recording_during_import)?; let transaction_pool = params.transaction_pool.clone(); let mut task_manager = params.task_manager; @@ -470,7 +487,7 @@ where start_network.start_network(); - Ok((task_manager, client, network, rpc_handlers, transaction_pool)) + Ok((task_manager, client, network, rpc_handlers, transaction_pool, backend)) } /// A Cumulus test node instance used for testing. @@ -488,6 +505,8 @@ pub struct TestNode { pub rpc_handlers: RpcHandlers, /// Node's transaction pool pub transaction_pool: TransactionPool, + /// Node's backend + pub backend: Arc, } #[allow(missing_docs)] @@ -513,6 +532,7 @@ pub struct TestNodeBuilder { consensus: Consensus, relay_chain_mode: RelayChainMode, endowed_accounts: Vec, + record_proof_during_import: bool, } impl TestNodeBuilder { @@ -537,6 +557,7 @@ impl TestNodeBuilder { consensus: Consensus::RelayChain, endowed_accounts: Default::default(), relay_chain_mode: RelayChainMode::Embedded, + record_proof_during_import: true, } } @@ -649,6 +670,12 @@ impl TestNodeBuilder { self } + /// Record proofs during import. + pub fn import_proof_recording(mut self, should_record_proof: bool) -> TestNodeBuilder { + self.record_proof_during_import = should_record_proof; + self + } + /// Build the [`TestNode`]. 
pub async fn build(self) -> TestNode { let parachain_config = node_config( @@ -677,24 +704,26 @@ impl TestNodeBuilder { format!("{} (relay chain)", relay_chain_config.network.node_name); let multiaddr = parachain_config.network.listen_addresses[0].clone(); - let (task_manager, client, network, rpc_handlers, transaction_pool) = start_node_impl( - parachain_config, - self.collator_key, - relay_chain_config, - self.para_id, - self.wrap_announce_block, - false, - |_| Ok(jsonrpsee::RpcModule::new(())), - self.consensus, - collator_options, - ) - .await - .expect("could not create Cumulus test service"); + let (task_manager, client, network, rpc_handlers, transaction_pool, backend) = + start_node_impl( + parachain_config, + self.collator_key, + relay_chain_config, + self.para_id, + self.wrap_announce_block, + false, + |_| Ok(jsonrpsee::RpcModule::new(())), + self.consensus, + collator_options, + self.record_proof_during_import, + ) + .await + .expect("could not create Cumulus test service"); let peer_id = network.local_peer_id(); let addr = MultiaddrWithPeerId { multiaddr, peer_id }; - TestNode { task_manager, client, network, addr, rpc_handlers, transaction_pool } + TestNode { task_manager, client, network, addr, rpc_handlers, transaction_pool, backend } } } diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs index 16b68796bd391bdfa8a2085aac95f3e7201d9ce0..aace92ca965dcfeaa85b58b2b0f8d8c14431af47 100644 --- a/cumulus/test/service/src/main.rs +++ b/cumulus/test/service/src/main.rs @@ -16,16 +16,14 @@ mod cli; -use std::{io::Write, sync::Arc}; +use std::sync::Arc; use cli::{RelayChainCli, Subcommand, TestCollatorCli}; -use cumulus_client_cli::generate_genesis_block; use cumulus_primitives_core::{relay_chain::CollatorPair, ParaId}; -use cumulus_test_service::AnnounceBlockFn; +use cumulus_test_service::{new_partial, AnnounceBlockFn}; use polkadot_service::runtime_traits::AccountIdConversion; use sc_cli::{CliConfiguration, SubstrateCli}; -use sp_core::{hexdisplay::HexDisplay, Encode, Pair}; -use sp_runtime::traits::Block; +use sp_core::Pair; pub fn wrap_announce_block() -> Box AnnounceBlockFn> { tracing::info!("Block announcements disabled."); @@ -44,38 +42,16 @@ fn main() -> Result<(), sc_cli::Error> { runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) }, - Some(Subcommand::ExportGenesisState(params)) => { - let mut builder = sc_cli::LoggerBuilder::new(""); - builder.with_profiling(sc_tracing::TracingReceiver::Log, ""); - let _ = builder.init(); - - let spec = - cli.load_spec(¶ms.base.shared_params.chain.clone().unwrap_or_default())?; - let state_version = cumulus_test_service::runtime::VERSION.state_version(); - - let block: parachains_common::Block = generate_genesis_block(&*spec, state_version)?; - let raw_header = block.header().encode(); - let output_buf = if params.base.raw { - raw_header - } else { - format!("0x{:?}", HexDisplay::from(&block.header().encode())).into_bytes() - }; - - if let Some(output) = ¶ms.base.output { - std::fs::write(output, output_buf)?; - } else { - std::io::stdout().write_all(&output_buf)?; - } - - Ok(()) + Some(Subcommand::ExportGenesisHead(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|mut config| { + let partial = new_partial(&mut config, false)?; + cmd.run(partial.client) + }) }, Some(Subcommand::ExportGenesisWasm(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|_config| { - let parachain_id = ParaId::from(cmd.parachain_id); - let spec = 
cumulus_test_service::get_chain_spec(Some(parachain_id)); - cmd.base.run(&spec) - }) + runner.sync_run(|config| cmd.run(&*config.chain_spec)) }, None => { let log_filters = cli.run.normalize().log_filters(); @@ -128,7 +104,7 @@ fn main() -> Result<(), sc_cli::Error> { }) .unwrap_or(cumulus_test_service::Consensus::RelayChain); - let (mut task_manager, _, _, _, _) = tokio_runtime + let (mut task_manager, _, _, _, _, _) = tokio_runtime .block_on(cumulus_test_service::start_node_impl( config, collator_key, @@ -139,6 +115,7 @@ fn main() -> Result<(), sc_cli::Error> { |_| Ok(jsonrpsee::RpcModule::new(())), consensus, collator_options, + true, )) .expect("could not create Cumulus test service"); diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 2f851f1bcde06cb57107019b1f477ad747fc6ef6..0f10221d6006abce96b4dfb69445678957810693 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } paste = "1.0.14" diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index 7ff5512d214a7faffe360a96c29142d97c12a19a..f5c6b88adce55ed2fe57b06d771192709e744667 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -pub use codec::{Decode, Encode, EncodeLike}; +pub use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; pub use lazy_static::lazy_static; pub use log; pub use paste; @@ -245,7 +245,7 @@ pub trait Parachain: Chain { type LocationToAccountId: ConvertLocation>; type ParachainInfo: Get; type ParachainSystem; - type MessageProcessor: ProcessMessage + ServiceQueues; + type MessageProcessor: ProcessMessage + ServiceQueues; fn init(); @@ -576,7 +576,7 @@ macro_rules! decl_test_parachains { XcmpMessageHandler: $xcmp_message_handler:path, LocationToAccountId: $location_to_account:path, ParachainInfo: $parachain_info:path, - // MessageProcessor: $message_processor:path, + MessageOrigin: $message_origin:path, }, pallets = { $($pallet_name:ident: $pallet_path:path,)* @@ -615,7 +615,7 @@ macro_rules! decl_test_parachains { type LocationToAccountId = $location_to_account; type ParachainSystem = $crate::ParachainSystemPallet<::Runtime>; type ParachainInfo = $parachain_info; - type MessageProcessor = $crate::DefaultParaMessageProcessor<$name>; + type MessageProcessor = $crate::DefaultParaMessageProcessor<$name, $message_origin>; // We run an empty block during initialisation to open HRMP channels // and have them ready for the next block @@ -1007,7 +1007,7 @@ macro_rules! decl_test_networks { <$parachain>::ext_wrapper(|| { let _ = <$parachain as Parachain>::MessageProcessor::process_message( &msg[..], - $crate::CumulusAggregateMessageOrigin::Parent, + $crate::CumulusAggregateMessageOrigin::Parent.into(), &mut weight_meter, &mut msg.using_encoded($crate::blake2_256), ); @@ -1313,17 +1313,23 @@ macro_rules! 
decl_test_sender_receiver_accounts_parameter_types { }; } -pub struct DefaultParaMessageProcessor(PhantomData); +pub struct DefaultParaMessageProcessor(PhantomData<(T, M)>); // Process HRMP messages from sibling paraids -impl ProcessMessage for DefaultParaMessageProcessor +impl ProcessMessage for DefaultParaMessageProcessor where + M: codec::FullCodec + + MaxEncodedLen + + Clone + + Eq + + PartialEq + + frame_support::pallet_prelude::TypeInfo + + Debug, T: Parachain, T::Runtime: MessageQueueConfig, - <::MessageProcessor as ProcessMessage>::Origin: - PartialEq, - MessageQueuePallet: EnqueueMessage + ServiceQueues, + <::MessageProcessor as ProcessMessage>::Origin: PartialEq, + MessageQueuePallet: EnqueueMessage + ServiceQueues, { - type Origin = CumulusAggregateMessageOrigin; + type Origin = M; fn process_message( msg: &[u8], @@ -1340,13 +1346,13 @@ where Ok(true) } } -impl ServiceQueues for DefaultParaMessageProcessor +impl ServiceQueues for DefaultParaMessageProcessor where + M: MaxEncodedLen, T: Parachain, T::Runtime: MessageQueueConfig, - <::MessageProcessor as ProcessMessage>::Origin: - PartialEq, - MessageQueuePallet: EnqueueMessage + ServiceQueues, + <::MessageProcessor as ProcessMessage>::Origin: PartialEq, + MessageQueuePallet: EnqueueMessage + ServiceQueues, { type OverweightMessageAddress = (); @@ -1443,9 +1449,9 @@ pub struct TestContext { /// These arguments can be easily reused and shared between the assertion functions /// and dispatchable functions, which are also stored in `Test`. /// `Origin` corresponds to the chain where the XCM interaction starts with an initial execution. -/// `Destination` corresponds to the last chain where an effect of the intial execution is expected -/// happen. `Hops` refer all the ordered intermediary chains an initial XCM execution can provoke -/// some effect. +/// `Destination` corresponds to the last chain where an effect of the initial execution is expected +/// to happen. `Hops` refer to all the ordered intermediary chains an initial XCM execution can +/// provoke some effect on. 
#[derive(Clone)] pub struct Test where @@ -1499,7 +1505,7 @@ where let chain_name = std::any::type_name::(); self.hops_assertion.insert(chain_name.to_string(), assertion); } - /// Stores an assertion in a particular Chain + /// Stores a dispatchable in a particular Chain pub fn set_dispatchable(&mut self, dispatchable: fn(Self) -> DispatchResult) { let chain_name = std::any::type_name::(); self.hops_dispatchable.insert(chain_name.to_string(), dispatchable); diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml index a117942858e68bdb04569e37f665bdb15dc2b18f..99a7d0035b511c57ccf5c10fa94165933c495ba9 100644 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml +++ b/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml @@ -41,8 +41,7 @@ cumulus_based = true ws_port = 8943 args = [ "-lparachain=debug,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48933" + "--force-authoring" ] # run bob as parachain collator @@ -54,8 +53,7 @@ cumulus_based = true ws_port = 8944 args = [ "-lparachain=trace,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48934" + "--force-authoring" ] [[parachains]] @@ -69,16 +67,14 @@ cumulus_based = true ws_port = 9910 command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 58933" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] [[parachains.collators]] name = "asset-hub-rococo-collator2" command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 58833" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] #[[hrmp_channels]] diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml index 4c345d3825c7cc40102498e4354c361fec77adc4..1919d1c63f25f154e4676599afb8a2969598c10b 100644 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml +++ b/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml @@ -41,8 +41,7 @@ cumulus_based = true ws_port = 8945 args = [ "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48935" + "--force-authoring" ] # run bob as parachain collator @@ -54,8 +53,7 @@ cumulus_based = true ws_port = 8946 args = [ "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48936" + "--force-authoring" ] [[parachains]] @@ -69,16 +67,14 @@ cumulus_based = true ws_port = 9010 command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 38933" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] [[parachains.collators]] name = "asset-hub-westend-collator2" 
command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 38833" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] #[[hrmp_channels]] diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml deleted file mode 100644 index ae5cf641f66d051f0682a652ca8003c266149d8a..0000000000000000000000000000000000000000 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml +++ /dev/null @@ -1,94 +0,0 @@ -[settings] -node_spawn_timeout = 240 - -[relaychain] -default_command = "{{POLKADOT_BINARY_PATH}}" -default_args = [ "-lparachain=debug,xcm=trace" ] -chain = "wococo-local" - - [[relaychain.nodes]] - name = "alice-wococo-validator" - validator = true - rpc_port = 9935 - ws_port = 9945 - balance = 2000000000000 - - [[relaychain.nodes]] - name = "bob-wococo-validator" - validator = true - rpc_port = 9936 - ws_port = 9946 - balance = 2000000000000 - - [[relaychain.nodes]] - name = "charlie-wococo-validator" - validator = true - rpc_port = 9937 - ws_port = 9947 - balance = 2000000000000 - -[[parachains]] -id = 1014 -chain = "bridge-hub-wococo-local" -cumulus_based = true - - # run alice as parachain collator - [[parachains.collators]] - name = "bridge-hub-wococo-collator1" - validator = true - command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" - rpc_port = 8935 - ws_port = 8945 - args = [ - "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--port 41335", "--rpc-port 48935" - ] - - # run bob as parachain collator - [[parachains.collators]] - name = "bridge-hub-wococo-collator2" - validator = true - command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" - rpc_port = 8936 - ws_port = 8946 - args = [ - "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--port 41336", "--rpc-port 48936" - ] - -[[parachains]] -id = 1000 -chain = "asset-hub-wococo-local" -cumulus_based = true - - [[parachains.collators]] - name = "asset-hub-wococo-collator1" - rpc_port = 9011 - ws_port = 9010 - command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO}}" - args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--port 31333", "--rpc-port 38933" - ] - - [[parachains.collators]] - name = "asset-hub-wococo-collator2" - command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO}}" - args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--port 31433", "--rpc-port 38833" - ] - -#[[hrmp_channels]] -#sender = 1000 -#recipient = 1014 -#max_capacity = 4 -#max_message_size = 524288 -# -#[[hrmp_channels]] -#sender = 1014 -#recipient = 1000 -#max_capacity = 4 -#max_message_size = 524288 diff --git a/cumulus/zombienet/examples/statemine_kusama_local_network.toml b/cumulus/zombienet/examples/asset_hub_westend_local_network.toml similarity index 71% rename from cumulus/zombienet/examples/statemine_kusama_local_network.toml rename to cumulus/zombienet/examples/asset_hub_westend_local_network.toml index 1f3debfb9d29cd4381eb376af1d23aefc1815723..5b0ac1f17e8b74cd9c2a89e623e97f634eb54ef4 100644 --- 
a/cumulus/zombienet/examples/statemine_kusama_local_network.toml +++ b/cumulus/zombienet/examples/asset_hub_westend_local_network.toml @@ -1,7 +1,7 @@ [relaychain] -default_command = "../polkadot/target/release/polkadot" +default_command = "../../target/release/polkadot" default_args = [ "-lparachain=debug" ] -chain = "kusama-local" +chain = "westend-local" [[relaychain.nodes]] name = "alice" @@ -21,47 +21,47 @@ chain = "kusama-local" [[parachains]] id = 1000 -chain = "asset-hub-kusama-local" +chain = "asset-hub-westend-local" cumulus_based = true # run alice as parachain collator [[parachains.collators]] name = "alice" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run bob as parachain collator [[parachains.collators]] name = "bob" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run charlie as parachain collator [[parachains.collators]] name = "charlie" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run dave as parachain collator [[parachains.collators]] name = "dave" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run eve as parachain collator [[parachains.collators]] name = "eve" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run ferdie as parachain collator [[parachains.collators]] name = "ferdie" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] diff --git a/cumulus/zombienet/examples/bridge_hub_kusama_local_network.toml b/cumulus/zombienet/examples/bridge_hub_kusama_local_network.toml deleted file mode 100644 index ae8ae07a75ce84f074837ba1125554622a426e9e..0000000000000000000000000000000000000000 --- a/cumulus/zombienet/examples/bridge_hub_kusama_local_network.toml +++ /dev/null @@ -1,67 +0,0 @@ -[relaychain] -default_command = "../polkadot/target/release/polkadot" -default_args = [ "-lparachain=debug" ] -chain = "kusama-local" - - [[relaychain.nodes]] - name = "alice" - validator = true - - [[relaychain.nodes]] - name = "bob" - validator = true - - [[relaychain.nodes]] - name = "charlie" - validator = true - - [[relaychain.nodes]] - name = "dave" - validator = true - -[[parachains]] -id = 1003 -chain = "bridge-hub-kusama-local" -cumulus_based = true - - # run alice as parachain collator - [[parachains.collators]] - name = "alice" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run bob as parachain collator - [[parachains.collators]] - name = "bob" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run charlie as parachain collator - [[parachains.collators]] - name = "charlie" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run dave as parachain collator - [[parachains.collators]] - name = "dave" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run eve as parachain collator - [[parachains.collators]] - name = "eve" - validator = true - command = 
"./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run ferdie as parachain collator - [[parachains.collators]] - name = "ferdie" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] diff --git a/cumulus/zombienet/examples/bridge_hub_polkadot_local_network.toml b/cumulus/zombienet/examples/bridge_hub_polkadot_local_network.toml deleted file mode 100644 index 564fece7cae7781d06415221a879de0c24380e38..0000000000000000000000000000000000000000 --- a/cumulus/zombienet/examples/bridge_hub_polkadot_local_network.toml +++ /dev/null @@ -1,67 +0,0 @@ -[relaychain] -default_command = "../polkadot/target/release/polkadot" -default_args = [ "-lparachain=debug" ] -chain = "polkadot-local" - - [[relaychain.nodes]] - name = "alice" - validator = true - - [[relaychain.nodes]] - name = "bob" - validator = true - - [[relaychain.nodes]] - name = "charlie" - validator = true - - [[relaychain.nodes]] - name = "dave" - validator = true - -[[parachains]] -id = 1003 -chain = "bridge-hub-polkadot-local" -cumulus_based = true - - # run alice as parachain collator - [[parachains.collators]] - name = "alice" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run bob as parachain collator - [[parachains.collators]] - name = "bob" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run charlie as parachain collator - [[parachains.collators]] - name = "charlie" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run dave as parachain collator - [[parachains.collators]] - name = "dave" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run eve as parachain collator - [[parachains.collators]] - name = "eve" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run ferdie as parachain collator - [[parachains.collators]] - name = "ferdie" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] diff --git a/cumulus/zombienet/examples/small_network.toml b/cumulus/zombienet/examples/small_network.toml index 06ac0d0e5e78781710eb134eae7c7dd8233cfce9..ab7265712308f86a6a3b9d2d78dfacfa3c40abd2 100644 --- a/cumulus/zombienet/examples/small_network.toml +++ b/cumulus/zombienet/examples/small_network.toml @@ -14,7 +14,7 @@ chain = "rococo-local" [[parachains]] id = 2000 cumulus_based = true -chain = "asset-hub-kusama-local" +chain = "asset-hub-rococo-local" # run charlie as parachain collator [[parachains.collators]] diff --git a/cumulus/zombienet/tests/0002-pov_recovery.toml b/cumulus/zombienet/tests/0002-pov_recovery.toml index 34cacbc2a9ba8e3907fd90b97c13eaca63af6900..fe42fd4b2f6681154e89d5e7274618a8f307dfab 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.toml +++ b/cumulus/zombienet/tests/0002-pov_recovery.toml @@ -34,13 +34,12 @@ add_to_genesis = false args = ["--disable-block-announcements"] # run 'alice' as a parachain collator who does not produce blocks - # 'alice' is a bootnode for 'bob' and 'charlie' [[parachains.collators]] name = "alice" validator = true # collator image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", 
"--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # run 'charlie' as a parachain full node [[parachains.collators]] @@ -48,7 +47,7 @@ add_to_genesis = false validator = false # full node image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}","--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # we fail recovery for 'eve' from time to time to test retries [[parachains.collators]] @@ -56,7 +55,7 @@ add_to_genesis = false validator = true # collator image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--fail-pov-recovery", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--fail-pov-recovery", "--in-peers 0", "--out-peers 0", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # run 'one' as a RPC collator who does not produce blocks [[parachains.collators]] @@ -64,7 +63,7 @@ add_to_genesis = false validator = true # collator image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-rpc-url {{'ferdie'|zombie('wsUri')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--in-peers 0", "--out-peers 0", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-rpc-url {{'ferdie'|zombie('wsUri')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # run 'two' as a RPC parachain full node [[parachains.collators]] @@ -72,7 +71,7 @@ add_to_genesis = false validator = false # full node image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-rpc-url {{'ferdie'|zombie('wsUri')}}", "--", "--reserved-only", 
"--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--in-peers 0", "--out-peers 0", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-rpc-url {{'ferdie'|zombie('wsUri')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # run 'three' with light client [[parachains.collators]] @@ -80,4 +79,4 @@ add_to_genesis = false validator = false # full node image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-light-client", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--in-peers 0", "--out-peers 0", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-light-client", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] diff --git a/cumulus/zombienet/tests/0002-pov_recovery.zndsl b/cumulus/zombienet/tests/0002-pov_recovery.zndsl index 12ff00210f33c0918402b02845325739bc3b8ae2..b05285c87bff5a69312552d13b9b652bbd9d1bc1 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.zndsl +++ b/cumulus/zombienet/tests/0002-pov_recovery.zndsl @@ -13,5 +13,13 @@ alice: reports block height is at least 20 within 600 seconds charlie: reports block height is at least 20 within 600 seconds one: reports block height is at least 20 within 800 seconds two: reports block height is at least 20 within 800 seconds -three: reports block height is at least 20 within 800 seconds +# Re-enable once we upgraded from smoldot 0.11.0 and https://github.com/paritytech/polkadot-sdk/pull/1631 is merged +# three: reports block height is at least 20 within 800 seconds eve: reports block height is at least 20 within 800 seconds + +one: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +two: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +three: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +eve: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +charlie: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +alice: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds diff --git a/docs/CODE_OF_CONDUCT.md b/docs/contributor/CODE_OF_CONDUCT.md similarity index 100% rename from docs/CODE_OF_CONDUCT.md rename to docs/contributor/CODE_OF_CONDUCT.md diff --git a/docs/CONTRIBUTING.md b/docs/contributor/CONTRIBUTING.md similarity index 90% rename from docs/CONTRIBUTING.md rename to docs/contributor/CONTRIBUTING.md index 1e05755a9b8338c5769a48c5e8f477670eb92e25..96dc86e9780561e33e24fbc6f0346572d26598b7 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/contributor/CONTRIBUTING.md @@ -93,22 +93,12 @@ The reviewers are also responsible to check: All Pull Requests must contain proper title & description. 
-Some Pull Requests can be exempt of `prdoc` documentation, those -must be labelled with +Some Pull Requests can be exempt of `prdoc` documentation, those must be labelled with [`R0-silent`](https://github.com/paritytech/labels/blob/main/ruled_labels/specs_polkadot-sdk.yaml#L89-L91). Non "silent" PRs must come with documentation in the form of a `.prdoc` file. -A `.prdoc` documentation is made of a text file (YAML) named `/prdoc/pr_NNNN.prdoc` where `NNNN` is the PR number. -For convenience, those file can also contain a short description/title: `/prdoc/pr_NNNN_pr-foobar.prdoc`. -The CI automation checks for the presence and validity of a `prdoc` in the `/prdoc` folder. -Those files need to comply with a specific [schema](https://github.com/paritytech/prdoc/blob/master/schema_user.json). It -is highly recommended to [make your editor aware](https://github.com/paritytech/prdoc#schemas) of the schema as it is -self-described and will assist you in writing correct content. - -This schema is also embedded in the -[prdoc](https://github.com/paritytech/prdoc) utility that can also be used to generate and check the validity of a -`prdoc` locally. +See more about `prdoc` [here](./prdoc.md) ## Helping out @@ -153,8 +143,18 @@ Or if you have opened PR and you're member of `paritytech` - you can use command - `bot update-ui latest -v CMD_IMAGE=paritytech/ci-unified:bullseye-1.70.0-2023-05-23 --rust_version=1.70.0` - will run the tests for the specified rust version and specified image +## Feature Propagation + +We use [zepter](https://github.com/ggwpez/zepter) to enforce features are propagated between crates correctly. + ## Command Bot If you're member of **paritytech** org - you can use command-bot to run various of common commands in CI: Start with comment in PR: `bot help` to see the list of available commands. + + +## Deprecating code + +When deprecating and removing code you need to be mindful of how this could impact downstream developers. In order to +mitigate this impact, it is recommended to adhere to the steps outlined in the [Deprecation Checklist](./DEPRECATION_CHECKLIST.md). diff --git a/docs/DEPRECATION_CHECKLIST.md b/docs/contributor/DEPRECATION_CHECKLIST.md similarity index 90% rename from docs/DEPRECATION_CHECKLIST.md rename to docs/contributor/DEPRECATION_CHECKLIST.md index fccf93d227379bdc16d831eac4ffb1f561adbe94..687c0a7cd7da040f40c120d00d75069db76ba27e 100644 --- a/docs/DEPRECATION_CHECKLIST.md +++ b/docs/contributor/DEPRECATION_CHECKLIST.md @@ -1,9 +1,7 @@ # Deprecation Checklist -This deprecation checklist makes sense while we don’t use [SemVer](https://semver.org/). -After that, this document will most likely change. -As deprecation and removal of existing code can happen on any release, we need to be mindful that external builders -could be impacted by the changes we make. +Polkadot SDK is under constant development and improvement, thus deprecation and removal of existing code happen often. +When creating a breaking change we need to be mindful that external builders could be impacted by this. The deprecation checklist tries to mitigate this impact, while still keeping the developer experience, the DevEx, as smooth as possible. @@ -45,7 +43,7 @@ We also need [https://docs.substrate.io/](https://docs.substrate.io/) to be upda ## Announce the deprecation and removal -**At minimum they should be noted in the release log.** Please see how to document a PR [here](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md#documentation). 
+**At minimum they should be noted in the release log.** Please see how to document a PR [here](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#documentation). There you can give instructions based on the audience and tell them what they need to do to upgrade the code. Some breaking changes have a bigger impact than others. When the impact is big the release note is not enough, though diff --git a/docs/DOCUMENTATION_GUIDELINES.md b/docs/contributor/DOCUMENTATION_GUIDELINES.md similarity index 99% rename from docs/DOCUMENTATION_GUIDELINES.md rename to docs/contributor/DOCUMENTATION_GUIDELINES.md index 5d1164e8ca89f8eefea71ad96318b9347ca29c89..96811a2772d775f92ce4524416c295a689df1790 100644 --- a/docs/DOCUMENTATION_GUIDELINES.md +++ b/docs/contributor/DOCUMENTATION_GUIDELINES.md @@ -225,7 +225,7 @@ For the top-level pallet docs, consider the following template: //! //! ## Pallet API //! -//! //! //! See the [`pallet`] module for more information about the interfaces this pallet exposes, including its @@ -349,3 +349,7 @@ Consider the fact that, similar to dispatchables, these docs will be part of the and might be used by wallets and explorers. Specifically for `error`, explain why the error has happened, and what can be done in order to avoid it. + +## Documenting Changes/PR + +See [PRDoc](./prdoc.md). diff --git a/docs/PULL_REQUEST_TEMPLATE.md b/docs/contributor/PULL_REQUEST_TEMPLATE.md similarity index 96% rename from docs/PULL_REQUEST_TEMPLATE.md rename to docs/contributor/PULL_REQUEST_TEMPLATE.md index c93ac90c7e32e1c6c33d4409920eaa85a1aea814..79a036a235ad92a2cfceeba9b8fb66a44d163dfd 100644 --- a/docs/PULL_REQUEST_TEMPLATE.md +++ b/docs/contributor/PULL_REQUEST_TEMPLATE.md @@ -3,7 +3,7 @@ ✄ ----------------------------------------------------------------------------- Thank you for your Pull Request! 🙏 Please make sure it follows the contribution guidelines outlined in -[this document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md) and fill +[this document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and fill out the sections below. Once you're ready to submit your PR for review, please delete this section and leave only the text under the "Description" heading. diff --git a/docs/SECURITY.md b/docs/contributor/SECURITY.md similarity index 100% rename from docs/SECURITY.md rename to docs/contributor/SECURITY.md diff --git a/docs/STYLE_GUIDE.md b/docs/contributor/STYLE_GUIDE.md similarity index 83% rename from docs/STYLE_GUIDE.md rename to docs/contributor/STYLE_GUIDE.md index 1ae9bc5003f60502dae885c49e4af861561a793c..3df65d9699a05e64e6461bfa84384b3d4d108b29 100644 --- a/docs/STYLE_GUIDE.md +++ b/docs/contributor/STYLE_GUIDE.md @@ -2,9 +2,11 @@ title: Style Guide for Rust in the Polkadot-SDK --- -Where possible these styles are enforced by settings in `rustfmt.toml` so if you run `cargo fmt` +Where possible these styles are enforced by settings in `rustfmt.toml` so if you run `cargo +nightly fmt` then you will adhere to most of these style guidelines automatically. +To see exactly which nightly version is used, check our CI job logs. + # Formatting - Indent using tabs. @@ -150,31 +152,13 @@ let mut target_path = # Manifest Formatting -> **TLDR** -> You can use the CLI tool [Zepter](https://crates.io/crates/zepter) to -> format the files: `zepter format features --fix` (or `zepter f f -f`). +We use [taplo](https://taplo.tamasfe.dev/) to enforce consistent TOML formatting. 
-Rust `Cargo.toml` files need to respect certain formatting rules. All entries -need to be alphabetically sorted. This makes it easier to read them and insert -new entries. The exhaustive list of rules is enforced by the CI. The general -format looks like this: +You can install it with `cargo install taplo-cli` and format your code with `taplo format --config .config/taplo.toml`. -- The feature is written as a single line if it fits within 80 chars: +See the config file for the exact rules. -```toml -[features] -default = [ "std" ] -``` +You may find the following useful: -- Otherwise the feature is broken down into multiple lines with one entry per - line. Each line is padded with one tab and no trailing spaces but a trailing - comma. - -```toml -[features] -default = [ - "loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong", - # Comments go here as well ;) - "std", -] -``` +- [Taplo VSCode extension](https://marketplace.visualstudio.com/items?itemName=tamasfe.even-better-toml) +- For NeoVim, [taplo is available with Mason](https://github.com/williamboman/mason-lspconfig.nvim#available-lsp-servers) diff --git a/docs/container.md b/docs/contributor/container.md similarity index 100% rename from docs/container.md rename to docs/contributor/container.md diff --git a/docs/docker.md b/docs/contributor/docker.md similarity index 100% rename from docs/docker.md rename to docs/contributor/docker.md diff --git a/docs/markdown_linting.md b/docs/contributor/markdown_linting.md similarity index 100% rename from docs/markdown_linting.md rename to docs/contributor/markdown_linting.md diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md new file mode 100644 index 0000000000000000000000000000000000000000..af0ede5107a6cb1ea31d8b81fe16b06553ef64ac --- /dev/null +++ b/docs/contributor/prdoc.md @@ -0,0 +1,71 @@ +# PRDoc + +## Intro + +With the merge of [PR #1946](https://github.com/paritytech/polkadot-sdk/pull/1946), a new method for +documenting changes has been introduced: `prdoc`. The [prdoc repository](https://github.com/paritytech/prdoc) +contains more documentation and tooling. + +The current document describes how to quickly get started authoring `PRDoc` files. + +## Requirements + +When creating a PR, the author needs to decide with the `R0` label whether the change (PR) should +appear in the release notes or not. + +Labelling a PR with `R0` means that no `PRDoc` is required. + +A PR without the `R0` label **does** require a valid `PRDoc` file to be introduced in the PR. + +## PRDoc how-to + +A `.prdoc` file is a YAML file with a defined structure (i.e. a JSON Schema). + +For significant changes, a `.prdoc` file is mandatory and the file must meet the following +requirements: +- file named `pr_NNNN.prdoc` where `NNNN` is the PR number. + For convenience, those files can also contain a short description: `pr_NNNN_foobar.prdoc`. +- located under the [`prdoc` folder](https://github.com/paritytech/polkadot-sdk/tree/master/prdoc) of the repository +- compliant with the [JSON schema](https://json-schema.org/) defined in `prdoc/schema_user.json` + +Those requirements can be fulfilled manually without any tooling but a text editor.
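To make the requirements above concrete, here is a minimal sketch of what such a file could look like. The layout is modelled on the repository's `prdoc/.template.prdoc`, but the schema in `prdoc/schema_user.json` is authoritative, so treat the PR number, crate name, and wording below as purely illustrative:

```yaml
# prdoc/pr_1234.prdoc (illustrative sketch only; validate against prdoc/schema_user.json)
title: Fix storage weight accounting in pallet-example

doc:
  - audience: Runtime Dev
    description: |
      Corrects the weight returned by `pallet-example` dispatchables.
      Downstream runtimes should re-run their benchmarks after upgrading.

crates:
  - name: pallet-example
```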
+ +## Tooling + +Users might find the following helpers convenient: +- Setup VSCode to be aware of the prdoc schema: see [using VSCode](https://github.com/paritytech/prdoc#using-vscode) +- Using the `prdoc` cli to: + - generate a `PRDoc` file from a [template defined in the Polkadot SDK + repo](https://github.com/paritytech/polkadot-sdk/blob/master/prdoc/.template.prdoc) simply providing a PR number + - check the validity of one or more `PRDoc` files + +## `prdoc` cli usage + +The `prdoc` cli documentation can be found at https://github.com/paritytech/prdoc#prdoc + +tldr: +- `prdoc generate <pr_number>` +- `prdoc check -n <pr_number>` + +where `<pr_number>` is the PR number. + +## Pick an audience + +While describing a PR, the author needs to consider which audience(s) need to be addressed. +The list of valid audiences is described and documented in the JSON schema as follows: + +- `Node Dev`: Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs. + These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol + itself. + +- `Runtime Dev`: All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a + pallet. These are people who care about the protocol (WASM), not the meta-protocol (client). + +- `Node Operator`: Those who don't write any code and only run code. + +- `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain. + +## Tips + +The PRDoc schema is defined in each repo and usually is quite restrictive. +You cannot simply add a new property to a `PRDoc` file unless the Schema allows it. diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd new file mode 100644 index 0000000000000000000000000000000000000000..93d3e92814cf1307fa7c0ea9f290ed21edae58fb --- /dev/null +++ b/docs/mermaid/IA.mmd @@ -0,0 +1,14 @@ +flowchart + parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] + + devhub --> polkadot_sdk + devhub --> reference_docs + devhub --> tutorial + + polkadot_sdk --> substrate + polkadot_sdk --> frame + polkadot_sdk --> cumulus + polkadot_sdk --> polkadot + polkadot_sdk --> xcm + + diff --git a/docs/mermaid/extrinsics.mmd b/docs/mermaid/extrinsics.mmd new file mode 100644 index 0000000000000000000000000000000000000000..4afd4ab8f755d818f9bb0ccf6488610dcc6638f9 --- /dev/null +++ b/docs/mermaid/extrinsics.mmd @@ -0,0 +1,5 @@ +flowchart TD + E(Extrinsic) ---> I(Inherent); + E --> T(Transaction)
Transaction)") + T --> UT(Unsigned) diff --git a/docs/mermaid/polkadot_sdk_parachain.mmd b/docs/mermaid/polkadot_sdk_parachain.mmd new file mode 100644 index 0000000000000000000000000000000000000000..3f38fce046c2e60b6860885c851d0121fbda804c --- /dev/null +++ b/docs/mermaid/polkadot_sdk_parachain.mmd @@ -0,0 +1,11 @@ +flowchart LR + subgraph Parachain[A Polkadot Parachain] + ParachainNode[Parachain Node] + ParachainRuntime[Parachain Runtime] + end + + FRAME -.-> ParachainRuntime + Substrate[Substrate Node Libraries] -.-> ParachainNoe + + CumulusC[Cumulus Node Libraries] -.-> ParachainNode + CumulusR[Cumulus Runtime Libraries] -.-> ParachainRuntime diff --git a/docs/mermaid/polkadot_sdk_polkadot.mmd b/docs/mermaid/polkadot_sdk_polkadot.mmd new file mode 100644 index 0000000000000000000000000000000000000000..3326cc59383926779e044f0e3c8a4837306b4f98 --- /dev/null +++ b/docs/mermaid/polkadot_sdk_polkadot.mmd @@ -0,0 +1,10 @@ +flowchart LR + + subgraph Polkadot[The Polkadot Relay Chain] + PolkadotNode[Polkadot Node] + PolkadotRuntime[Polkadot Runtime] + end + + FRAME -.-> PolkadotRuntime + Substrate[Substrate Node Libraries] -.-> PolkadotNode + diff --git a/docs/mermaid/polkadot_sdk_substrate.mmd b/docs/mermaid/polkadot_sdk_substrate.mmd new file mode 100644 index 0000000000000000000000000000000000000000..dfaf20d241f81b006a2c4979ebb7c3a0811bd208 --- /dev/null +++ b/docs/mermaid/polkadot_sdk_substrate.mmd @@ -0,0 +1,8 @@ +flowchart LR + subgraph SubstrateChain[A Substrate-based blockchain] + Node + Runtime + end + + FRAME -.-> Runtime + Substrate[Substrate Node Libraries] -.-> Node diff --git a/docs/mermaid/state.mmd b/docs/mermaid/state.mmd new file mode 100644 index 0000000000000000000000000000000000000000..c72ecbfd1568b2ee753bf2e58a9aebcad10af164 --- /dev/null +++ b/docs/mermaid/state.mmd @@ -0,0 +1,16 @@ +flowchart TB + subgraph Node[Node's View Of The State 🙈] + direction LR + 0x1234 --> 0x2345 + 0x3456 --> 0x4567 + 0x5678 --> 0x6789 + :code --> code[wasm code] + end + + subgraph Runtime[Runtime's View Of The State 🙉] + direction LR + ab[alice's balance] --> abv[known value] + bb[bob's balance] --> bbv[known value] + cb[charlie's balance] --> cbv[known value] + c2[:code] --> c22[wasm code] + end diff --git a/docs/mermaid/stf.mmd b/docs/mermaid/stf.mmd new file mode 100644 index 0000000000000000000000000000000000000000..dd6c7c36de66fe7d2dbfd77f23a1362878c7a7e5 --- /dev/null +++ b/docs/mermaid/stf.mmd @@ -0,0 +1,21 @@ +flowchart LR + %%{init: {'flowchart' : {'curve' : 'linear'}}}%% + subgraph BData[Blockchain Database] + direction LR + BN[Block N] -.-> BN1[Block N+1] + end + + subgraph SData[State Database] + direction LR + SN[State N] -.-> SN1[State N+1] -.-> SN2[State N+2] + end + + BN --> STFN[STF] + SN --> STFN[STF] + STFN[STF] --> SN1 + + BN1 --> STFN1[STF] + SN1 --> STFN1[STF] + STFN1[STF] --> SN2 + + diff --git a/docs/mermaid/stf_simple.mmd b/docs/mermaid/stf_simple.mmd new file mode 100644 index 0000000000000000000000000000000000000000..5db20cf6156c8c6b9817eb07de07e43d6b7a1abd --- /dev/null +++ b/docs/mermaid/stf_simple.mmd @@ -0,0 +1,4 @@ +flowchart LR + B[Block] --> STF + S[State] --> STF + STF --> NS[New State] diff --git a/docs/mermaid/substrate_client_runtime.mmd b/docs/mermaid/substrate_client_runtime.mmd index 23c3f849224affcb791fc3324df44f4bd4fa398b..caab2b6230283f866730fa389efd2934a970dadf 100644 --- a/docs/mermaid/substrate_client_runtime.mmd +++ b/docs/mermaid/substrate_client_runtime.mmd @@ -1,10 +1,12 @@ graph TB subgraph Substrate direction LR - subgraph Client + subgraph 
Node end + subgraph Runtime end - Client --runtime-api--> Runtime - Runtime --host-functions--> Client + + Node --runtime-api--> Runtime + Runtime --host-functions--> Node end diff --git a/docs/mermaid/substrate_dev.mmd b/docs/mermaid/substrate_dev.mmd new file mode 100644 index 0000000000000000000000000000000000000000..fc331ce311fea6519957f88deb4a289974d2a98c --- /dev/null +++ b/docs/mermaid/substrate_dev.mmd @@ -0,0 +1,2 @@ +flowchart LR + T[Using a Template] --> P[Writing Your Own FRAME-Based Pallet] --> C[Custom Node] diff --git a/docs/mermaid/substrate_simple.mmd b/docs/mermaid/substrate_simple.mmd index 475d8be5ef818ca6e2798a71cf3adee5c36a6dbf..a752eaba625fb1d1a27a91413266bb6fd30c75c0 100644 --- a/docs/mermaid/substrate_simple.mmd +++ b/docs/mermaid/substrate_simple.mmd @@ -1,7 +1,7 @@ graph TB subgraph Substrate direction LR - subgraph Client + subgraph Node end subgraph Runtime end diff --git a/docs/mermaid/substrate_with_frame.mmd b/docs/mermaid/substrate_with_frame.mmd index 12d072a3360c2a43d248271348227891f89f6f46..173c1757b955f3157d8a38a12b03d0e3157c34e5 100644 --- a/docs/mermaid/substrate_with_frame.mmd +++ b/docs/mermaid/substrate_with_frame.mmd @@ -1,7 +1,7 @@ graph TB subgraph Substrate direction LR - subgraph Client + subgraph Node Database Networking Consensus @@ -15,6 +15,6 @@ subgraph Substrate Identity end end - Client --runtime-api--> Runtime - Runtime --host-functions--> Client + Node --runtime-api--> Runtime + Runtime --host-functions--> Node end diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..246da2cd68c6e386bbacf039768767aae70d26dd --- /dev/null +++ b/docs/sdk/Cargo.toml @@ -0,0 +1,69 @@ +[package] +name = "polkadot-sdk-docs" +description = "The one stop shop for developers of the polakdot-sdk" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "paritytech.github.io" +repository.workspace = true +authors.workspace = true +edition.workspace = true +# This crate is not publish-able to crates.io for now because of docify. +publish = false +version = "0.0.1" + +[lints] +workspace = true + +[dependencies] +# Needed for all FRAME-based code +parity-scale-codec = { version = "3.0.0", default-features = false } +scale-info = { version = "2.6.0", default-features = false } +frame = { path = "../../substrate/frame", features = ["experimental", "runtime"] } +pallet-examples = { path = "../../substrate/frame/examples" } +pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } + +# How we build docs in rust-docs +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } +docify = "0.2.6" + +# Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. 
+node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" } +kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } +chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } +subkey = { path = "../../substrate/bin/utils/subkey" } + +# Substrate +sc-network = { path = "../../substrate/client/network" } +sc-rpc-api = { path = "../../substrate/client/rpc-api" } +sc-rpc = { path = "../../substrate/client/rpc" } +sc-client-db = { path = "../../substrate/client/db" } +sc-cli = { path = "../../substrate/client/cli" } +sc-consensus-aura = { path = "../../substrate/client/consensus/aura" } +sc-consensus-babe = { path = "../../substrate/client/consensus/babe" } +sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } +sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } +sc-consensus-manual-seal = { path = "../../substrate/client/consensus/manual-seal" } +sc-consensus-pow = { path = "../../substrate/client/consensus/pow" } +substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../cumulus/pallets/aura-ext" } +cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-system", features = [ + "parameterized-consensus-hook", +] } +parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } +pallet-aura = { path = "../../substrate/frame/aura", default-features = false } +pallet-timestamp = { path = "../../substrate/frame/timestamp" } + +# Primitives +sp-io = { path = "../../substrate/primitives/io" } +sp-api = { path = "../../substrate/primitives/api" } +sp-core = { path = "../../substrate/primitives/core" } +sp-keyring = { path = "../../substrate/primitives/keyring" } +sp-runtime = { path = "../../substrate/primitives/runtime" } + +[dev-dependencies] +parity-scale-codec = "3.6.5" +scale-info = "2.9.0" + +[features] +experimental = ["pallet-aura/experimental"] diff --git a/docs/sdk/headers/toc.html b/docs/sdk/headers/toc.html new file mode 100644 index 0000000000000000000000000000000000000000..a4a074cb4f3153cb135da8608462d4ecf59144cb --- /dev/null +++ b/docs/sdk/headers/toc.html @@ -0,0 +1,54 @@ + + diff --git a/docs/sdk/src/guides/changing_consensus.rs b/docs/sdk/src/guides/changing_consensus.rs new file mode 100644 index 0000000000000000000000000000000000000000..7ba742f10723d22a0ae58b48b3fa35770b75b331 --- /dev/null +++ b/docs/sdk/src/guides/changing_consensus.rs @@ -0,0 +1 @@ +//! # Changing Consensus diff --git a/docs/sdk/src/guides/cumulus_enabled_parachain.rs b/docs/sdk/src/guides/cumulus_enabled_parachain.rs new file mode 100644 index 0000000000000000000000000000000000000000..fafd97feb8291b34947e0a779fc9dda754dec3f1 --- /dev/null +++ b/docs/sdk/src/guides/cumulus_enabled_parachain.rs @@ -0,0 +1 @@ +//! # Cumulus Enabled Parachain diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..3120f25331099263087d3804d324e71555c0813d --- /dev/null +++ b/docs/sdk/src/guides/mod.rs @@ -0,0 +1,25 @@ +//! # Polkadot SDK Docs Guides +//! +//! This crate contains a collection of guides that are foundational to the developers of +//! Polkadot SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. + +/// Write your first simple pallet, learning the most most basic features of FRAME along the way. 
+pub mod your_first_pallet; + +/// Writing your first real [runtime](`crate::reference_docs::wasm_meta_protocol`), and successfully +/// compiling it to [WASM](crate::polkadot_sdk::substrate#wasm-build). +pub mod your_first_runtime; + +/// Running the given runtime with a node. No specific consensus mechanism is used at this stage. +pub mod your_first_node; + +/// How to change the consensus engine of both the node and the runtime. +pub mod changing_consensus; + +/// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain and connect +/// it to a relay-chain. +pub mod cumulus_enabled_parachain; + +/// How to make a given runtime XCM-enabled, capable of sending messages (`Transact`) between itself +/// and the relay chain to which it is connected. +pub mod xcm_enabled_parachain; diff --git a/docs/sdk/src/guides/xcm_enabled_parachain.rs b/docs/sdk/src/guides/xcm_enabled_parachain.rs new file mode 100644 index 0000000000000000000000000000000000000000..4518cab934215c8de7d6ebdedabb8f4ba2670e2b --- /dev/null +++ b/docs/sdk/src/guides/xcm_enabled_parachain.rs @@ -0,0 +1 @@ +//! # XCM Enabled Parachain diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs new file mode 100644 index 0000000000000000000000000000000000000000..d12349c990632deb03bf24006ffc63b493347715 --- /dev/null +++ b/docs/sdk/src/guides/your_first_node.rs @@ -0,0 +1 @@ +//! # Your first Node diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..c886bc9af842d831f7a0869c998242525068c35c --- /dev/null +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -0,0 +1,754 @@ +//! # Currency Pallet +//! +//! By the end of this guide, you will write a small FRAME pallet (see +//! [`crate::polkadot_sdk::frame_runtime`]) that is capable of handling a simple crypto-currency. +//! This pallet will: +//! +//! 1. Allow anyone to mint new tokens into accounts (which is obviously not a great idea for a real +//! system). +//! 2. Allow any user that owns tokens to transfer them to others. +//! 3. Track the total issuance of all tokens at all times. +//! +//! > This guide will build a currency pallet from scratch using only the lowest primitives of +//! > FRAME, and is mainly intended for education, not *applicability*. For example, almost all +//! > FRAME-based runtimes use various techniques to re-use a currency pallet instead of writing +//! > one. Further advanced FRAME related topics are discussed in [`crate::reference_docs`]. +//! +//! ## Topics Covered +//! +//! The following FRAME topics are covered in this guide: +//! +//! - [Storage](frame::pallet_macros::storage) +//! - [Call](frame::pallet_macros::call) +//! - [Event](frame::pallet_macros::event) +//! - [Error](frame::pallet_macros::error) +//! - Basics of testing a pallet +//! - [Constructing a runtime](frame::runtime::prelude::construct_runtime) +//! +//! ## Writing Your First Pallet +//! +//! You should have studied the following modules as a prelude to this guide: +//! +//! - [`crate::reference_docs::blockchain_state_machines`] +//! - [`crate::reference_docs::trait_based_programming`] +//! - [`crate::polkadot_sdk::frame_runtime`] +//! +//! ### Shell Pallet +//! +//! Consider the following as a "shell pallet". We continue building the rest of this pallet based +//! on this template. +//! +//! [`pallet::config`](frame::pallet_macros::config) and +//! 
[`pallet::pallet`](frame::pallet_macros::pallet) are both mandatory parts of any pallet. Refer
+//! to the documentation of each to get an overview of what they do.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", shell_pallet)]
+//!
+//! ### Storage
+//!
+//! First, we will need to create two onchain storage declarations.
+//!
+//! One should be a mapping from account ids to a balance type, and the other a single value that
+//! is the total issuance.
+//!
+//! > For the rest of this guide, we will opt for a balance type of `u128`. For the sake of
+//! > simplicity, we are hardcoding this type. In a real pallet, it is best practice to define it
+//! > as a generic bounded type in the `Config` trait, and then specify it in the implementation.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balance)]
+//!
+//! The definition of these two storage items, based on [`frame::pallet_macros::storage`] details,
+//! is as follows:
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", TotalIssuance)]
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balances)]
+//!
+//! ### Dispatchables
+//!
+//! Next, we will define the dispatchable functions. As per [`frame::pallet_macros::call`], these
+//! will be defined as normal `fn`s attached to `struct Pallet`.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_pallet)]
+//!
+//! The logic of the functions is self-explanatory, so we will instead focus on the FRAME-related
+//! details:
+//!
+//! - Where do `T::AccountId` and `T::RuntimeOrigin` come from? These are both defined in
+//!   [`frame::prelude::frame_system::Config`], therefore we can access them in `T`.
+//! - What is `ensure_signed`, and what does it do with the aforementioned `T::RuntimeOrigin`? This
+//!   is outside the scope of this guide, and you can learn more about it in the origin reference
+//!   document ([`crate::reference_docs::frame_origin`]). For now, you should only know the
+//!   signature of the function: it takes a generic `T::RuntimeOrigin` and returns a
+//!   `Result`. So by the end of this function call, we know that this dispatchable
+//!   was signed by `who`.
+#![doc = docify::embed!("../../substrate/frame/system/src/lib.rs", ensure_signed)]
+//!
+//!
+//! - Where do `mutate`, `get`, `insert` and other storage APIs come from? All of them are
+//!   explained in the corresponding `type`; for example, for `Balances::<T>::insert`, you can look
+//!   into [`frame::prelude::StorageMap::insert`].
+//!
+//! - The return type of all dispatchable functions is [`frame::prelude::DispatchResult`]:
+#![doc = docify::embed!("../../substrate/frame/support/src/dispatch.rs", DispatchResult)]
+//!
+//! This is more or less a normal Rust `Result`, with a custom [`frame::prelude::DispatchError`] as
+//! the `Err` variant. We won't cover this error in detail here, but importantly you should know
+//! that there is an `impl From<&'static str> for DispatchError` provided (see
+//! [here](`frame::prelude::DispatchError#impl-From<%26'static+str>-for-DispatchError`)). Therefore,
+//! we can use basic string literals as our error type and `.into()` them into `DispatchError`.
+//!
+//! - Why do all `get` and `mutate` functions return an `Option`? This is the default behavior
+//!   of FRAME storage APIs. You can learn more about how to override this by looking into
+//!   [`frame::pallet_macros::storage`], and
+//!   [`frame::prelude::ValueQuery`]/[`frame::prelude::OptionQuery`].
+//!
+//! ### Improving Errors
+//!
+//! How we handle errors in the above snippets is fairly rudimentary. Let's look at how this can be
+//! improved. First, we can use [`frame::prelude::ensure`] to express the error slightly better.
+//! This macro will call `.into()` under the hood.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better)]
+//!
+//! Moreover, you will learn in the [Safe Defensive Programming
+//! section](crate::reference_docs::safe_defensive_programming) that it is always recommended to use
+//! safe arithmetic operations in your runtime. By using [`frame::traits::CheckedSub`], we can not
+//! only take a step in that direction, but also improve the error handling and make it slightly
+//! more ergonomic.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better_checked)]
+//!
+//! This is more or less all the logic there is to this basic currency pallet!
+//!
+//! ### Your First (Test) Runtime
+//!
+//! Next, we create a "test runtime" in order to test our pallet. Recall from
+//! [`crate::polkadot_sdk::frame_runtime`] that a runtime is a collection of pallets, expressed
+//! through [`frame::runtime::prelude::construct_runtime`]. All runtimes also have to include
+//! [`frame::prelude::frame_system`]. So we expect to see a runtime with two pallets, `frame_system`
+//! and the one we just wrote.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime)]
+//!
+//! > [`frame::pallet_macros::derive_impl`] is a FRAME feature that enables developers to have
+//! > defaults for associated types.
+//!
+//! Recall that within our pallet, (almost) all blocks of code are generic over `<T: Config>`. And
+//! because `trait Config: frame_system::Config`, we can get access to all items in `Config` (or
+//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how Rust
+//! traits and generics work. If unfamiliar with this pattern, read
+//! [`crate::reference_docs::trait_based_programming`] before going further.
+//!
+//! Crucially, a typical FRAME runtime contains a `struct Runtime`. The main role of this `struct`
+//! is to implement the `trait Config` of all pallets. That is, anywhere within your pallet code
+//! where you see `<T: Config>` (read: *"some type `T` that implements `Config`"*), in the runtime,
+//! it can be replaced with `<Runtime>`, because `Runtime` implements `Config` of all pallets, as we
+//! see above.
+//!
+//! Another way to think about this is that within a pallet, a lot of types are "unknown", and we
+//! only know that they will be provided at some later point. For example, when you write
+//! `T::AccountId` (which is short for `<T as frame_system::Config>::AccountId`) in your pallet,
+//! you are in fact saying "*some type `AccountId` that will be known later*". That "later" is in
+//! fact the moment you implement all the `Config` traits for `Runtime` and specify these types.
+//!
+//! As you see above, `frame_system::Config` is setting the `AccountId` to `u64`. Of course, a real
+//! runtime will not use this type, and will instead use a proper type like a 32-byte standard
+//! public key. This is a HUGE benefit that FRAME developers can tap into: because the framework is
+//! so generic, types can always be customized to simple alternatives when needed.
+//!
+//! > Imagine how hard it would have been if all tests had to use a real 32-byte account id, as
+//! > opposed to just a u64 number 🙈.
+//!
+//! ### Your First Test
+//!
+//! The above is all you need to execute the dispatchables of your pallet. The last thing you need
+//! to learn is that all of your pallet testing code should be wrapped in
+//! [`frame::testing_prelude::TestState`]. This is a type that provides access to an in-memory state
+//! to be used in our tests.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", first_test)]
+//!
+//! In the first test, we simply assert that there is no total issuance, and no balance associated
+//! with Alice's account. Then, we mint some balance into Alice's account, and re-check.
+//!
+//! As noted above, the `T::AccountId` is now `u64`. Moreover, `Runtime` is replacing `<T>`.
+//! This is why, for example, you see `Balances::<Runtime>::get(..)`. Finally, notice that the
+//! dispatchables are simply functions that can be called on top of the `Pallet` struct.
+// TODO: hard to explain exactly `RuntimeOrigin::signed(ALICE)` at this point.
+//!
+//! Congratulations! You have written your first pallet and tested it! Next, we learn a few optional
+//! steps to improve our pallet.
+//!
+//! ## Improving the Currency Pallet
+//!
+//! ### Better Test Setup
+//!
+//! Idiomatic FRAME pallets often use the builder pattern to define their initial state.
+//!
+//! > The Polkadot Blockchain Academy's Rust entrance exam has a
+//! > [section](https://github.com/Polkadot-Blockchain-Academy/pba-qualifier-exam/blob/main/src/m_builder.rs)
+//! > on this that you can use to learn the builder pattern.
+//!
+//! Let's see how we can implement a better test setup using this pattern. First, we define a
+//! `struct StateBuilder`.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", StateBuilder)]
+//!
+//! This struct is meant to contain the same list of accounts and balances that we want to have at
+//! the beginning of each block. We hardcoded this to `let accounts = vec![(ALICE, 100), (2, 100)];`
+//! so far. Then, if desired, we attach a default value for this struct.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", default_state_builder)]
+//!
+//! Like any other builder pattern, we attach functions to the type to mutate its internal
+//! properties.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_state_builder_add)]
+//!
+//! Finally, and this is the useful part, we write our own custom `build_and_execute` function on
+//! this type. This function does multiple things:
+//!
+//! 1. It consumes `self` to produce our `TestState` based on the properties that we attached to
+//!    `self`.
+//! 2. It executes any test function that we pass in as a closure.
+//! 3. As a nifty trick, it allows our test setup to run some code both before and after each test.
+//!    For example, in this test, we do some additional checking about the correctness of the
+//!    `TotalIssuance`. We leave it up to you as an exercise to learn why the assertion should
+//!    always hold, and how it is checked.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_state_builder_build)]
+//!
+//! We can write tests that specifically check the initial state, and make sure our `StateBuilder`
+//! is working exactly as intended.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", state_builder_works)]
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", state_builder_add_balance)]
+//!
+//! ### More Tests
+//!
+//! Now that we have a more ergonomic test setup, let's see what a well-written test for transfer
+//! and mint looks like.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_works)]
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", mint_works)]
+//!
+//! It is always a good idea to build a mental model where you write *at least* one test for each
+//! "success path" of a dispatchable, and one test for each "failure path", such as:
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_from_non_existent_fails)]
+//!
+//! We leave it up to you to write a test that triggers the `InsufficientBalance` error.
+//!
+//! ### Event and Error
+//!
+//! Our pallet is still missing two parts that are common in most FRAME pallets: events and
+//! errors. First, let's understand what each is.
+//!
+//! - **Error**: The static string-based error scheme we used so far is good for readability, but it
+//!   has a few drawbacks. The biggest problem with strings is that they are not type safe, e.g. a
+//!   match statement cannot be exhaustive. These string literals will bloat the final wasm blob,
+//!   and are relatively heavy to transmit and encode/decode. Moreover, it is easy to mistype them
+//!   by one character. FRAME errors are a solution that maintains readability whilst fixing the
+//!   drawbacks mentioned. In short, we use an enum to represent the different variants of our
+//!   error. These variants are then mapped in an efficient way (using only `u8` indices) to
+//!   [`sp_runtime::DispatchError::Module`]. Read more about this in
+//!   [`frame::pallet_macros::error`].
+//!
+//! - **Event**: Events are akin to the return type of dispatchables. They are mostly data blobs
+//!   emitted by the runtime to let the outside world know what is happening inside the pallet,
+//!   since the outside world otherwise has no easy access to the state changes. They should
+//!   represent what happened at the end of a dispatch operation. Therefore, the convention is to
+//!   use the past tense for event names (e.g. `SomethingHappened`). This allows other sub-systems
+//!   or external parties (e.g. a light node, a DApp) to listen to particular events happening,
+//!   without needing to re-execute the whole state transition function.
+// TODO: both need to be improved a lot at the pallet-macro rust-doc level. Also my explanation
+// of event is probably not the best.
+//!
+//! With the explanation out of the way, let's see how these components can be added. Both follow a
+//! fairly familiar syntax: normal Rust enums, with an extra `#[frame::event/error]` attribute
+//! attached.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)]
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)]
+//!
+//! One slightly custom part of this is the `#[pallet::generate_deposit(pub(super) fn
+//! deposit_event)]` part. Without going into too much detail, in order for a pallet to emit events
+//! to the rest of the system, it needs to do two things:
+//!
+//! 1. Declare a type in its `Config` that refers to the overarching event type of the runtime. In
+//!    short, by doing this, the pallet is expressing an important bound: `type RuntimeEvent:
+//!    From<Event<Self>>`. Read: a `RuntimeEvent` exists, and it can be created from the local `enum
+//!    Event` of this pallet. This enables the pallet to convert its `Event` into `RuntimeEvent`,
+//!    and store it where needed.
+//!
+//! 2. But doing this conversion and storing is too much to expect each pallet to define on its own.
+//!    FRAME provides a default way of storing events, and this is what `pallet::generate_deposit`
+//!    is doing.
+#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] +//! +//! > These `Runtime*` types are better explained in +//! > [`crate::reference_docs::frame_composite_enums`]. +//! +//! Then, we can rewrite the `transfer` dispatchable as such: +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_v2)] +//! +//! Then, notice how now we would need to provide this `type RuntimeEvent` in our test runtime +//! setup. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime_v2)] +//! +//! In this snippet, the actual `RuntimeEvent` type (right hand side of `type RuntimeEvent = +//! RuntimeEvent`) is generated by `construct_runtime`. An interesting way to inspect this type is +//! to see its definition in rust-docs: +//! [`crate::guides::your_first_pallet::pallet_v2::tests::runtime_v2::RuntimeEvent`]. +//! +//! +//! +//! ## What Next? +//! +//! The following topics where used in this guide, but not covered in depth. It is suggested to +//! study them subsequently: +//! +//! - [`crate::reference_docs::safe_defensive_programming`]. +//! - [`crate::reference_docs::frame_origin`]. +//! - [`crate::reference_docs::frame_composite_enums`]. +//! - The pallet we wrote in this guide was using `dev_mode`, learn more in +//! [`frame::pallet_macros::config`]. +//! - Learn more about the individual pallet items/macros, such as event and errors and call, in +//! [`frame::pallet_macros`]. + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod shell_pallet { + use frame::prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +#[frame::pallet(dev_mode)] +pub mod pallet { + use frame::prelude::*; + + #[docify::export] + pub type Balance = u128; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export] + /// Single storage item, of type `Balance`. + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + /// A mapping from `T::AccountId` to `Balance` + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[docify::export(impl_pallet)] + #[pallet::call] + impl Pallet { + /// An unsafe mint that can be called by anyone. Not a great idea. + pub fn mint_unsafe( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + // ensure that this is a signed account, but we don't really check `_anyone`. + let _anyone = ensure_signed(origin)?; + + // update the balances map. Notice how all `` remains as ``. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + // update total issuance. + TotalIssuance::::mutate(|t| *t = Some(t.unwrap_or(0) + amount)); + + Ok(()) + } + + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + if sender_balance < amount { + return Err("InsufficientBalance".into()) + } + let reminder = sender_balance - amount; + + // update sender and dest balances. 
+ Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, reminder); + + Ok(()) + } + } + + #[allow(unused)] + impl Pallet { + #[docify::export] + pub fn transfer_better( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + ensure!(sender_balance >= amount, "InsufficientBalance"); + let reminder = sender_balance - amount; + + // .. snip + Ok(()) + } + + #[docify::export] + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer_better_checked( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + let reminder = sender_balance.checked_sub(amount).ok_or("InsufficientBalance")?; + + // .. snip + Ok(()) + } + } + + #[cfg(any(test, doc))] + pub(crate) mod tests { + use crate::guides::your_first_pallet::pallet::*; + use frame::testing_prelude::*; + const ALICE: u64 = 1; + const BOB: u64 = 2; + const CHARLIE: u64 = 3; + + #[docify::export] + mod runtime { + use super::*; + // we need to reference our `mod pallet` as an identifier to pass to + // `construct_runtime`. + use crate::guides::your_first_pallet::pallet as pallet_currency; + + construct_runtime!( + pub struct Runtime { + // ---^^^^^^ This is where `struct Runtime` is defined. + System: frame_system, + Currency: pallet_currency, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + // within pallet we just said `::AccountId`, now we + // finally specified it. + type AccountId = u64; + } + + // our simple pallet has nothing to be configured. + impl pallet_currency::Config for Runtime {} + } + + pub(crate) use runtime::*; + + #[allow(unused)] + #[docify::export] + fn new_test_state_basic() -> TestState { + let mut state = TestState::new_empty(); + let accounts = vec![(ALICE, 100), (BOB, 100)]; + state.execute_with(|| { + for (who, amount) in &accounts { + Balances::::insert(who, amount); + TotalIssuance::::mutate(|b| *b = Some(b.unwrap_or(0) + amount)); + } + }); + + state + } + + #[docify::export] + pub(crate) struct StateBuilder { + balances: Vec<(::AccountId, Balance)>, + } + + #[docify::export(default_state_builder)] + impl Default for StateBuilder { + fn default() -> Self { + Self { balances: vec![(ALICE, 100), (BOB, 100)] } + } + } + + #[docify::export(impl_state_builder_add)] + impl StateBuilder { + fn add_balance( + mut self, + who: ::AccountId, + amount: Balance, + ) -> Self { + self.balances.push((who, amount)); + self + } + } + + #[docify::export(impl_state_builder_build)] + impl StateBuilder { + pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = TestState::new_empty(); + ext.execute_with(|| { + for (who, amount) in &self.balances { + Balances::::insert(who, amount); + TotalIssuance::::mutate(|b| *b = Some(b.unwrap_or(0) + amount)); + } + }); + + ext.execute_with(test); + + // assertions that must always hold + ext.execute_with(|| { + assert_eq!( + Balances::::iter().map(|(_, x)| x).sum::(), + TotalIssuance::::get().unwrap_or_default() + ); + }) + } + } + + #[docify::export] + #[test] + fn first_test() { + TestState::new_empty().execute_with(|| { + // We expect Alice's account to have no funds. 
+ assert_eq!(Balances::::get(&ALICE), None); + assert_eq!(TotalIssuance::::get(), None); + + // mint some funds into Alice's account. + assert_ok!(Pallet::::mint_unsafe( + RuntimeOrigin::signed(ALICE), + ALICE, + 100 + )); + + // re-check the above + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(100)); + }) + } + + #[docify::export] + #[test] + fn state_builder_works() { + StateBuilder::default().build_and_execute(|| { + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + + #[docify::export] + #[test] + fn state_builder_add_balance() { + StateBuilder::default().add_balance(CHARLIE, 42).build_and_execute(|| { + assert_eq!(Balances::::get(&CHARLIE), Some(42)); + assert_eq!(TotalIssuance::::get(), Some(242)); + }) + } + + #[test] + #[should_panic] + fn state_builder_duplicate_genesis_fails() { + StateBuilder::default() + .add_balance(CHARLIE, 42) + .add_balance(CHARLIE, 43) + .build_and_execute(|| { + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(242)); + }) + } + + #[docify::export] + #[test] + fn mint_works() { + StateBuilder::default().build_and_execute(|| { + // given the initial state, when: + assert_ok!(Pallet::::mint_unsafe(RuntimeOrigin::signed(ALICE), BOB, 100)); + + // then: + assert_eq!(Balances::::get(&BOB), Some(200)); + assert_eq!(TotalIssuance::::get(), Some(300)); + + // given: + assert_ok!(Pallet::::mint_unsafe( + RuntimeOrigin::signed(ALICE), + CHARLIE, + 100 + )); + + // then: + assert_eq!(Balances::::get(&CHARLIE), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(400)); + }); + } + + #[docify::export] + #[test] + fn transfer_works() { + StateBuilder::default().build_and_execute(|| { + // given the the initial state, when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(50)); + assert_eq!(Balances::::get(&BOB), Some(150)); + assert_eq!(TotalIssuance::::get(), Some(200)); + + // when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(BOB), ALICE, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + + #[docify::export] + #[test] + fn transfer_from_non_existent_fails() { + StateBuilder::default().build_and_execute(|| { + // given the the initial state, when: + assert_err!( + Pallet::::transfer(RuntimeOrigin::signed(CHARLIE), ALICE, 10), + "NonExistentAccount" + ); + + // then nothing has changed. + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_v2 { + use super::pallet::Balance; + use frame::prelude::*; + + #[docify::export(config_v2)] + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type of the runtime. + type RuntimeEvent: From> + + IsType<::RuntimeEvent> + + TryInto>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + #[pallet::error] + pub enum Error { + /// Account does not exist. 
+ NonExistentAccount, + /// Account does not have enough balance. + InsufficientBalance, + } + + #[docify::export] + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transfer succeeded. + Transferred { from: T::AccountId, to: T::AccountId, amount: Balance }, + } + + #[pallet::call] + impl Pallet { + #[docify::export(transfer_v2)] + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = + Balances::::get(&sender).ok_or(Error::::NonExistentAccount)?; + let reminder = + sender_balance.checked_sub(amount).ok_or(Error::::InsufficientBalance)?; + + Balances::::mutate(&dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, reminder); + + Self::deposit_event(Event::::Transferred { from: sender, to: dest, amount }); + + Ok(()) + } + } + + #[cfg(any(test, doc))] + pub mod tests { + use super::{super::pallet::tests::StateBuilder, *}; + use frame::testing_prelude::*; + const ALICE: u64 = 1; + const BOB: u64 = 2; + + #[docify::export] + pub mod runtime_v2 { + use super::*; + use crate::guides::your_first_pallet::pallet_v2 as pallet_currency; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + Currency: pallet_currency, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + type AccountId = u64; + } + + impl pallet_currency::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + } + } + + pub(crate) use runtime_v2::*; + + #[docify::export(transfer_works_v2)] + #[test] + fn transfer_works() { + StateBuilder::default().build_and_execute(|| { + // skip the genesis block, as events are not deposited there and we need them for + // the final assertion. + System::set_block_number(ALICE); + + // given the the initial state, when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(50)); + assert_eq!(Balances::::get(&BOB), Some(150)); + assert_eq!(TotalIssuance::::get(), Some(200)); + + // now we can also check that an event has been deposited: + assert_eq!( + System::read_events_for_pallet::>(), + vec![Event::Transferred { from: ALICE, to: BOB, amount: 50 }] + ); + }); + } + } +} diff --git a/docs/sdk/src/guides/your_first_pallet/with_event.rs b/docs/sdk/src/guides/your_first_pallet/with_event.rs new file mode 100644 index 0000000000000000000000000000000000000000..a65aac324f07f6bff9fa85c9b9a2849188f3c4f2 --- /dev/null +++ b/docs/sdk/src/guides/your_first_pallet/with_event.rs @@ -0,0 +1,101 @@ +#[frame::pallet(dev_mode)] +pub mod pallet { + use frame::prelude::*; + + #[docify::export] + pub type Balance = u128; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export] + /// Single storage item, of type `Balance`. + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + /// A mapping from `T::AccountId` to `Balance` + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[docify::export(impl_pallet)] + #[pallet::call] + impl Pallet { + /// An unsafe mint that can be called by anyone. Not a great idea. 
+ pub fn mint_unsafe( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + // ensure that this is a signed account, but we don't really check `_anyone`. + let _anyone = ensure_signed(origin)?; + + // update the balances map. Notice how all `` remains as ``. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + // update total issuance. + TotalIssuance::::mutate(|t| *t = Some(t.unwrap_or(0) + amount)); + + Ok(()) + } + + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + if sender_balance < amount { + return Err("NotEnoughBalance".into()) + } + let reminder = sender_balance - amount; + + // update sender and dest balances. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, reminder); + + Ok(()) + } + } + + #[allow(unused)] + impl Pallet { + #[docify::export] + pub fn transfer_better( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + ensure!(sender_balance >= amount, "NotEnoughBalance"); + let reminder = sender_balance - amount; + + // .. snip + Ok(()) + } + + #[docify::export] + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer_better_checked( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + let reminder = sender_balance.checked_sub(amount).ok_or("NotEnoughBalance")?; + + // .. snip + Ok(()) + } + } +} diff --git a/docs/sdk/src/guides/your_first_runtime.rs b/docs/sdk/src/guides/your_first_runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..3e02ef1b1b28eedb7cea0ce38fb8b372a0caaf60 --- /dev/null +++ b/docs/sdk/src/guides/your_first_runtime.rs @@ -0,0 +1 @@ +//! # Your first Runtime diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b0abb50b52dae32a8e8b19f4248035b591c908c4 --- /dev/null +++ b/docs/sdk/src/lib.rs @@ -0,0 +1,43 @@ +//! # Polkadot SDK Docs +//! +//! The Polkadot SDK Developer Documentation. +//! +//! This crate is a *minimal*, but *always-accurate* source of information for those wishing to +//! build on the Polkadot SDK. +//! +//! > **Work in Progress**: This crate is under heavy development. Expect content to be moved and +//! > changed. Do not use links to this crate yet. See [`meta_contributing`] for more information. +//! +//! ## Getting Started +//! +//! We suggest the following reading sequence: +//! +//! - Start by learning about the the [`polkadot_sdk`], its structure and context. +//! - Then, head over the [`guides`]. This modules contains in-depth guides about the most important +//! user-journeys of the Polkadot SDK. +//! - Whilst reading the guides, you might find back-links to [`crate::reference_docs`]. +//! - Finally, is the parent website of this crate that contains the +//! list of further tools related to the Polkadot SDK. +//! +//! ## Information Architecture +//! +//! 
This section paints a picture over the high-level information architecture of this crate. +#![doc = simple_mermaid::mermaid!("../../mermaid/IA.mmd")] +#![allow(rustdoc::invalid_html_tags)] // TODO: remove later. https://github.com/paritytech/polkadot-sdk-docs/issues/65 +#![allow(rustdoc::bare_urls)] // TODO: remove later. https://github.com/paritytech/polkadot-sdk-docs/issues/65 +#![warn(rustdoc::broken_intra_doc_links)] +#![warn(rustdoc::private_intra_doc_links)] + +/// Meta information about this crate, how it is built, what principles dictates its evolution and +/// how one can contribute to it. +pub mod meta_contributing; + +/// In-depth guides about the most common components of the Polkadot SDK. They are slightly more +/// high level and broad than reference docs. +pub mod guides; +/// An introduction to the Polkadot SDK. Read this module to learn about the structure of the SDK, +/// the tools that are provided as a part of it, and to gain a high level understanding of each. +pub mod polkadot_sdk; +/// Reference documents covering in-depth topics across the Polkadot SDK. It is suggested to read +/// these on-demand, while you are going through the [`guides`] or other content. +pub mod reference_docs; diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs new file mode 100644 index 0000000000000000000000000000000000000000..0d3ecea4655721cb6c048b2082b1c739647ce260 --- /dev/null +++ b/docs/sdk/src/meta_contributing.rs @@ -0,0 +1,146 @@ +//! # Contribution +//! +//! The following sections cover more detailed information about this crate and how it should be +//! maintained. +//! +//! ## Why Rust Docs? +//! +//! We acknowledge that blockchain based systems, particularly a cutting-edge one like Polkadot SDK +//! is a software artifact that is complex, and rapidly evolving. This makes the task of documenting +//! it externally extremely difficult, especially with regards to making sure it is up-to-date. +//! +//! Consequently, we argue that the best hedge against this is to move as much of the documentation +//! near the source code as possible. This would further incentivize developers to keep the +//! documentation up-to-date, as the overhead is reduced by making sure everything is in one +//! repository, and everything being in `.rs` files. +//! +//! > This is not say that a more visually appealing version of this crate (for example as an +//! > `md-book`) cannot exist, but it would be outside the scope of this crate. +//! +//! Moreover, we acknowledge that a major pain point has been not only outdated *concepts*, but also +//! *outdated code*. For this, we commit to making sure no code-snippet in this crate is left as +//! `///ignore` or `///no_compile`, making sure all code snippets are self-contained, compile-able, +//! and correct at every single revision of the entire repository. +//! +//! > This also allows us to have a clear versioning on the entire content of this crate. For every +//! commit of the Polkadot SDK, there would be one version of this crate that is guaranteed to be +//! correct. +//! +//! > To achieve this, we often use [`docify`](https://github.com/sam0x17/docify), a nifty invention +//! > of `@sam0x17`. +//! +//! Also see: . +//! +//! ## Scope +//! +//! The above would NOT be attainable if we don't acknowledge that the scope of this crate MUST be +//! limited, or else its maintenance burden would be infeasible or not worthwhile. In short, future +//! 
maintainers should always strive to keep the content of this repository as minimal as possible.
+//! Some of the following principles are specifically there to provide guidance for this.
+//!
+//! ## Principles
+//!
+//! The following guidelines are meant to be the guiding torch of those who contribute to this
+//! crate.
+//!
+//! 1. 🔺 Ground Up: Information should be laid out in the most ground-up fashion. The lowest level
+//!    (i.e. "ground") is Rust-docs. The highest level (i.e. "up") is "outside of this crate". In
+//!    between lie [`reference_docs`] and [`guides`], from low to high. The point of this principle
+//!    is to document as much of the information as possible in the lower-level media, as it is
+//!    easier to maintain and more reachable. Then, use extensive linking to refer back to it when
+//!    writing at a higher level.
+//!
+//! > A prime example of this: the details of the FRAME storage APIs should NOT be explained in a
+//! > high-level tutorial. They should be explained in the rust-doc of the corresponding type or
+//! > macro.
+//!
+//! 2. 🧘 Less is More: For reasons mentioned [above](#crate::why-rust-docs), the more concise this
+//!    crate is, the better.
+//! 3. √ Don't Repeat Yourself (DRY): A summary of the above two points. Authors should always
+//!    strive to avoid any duplicate information. Every concept should ideally be documented in
+//!    *ONE* place and one place only. This makes the task of maintaining topics significantly
+//!    easier.
+//!
+//! > A prime example of this: the list of CLI arguments of a particular binary should not be
+//! > documented in multiple places across this crate. It should only be documented in the
+//! > corresponding crate (e.g. `sc_cli`).
+//!
+//! > Moreover, this means that as a contributor, **it is your responsibility to have a grasp over
+//! > what topics are already covered in this crate, and how you can build on top of the information
+//! > that they already provide, rather than repeating yourself**.
+//!
+//! For more details about documenting guidelines, see:
+//!
+//!
+//! #### Example: Explaining `#[pallet::call]`
+//!
+//!
+//! Let's consider the seemingly simple example of explaining some dead-simple FRAME call code to
+//! someone, and see how we can use the above principles.
+//!
+//!
+//!
+//! ```
+//! #[frame::pallet(dev_mode)]
+//! pub mod pallet {
+//! #   use frame::prelude::*;
+//! #   #[pallet::config]
+//! #   pub trait Config: frame_system::Config {}
+//! #   #[pallet::pallet]
+//! #   pub struct Pallet<T>(_);
+//!     #[pallet::call]
+//!     impl<T: Config> Pallet<T> {
+//!         pub fn a_simple_call(origin: OriginFor<T>, data: u32) -> DispatchResult {
+//!             ensure!(data > 10, "SomeStaticString");
+//!             todo!();
+//!         }
+//!     }
+//! }
+//! ```
+//!
+//! * Before even getting started, what is with all of this `<T: Config>`? We link to
+//!   [`crate::reference_docs::trait_based_programming`].
+//! * First, the name. Why is this called `pallet::call`? This goes back to `enum Call`, which is
+//!   explained in [`crate::reference_docs::frame_composite_enums`]. Build on top of this!
+//! * Then, what is `origin`? Just an account id? [`crate::reference_docs::frame_origin`].
+//! * Then, what is `DispatchResult`? Why is this called *dispatch*? Probably something that can be
+//!   explained in the documentation of [`frame::prelude::DispatchResult`].
+//! * Why is `"SomeStaticString"` a valid error? Because there is an implementation for it that you
+//!   can see [here](frame::prelude::DispatchError#impl-From<%26'static+str>-for-DispatchError).
+//!
+//!
+//! All of these are examples of underlying information that a contributor should:
+//!
+//! 1. Try to create, as they are going along.
+//! 2. Back-link to, if they already exist.
+//!
+//! Of course, all of this is not set in stone as an either/or rule. Sometimes, it is necessary to
+//! rephrase a concept in a new context.
+//!
+//!
+//! +//! ## `docs.substrate.io` +//! +//! This crate is meant to gradually replace `docs.substrate.io`. As any content is added here, the +//! corresponding counter-part should be marked as deprecated, as described +//! [here](https://github.com/paritytech/polkadot-sdk-docs/issues/26). +//! +//! ## `crates.io` and Publishing +//! +//! As it stands now, this crate cannot be published to crates.io because of its use of +//! [workspace-level `docify`](https://github.com/sam0x17/docify/issues/22). For now, we accept this +//! compromise, but in the long term, we should work towards finding a way to maintain different +//! revisions of this crate. +//! +//! ## How to Build +//! +//! To build this crate properly, with with right HTML headers injected, run: +//! +//! ```no_compile +//! RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/toc.html" cargo doc -p polkadot-sdk-docs +//! ``` +//! +//! adding `--no-deps` would speed up the process while development. If even faster build time for +//! docs is needed, you can temporarily remove most of the substrate/cumulus dependencies that are +//! only used for linking purposes. diff --git a/docs/sdk/src/polkadot_sdk/cumulus.rs b/docs/sdk/src/polkadot_sdk/cumulus.rs new file mode 100644 index 0000000000000000000000000000000000000000..07a48c92d8075ed75f2f7a3e71e170587b4074e9 --- /dev/null +++ b/docs/sdk/src/polkadot_sdk/cumulus.rs @@ -0,0 +1,130 @@ +//! # Cumulus +//! +//! Substrate provides a framework ([FRAME]) through which a blockchain node and runtime can easily +//! be created. Cumulus aims to extend the same approach to creation of Polkadot parachains. +//! +//! > Cumulus clouds are shaped sort of like dots; together they form a system that is intricate, +//! > beautiful and functional. +//! +//! ## Example: Runtime +//! +//! A Cumulus-based runtime is fairly similar to other [FRAME]-based runtimes. Most notably, the +//! following changes are applied to a normal FRAME-based runtime to make it a Cumulus-based +//! runtime: +//! +//! #### Cumulus Pallets +//! +//! A parachain runtime should use a number of pallets that are provided by Cumulus and Substrate. +//! Notably: +//! +//! - [`frame-system`](frame::prelude::frame_system), like all FRAME-based runtimes. +//! - [`cumulus_pallet_parachain_system`] +//! - [`parachain_info`] +#![doc = docify::embed!("./src/polkadot_sdk/cumulus.rs", system_pallets)] +//! +//! Given that all Cumulus-based runtimes use a simple Aura-based consensus mechanism, the following +//! pallets also need to be added: +//! +//! - [`pallet_timestamp`] +//! - [`pallet_aura`] +//! - [`cumulus_pallet_aura_ext`] +#![doc = docify::embed!("./src/polkadot_sdk/cumulus.rs", consensus_pallets)] +//! +//! +//! Finally, a separate macro, similar to +//! [`impl_runtime_api`](frame::runtime::prelude::impl_runtime_apis), which creates the default set +//! of runtime APIs, will generate the parachain runtime's validation runtime API, also known as +//! parachain validation function (PVF). Without this API, the relay chain is unable to validate +//! blocks produced by our parachain. +#![doc = docify::embed!("./src/polkadot_sdk/cumulus.rs", validate_block)] +//! +//! --- +//! +//! 
[FRAME]: crate::polkadot_sdk::frame_runtime + +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] + +#[cfg(test)] +mod tests { + mod runtime { + pub use frame::{ + deps::sp_consensus_aura::sr25519::AuthorityId as AuraId, prelude::*, + runtime::prelude::*, testing_prelude::*, + }; + + #[docify::export(CR)] + construct_runtime!( + pub struct Runtime { + // system-level pallets. + System: frame_system, + Timestamp: pallet_timestamp, + ParachainSystem: cumulus_pallet_parachain_system, + ParachainInfo: parachain_info, + + // parachain consensus support -- mandatory. + Aura: pallet_aura, + AuraExt: cumulus_pallet_aura_ext, + } + ); + + #[docify::export] + mod system_pallets { + use super::*; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + } + + impl cumulus_pallet_parachain_system::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type OutboundXcmpMessageSource = (); + type XcmpMessageHandler = (); + type ReservedDmpWeight = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = + cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + 6000, // relay chain block time + 1, + 1, + >; + type WeightInfo = (); + type DmpQueue = frame::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; + } + + impl parachain_info::Config for Runtime {} + } + + #[docify::export] + mod consensus_pallets { + use super::*; + + impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; + } + + #[docify::export(timestamp)] + #[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig as pallet_timestamp::DefaultConfig)] + impl pallet_timestamp::Config for Runtime {} + + impl cumulus_pallet_aura_ext::Config for Runtime {} + } + + #[docify::export(validate_block)] + cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, + } + } +} diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..32dc2045e3a4745b0dba3d416dc118ff92949e35 --- /dev/null +++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs @@ -0,0 +1,179 @@ +//! # FRAME +//! +//! ```no_compile +//! ______ ______ ________ ___ __ __ ______ +//! /_____/\ /_____/\ /_______/\ /__//_//_/\ /_____/\ +//! \::::_\/_\:::_ \ \ \::: _ \ \\::\| \| \ \\::::_\/_ +//! \:\/___/\\:(_) ) )_\::(_) \ \\:. \ \\:\/___/\ +//! \:::._\/ \: __ `\ \\:: __ \ \\:.\-/\ \ \\::___\/_ +//! \:\ \ \ \ `\ \ \\:.\ \ \ \\. \ \ \ \\:\____/\ +//! \_\/ \_\/ \_\/ \__\/\__\/ \__\/ \__\/ \_____\/ +//! ``` +//! +//! > **F**ramework for **R**untime **A**ggregation of **M**odularized **E**ntities: Substrate's +//! > State Transition Function (Runtime) Framework. +//! +//! ## Introduction +//! +//! As described in [`crate::reference_docs::wasm_meta_protocol`], at a high-level Substrate-based +//! blockchains are composed of two parts: +//! +//! 1. 
A *runtime* which represents the state transition function (i.e. "Business Logic") of a +//! blockchain, and is encoded as a WASM blob. +//! 2. A node whose primary purpose is to execute the given runtime. +#![doc = simple_mermaid::mermaid!("../../../mermaid/substrate_simple.mmd")] +//! +//! *FRAME is the Substrate's framework of choice to build a runtime.* +//! +//! FRAME is composed of two major components, **pallets** and a **runtime**. +//! +//! ## Pallets +//! +//! A pallet is a unit of encapsulated logic. It has a clearly defined responsibility and can be +//! linked to other pallets. In order to be reusable, pallets shipped with FRAME strive to only care +//! about its own responsibilities and make as few assumptions about the general runtime as +//! possible. A pallet is analogous to a _module_ in the runtime. +//! +//! A pallet is defined as a `mod pallet` wrapped by the [`frame::pallet`] macro. Within this macro, +//! pallet components/parts can be defined. Most notable of these parts are: +//! +//! - [Config](frame::pallet_macros::config), allowing a pallet to make itself configurable and +//! generic over types, values and such. +//! - [Storage](frame::pallet_macros::storage), allowing a pallet to define onchain storage. +//! - [Dispatchable function](frame::pallet_macros::call), allowing a pallet to define extrinsics +//! that are callable by end users, from the outer world. +//! - [Events](frame::pallet_macros::event), allowing a pallet to emit events. +//! - [Errors](frame::pallet_macros::error), allowing a pallet to emit well-formed errors. +//! +//! Some of these pallet components resemble the building blocks of a smart contract. While both +//! models are programming state transition functions of blockchains, there are crucial differences +//! between the two. See [`crate::reference_docs::runtime_vs_smart_contract`] for more. +//! +//! Most of these components are defined using macros, the full list of which can be found in +//! [`frame::pallet_macros`]. +//! +//! ### Example +//! +//! The following examples showcases a minimal pallet. +#![doc = docify::embed!("src/polkadot_sdk/frame_runtime.rs", pallet)] +//! +//! +//! A runtime is a collection of pallets that are amalgamated together. Each pallet typically has +//! some configurations (exposed as a `trait Config`) that needs to be *specified* in the runtime. +//! This is done with [`frame::runtime::prelude::construct_runtime`]. +//! +//! A (real) runtime that actually wishes to compile to WASM needs to also implement a set of +//! runtime-apis. These implementation can be specified using the +//! [`frame::runtime::prelude::impl_runtime_apis`] macro. +//! +//! ### Example +//! +//! The following example shows a (test) runtime that is composing the pallet demonstrated above, +//! next to the [`frame::prelude::frame_system`] pallet, into a runtime. +#![doc = docify::embed!("src/polkadot_sdk/frame_runtime.rs", runtime)] +//! +//! ## More Examples +//! +//! You can find more FRAME examples that revolve around specific features at [`pallet_examples`]. +//! +//! ## Alternatives 🌈 +//! +//! There is nothing in the Substrate's node side code-base that mandates the use of FRAME. While +//! FRAME makes it very simple to write Substrate-based runtimes, it is by no means intended to be +//! the only one. At the end of the day, any WASM blob that exposes the right set of runtime APIs is +//! a valid Runtime form the point of view of a Substrate client (see +//! [`crate::reference_docs::wasm_meta_protocol`]). 
Notable examples are: +//! +//! * writing a runtime in pure Rust, as done in [this template](https://github.com/JoshOrndorff/frameless-node-template). +//! * writing a runtime in AssemblyScript,as explored in [this project](https://github.com/LimeChain/subsembly). + +#[cfg(test)] +mod tests { + use frame::prelude::*; + + /// A FRAME based pallet. This `mod` is the entry point for everything else. All + /// `#[pallet::xxx]` macros must be defined in this `mod`. Although, frame also provides an + /// experimental feature to break these parts into different `mod`s. See [`pallet_examples`] for + /// more. + #[docify::export] + #[frame::pallet(dev_mode)] + pub mod pallet { + use super::*; + + /// The configuration trait of a pallet. Mandatory. Allows a pallet to receive types at a + /// later point from the runtime that wishes to contain it. It allows the pallet to be + /// parameterized over both types and values. + #[pallet::config] + pub trait Config: frame_system::Config { + /// A type that is not known now, but the runtime that will contain this pallet will + /// know it later, therefore we define it here as an associated type. + type RuntimeEvent: IsType<::RuntimeEvent> + + From>; + + /// A parameterize-able value that we receive later via the `Get<_>` trait. + type ValueParameter: Get; + + /// Similar to [`Config::ValueParameter`], but using `const`. Both are functionally + /// equal, but offer different tradeoffs. + const ANOTHER_VALUE_PARAMETER: u32; + } + + /// A mandatory struct in each pallet. All functions callable by external users (aka. + /// transactions) must be attached to this type (see [`frame::pallet_macros::call`]). For + /// convenience, internal (private) functions can also be attached to this type. + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// The events tha this pallet can emit. + #[pallet::event] + pub enum Event {} + + /// A storage item that this pallet contains. This will be part of the state root trie/root + /// of the blockchain. + #[pallet::storage] + pub type Value = StorageValue; + + /// All *dispatchable* call functions (aka. transactions) are attached to `Pallet` in a + /// `impl` block. + #[pallet::call] + impl Pallet { + /// This will be callable by external users, and has two u32s as a parameter. + pub fn some_dispatchable( + _origin: OriginFor, + _param: u32, + _other_para: u32, + ) -> DispatchResult { + Ok(()) + } + } + } + + /// A simple runtime that contains the above pallet and `frame_system`, the mandatory pallet of + /// all runtimes. This runtime is for testing, but it shares a lot of similarities with a *real* + /// runtime. + #[docify::export] + pub mod runtime { + use super::pallet as pallet_example; + use frame::{prelude::*, testing_prelude::*}; + + // The major macro that amalgamates pallets into `struct Runtime` + construct_runtime!( + pub struct Runtime { + System: frame_system, + Example: pallet_example, + } + ); + + // These `impl` blocks specify the parameters of each pallet's `trait Config`. 
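+		// `derive_impl` pulls in reasonable test defaults for most of `frame_system`'s associated
+		// types from the given default config (`TestDefaultConfig` here), so only the types we
+		// explicitly care about need to be specified in the `impl` blocks below.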
+ #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_example::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValueParameter = ConstU32<42>; + const ANOTHER_VALUE_PARAMETER: u32 = 42; + } + } +} diff --git a/docs/sdk/src/polkadot_sdk/mod.rs b/docs/sdk/src/polkadot_sdk/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..124d391421b9049dd5865fae0ac9e739e3f46cce --- /dev/null +++ b/docs/sdk/src/polkadot_sdk/mod.rs @@ -0,0 +1,134 @@ +//! # Polkadot SDK +//! +//! [Polkadot SDK](https://github.com/paritytech/polkadot-sdk) provides the main resources needed to +//! start building on the [Polkadot network](https://polkadot.network), a scalable, multi-chain +//! blockchain platform that enables different blockchains to securely interoperate. +//! +//! [![StackExchange](https://img.shields.io/badge/StackExchange-Polkadot%20and%20Substrate-222222?logo=stackexchange)](https://substrate.stackexchange.com/) +//! +//! [![awesomeDot](https://img.shields.io/badge/polkadot-awesome-e6007a?logo=polkadot)](https://github.com/Awsmdot/awesome-dot) +//! [![wiki](https://img.shields.io/badge/polkadot-wiki-e6007a?logo=polkadot)](https://wiki.polkadot.network/) +//! [![forum](https://img.shields.io/badge/polkadot-forum-e6007a?logo=polkadot)](https://forum.polkadot.network/) +//! +//! [![RFCs](https://img.shields.io/badge/fellowship-RFCs-e6007a?logo=polkadot)](https://github.com/polkadot-fellows/rfcs) +//! [![Runtime](https://img.shields.io/badge/fellowship-runtimes-e6007a?logo=polkadot)](https://github.com/polkadot-fellows/runtimes) +//! [![Manifesto](https://img.shields.io/badge/fellowship-manifesto-e6007a?logo=polkadot)](https://github.com/polkadot-fellows/manifesto) +//! +//! ## Getting Started +//! +//! The primary way to get started with the Polkadot SDK is to start writing a FRAME-based runtime. +//! See: +//! +//! * [`polkadot`], to understand what is Polkadot as a development platform. +//! * [`substrate`], for an overview of what Substrate as the main blockchain framework of Polkadot +//! SDK. +//! * [`frame`], to learn about how to write blockchain applications aka. "App Chains". +//! * Continue with the [`polkadot_sdk_docs`'s "getting started"](crate#getting-started). +//! +//! ## Components +//! +//! #### Substrate +//! +//! [![Substrate-license](https://img.shields.io/badge/License-GPL3%2FApache2.0-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/LICENSE-APACHE2) +//! [![GitHub +//! Repo](https://img.shields.io/badge/github-substrate-2324CC85)](https://github.com/paritytech/polkadot-sdk/blob/master/substrate) +//! +//! [`substrate`] is the base blockchain framework used to power the Polkadot SDK. It is a full +//! toolkit to create sovereign blockchains, including but not limited to those who connect to +//! Polkadot as parachains. +//! +//! #### FRAME +//! +//! [![Substrate-license](https://img.shields.io/badge/License-Apache2.0-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/LICENSE-APACHE2) +//! [![GitHub +//! Repo](https://img.shields.io/badge/github-frame-2324CC85)](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/frame) +//! +//! [`frame`] is the framework used to create Substrate-based application logic, aka. runtimes. +//! Learn more about the distinction of a runtime and node in +//! [`reference_docs::wasm_meta_protocol`]. +//! +//! #### Cumulus +//! +//! 
[![Cumulus-license](https://img.shields.io/badge/License-GPL3-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/cumulus/LICENSE)
+//! [![GitHub
+//! Repo](https://img.shields.io/badge/github-cumulus-white)](https://github.com/paritytech/polkadot-sdk/blob/master/cumulus)
+//!
+//! [`cumulus`] transforms FRAME-based runtimes into Polkadot-compatible parachain runtimes, and
+//! Substrate-based nodes into Polkadot/Parachain-compatible nodes.
+//!
+//! #### XCM
+//!
+//! [![XCM-license](https://img.shields.io/badge/License-GPL3-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/LICENSE)
+//! [![GitHub
+//! Repo](https://img.shields.io/badge/github-XCM-e6007a?logo=polkadot)](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm)
+//!
+//! [`xcm`], short for "cross consensus message", is the primary format that is used for
+//! communication between parachains, but is intended to be extensible to other use cases as well.
+//!
+//! #### Polkadot
+//!
+//! [![Polkadot-license](https://img.shields.io/badge/License-GPL3-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/LICENSE)
+//! [![GitHub
+//! Repo](https://img.shields.io/badge/github-polkadot-e6007a?logo=polkadot)](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot)
+//!
+//! [`polkadot`] is an implementation of a Polkadot node in Rust, by `@paritytech`. The Polkadot
+//! runtimes are located under the
+//! [`polkadot-fellows/runtimes`](https://github.com/polkadot-fellows/runtimes) repository.
+//!
+//! ### Summary
+//!
+//! The following diagram summarizes how some of the components of Polkadot SDK work together:
+#![doc = simple_mermaid::mermaid!("../../../mermaid/polkadot_sdk_substrate.mmd")]
+//!
+//! A Substrate-based chain is a blockchain composed of a runtime and a node. As noted above, the
+//! runtime is the application logic of the blockchain, and the node is everything else.
+//! See [`crate::reference_docs::wasm_meta_protocol`] for an in-depth explanation of this. The
+//! former is built with [`frame`], and the latter is built with the rest of Substrate.
+//!
+//! > You can think of a Substrate-based chain as a white-labeled blockchain.
+#![doc = simple_mermaid::mermaid!("../../../mermaid/polkadot_sdk_polkadot.mmd")]
+//! Polkadot is itself a Substrate-based chain, composed of the exact same two components. It has
+//! specialized logic in both the node and the runtime side, but it is not "special" in any way.
+//!
+//! A parachain is a "special" Substrate-based chain, whereby both the node and the runtime
+//! components have become "Polkadot-aware" using Cumulus.
+#![doc = simple_mermaid::mermaid!("../../../mermaid/polkadot_sdk_parachain.mmd")]
+//!
+//! ## Notable Upstream Crates
+//!
+//! - [`parity-scale-codec`](https://github.com/paritytech/parity-scale-codec)
+//! - [`parity-db`](https://github.com/paritytech/parity-db)
+//! - [`trie`](https://github.com/paritytech/trie)
+//! - [`parity-common`](https://github.com/paritytech/parity-common)
+//!
+//! ## Trophy Section: Notable Downstream Projects
+//!
+//! A list of projects and tools in the blockchain ecosystem that, one way or another, use parts of
+//! the Polkadot SDK:
+//!
+//! * [Polygon's spin-off, Avail](https://github.com/availproject/avail)
+//! * [Cardano Partner Chains](https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/)
+//! * [Starknet's Madara Sequencer](https://github.com/keep-starknet-strange/madara)
+//!
+//! [`substrate`]: crate::polkadot_sdk::substrate
+//! 
[`frame`]: crate::polkadot_sdk::frame_runtime
+//! [`cumulus`]: crate::polkadot_sdk::cumulus
+//! [`polkadot`]: crate::polkadot_sdk::polkadot
+//! [`xcm`]: crate::polkadot_sdk::xcm
+
+/// Learn about Cumulus, the framework that transforms [`substrate`]-based chains into
+/// [`polkadot`]-enabled parachains.
+pub mod cumulus;
+/// Learn about FRAME, the framework used to build Substrate runtimes.
+pub mod frame_runtime;
+/// Learn about Polkadot as a platform.
+pub mod polkadot;
+/// Learn about different ways through which smart contracts can be utilized on top of Substrate,
+/// and in the Polkadot ecosystem.
+pub mod smart_contracts;
+/// Learn about Substrate, the main blockchain framework used in the Polkadot ecosystem.
+pub mod substrate;
+/// Index of all the templates that can act as a first scaffold for a new project.
+pub mod templates;
+/// Learn about XCM, the de-facto communication language between different consensus systems.
+pub mod xcm;
diff --git a/docs/sdk/src/polkadot_sdk/polkadot.rs b/docs/sdk/src/polkadot_sdk/polkadot.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d157a660e5648926f9764957015bcf6c54aff356
--- /dev/null
+++ b/docs/sdk/src/polkadot_sdk/polkadot.rs
@@ -0,0 +1,87 @@
+//! # Polkadot
+//!
+//! Implementation of the Polkadot node/host in Rust.
+//!
+//! ## Learn More and Get Involved
+//!
+//! - [Polkadot Forum](https://forum.polkadot.network/)
+//! - [Polkadot Parachains](https://parachains.info/)
+//! - [Polkadot (multi-chain) Explorer](https://subscan.io/)
+//! - Polkadot Fellowship
+//!   - [Manifesto](https://github.com/polkadot-fellows/manifesto)
+//!   - [Runtimes](https://github.com/polkadot-fellows/runtimes)
+//!   - [RFCs](https://github.com/polkadot-fellows/rfcs)
+//! - [Polkadot Specs](https://spec.polkadot.network)
+//! - [The Polkadot Parachain Host Implementers' Guide](https://paritytech.github.io/polkadot-sdk/book/)
+//! - [Whitepaper](https://www.polkadot.network/whitepaper/)
+//!
+//! ## Alternative Node Implementations 🌈
+//!
+//! - [Smoldot](https://crates.io/crates/smoldot-light). Polkadot light node/client.
+//! - [KAGOME](https://github.com/qdrvm/kagome). C++ implementation of the Polkadot host.
+//! - [Gossamer](https://github.com/ChainSafe/gossamer). Golang implementation of the Polkadot host.
+//!
+//! ## Platform
+//!
+//! In this section, we examine what platform Polkadot exactly provides to developers.
+//!
+//! ### Polkadot White Paper
+//!
+//! The original vision of Polkadot (everything in the whitepaper, which was eventually called
+//! **Polkadot 1.0**) revolves around the following arguments:
+//!
+//! * The future is multi-chain, because we need different chains with different specializations to
+//!   achieve widespread goals.
+//! * In other words, no single chain is good enough to achieve all goals.
+//! * A multi-chain future will inevitably suffer from fragmentation of economic security.
+//! * This stake fragmentation will make communication over consensus systems with varying security
+//!   levels inherently unsafe.
+//!
+//! Polkadot's answer to the above is:
+//!
+//! > The chains of the future must have a way to share their economic security, whilst maintaining
+//! > their execution and governance sovereignty. These chains are called "Parachains".
+//!
+//! * Shared Security: The idea of shared economic security sits at the core of Polkadot. Polkadot
+//!   enables different parachains to pool their economic security from Polkadot (i.e. the "*Relay
+//!   Chain*").
+//! 
* (heterogenous) Sharded Execution: Yet, each parachain is free to have its own execution logic +//! (runtime), which also encompasses governance and sovereignty. Moreover, Polkadot ensures the +//! correct execution of all parachain, without having all of its validators re-execute all +//! parachain blocks. When seen from this perspective, the fact that Polkadot executes different +//! parachains means it is a platform that has fully delivered (the holy grail of) "Full Execution +//! Sharding". TODO: link to approval checking article. https://github.com/paritytech/polkadot-sdk-docs/issues/66 +//! * A framework to build blockchains: In order to materialize the ecosystem of parachains, an easy +//! blockchain framework must exist. This is [Substrate](crate::polkadot_sdk::substrate), +//! [FRAME](crate::polkadot_sdk::frame_runtime) and [Cumulus](crate::polkadot_sdk::cumulus). +//! * A communication language between blockchains: In order for these blockchains to communicate, +//! they need a shared language. [XCM](crate::polkadot_sdk::xcm) is one such language, and the one +//! that is most endorsed in the Polkadot ecosystem. +//! +//! > Note that the interoperability promised by Polkadot is unparalleled in that any two parachains +//! > connected to Polkadot have the same security and can have much better guarantees about the +//! > security of the recipient of any message. TODO: weakest link in bridges systems. https://github.com/paritytech/polkadot-sdk-docs/issues/66 +//! +//! Polkadot delivers the above vision, alongside a flexible means for parachains to schedule +//! themselves with the Relay Chain. To achieve this, Polkadot has been developed with an +//! architecture similar to that of a computer. Polkadot Relay Chain has a number of "cores". Each +//! core is (in simple terms) capable of progressing 1 parachain at a time. For example, a parachain +//! can schedule itself on a single core for 5 relay chain blocks. +//! +//! Within the scope of Polkadot 1.x, two main scheduling ways have been considered: +//! +//! * Long term Parachains, obtained through locking a sum of DOT in an auction system. +//! * on-demand Parachains, purchased through paying DOT to the relay-chain whenever needed. +//! +//! ### The Future +//! +//! After delivering Polkadot 1.x, the future of Polkadot as a protocol and platform is in the hands +//! of the community and the fellowship. This is happening most notable through the RFC process. +//! Some of the RFCs that do alter Polkadot as a platform and have already passed are as follows: +//! +//! - RFC#1: [Agile-coretime](https://github.com/polkadot-fellows/RFCs/blob/main/text/0001-agile-coretime.md): +//! Agile periodic-sale-based model for assigning Coretime on the Polkadot Ubiquitous Computer. +//! - RFC#5: [Coretime-interface](https://github.com/polkadot-fellows/RFCs/blob/main/text/0005-coretime-interface.md): +//! Interface for manipulating the usage of cores on the Polkadot Ubiquitous Computer. +// TODO: add more context and explanations about Polkadot as the Ubiquitous Computer and related +// tech. https://github.com/paritytech/polkadot-sdk-docs/issues/66 diff --git a/docs/sdk/src/polkadot_sdk/smart_contracts.rs b/docs/sdk/src/polkadot_sdk/smart_contracts.rs new file mode 100644 index 0000000000000000000000000000000000000000..a4916f9c9218007111b0ec32d677cdc4e4e7e867 --- /dev/null +++ b/docs/sdk/src/polkadot_sdk/smart_contracts.rs @@ -0,0 +1,9 @@ +//! # Smart Contracts +//! +//! TODO: @cmichi https://github.com/paritytech/polkadot-sdk-docs/issues/56 +//! 
+//! - WASM and EVM based, pallet-contracts and pallet-evm.
+//! - single-dapp-chain, transition from ink! to FRAME.
+//! - Link to `use.ink`
+//! - Link to [`crate::reference_docs::runtime_vs_smart_contract`].
+//! - https://use.ink/migrate-ink-contracts-to-polkadot-frame-parachain/
diff --git a/docs/sdk/src/polkadot_sdk/substrate.rs b/docs/sdk/src/polkadot_sdk/substrate.rs
new file mode 100644
index 0000000000000000000000000000000000000000..fd172f71469fc5fa42607af078da524c79099f9b
--- /dev/null
+++ b/docs/sdk/src/polkadot_sdk/substrate.rs
@@ -0,0 +1,151 @@
+//! # Substrate
+//!
+//! Substrate is a Rust framework for building blockchains in a modular and extensible way. While in
+//! itself un-opinionated, it is the main engine behind the Polkadot ecosystem.
+//!
+//! ## Overview, Philosophy
+//!
+//! Substrate approaches blockchain development with an acknowledgement of a few self-evident
+//! truths:
+//!
+//! 1. Society and technology evolve.
+//! 2. Humans are fallible.
+//!
+//! This makes the task of designing a correct, safe and long-lasting blockchain system hard.
+//!
+//! Nonetheless, in striving towards this goal, Substrate embraces the following:
+//!
+//! 1. Use of **Rust** as a modern and safe programming language, which limits human error through
+//!    various means, most notably memory and type safety.
+//! 2. Substrate is written from the ground-up with a *generic, modular and extensible* design. This
+//!    ensures that software components can be easily swapped and upgraded. Examples of this are the
+//!    multiple consensus mechanisms provided by Substrate, as listed below.
+//! 3. Lastly, the final blockchain system created with the above properties needs to be
+//!    upgradeable. In order to achieve this, Substrate is designed as a meta-protocol, whereby the
+//!    application logic of the blockchain (called "Runtime") is encoded as a WASM blob, and is
+//!    stored in the state. The rest of the system (called "node") acts as the executor of the WASM
+//!    blob.
+//!
+//! In essence, the meta-protocol of all Substrate-based chains is the "Runtime as WASM blob"
+//! accord. This enables the Runtime to become inherently upgradeable, crucially without forks. The
+//! upgrade is merely a matter of the WASM blob being changed in the state, which is, in principle,
+//! the same as updating an account's balance. Learn more about this in detail in
+//! [`crate::reference_docs::wasm_meta_protocol`].
+//!
+//! > A great analogy for Substrate is the following: a Substrate node is a gaming console, and a
+//! > WASM runtime, possibly created with FRAME, is the game being inserted into the console.
+//!
+//! [`frame`], Substrate's default runtime development library, takes the above safety practices
+//! even further by embracing a declarative programming model whereby correctness is enhanced and
+//! the system is highly configurable through parameterization. Learn more about this in
+//! [`crate::reference_docs::trait_based_programming`].
+//!
+//! ## How to Get Started
+//!
+//! Substrate offers different options along the spectrum of technical freedom <-> development ease.
+//!
+//! * The easiest way to use Substrate is to use one of the templates (some of which are listed at
+//!   [`crate::polkadot_sdk::templates`]) and only tweak the parameters of the runtime or node. This
+//!   allows you to launch a blockchain in minutes, but is limited in technical freedom.
+//! * Next, most developers wish to develop their custom runtime modules, for which the de-facto way
+//! 
is [`frame`](crate::polkadot_sdk::frame_runtime). +//! * Finally, Substrate is highly configurable at the node side as well, but this is the most +//! technically demanding. +//! +//! > A notable Substrate-based blockchain that has built both custom FRAME pallets and custom +//! > node-side components is . +#![doc = simple_mermaid::mermaid!("../../../mermaid/substrate_dev.mmd")] +//! +//! ## Structure +//! +//! Substrate contains a large number of crates, therefore it is useful to have an overview of what +//! they are, and how they are organized. In broad terms, these crates are divided into three +//! categories: +//! +//! * `sc-*` (short for *Substrate-client*) crates, located under `./client` folder. These are all +//! the crates that lead to the node software. Notable examples [`sc_network`], various consensus +//! crates, RPC ([`sc_rpc_api`]) and database ([`sc_client_db`]), all of which are expected to +//! reside in the node side. +//! * `sp-*` (short for *substrate-primitives*) crates, located under `./primitives` folder. These +//! are crates that facilitate both the node and the runtime, but are not opinionated about what +//! framework is using for building the runtime. Notable examples are [`sp_api`] and [`sp_io`], +//! which form the communication bridge between the node and runtime. +//! * `pallet-*` and `frame-*` crates, located under `./frame` folder. These are the crates related +//! to FRAME. See [`frame`] for more information. +//! +//! ### WASM Build +//! +//! Many of the Substrate crates, such as entire `sp-*`, need to compile to both WASM (when a WASM +//! runtime is being generated) and native (for example, when testing). To achieve this, Substrate +//! follows the convention of the Rust community, and uses a `feature = "std"` to signify that a +//! crate is being built with the standard library, and is built for native. Otherwise, it is built +//! for `no_std`. +//! +//! This can be summarized in `#![cfg_attr(not(feature = "std"), no_std)]`, which you can often find +//! in any Substrate-based runtime. +//! +//! Substrate-based runtimes use [`substrate_wasm_builder`] in their `build.rs` to automatically +//! build their WASM files as a part of normal build command (e.g. `cargo build`). Once built, the +//! wasm file is placed in `./target/{debug|release}/wbuild/{runtime_name}.wasm`. +//! +//! ### Binaries +//! +//! Multiple binaries are shipped with substrate, the most important of which are located in the +//! [`./bin`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin) folder. +//! +//! * [`node_cli`] is an extensive substrate node that contains the superset of all runtime and node +//! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the +//! modules that are provided with `FRAME`. This node and runtime is only used for testing and +//! demonstration. +//! * [`chain_spec_builder`]: Utility to build more detailed chain-specs for the aforementioned +//! node. Other projects typically contain a `build-spec` subcommand that does the same. +//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node-template): +//! a template node that contains a minimal set of features and can act as a starting point of a +//! project. +//! * [`subkey`]: Substrate's key management utility. +//! +//! ### Anatomy of a Binary Crate +//! +//! From the above, [`node_cli`]/[`kitchensink_runtime`] and `node-template` are essentially +//! 
blueprints of a Substrate-based project, as the name of the latter is implying. Each +//! Substrate-based project typically contains the following: +//! +//! * Under `./runtime`, a `./runtime/src/lib.rs` which is the top level runtime amalgamator file. +//! This file typically contains the [`frame::runtime::prelude::construct_runtime`] and +//! [`frame::runtime::prelude::impl_runtime_apis`] macro calls, which is the final definition of a +//! runtime. +//! +//! * Under `./node`, a `main.rs`, which is the starting point, and a `./service.rs`, which contains +//! all the node side components. Skimming this file yields an overview of the networking, +//! database, consensus and similar node side components. +//! +//! > The above two are conventions, not rules. +//! +//! > See for an update on how the node side +//! > components are being amalgamated. +//! +//! ## Parachain? +//! +//! As noted above, Substrate is the main engine behind the Polkadot ecosystem. One of the ways +//! through which Polkadot can be utilized is by building "parachains", blockchains that are +//! connected to Polkadot's shared security. +//! +//! To build a parachain, one could use [Cumulus](crate::polkadot_sdk::cumulus), the library on +//! top of Substrate, empowering any substrate-based chain to be a Polkadot parachain. +//! +//! ## Where To Go Next? +//! +//! Additional noteworthy crates within substrate: +//! +//! - RPC APIs of a Substrate node: [`sc_rpc_api`]/[`sc_rpc`] +//! - CLI Options of a Substrate node: [`sc_cli`] +//! - All of the consensus related crates provided by Substrate: +//! - [`sc_consensus_aura`] +//! - [`sc_consensus_babe`] +//! - [`sc_consensus_grandpa`] +//! - [`sc_consensus_beefy`] (TODO: @adrian, add some high level docs https://github.com/paritytech/polkadot-sdk-docs/issues/57) +//! - [`sc_consensus_manual_seal`] +//! - [`sc_consensus_pow`] + +#[doc(hidden)] +pub use crate::polkadot_sdk; diff --git a/docs/sdk/src/polkadot_sdk/templates.rs b/docs/sdk/src/polkadot_sdk/templates.rs new file mode 100644 index 0000000000000000000000000000000000000000..f60c75b8f219334ccc52a9ea8441d21a2dbede8d --- /dev/null +++ b/docs/sdk/src/polkadot_sdk/templates.rs @@ -0,0 +1,45 @@ +//! # Templates +//! +//! ### Internal +//! +//! The following templates are maintained as a part of the `polkadot-sdk` repository: +//! +//! - classic [`substrate-node-template`]: is a white-labeled substrate-based blockchain with a +//! moderate amount of features. It can act as a great starting point for those who want to learn +//! Substrate/FRAME and want to have a template that is already doing something. +//! - [`substrate-minimal-template`]: Same as the above, but it contains the least amount of code in +//! both the node and runtime. It is a great starting point for those who want to deeply learn +//! Substrate and FRAME. +//! - classic [`cumulus-parachain-template`], which is the de-facto parachain template shipped with +//! Cumulus. It is the parachain-enabled version of [`substrate-node-template`]. +//! +//! ### External Templates +//! +//! Noteworthy templates outside of this repository. +//! +//! - [`extended-parachain-template`](https://github.com/paritytech/extended-parachain-template): A +//! parachain template that contains more built-in functionality such as assets and NFTs. +//! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template): A +//! parachain template for launching EVM-compatible parachains. +//! +//! 
[`substrate-node-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/node-template/ +//! [`substrate-minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/minimal/ +//! [`cumulus-parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/cumulus/parachain-template/ + +// TODO: in general, we need to make a deliberate choice here of moving a few key templates to this +// repo (nothing stays in `substrate-developer-hub`) and the everything else should be community +// maintained. https://github.com/paritytech/polkadot-sdk-docs/issues/67 + +// TODO: we should rename `substrate-node-template` to `substrate-basic-template`, +// `substrate-blockchain-template`. `node` is confusing in the name. +// `substrate-blockchain-template` and `cumulus-parachain-template` go well together 🤝. https://github.com/paritytech/polkadot-sdk-docs/issues/67 + +// NOTE: a super important detail that I am looking forward to here is +// and +// . Meaning that I would not spend time on +// teaching someone too much detail about the ugly thing we call "node" nowadays. In the future, I +// am sure we will either have a better "node-builder" code that can actually be tested, or an +// "omni-node" that can run (almost) any wasm file. We should already build tutorials in this +// direction IMO. This also affects all the templates. If we have a good neat runtime file, which we +// are moving toward, and a good node-builder, we don't need all of these damn templates. These +// templates are only there because the boilerplate is super horrible atm. diff --git a/docs/sdk/src/polkadot_sdk/xcm.rs b/docs/sdk/src/polkadot_sdk/xcm.rs new file mode 100644 index 0000000000000000000000000000000000000000..0d600f751c8b1e2ca74fdac9d4d47331fb0233c1 --- /dev/null +++ b/docs/sdk/src/polkadot_sdk/xcm.rs @@ -0,0 +1,5 @@ +//! # XCM +//! +//! @KiChjang @franciscoaguirre +//! TODO: RFCs, xcm-spec, the future of the repo, minimal example perhaps, forward to where actual +//! docs are hosted. https://github.com/paritytech/polkadot-sdk-docs/issues/58 diff --git a/docs/sdk/src/reference_docs/blockchain_scalibility.rs b/docs/sdk/src/reference_docs/blockchain_scalibility.rs new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/sdk/src/reference_docs/blockchain_state_machines.rs b/docs/sdk/src/reference_docs/blockchain_state_machines.rs new file mode 100644 index 0000000000000000000000000000000000000000..0d1aefcc52770b70baf284335816f013cbfbe5ed --- /dev/null +++ b/docs/sdk/src/reference_docs/blockchain_state_machines.rs @@ -0,0 +1,29 @@ +//! # State Transition Function +//! +//! This document briefly explains how in the context of Substrate-based blockchains, we view the +//! blockchain as a **decentralized state transition function**. +//! +//! Recall that a blockchain's main purpose is to help a permissionless set of entities to agree on +//! a shared data-set, and how it evolves. This is called the **State**, also referred to as +//! "onchain" data, or *Storage* in the context of FRAME. The state is where the account balance of +//! each user is, for example, stored, and there is a canonical version of it that everyone agrees +//! upon. +//! +//! Then, recall that a typical blockchain system will alter its state through execution of blocks. +//! *The component that dictates how this state alteration can happen is called the state transition +//! function*. 
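+//!
+//! As a rough sketch (with hypothetical `State`, `Block` and `apply_extrinsic` types, not any
+//! actual Substrate API), the state transition function can be thought of as a plain function
+//! over the previous state and a block:
+//!
+//! ```no_compile
+//! /// Consume the previous state and a block, and produce the next state.
+//! fn state_transition_function(state: State, block: Block) -> State {
+//! 	// Apply each extrinsic contained in the block to the state, in order.
+//! 	block.extrinsics.into_iter().fold(state, apply_extrinsic)
+//! }
+//! ```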
+#![doc = simple_mermaid::mermaid!("../../../mermaid/stf_simple.mmd")]
+//!
+//! In Substrate-based blockchains, the state transition function is called the *Runtime*. This is
+//! explained further in [`crate::reference_docs::wasm_meta_protocol`].
+//!
+//! With this in mind, we can paint a complete picture of a blockchain as a state machine:
+#![doc = simple_mermaid::mermaid!("../../../mermaid/stf.mmd")]
+//!
+//! In essence, the state of the blockchain at block N+1 is the outcome of applying the state
+//! transition function to the previous state and the current block as input. This can be
+//! mathematically represented as:
+//!
+//! ```math
+//! State_{N+1} = STF(State_N, Block_N)
+//! ```
diff --git a/docs/sdk/src/reference_docs/chain_spec_genesis.rs b/docs/sdk/src/reference_docs/chain_spec_genesis.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2ac51a91f2de78cf10262e5b38c2ebf482c15183
--- /dev/null
+++ b/docs/sdk/src/reference_docs/chain_spec_genesis.rs
@@ -0,0 +1,4 @@
+//! Chain spec and genesis build.
+//!
+//! What is a chain-spec.
+//! What is the genesis state and how to build it.
diff --git a/docs/sdk/src/reference_docs/cli.rs b/docs/sdk/src/reference_docs/cli.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5779e0f8d04954e9fe1d245e0af4761fb0246805
--- /dev/null
+++ b/docs/sdk/src/reference_docs/cli.rs
@@ -0,0 +1,104 @@
+//! # Substrate CLI
+//!
+//! Let's see some examples of typical CLI arguments used when setting up and running a
+//! Substrate-based blockchain. We use the [`substrate-node-template`](https://github.com/substrate-developer-hub/substrate-node-template)
+//! in these examples.
+//!
+//! #### Checking the available CLI arguments
+//! ```bash
+//! ./target/debug/node-template --help
+//! ```
+//! - `--help`: Displays the available CLI arguments.
+//!
+//! #### Starting a Local Substrate Node in Development Mode
+//! ```bash
+//! ./target/release/node-template \
+//! --dev
+//! ```
+//! - `--dev`: Runs the node in development mode, using a pre-defined development chain
+//!   specification. This mode ensures a fresh state by deleting existing data on restart.
+//!
+//! #### Generating Custom Chain Specification
+//! ```bash
+//! ./target/debug/node-template \
+//! build-spec \
+//! --disable-default-bootnode \
+//! --chain local \
+//! > customSpec.json
+//! ```
+//!
+//! - `build-spec`: A subcommand to generate a chain specification file.
+//! - `--disable-default-bootnode`: Disables the default bootnodes in the node template.
+//! - `--chain local`: Indicates the chain specification is for a local development chain.
+//! - `> customSpec.json`: Redirects the output into a customSpec.json file.
+//!
+//! #### Converting Chain Specification to Raw Format
+//! ```bash
+//! ./target/debug/node-template build-spec \
+//! --chain=customSpec.json \
+//! --raw \
+//! --disable-default-bootnode \
+//! > customSpecRaw.json
+//! ```
+//!
+//! - `--chain=customSpec.json`: Uses the custom chain specification as input.
+//! - `--disable-default-bootnode`: Disables the default bootnodes in the node template.
+//! - `--raw`: Converts the chain specification into a raw format with encoded storage keys.
+//! - `> customSpecRaw.json`: Outputs to customSpecRaw.json.
+//!
+//! #### Starting the First Node in a Private Network
+//! ```bash
+//! ./target/debug/node-template \
+//! --base-path /tmp/node01 \
+//! --chain ./customSpecRaw.json \
+//! --port 30333 \
+//! --ws-port 9945 \
+//! --rpc-port 9933 \
+//! 
--telemetry-url "wss://telemetry.polkadot.io/submit/ 0" \ +//! --validator \ +//! --rpc-methods Unsafe \ +//! --name MyNode01 +//! ``` +//! +//! - `--base-path`: Sets the directory for node data. +//! - `--chain`: Specifies the chain specification file. +//! - `--port`: TCP port for peer-to-peer communication. +//! - `--ws-port`: WebSocket port for RPC. +//! - `--rpc-port`: HTTP port for JSON-RPC. +//! - `--telemetry-url`: Endpoint for sending telemetry data. +//! - `--validator`: Indicates the node’s participation in block production. +//! - `--rpc-methods Unsafe`: Allows potentially unsafe RPC methods. +//! - `--name`: Sets a human-readable name for the node. +//! +//! #### Adding a Second Node to the Network +//! ```bash +//! ./target/release/node-template \ +//! --base-path /tmp/bob \ +//! --chain local \ +//! --bob \ +//! --port 30334 \ +//! --rpc-port 9946 \ +//! --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" \ +//! --validator \ +//! --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp +//! ``` +//! +//! - `--base-path`: Sets the directory for node data. +//! - `--chain`: Specifies the chain specification file. +//! - `--bob`: Initializes the node with the session keys of the "Bob" account. +//! - `--port`: TCP port for peer-to-peer communication. +//! - `--rpc-port`: HTTP port for JSON-RPC. +//! - `--telemetry-url`: Endpoint for sending telemetry data. +//! - `--validator`: Indicates the node’s participation in block production. +//! - `--bootnodes`: Specifies the address of the first node for peer discovery. Nodes should find +//! each other using mDNS. This command needs to be used if they don't find each other. +//! +//! --- +//! +//! > If you are interested in learning how to extend the CLI with your custom arguments, you can +//! > check out the [Customize your Substrate chain CLI](https://www.youtube.com/watch?v=IVifko1fqjw) +//! > seminar. +//! > Please note that the seminar is based on an older version of Substrate, and [Clap](https://docs.rs/clap/latest/clap/) +//! > is now used instead of [StructOpt](https://docs.rs/structopt/latest/structopt/) for parsing +//! > CLI arguments. diff --git a/docs/sdk/src/reference_docs/consensus_swapping.rs b/docs/sdk/src/reference_docs/consensus_swapping.rs new file mode 100644 index 0000000000000000000000000000000000000000..e639761ee97b42fa68e1ba77250d490e28277e24 --- /dev/null +++ b/docs/sdk/src/reference_docs/consensus_swapping.rs @@ -0,0 +1,6 @@ +//! Consensus Swapping +//! +//! Notes: +//! +//! - The typical workshop done by Joshy in some places where he swaps out the consensus to be PoW. +//! - This could also be a tutorial rather than a ref doc, depending on the size. diff --git a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs new file mode 100644 index 0000000000000000000000000000000000000000..89c7cfe983c1a3afc6f3ba03ad784102025f2e67 --- /dev/null +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -0,0 +1,277 @@ +//! # Constructing and Signing Extrinsics +//! +//! Extrinsics are payloads that are stored in blocks which are responsible for altering the state +//! of a blockchain via the [_state transition +//! function_][crate::reference_docs::blockchain_state_machines]. +//! +//! Substrate is configurable enough that extrinsics can take any format. In practice, runtimes +//! tend to use our [`sp_runtime::generic::UncheckedExtrinsic`] type to represent extrinsics, +//! 
because it's generic enough to cater for most (if not all) use cases. In Polkadot, this is +//! configured [here](https://github.com/polkadot-fellows/runtimes/blob/94b2798b69ba6779764e20a50f056e48db78ebef/relay/polkadot/src/lib.rs#L1478) +//! at the time of writing. +//! +//! What follows is a description of how extrinsics based on this +//! [`sp_runtime::generic::UncheckedExtrinsic`] type are encoded into bytes. Specifically, we are +//! looking at how extrinsics with a format version of 4 are encoded. This version is itself a part +//! of the payload, and if it changes, it indicates that something about the encoding may have +//! changed. +//! +//! # Encoding an Extrinsic +//! +//! At a high level, all extrinsics compatible with [`sp_runtime::generic::UncheckedExtrinsic`] +//! are formed from concatenating some details together, as in the following pseudo-code: +//! +//! ```text +//! extrinsic_bytes = concat( +//! compact_encoded_length, +//! version_and_maybe_signature, +//! call_data +//! ) +//! ``` +//! +//! For clarity, the actual implementation in Substrate looks like this: +#![doc = docify::embed!("../../substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs", unchecked_extrinsic_encode_impl)] +//! +//! Let's look at how each of these details is constructed: +//! +//! ## compact_encoded_length +//! +//! This is a [SCALE compact encoded][frame::deps::codec::Compact] integer which is equal to the +//! length, in bytes, of the rest of the extrinsic details. +//! +//! To obtain this value, we must encode and concatenate together the rest of the extrinsic details +//! first, and then obtain the byte length of these. We can then compact encode that length, and +//! prepend it to the rest of the details. +//! +//! ## version_and_maybe_signature +//! +//! If the extrinsic is _unsigned_, then `version_and_maybe_signature` will be just one byte +//! denoting the _transaction protocol version_, which is 4 (or `0b0000_0100`). +//! +//! If the extrinsic is _signed_ (all extrinsics submitted from users must be signed), then +//! `version_and_maybe_signature` is obtained by concatenating some details together, ie: +//! +//! ```text +//! version_and_maybe_signature = concat( +//! version_and_signed, +//! from_address, +//! signature, +//! signed_extensions_extra, +//! ) +//! ``` +//! +//! Each of the details to be concatenated together is explained below: +//! +//! ### version_and_signed +//! +//! This is one byte, equal to `0x84` or `0b1000_0100` (i.e. an upper 1 bit to denote that it is +//! signed, and then the transaction version, 4, in the lower bits). +//! +//! ### from_address +//! +//! This is the [SCALE encoded][frame::deps::codec] address of the sender of the extrinsic. The +//! address is the first generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`], and so +//! can vary from chain to chain. +//! +//! The address type used on the Polkadot relay chain is [`sp_runtime::MultiAddress`], +//! where `AccountId32` is defined [here][`sp_core::crypto::AccountId32`]. When constructing a +//! signed extrinsic to be submitted to a Polkadot node, you'll always use the +//! [`sp_runtime::MultiAddress::Id`] variant to wrap your `AccountId32`. +//! +//! ### signature +//! +//! This is the [SCALE encoded][frame::deps::codec] signature. The signature type is configured via +//! the third generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`], which determines the +//! shape of the signature and signing algorithm that should be used. +//! +//! 
The signature is obtained by signing the _signed payload_ bytes (see below on how this is +//! constructed) using the private key associated with the address and correct algorithm. +//! +//! The signature type used on the Polkadot relay chain is [`sp_runtime::MultiSignature`]; the +//! variants there are the types of signature that can be provided. +//! +//! ### signed_extensions_extra +//! +//! This is the concatenation of the [SCALE encoded][frame::deps::codec] bytes representing each of +//! the [_signed extensions_][sp_runtime::traits::SignedExtension], and are configured by the +//! fourth generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about +//! signed extensions [here][crate::reference_docs::signed_extensions]. +//! +//! When it comes to constructing an extrinsic, each signed extension has two things that we are +//! interested in here: +//! +//! - The actual SCALE encoding of the signed extension type itself; this is what will form our +//! `signed_extensions_extra` bytes. +//! - An `AdditionalSigned` type. This is SCALE encoded into the `signed_extensions_additional` data +//! of the _signed payload_ (see below). +//! +//! Either (or both) of these can encode to zero bytes. +//! +//! Each chain configures the set of signed extensions that it uses in its runtime configuration. +//! At the time of writing, Polkadot configures them +//! [here](https://github.com/polkadot-fellows/runtimes/blob/1dc04eb954eadf8aadb5d83990b89662dbb5a074/relay/polkadot/src/lib.rs#L1432C25-L1432C25). +//! Some of the common signed extensions are defined +//! [here][frame::deps::frame_system#signed-extensions]. +//! +//! Information about exactly which signed extensions are present on a chain and in what order is +//! also a part of the metadata for the chain. For V15 metadata, it can be +//! [found here][frame::deps::frame_support::__private::metadata::v15::ExtrinsicMetadata]. +//! +//! ## call_data +//! +//! This is the main payload of the extrinsic, which is used to determine how the chain's state is +//! altered. This is defined by the second generic parameter of +//! [`sp_runtime::generic::UncheckedExtrinsic`]. +//! +//! A call can be anything that implements [`Encode`][frame::deps::codec::Encode]. In FRAME-based +//! runtimes, a call is represented as an enum of enums, where the outer enum represents the FRAME +//! pallet being called, and the inner enum represents the call being made within that pallet, and +//! any arguments to it. Read more about the call enum +//! [here][crate::reference_docs::frame_composite_enums]. +//! +//! FRAME `Call` enums are automatically generated, and end up looking something like this: +#![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", call_data)] +//! +//! In pseudo-code, this `Call` enum encodes equivalently to: +//! +//! ```text +//! call_data = concat( +//! pallet_index, +//! call_index, +//! call_args +//! ) +//! ``` +//! +//! - `pallet_index` is a single byte denoting the index of the pallet that we are calling into, and +//! is what the tag of the outermost enum will encode to. +//! - `call_index` is a single byte denoting the index of the call that we are making the pallet, +//! and is what the tag of the inner enum will encode to. +//! - `call_args` are the SCALE encoded bytes for each of the arguments that the call expects, and +//! are typically provided as values to the inner enum. +//! +//! Information about the pallets that exist for a chain (including their indexes), the calls +//! 
available in each pallet (including their indexes), and the arguments required for each call
+//! can be found in the metadata for the chain. For V15 metadata, this information
+//! [is here][frame::deps::frame_support::__private::metadata::v15::PalletMetadata].
+//!
+//! # The Signed Payload Format
+//!
+//! All extrinsics submitted to a node from the outside world (also known as _transactions_) need to
+//! be _signed_. The data that needs to be signed for some extrinsic is called the _signed payload_,
+//! and its shape is described by the following pseudo-code:
+//!
+//! ```text
+//! signed_payload = concat(
+//!     call_data,
+//!     signed_extensions_extra,
+//!     signed_extensions_additional,
+//! )
+//!
+//! if length(signed_payload) > 256 {
+//!     signed_payload = blake2_256(signed_payload)
+//! }
+//! ```
+//!
+//! The bytes representing `call_data` and `signed_extensions_extra` can be obtained as described
+//! above. `signed_extensions_additional` is constructed by SCALE encoding the
+//! ["additional signed" data][sp_runtime::traits::SignedExtension::AdditionalSigned] for each
+//! signed extension that the chain is using, in order.
+//!
+//! Once we've concatenated those together, we hash the result if it's greater than 256 bytes in
+//! length, using a Blake2 256-bit hasher.
+//!
+//! The [`sp_runtime::generic::SignedPayload`] type takes care of assembling the correct payload
+//! for us, given `call_data` and a tuple of signed extensions.
+//!
+//! # Example Encoding
+//!
+//! Using [`sp_runtime::generic::UncheckedExtrinsic`], we can construct and encode an extrinsic
+//! as follows:
+#![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", encoding_example)]
+
+#[docify::export]
+pub mod call_data {
+	use parity_scale_codec::{Decode, Encode};
+
+	// The outer enum composes calls within
+	// different pallets together. We have two
+	// pallets, "PalletA" and "PalletB".
+	#[derive(Encode, Decode)]
+	pub enum Call {
+		#[codec(index = 0)]
+		PalletA(PalletACall),
+		#[codec(index = 7)]
+		PalletB(PalletBCall),
+	}
+
+	// An inner enum represents the calls within
+	// a specific pallet. "PalletA" has one call,
+	// "Foo".
+	#[derive(Encode, Decode)]
+	pub enum PalletACall {
+		#[codec(index = 0)]
+		Foo(String),
+	}
+
+	#[derive(Encode, Decode)]
+	pub enum PalletBCall {
+		#[codec(index = 0)]
+		Bar(String),
+	}
+}
+
+#[docify::export]
+pub mod encoding_example {
+	use super::call_data::{Call, PalletACall};
+	use crate::reference_docs::signed_extensions::signed_extensions_example;
+	use parity_scale_codec::Encode;
+	use sp_core::crypto::AccountId32;
+	use sp_keyring::sr25519::Keyring;
+	use sp_runtime::{
+		generic::{SignedPayload, UncheckedExtrinsic},
+		MultiAddress, MultiSignature,
+	};
+
+	// Define some signed extensions to use. We'll use a couple of examples
+	// from the signed extensions reference doc.
+	type SignedExtensions =
+		(signed_extensions_example::AddToPayload, signed_extensions_example::AddToSignaturePayload);
+
+	// We'll use `UncheckedExtrinsic` to encode our extrinsic for us. We set
+	// the address and signature type to those used on Polkadot, use our custom
+	// `Call` type, and use our custom set of `SignedExtensions`.
+	type Extrinsic =
+		UncheckedExtrinsic<MultiAddress<AccountId32, ()>, Call, MultiSignature, SignedExtensions>;
+
+	pub fn encode_demo_extrinsic() -> Vec<u8> {
+		// The "from" address will be our Alice dev account.
+		let from_address = MultiAddress::<AccountId32, ()>::Id(Keyring::Alice.to_account_id());
+
+		// We provide some values for our expected signed extensions. 
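+		// As noted above, the "extra" data of each signed extension is encoded into the extrinsic
+		// itself, while its "additional" data is only encoded into the signed payload.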
+ let signed_extensions = ( + signed_extensions_example::AddToPayload(1), + signed_extensions_example::AddToSignaturePayload, + ); + + // Construct our call data: + let call_data = Call::PalletA(PalletACall::Foo("Hello".to_string())); + + // The signed payload. This takes care of encoding the call_data, + // signed_extensions_extra and signed_extensions_additional, and hashing + // the result if it's > 256 bytes: + let signed_payload = SignedPayload::new(&call_data, signed_extensions.clone()); + + // Sign the signed payload with our Alice dev account's private key, + // and wrap the signature into the expected type: + let signature = { + let sig = Keyring::Alice.sign(&signed_payload.encode()); + MultiSignature::Sr25519(sig) + }; + + // Now, we can build and encode our extrinsic: + let ext = Extrinsic::new_signed(call_data, from_address, signature, signed_extensions); + + let encoded_ext = ext.encode(); + encoded_ext + } +} diff --git a/docs/sdk/src/reference_docs/fee_less_runtime.rs b/docs/sdk/src/reference_docs/fee_less_runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..1213c26282537fdee2d04ebd452289f0e6bc759f --- /dev/null +++ b/docs/sdk/src/reference_docs/fee_less_runtime.rs @@ -0,0 +1,12 @@ +//! # Fee-Less Runtime +//! +//! +//! Notes: +//! +//! - An extension of [`runtime_vs_smart_contract`], showcasing the tools needed to build a safe +//! runtime that is fee-less. +//! - Would need to use unsigned origins, custom validate_unsigned, check the existence of some NFT +//! and some kind of rate limiting (eg. any account gets 5 free tx per day). +//! - The rule of thumb is that as long as the unsigned validate does one storage read, similar to +//! nonce, it is fine. +//! - This could possibly be a good guide/template, rather than a reference doc. diff --git a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs new file mode 100644 index 0000000000000000000000000000000000000000..f65f4174ec66265a6f1ad1b52099a235d7ed9bfa --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs @@ -0,0 +1,23 @@ +//! # FRAME Benchmarking and Weights. +//! +//! Notes: +//! +//! On Weight as a concept. +//! +//! - Why we need it. Super important. People hate this. We need to argue why it is worth it. +//! - Axis of weight: PoV + Time. +//! - pre dispatch weight vs. metering and post dispatch correction. +//! - mention that we will do this for PoV +//! - you can manually refund using `DispatchResultWithPostInfo`. +//! - Technically you can have weights with any benchmarking framework. You just need one number to +//! be computed pre-dispatch. But FRAME gives you a framework for this. +//! - improve documentation of `#[weight = ..]` and `#[pallet::weight(..)]`. All syntax variation +//! should be covered. +//! +//! on FRAME benchmarking machinery: +//! +//! - component analysis, why everything must be linear. +//! - how to write benchmarks, how you must think of worst case. +//! - how to run benchmarks. +//! +//! - https://www.shawntabrizi.com/substrate/substrate-storage-deep-dive/ diff --git a/docs/sdk/src/reference_docs/frame_composite_enums.rs b/docs/sdk/src/reference_docs/frame_composite_enums.rs new file mode 100644 index 0000000000000000000000000000000000000000..6051cd534467672b9831187ef5c8b814712f7d18 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_composite_enums.rs @@ -0,0 +1 @@ +//! 
# FRAME Composite Enums diff --git a/docs/sdk/src/reference_docs/frame_currency.rs b/docs/sdk/src/reference_docs/frame_currency.rs new file mode 100644 index 0000000000000000000000000000000000000000..ba181373062f05e4645931739f60630ae00109f6 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_currency.rs @@ -0,0 +1,8 @@ +//! FRAME Currency Abstractions and Traits +//! +//! Notes: +//! +//! - History, `Currency` trait. +//! - `Hold` and `Freeze` with diagram. +//! - `HoldReason` and `FreezeReason` +//! - This footgun: https://github.com/paritytech/polkadot-sdk/pull/1900#discussion_r1363783609 diff --git a/docs/sdk/src/reference_docs/frame_origin.rs b/docs/sdk/src/reference_docs/frame_origin.rs new file mode 100644 index 0000000000000000000000000000000000000000..a4078377cd77dad6b3aac78ba6acdddda14251a8 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_origin.rs @@ -0,0 +1,14 @@ +//! # FRAME Origin +//! +//! Notes: +//! +//! - Def talk about account abstraction and how it is a solved issue in frame. See Gav's talk in +//! Protocol Berg 2023 +//! - system's raw origin, how it is amalgamated with other origins into one type +//! [`frame_composite_enums`] +//! - signed origin +//! - unsigned origin, link to [`fee_less_runtime`] +//! - Root origin, how no one can obtain it. +//! - Abstract origin: how FRAME allows you to express "origin is 2/3 of the this body or 1/2 of +//! that body or half of the token holders". +//! - `type CustomOrigin: EnsureOrigin<_>` in pallets. diff --git a/docs/sdk/src/reference_docs/frame_runtime_migration.rs b/docs/sdk/src/reference_docs/frame_runtime_migration.rs new file mode 100644 index 0000000000000000000000000000000000000000..0616ccbb6f57971823c7347a60574ef0c0bef2ab --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_migration.rs @@ -0,0 +1,9 @@ +//! # Runtime Runtime Upgrade and Testing +//! +//! +//! Notes: +//! +//! - Flow of things, when does `on_runtime_upgrade` get called. Link to to `Hooks` and its diagram +//! as source of truth. +//! - Data migration and when it is needed. +//! - Look into the pba-lecture. diff --git a/docs/sdk/src/reference_docs/frame_system_accounts.rs b/docs/sdk/src/reference_docs/frame_system_accounts.rs new file mode 100644 index 0000000000000000000000000000000000000000..ae9d2c9e0cb3ca7a694d9e5330363c05f6f78cb0 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_system_accounts.rs @@ -0,0 +1,8 @@ +//! # FRAME Accounts +//! +//! How `frame_system` handles accountIds. Nonce. Consumers and Providers, reference counting. + +// - poorly understood topics, needs one great article to rul them all. +// - https://github.com/paritytech/substrate/issues/14425 +// - https://github.com/paritytech/substrate/pull/12951 +// - https://substrate.stackexchange.com/questions/263/what-is-the-meaning-of-the-account-provider-sufficients-and-consumer diff --git a/docs/sdk/src/reference_docs/glossary.rs b/docs/sdk/src/reference_docs/glossary.rs new file mode 100644 index 0000000000000000000000000000000000000000..56f5ef5aeb58dbea251d6829bfc17fc11c70d088 --- /dev/null +++ b/docs/sdk/src/reference_docs/glossary.rs @@ -0,0 +1,120 @@ +//! # Glossary +//! +//! #### State +//! +//! The data around which the blockchain network wishes to come to consensus. Also +//! referred to as "onchain data", "onchain storage" or sometimes just "storage". In UTXO based +//! blockchains, is referred to as "ledger". +//! +//! **Synonyms**: Onchain data, Onchain storage, Storage, Ledger +//! +//! #### State Transition Function +//! +//! 
The WASM Blob that dictates how the blockchain should transition its state upon encountering new +//! blocks. +//! +//! #### Host +//! +//! The environment that hosts and executes the [state transition function's WASM +//! blob](#state-transition-function). +//! +//! #### Node +//! +//! The full software artifact that contains the [host](#host), but importantly also all the other +//! modules needed to be part of a blockchain network, such as peer-to-peer networking, database and +//! such. +//! +//! **Synonyms**: Client +//! +//! #### Light Node +//! +//! Same as [node](#nodes), but when capable of following the network only through listening to +//! block headers. Usually capable of running in more constrained environments, such as an embedded +//! device, phone, or a web browser. +//! +//! **Synonyms**: Light Client +//! +//! #### Offchain +//! +//! Refers to operations conducted outside the blockchain's consensus mechanism. They are essential +//! for enhancing scalability and efficiency, enabling activities like data fetching and computation +//! without bloating the blockchain state. +//! +//! #### Host Functions: +//! +//! Host functions are the node's API, these are functions provided by the runtime environment (the +//! [host](#host)) to the Wasm runtime. These functions allow the Wasm code to interact with and +//! perform operations on the [node](#node), like accessing the blockchain state. +//! +//! #### Runtime API: +//! +//! This is the API of the runtime, it acts as a communication bridge between the runtime and the +//! node, serving as the exposed interface that facilitates their interactions. +//! +//! #### Dispatchable: +//! +//! Dispatchables are [function objects](https://en.wikipedia.org/wiki/Function_object) that act as +//! the entry points in [FRAME](frame) pallets. They can be called by internal or external entities +//! to interact with the blockchain's state. They are a core aspect of the runtime logic, handling +//! transactions and other state-changing operations. +//! +//! **Synonyms**: Callable +//! +//! #### Extrinsic +//! +//! An extrinsic is a general term for a piece of data that is originated outside of the runtime, +//! included into a block and leads to some action. This includes user-initiated transactions as +//! well as inherents which are placed into the block by the block-builder. +//! +//! #### Pallet +//! +//! Similar to software modules in traditional programming, [FRAME](frame) pallets in Substrate are +//! modular components that encapsulate distinct functionalities or business logic. Just as +//! libraries or modules are used to build and extend the capabilities of a software application, +//! pallets are the foundational building blocks for constructing a blockchain's runtime with frame. +//! They enable the creation of customizable and upgradeable networks, offering a composable +//! framework for a Substrate-based blockchain. Each pallet can be thought of as a plug-and-play +//! module, enhancing the blockchain's functionality in a cohesive and integrated manner. +//! +//! #### Full Node +//! +//! It is a node that prunes historical states, keeping only recent finalized block states to reduce +//! storage needs. Full nodes provide current chain state access and allow direct submission and +//! validation of extrinsics, maintaining network decentralization. +//! +//! #### Archive Node +//! +//! An archive node is a specialized node that maintains a complete history of all block states and +//! transactions. 
Unlike a full node, it does not prune historical data, ensuring full access to the +//! entire blockchain history. This makes it essential for detailed blockchain analysis and +//! historical queries, but requires significantly more storage capacity. +//! +//! #### Validator +//! +//! A validator is a node that participates in the consensus mechanism of the network. +//! Its role includes block production, transaction validation, network integrity and security +//! maintenance. +//! +//! #### Collator +//! +//! A collator is a node that is responsible for producing candidate blocks for the validators. +//! Collators are similar to validators on any other blockchain but, they do not need to provide +//! security guarantees as the Relay Chain handles this. +//! +//! #### Parachain +//! +//! Short for "parallelized chain" a parachain is a specialized blockchain that runs in parallel to +//! the Relay Chain (Polkadot, Kusama, etc.), benefiting from the shared security and +//! interoperability features of it. +//! +//! **Synonyms**: AppChain +//! +//! #### PVF +//! The Parachain Validation Function (PVF) is the current runtime Wasm for a parachain that is +//! stored on the Relay chain. It is an essential component in the Polkadot ecosystem, encapsulating +//! the validation logic for each parachain. The PVF is executed by validators to verify the +//! correctness of parachain blocks. This is critical for ensuring that each block follows the logic +//! set by its respective parachain, thus maintaining the integrity and security of the entire +//! network. +//! +//! **Synonyms**: Parachain Validation Function diff --git a/docs/sdk/src/reference_docs/light_nodes.rs b/docs/sdk/src/reference_docs/light_nodes.rs new file mode 100644 index 0000000000000000000000000000000000000000..a6a0a828ef58a20593fbf55cbcce4e4310e1cabd --- /dev/null +++ b/docs/sdk/src/reference_docs/light_nodes.rs @@ -0,0 +1,7 @@ +//! # Light Clients +//! +//! +//! Notes: should contain only high level information about light clients, then link to how to set +//! it up in PAPI and SubXT +//! https://docs.substrate.io/learn/light-clients-in-substrate-connect/ +//! https://github.com/substrate-developer-hub/substrate-front-end-template/pull/277 diff --git a/docs/sdk/src/reference_docs/metadata.rs b/docs/sdk/src/reference_docs/metadata.rs new file mode 100644 index 0000000000000000000000000000000000000000..702c1c30fd9cf2d09082b39fecf880065ebd5375 --- /dev/null +++ b/docs/sdk/src/reference_docs/metadata.rs @@ -0,0 +1 @@ +//! # Metadata diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..44284394000d3bac718a81ac95928c4d94e3dc2b --- /dev/null +++ b/docs/sdk/src/reference_docs/mod.rs @@ -0,0 +1,99 @@ +//! # Polkadot SDK Reference Docs. +//! +//! This is the entry point for all reference documents that enhance one's learning experience in +//! the Polkadot SDK. +//! +//! Note that this module also contains the [glossary](crate::reference_docs::glossary). +//! +//! ## What is a "reference document"? +//! +//! First, see [why we use rust-docs for everything](crate#why-rust-docs) and our documentation +//! [principles](crate#principles). We acknowledge that as much of the crucial information should be +//! embedded in the low level rust-docs. Then, high level scenarios should be covered in +//! [`crate::guides`]. Finally, we acknowledge that there is a category of information that is: +//! +//! 1. crucial to know. +//! 2. 
too high level to be in the rust-doc of any one `type`, `trait` or `fn`.
+//! 3. too low level to be encompassed in a [`crate::guides`].
+//!
+//! We call this class of documents "reference documents". Our goal should be to minimize the number
+//! of "reference" docs, as they incur a maintenance burden.
+
+/// Learn how Substrate and FRAME use traits and associated types to make modules generic in a
+/// type-safe manner.
+pub mod trait_based_programming;
+
+/// Learn about the way Substrate and FRAME view their blockchains as state machines.
+pub mod blockchain_state_machines;
+
+/// The glossary.
+pub mod glossary;
+
+/// Learn about the WASM meta-protocol of all Substrate-based chains.
+pub mod wasm_meta_protocol;
+
+/// Learn about the differences between smart contracts and a FRAME-based runtime. They are both
+/// "code stored onchain", but how do they differ?
+pub mod runtime_vs_smart_contract;
+
+/// Learn about how extrinsics are encoded to be transmitted to a node and stored in blocks.
+pub mod extrinsic_encoding;
+
+/// Learn about the signed extensions that form a part of extrinsics.
+// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42
+pub mod signed_extensions;
+
+/// Learn about *"Origin"*, a topic in FRAME that enables complex account abstractions to be built.
+// TODO: @shawntabrizi https://github.com/paritytech/polkadot-sdk-docs/issues/43
+pub mod frame_origin;
+
+/// Learn about how to write safe and defensive code in your FRAME runtime.
+// TODO: @CrackTheCode016 https://github.com/paritytech/polkadot-sdk-docs/issues/44
+pub mod safe_defensive_programming;
+
+/// Learn about composite enums in FRAME-based runtimes, such as "RuntimeEvent" and "RuntimeCall".
+pub mod frame_composite_enums;
+
+/// Learn about how to make a pallet/runtime that is fee-less and instead uses another mechanism to
+/// control usage and prevent sybil attacks.
+pub mod fee_less_runtime;
+
+/// Learn about metadata, the main means through which an upgradeable runtime communicates its
+/// properties to the outside world.
+// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/47
+pub mod metadata;
+
+/// Learn about how frame-system handles `account-ids`, nonces, consumers and providers.
+pub mod frame_system_accounts;
+
+/// Learn about the currency-related abstractions provided in FRAME.
+pub mod frame_currency;
+
+/// Learn about benchmarking and weight.
+// TODO: @shawntabrizi @ggwpez https://github.com/paritytech/polkadot-sdk-docs/issues/50
+pub mod frame_benchmarking_weight;
+
+/// Learn about the chain specification file and the genesis state of the blockchain.
+// TODO: @michalkucharczyk https://github.com/paritytech/polkadot-sdk-docs/issues/51
+pub mod chain_spec_genesis;
+
+/// Learn about the limitations of the WASM runtime when it comes to memory usage.
+// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/52
+pub mod wasm_memory;
+
+/// Learn about Substrate's CLI, and how it can be extended.
+// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/53
+pub mod cli;
+
+/// Learn about Substrate's consensus algorithms, and how you can switch between them.
+// TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54
+pub mod consensus_swapping;
+
+/// Learn about all the advanced ways to test and coordinate a runtime upgrade and data migration.
+// TODO: @liamaharon https://github.com/paritytech/polkadot-sdk-docs/issues/55 +pub mod frame_runtime_migration; + +/// Learn about light nodes, how they function, and how Substrate-based chains come +/// light-node-first out of the box. +// TODO: @jsdw @josepot https://github.com/paritytech/polkadot-sdk-docs/issues/68 +pub mod light_nodes; diff --git a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs new file mode 100644 index 0000000000000000000000000000000000000000..16db44d8be49fb546230b5aa8dc6ca9673518592 --- /dev/null +++ b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs @@ -0,0 +1,209 @@ +//! # Runtime vs. Smart Contracts +//! +//! *TL;DR*: If you need to create a *Blockchain*, then write a runtime. If you need to create a +//! *DApp*, then write a Smart Contract. +//! +//! This is a comparative analysis of Substrate-based Runtimes and Smart Contracts, highlighting +//! their main differences. Our aim is to equip you with a clear understanding of how these two +//! methods of deploying on-chain logic diverge in their design, usage, and implications. +//! +//! Both Runtimes and Smart Contracts serve distinct purposes. Runtimes offer deep customization for +//! blockchain development, while Smart Contracts provide a more accessible approach for +//! decentralized applications. Understanding their differences is crucial in choosing the right +//! approach for a specific solution. +//! +//! ## Substrate +//! Substrate is a modular framework that enables the creation of purpose-specific blockchains. In +//! the Polkadot ecosystem you can find two distinct approaches for on-chain code execution: +//! [Runtime Development](#runtime-in-substrate) and [Smart Contracts](#smart-contracts). +//! +//! #### Smart Contracts in Substrate +//! Smart Contracts are autonomous, programmable constructs deployed on the blockchain. +//! In [FRAME](frame), Smart Contracts infrastructure is implemented by the +//! [`pallet_contracts`](../../../pallet_contracts/index.html) for WASM-based contracts or the +//! [`pallet_evm`](../../../pallet_evm/index.html) for EVM-compatible contracts. These pallets +//! enable Smart Contract developers to build applications and systems on top of a Substrate-based +//! blockchain. +//! +//! #### Runtime in Substrate +//! The Runtime is the state transition function of a Substrate-based blockchain. It defines the +//! rules for processing transactions and blocks, essentially governing the behavior and +//! capabilities of a blockchain. +//! +//! ## Comparative Table +//! +//! | Aspect | Runtime | Smart Contracts | +//! |-----------------------|-------------------------------------------------------------------------|----------------------------------------------------------------------| +//! | **Design Philosophy** | Core logic of a blockchain, allowing broad and deep customization. | Designed for DApps deployed on the blockchain runtime.| +//! | **Development Complexity** | Requires in-depth knowledge of Rust and Substrate. Suitable for complex blockchain architectures. | Easier to develop with knowledge of Smart Contract languages like Solidity or [ink!](https://use.ink/). | +//! | **Upgradeability and Flexibility** | Offers comprehensive upgradeability with migration logic and on-chain governance, allowing modifications to the entire blockchain logic without hard forks. | Less flexible in upgrade migrations but offers more straightforward deployment and iteration. | +//! 
| **Performance and Efficiency** | More efficient, optimized for specific needs of the blockchain. | Can be less efficient due to its generic nature (e.g. the overhead of a virtual machine). | +//! | **Security Considerations** | Security flaws can affect the entire blockchain. | Security risks usually localized to the individual contract. | +//! | **Weighing and Metering** | Operations can be weighed, allowing for precise benchmarking. | Execution is metered, allowing for measurement of resource consumption. | +//! +//! We will now explore these differences in more detail. +//! +//! ## Design Philosophy +//! Runtimes and Smart Contracts are designed for different purposes. Runtimes are the core logic +//! of a blockchain, while Smart Contracts are designed for DApps on top of the blockchain. +//! Runtimes can be more complex, but also more flexible and efficient, while Smart Contracts are +//! easier to develop and deploy. +//! +//! #### Runtime Design Philosophy +//! - **Core Blockchain Logic**: Runtimes are essentially the backbone of a blockchain. They define +//! the fundamental rules, operations, and state transitions of the blockchain network. +//! - **Broad and Deep Customization**: Runtimes allow for extensive customization and flexibility. +//! Developers can tailor the most fundamental aspects of the blockchain, like introducing an +//! efficient transaction fee model to eliminating transaction fees completely. This level of +//! control is essential for creating specialized or application-specific blockchains. +//! +//! #### Smart Contract Design Philosophy +//! - **DApps Development**: Smart contracts are designed primarily for developing DApps. They +//! operate on top of the blockchain's infrastructure. +//! - **Modularity and Isolation**: Smart contracts offer a more modular approach. Each contract is +//! an isolated piece of code, executing predefined operations when triggered. This isolation +//! simplifies development and enhances security, as flaws in one contract do not directly +//! compromise the entire network. +//! +//! ## Development Complexity +//! Runtimes and Smart Contracts differ in their development complexity, largely due to their +//! differing purposes and technical requirements. +//! +//! #### Runtime Development Complexity +//! - **In-depth Knowledge Requirements**: Developing a Runtime in Substrate requires a +//! comprehensive understanding of Rust, Substrate's framework, and blockchain principles. +//! - **Complex Blockchain Architectures**: Runtime development is suitable for creating complex +//! blockchain architectures. Developers must consider aspects like security, scalability, and +//! network efficiency. +//! +//! #### Smart Contract Development Complexity +//! - **Accessibility**: Smart Contract development is generally more accessible, especially for +//! those already familiar with programming concepts. Knowledge of smart contract-specific +//! languages like Solidity or ink! is required. +//! - **Focused on Application Logic**: The development here is focused on the application logic +//! only. This includes writing functions that execute when certain conditions are met, managing +//! state within the contract, and ensuring security against common Smart Contract +//! vulnerabilities. +//! +//! ## Upgradeability and Flexibility +//! Runtimes and Smart Contracts differ significantly in how they handle upgrades and flexibility, +//! each with its own advantages and constraints. Runtimes are more flexible, allowing for writing +//! 
migration logic for upgrades, while Smart Contracts are less flexible but offer easier +//! deployment and iteration. +//! +//! #### Runtime Upgradeability and Flexibility +//! - **Migration Logic**: One of the key strengths of runtime development is the ability to define +//! migration logic. This allows developers to implement changes in the state or structure of the +//! blockchain during an upgrade. Such migrations can adapt the existing state to fit new +//! requirements or features seamlessly. +//! - **On-Chain Governance**: Upgrades in a Runtime environment are typically governed on-chain, +//! involving validators or a governance mechanism. This allows for a democratic and transparent +//! process for making substantial changes to the blockchain. +//! - **Broad Impact of Changes**: Changes made in Runtime affect the entire blockchain. This gives +//! developers the power to introduce significant improvements or changes but also necessitates a +//! high level of responsibility and scrutiny, we will talk further about it in the [Security +//! Considerations](#security-considerations) section. +//! +//! #### Smart Contract Upgradeability and Flexibility +//! - **Deployment and Iteration**: Smart Contracts, by nature, are designed for more +//! straightforward deployment and iteration. Developers can quickly deploy contracts. +//! - **Contract Code Updates**: Once deployed, although typically immutable, Smart Contracts can be +//! upgraded, but lack of migration logic. The [pallet_contracts](../../../pallet_contracts/index.html) +//! allows for contracts to be upgraded by exposing the `set_code` dispatchable. More details on this +//! can be found in [Ink! documentation on upgradeable contracts](https://use.ink/5.x/basics/upgradeable-contracts). +//! - **Isolated Impact**: Upgrades or changes to a smart contract generally impact only that +//! contract and its users, unlike Runtime upgrades that have a network-wide effect. +//! - **Simplicity and Rapid Development**: The development cycle for Smart Contracts is usually +//! faster and less complex than Runtime development, allowing for rapid prototyping and +//! deployment. +//! +//! ## Performance and Efficiency +//! Runtimes and Smart Contracts have distinct characteristics in terms of performance and +//! efficiency due to their inherent design and operational contexts. Runtimes are more efficient +//! and optimized for specific needs, while Smart Contracts are more generic and less efficient. +//! +//! #### Runtime Performance and Efficiency +//! - **Optimized for Specific Needs**: Runtime modules in Substrate are tailored to meet the +//! specific needs of the blockchain. They are integrated directly into the blockchain's core, +//! allowing them to operate with high efficiency and minimal overhead. +//! - **Direct Access to Blockchain State**: Runtime has direct access to the blockchain's state. +//! This direct access enables more efficient data processing and transaction handling, as there +//! is no additional layer between the runtime logic and the blockchain's core. +//! - **Resource Management**: Resource management is integral to runtime development to ensure that +//! the blockchain operates smoothly and efficiently. +//! +//! #### Smart Contract Performance and Efficiency +//! - **Generic Nature and Overhead**: Smart Contracts, particularly those running in virtual +//! machine environments, can be less efficient due to the generic nature of their execution +//! environment. 
The overhead of the virtual machine can lead to increased computational and +//! resource costs. +//! - **Isolation and Security Constraints**: Smart Contracts operate in an isolated environment to +//! ensure security and prevent unwanted interactions with the blockchain's state. This isolation, +//! while crucial for security, can introduce additional computational overhead. +//! - **Gas Mechanism and Metering**: The gas mechanism in Smart Contracts, used for metering +//! computational resources, ensures that contracts don't consume excessive resources. However, +//! this metering itself requires computational power, adding to the overall cost of contract +//! execution. +//! +//! ## Security Considerations +//! These two methodologies, while serving different purposes, come with their own unique security +//! considerations. +//! +//! #### Runtime Security Aspects +//! Runtimes, being at the core of blockchain functionality, have profound implications for the +//! security of the entire network: +//! +//! - **Broad Impact**: Security flaws in the runtime can compromise the entire blockchain, +//! affecting all network participants. +//! - **Governance and Upgradeability**: Runtime upgrades, while powerful, need rigorous governance +//! and testing to ensure security. Improperly executed upgrades can introduce vulnerabilities or +//! disrupt network operations. +//! - **Complexity and Expertise**: Developing and maintaining runtime requires a higher level of +//! expertise in blockchain architecture and security, as mistakes can be far-reaching. +//! +//! #### Smart Contract Security Aspects +//! Smart contracts, while more isolated, bring their own set of security challenges: +//! +//! - **Isolated Impact**: Security issues in a smart contract typically affect the contract itself +//! and its users, rather than the whole network. +//! - **Contract-specific Risks**: Common issues like reentrancy +//! attacks, improper handling of external calls, and gas limit vulnerabilities are specific to +//! smart contract development. +//! - **Permissionless Deployment**: Since anyone can deploy a smart contract, +//! the ecosystem is more open to potentially malicious or vulnerable code. +//! +//! ## Weighing and Metering +//! Weighing and metering are mechanisms designed to limit the resources used by external actors. +//! However, there are fundamental differences in how these resources are handled in FRAME-based +//! Runtimes and how they are handled in Smart Contracts, while Runtime operations are weighed, +//! Smart Contract executions must be metered. +//! +//! #### Weighing +//! In FRAME-based Runtimes, operations are *weighed*. This means that each operation in the Runtime +//! has a fixed upper cost, known in advance, determined through +//! [benchmarking](crate::reference_docs::frame_benchmarking_weight). Weighing is practical here +//! because: +//! +//! - *Predictability*: Runtime operations are part of the blockchain's core logic, which is static +//! until an upgrade occurs. This predictability allows for precise +//! [benchmarking](crate::reference_docs::frame_benchmarking_weight). +//! - *Prevention of Abuse*: By having a fixed upper cost that corresponds to the worst-case +//! complexity scenario of its execution (and a mechanism to refund unused weight), it becomes +//! infeasible for an attacker to create transactions that could unpredictably consume excessive +//! resources. +//! +//! #### Metering +//! For Smart Contracts resource consumption is metered. 
This is essential due to: +//! +//! - **Untrusted Nature**: Unlike Runtime operations, Smart Contracts can be deployed by any user, +//! and their behavior isn’t known in advance. Metering dynamically measures resource consumption +//! as the contract executes. +//! - **Safety Against Infinite Loops**: Metering protects the blockchain from poorly designed +//! contracts that might run into infinite loops, consuming an indefinite amount of resources. +//! +//! #### Implications for Developers and Users +//! - **For Runtime Developers**: Understanding the cost of each operation is essential. Misjudging +//! the weight of operations can lead to network congestion or vulnerability exploitation. +//! - **For Smart Contract Developers**: Being mindful of the gas cost associated with contract +//! execution is crucial. Efficiently written contracts save costs and are less likely to hit gas +//! limits, ensuring smoother execution on the blockchain. diff --git a/docs/sdk/src/reference_docs/safe_defensive_programming.rs b/docs/sdk/src/reference_docs/safe_defensive_programming.rs new file mode 100644 index 0000000000000000000000000000000000000000..9d0f028e570d42d223e656cb7f46798d956fd173 --- /dev/null +++ b/docs/sdk/src/reference_docs/safe_defensive_programming.rs @@ -0,0 +1 @@ +//! diff --git a/docs/sdk/src/reference_docs/signed_extensions.rs b/docs/sdk/src/reference_docs/signed_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..28b1426536bcdf371865db74a104a27d51869c86 --- /dev/null +++ b/docs/sdk/src/reference_docs/signed_extensions.rs @@ -0,0 +1,79 @@ +//! Signed extensions are, briefly, a means for different chains to extend the "basic" extrinsic +//! format with custom data that can be checked by the runtime. +//! +//! # Example +//! +//! Defining a couple of very simple signed extensions looks like the following: +#![doc = docify::embed!("./src/reference_docs/signed_extensions.rs", signed_extensions_example)] + +#[docify::export] +pub mod signed_extensions_example { + use parity_scale_codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::traits::SignedExtension; + + // This doesn't actually check anything, but simply allows + // some arbitrary `u32` to be added to the extrinsic payload + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToPayload(pub u32); + + impl SignedExtension for AddToPayload { + const IDENTIFIER: &'static str = "AddToPayload"; + type AccountId = (); + type Call = (); + type AdditionalSigned = (); + type Pre = (); + + fn additional_signed( + &self, + ) -> Result< + Self::AdditionalSigned, + sp_runtime::transaction_validity::TransactionValidityError, + > { + Ok(()) + } + + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &sp_runtime::traits::DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(()) + } + } + + // This is the opposite; nothing will be added to the extrinsic payload, + // but the AdditionalSigned type (`1234u32`) will be added to the + // payload to be signed. 
+ #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToSignaturePayload; + + impl SignedExtension for AddToSignaturePayload { + const IDENTIFIER: &'static str = "AddToSignaturePayload"; + type AccountId = (); + type Call = (); + type AdditionalSigned = u32; + type Pre = (); + + fn additional_signed( + &self, + ) -> Result< + Self::AdditionalSigned, + sp_runtime::transaction_validity::TransactionValidityError, + > { + Ok(1234) + } + + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &sp_runtime::traits::DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(()) + } + } +} diff --git a/docs/sdk/src/reference_docs/trait_based_programming.rs b/docs/sdk/src/reference_docs/trait_based_programming.rs new file mode 100644 index 0000000000000000000000000000000000000000..ace3138807071a35dfaf4ea75e3321960f05137e --- /dev/null +++ b/docs/sdk/src/reference_docs/trait_based_programming.rs @@ -0,0 +1,229 @@ +//! # Trait-based Programming +//! +//! This document walks you over a peculiar way of using Rust's `trait` items. This pattern is +//! abundantly used within [`frame`] and is therefore paramount important for a smooth transition +//! into it. +//! +//! The rest of this document assumes familiarity with the +//! [Rust book's Advanced Traits](https://doc.rust-lang.org/book/ch19-03-advanced-traits.html) +//! section. +//! Moreover, we use the [`frame::traits::Get`]. +//! +//! First, imagine we are writing a FRAME pallet. We represent this pallet with a `struct Pallet`, +//! and this pallet wants to implement the functionalities of that pallet, for example a simple +//! `transfer` function. For the sake of education, we are interested in having a `MinTransfer` +//! amount, expressed as a [`frame::traits::Get`], which will dictate what is the minimum amount +//! that can be transferred. +//! +//! We can foremost write this as simple as the following snippet: +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", basic)] +//! +//! +//! In this example, we use arbitrary choices for `AccountId`, `Balance` and the `MinTransfer` type. +//! This works great for **one team's purposes** but we have to remember that Substrate and FRAME +//! are written as generic frameworks, intended to be highly configurable. +//! +//! In a broad sense, there are two avenues in exposing configurability: +//! +//! 1. For *values* that need to be generic, for example `MinTransfer`, we attach them to the +//! `Pallet` struct as fields: +//! +//! ``` +//! struct Pallet { +//! min_transfer: u128, +//! } +//! ``` +//! +//! 2. For *types* that need to be generic, we would have to use generic or associated types, such +//! as: +//! +//! ``` +//! struct Pallet { +//! min_transfer: u128, +//! _marker: std::marker::PhantomData, +//! } +//! ``` +//! +//! Substrate and FRAME, for various reasons (performance, correctness, type safety) has opted to +//! use *types* to declare both *values* and *types* as generic. This is the essence of why the +//! `Get` trait exists. +//! +//! This would bring us to the second iteration of the pallet, which would look like: +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", generic)] +//! +//! In this example, we managed to make all 3 of our types generic. Taking the example of the +//! `AccountId`, one should read the above as following: +//! +//! > The `Pallet` does not know what type `AccountId` concretely is, but it knows that it is +//! > something that adheres to being `From<[u8; 32]>`. +//! +//! 
This method would work, but it suffers from two downsides: +//! +//! 1. It is verbose, each `impl` block would have to reiterate all of the trait bounds. +//! 2. It cannot easily share/inherit generic types. Imagine multiple pallets wanting to be generic +//! over a single `AccountId`. There is no easy way to express that in this model. +//! +//! Finally, this brings us to using traits and associated types on traits to express the above. +//! Trait associated types have the benefit of: +//! +//! 1. Being less verbose, as in effect they can *group multiple `type`s together*. +//! 2. Can inherit from one another by declaring +//! [supertraits](https://doc.rust-lang.org/rust-by-example/trait/supertraits.html). +//! +//! > Interestingly, one downside of associated types is that declaring defaults on them is not +//! > stable yet. In the meantime, we have built our own custom mechanics around declaring defaults +//! for associated types, see [`pallet_default_config_example`]. +//! +//! The last iteration of our code would look like this: +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", trait_based)] +//! +//! Notice how instead of having multiple generics, everything is generic over a single ``, and all types are fetched through `T`, for example `T::AccountId`, `T::MinTransfer`. +//! +//! Finally, imagine all pallets wanting to be generic over `AccountId`. This can be achieved by +//! having individual `trait Configs` declare a shared `trait SystemConfig` as their +//! [supertrait](https://doc.rust-lang.org/rust-by-example/trait/supertraits.html). +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", with_system)] +//! In FRAME, this shared supertrait is [`frame::prelude::frame_system`]. +//! +//! Notice how this made no difference in the syntax of the rest of the code. `T::AccountId` is +//! still a valid type, since `T` implements `Config` and `Config` implies `SystemConfig`, which +//! has a `type AccountId`. +//! +//! Note, in some instances one would need to use what is known as the fully-qualified-syntax to +//! access a type to help the Rust compiler disambiguate. +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", fully_qualified)] +//! +//! This syntax can sometimes become more complicated when you are dealing with nested traits. +//! Consider the following example, in which we fetch the `type Balance` from another trait +//! `CurrencyTrait`. +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", fully_qualified_complicated)] +//! +//! Notice the final `type BalanceOf` and how it is defined. Using such aliases to shorten the +//! length of fully qualified syntax is a common pattern in FRAME. +//! +//! The above example is almost identical to the well-known (and somewhat notorious) `type +//! BalanceOf` that is often used in the context of [`frame::traits::fungible`]. +#![doc = docify::embed!("../../substrate/frame/fast-unstake/src/types.rs", BalanceOf)] +//! +//! ## Additional Resources +//! +//! - +//! - [Substrate Seminar - Traits and Generic Types](https://www.youtube.com/watch?v=6cp10jVWNl4) +//! 
- +#![allow(unused)] + +use frame::traits::Get; + +#[docify::export] +mod basic { + struct Pallet; + + type AccountId = frame::deps::sp_runtime::AccountId32; + type Balance = u128; + type MinTransfer = frame::traits::ConstU128<10>; + + impl Pallet { + fn transfer(_from: AccountId, _to: AccountId, _amount: Balance) { + todo!() + } + } +} + +#[docify::export] +mod generic { + use super::*; + + struct Pallet { + _marker: std::marker::PhantomData<(AccountId, Balance, MinTransfer)>, + } + + impl Pallet + where + Balance: frame::traits::AtLeast32BitUnsigned, + MinTransfer: frame::traits::Get, + AccountId: From<[u8; 32]>, + { + fn transfer(_from: AccountId, _to: AccountId, amount: Balance) { + assert!(amount >= MinTransfer::get()); + unimplemented!(); + } + } +} + +#[docify::export] +mod trait_based { + use super::*; + + trait Config { + type AccountId: From<[u8; 32]>; + type Balance: frame::traits::AtLeast32BitUnsigned; + type MinTransfer: frame::traits::Get; + } + + struct Pallet(std::marker::PhantomData); + impl Pallet { + fn transfer(_from: T::AccountId, _to: T::AccountId, amount: T::Balance) { + assert!(amount >= T::MinTransfer::get()); + unimplemented!(); + } + } +} + +#[docify::export] +mod with_system { + use super::*; + + pub trait SystemConfig { + type AccountId: From<[u8; 32]>; + } + + pub trait Config: SystemConfig { + type Balance: frame::traits::AtLeast32BitUnsigned; + type MinTransfer: frame::traits::Get; + } + + pub struct Pallet(std::marker::PhantomData); + impl Pallet { + fn transfer(_from: T::AccountId, _to: T::AccountId, amount: T::Balance) { + assert!(amount >= T::MinTransfer::get()); + unimplemented!(); + } + } +} + +#[docify::export] +mod fully_qualified { + use super::with_system::*; + + // Simple of using fully qualified syntax. + type AccountIdOf = ::AccountId; +} + +#[docify::export] +mod fully_qualified_complicated { + use super::with_system::*; + + trait CurrencyTrait { + type Balance: frame::traits::AtLeast32BitUnsigned; + fn more_stuff() {} + } + + trait Config: SystemConfig { + type Currency: CurrencyTrait; + } + + struct Pallet(std::marker::PhantomData); + impl Pallet { + fn transfer( + _from: T::AccountId, + _to: T::AccountId, + _amount: <::Currency as CurrencyTrait>::Balance, + ) { + unimplemented!(); + } + } + + /// A common pattern in FRAME. + type BalanceOf = <::Currency as CurrencyTrait>::Balance; +} diff --git a/docs/sdk/src/reference_docs/wasm_memory.rs b/docs/sdk/src/reference_docs/wasm_memory.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f4cda31094e40e8f308dbb06de4ab0ecd828777 --- /dev/null +++ b/docs/sdk/src/reference_docs/wasm_memory.rs @@ -0,0 +1,7 @@ +//! # WASM Memory Limitations. +//! +//! Notes: +//! +//! - Stack: Need to use `Box<_>` +//! - Heap: Substrate imposes a limit. PvF execution has its own limits +//! - Heap: There is also a maximum amount that a single allocation can have. diff --git a/docs/sdk/src/reference_docs/wasm_meta_protocol.rs b/docs/sdk/src/reference_docs/wasm_meta_protocol.rs new file mode 100644 index 0000000000000000000000000000000000000000..37d1460f0e1a3737217ac0c80ec41de769db4c1a --- /dev/null +++ b/docs/sdk/src/reference_docs/wasm_meta_protocol.rs @@ -0,0 +1,113 @@ +//! # WASM Meta Protocol +//! +//! All Substrate based chains adhere to a unique architectural design novel to the Polkadot +//! ecosystem. We refer to this design as the "WASM Meta Protocol". +//! +//! Consider the fact that a traditional blockchain software is usually a monolithic artifact. +//! 
Upgrading any part of the system implies upgrading the entire system. This has historically led
+//! to cumbersome, forkful upgrades being the status quo in the blockchain ecosystem.
+//!
+//! Moreover, the idea of "storing code in the state" is explored in the context of smart contract
+//! platforms, but has not been expanded further.
+//!
+//! Substrate mixes these two ideas together, and takes the novel approach of storing the
+//! blockchain's main "state transition function" in the main blockchain state, in the same fashion
+//! that a smart contract platform stores the code of individual contracts in its state. As noted in
+//! [`crate::reference_docs::blockchain_state_machines`], this state transition function is called
+//! the **Runtime**, and WASM is chosen as the bytecode. The Runtime is stored under a special key
+//! in the state (see
+//! [`sp_core::storage::well_known_keys`](../../../sp_core/index.html)) and can be
+//! updated as a part of the state transition function's execution, just like a user's account
+//! balance can be updated.
+//!
+//! > Note that while we drew an analogy between smart contracts and runtimes in the above, there
+//! > are fundamental differences between the two, explained in
+//! > [`crate::reference_docs::runtime_vs_smart_contract`].
+//!
+//! The rest of the system that is NOT the state transition function is called the **node**, and
+//! is a normal binary that is compiled from Rust to different hardware targets.
+//!
+//! This design enables all Substrate-based chains to be fork-less-ly upgradeable, because the
+//! Runtime can be updated on the fly, within the execution of a block, and the node is (for the
+//! most part) oblivious to the change that is happening.
+//!
+//! Therefore, the high-level architecture of any Substrate-based chain can be demonstrated as
+//! follows:
+#![doc = simple_mermaid::mermaid!("../../../mermaid/substrate_simple.mmd")]
+//!
+//! The node and the runtime need to communicate. This is done through two concepts:
+//!
+//! 1. **Host functions**: a way for the (WASM) runtime to talk to the node. All host functions are
+//!    defined in [`sp_io`]. For example, [`sp_io::storage`] is the set of host functions that
+//!    allows the runtime to read and write data to the on-chain state.
+//! 2. **Runtime APIs**: a way for the node to talk to the WASM runtime. Runtime APIs are defined
+//!    using macros and utilities in [`sp_api`]. For example, [`sp_api::Core`] is the most
+//!    fundamental runtime API that any blockchain must implement in order to be able to (re)
+//!    execute blocks.
+#![doc = simple_mermaid::mermaid!("../../../mermaid/substrate_client_runtime.mmd")]
+//!
+//! A runtime must have a set of runtime APIs in order to have any meaningful blockchain
+//! functionality, but it can also expose more APIs. See TODO as an example of how to add custom
+//! runtime APIs to your FRAME-based runtime.
+//!
+//! Similarly, for a runtime to be "compatible" with a node, the node must implement the full set of
+//! host functions that the runtime at any point in time requires. Given the fact that a runtime can
+//! evolve over time, and a blockchain node (typically) wishes to be capable of re-executing all the
+//! previous blocks, this means that a node must always maintain support for the old host functions.
+//! This also implies that adding a new host function is a big commitment and should be done with
+//! care. This is why, for example, adding a new host function to Polkadot always requires an RFC.
+//!
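+//! As a rough, illustrative sketch of these two directions (the names below are made up, and the
+//! exact macro syntax and signatures may differ between versions):
+//!
+//! ```ignore
+//! // A custom runtime API, declared with `sp_api`, that the node can later query from the
+//! // runtime.
+//! sp_api::decl_runtime_apis! {
+//!     pub trait ExampleApi {
+//!         fn example_value() -> u32;
+//!     }
+//! }
+//!
+//! // The runtime would then implement `ExampleApi` for `Runtime` inside its
+//! // `impl_runtime_apis!` block, next to `sp_api::Core` and friends.
+//!
+//! // Conversely, the runtime talks to the node through host functions, for example reading and
+//! // writing onchain state via `sp_io::storage`. These can only be called from within the
+//! // runtime (i.e. inside an externalities environment).
+//! fn set_and_read_back() {
+//!     sp_io::storage::set(b"example-key", b"example-value");
+//!     let _maybe_value = sp_io::storage::get(b"example-key");
+//! }
+//! ```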
+//!
+//! ## Node vs. Runtime
+//!
+//! A common question is: which components of the system end up being part of the node, and which
+//! ones end up in the runtime?
+//!
+//! Recall from [`crate::reference_docs::blockchain_state_machines`] that the runtime is the state
+//! transition function. Anything that needs to influence how your blockchain's state is updated
+//! should be a part of the runtime. For example, the logic around currency, governance, identity or
+//! any other application-specific logic that has to do with the state is part of the runtime.
+//!
+//! Anything that does not have to do with the state-transition function and will only
+//! facilitate/enable it is part of the node. For example, the database, networking, and even the
+//! consensus algorithm are all node-side components.
+//!
+//! > Consensus is to your runtime what HTTP is to a web application. It is the underlying
+//! > engine that enables trustless execution of the runtime in a distributed manner whilst
+//! > maintaining a canonical outcome of that execution.
+#![doc = simple_mermaid::mermaid!("../../../mermaid/substrate_with_frame.mmd")]
+//!
+//! ## State
+//!
+//! From the previous sections, we know that the database component is part of the node, not the
+//! runtime. We also hinted that a set of host functions ([`sp_io::storage`]) is how the runtime
+//! issues commands to the node to read/write to the state. Let's dive deeper into this.
+//!
+//! The state of the blockchain, what we seek to come to consensus about, is indeed *kept* on the
+//! node side. Nonetheless, the runtime is the only component that:
+//!
+//! 1. Can update the state.
+//! 2. Can fully interpret the state.
+//!
+//! In fact, [`sp_core::storage::well_known_keys`] are the only state keys that the node side is
+//! aware of. The rest of the state, including what logic the runtime has, what balance each user
+//! has and so on, is only comprehensible to the runtime.
+#![doc = simple_mermaid::mermaid!("../../../mermaid/state.mmd")]
+//!
+//! In the above diagram, all of the state keys and values are opaque bytes to the node. The node
+//! does not know what they mean, and it does not know what the type of the corresponding value is
+//! (e.g. whether it is a number or a vector). The runtime, on the contrary, knows both the meaning
+//! of the keys and the type of the values.
+//!
+//! This opaqueness is the fundamental reason why Substrate-based chains can fork-less-ly upgrade:
+//! the node side code is kept oblivious to all of the details of the state transition
+//! function. Therefore, the state transition function can freely upgrade without the node needing
+//! to know.
+//!
+//! ## Native Runtime
+//!
+//! TODO
+//!
+//!
+//! ## Example: Block Execution.
+//!
+//! 
TODO diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index fb161848fb6cf8ee0fd4f0b6d8aba6da7f3c382c..d769957490e97d653af2e4ff6682dc5e2901c7fe 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -18,15 +18,18 @@ rust-version = "1.64.0" readme = "README.md" authors.workspace = true edition.workspace = true -version = "1.1.0" +version = "1.5.0" default-run = "polkadot" +[lints] +workspace = true + [dependencies] color-eyre = { version = "0.6.1", default-features = false } -tikv-jemallocator = { version = "0.5.0", optional = true, features = [ "unprefixed_malloc_on_supported_platforms" ] } +tikv-jemallocator = { version = "0.5.0", optional = true, features = ["unprefixed_malloc_on_supported_platforms"] } # Crates in our workspace, defined as dependencies so we can pass them feature flags. -polkadot-cli = { path = "cli", features = [ "westend-native", "rococo-native" ] } +polkadot-cli = { path = "cli", features = ["rococo-native", "westend-native"] } polkadot-node-core-pvf = { path = "node/core/pvf" } polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" } polkadot-overseer = { path = "node/overseer" } @@ -36,7 +39,7 @@ polkadot-node-core-pvf-common = { path = "node/core/pvf/common" } polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" } [target.'cfg(target_os = "linux")'.dependencies] -tikv-jemallocator = { version = "0.5.0", features = [ "unprefixed_malloc_on_supported_platforms" ] } +tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"] } [dev-dependencies] assert_cmd = "2.0.4" @@ -53,23 +56,22 @@ substrate-build-script-utils = { path = "../substrate/utils/build-script-utils" maintenance = { status = "actively-developed" } [features] -runtime-benchmarks = [ "polkadot-cli/runtime-benchmarks" ] -try-runtime = [ "polkadot-cli/try-runtime" ] -fast-runtime = [ "polkadot-cli/fast-runtime" ] -runtime-metrics = [ "polkadot-cli/runtime-metrics" ] -pyroscope = [ "polkadot-cli/pyroscope" ] +runtime-benchmarks = ["polkadot-cli/runtime-benchmarks"] +try-runtime = ["polkadot-cli/try-runtime"] +fast-runtime = ["polkadot-cli/fast-runtime"] +runtime-metrics = ["polkadot-cli/runtime-metrics"] +pyroscope = ["polkadot-cli/pyroscope"] jemalloc-allocator = [ "dep:tikv-jemallocator", "polkadot-node-core-pvf-prepare-worker/jemalloc-allocator", "polkadot-node-core-pvf/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator", ] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load -ci-only-tests = [ "polkadot-node-core-pvf/ci-only-tests" ] +ci-only-tests = ["polkadot-node-core-pvf/ci-only-tests"] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] @@ -89,12 +91,12 @@ assets = [ [ "target/release/polkadot-prepare-worker", "/usr/lib/polkadot/", - "755" + "755", ], [ "target/release/polkadot-execute-worker", "/usr/lib/polkadot/", - "755" + "755", ], [ "scripts/packaging/polkadot.service", diff --git a/polkadot/README.md b/polkadot/README.md index 3c234bb8e3f4d3bed19331047f42c9afa2658053..f27fc86df27c987582439e0b207e0336a6d612d8 100644 --- a/polkadot/README.md +++ b/polkadot/README.md @@ -63,7 +63,6 @@ directory of the repo: ```bash git checkout -./scripts/init.sh cargo build --release ``` @@ -213,7 +212,7 @@ that we currently maintain. 
### Using Docker -[Using Docker](../docs/docker.md) +[Using Docker](../docs/contributor/docker.md) ### Shell Completion @@ -223,11 +222,11 @@ that we currently maintain. ### Contributing Guidelines -[Contribution Guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md) +[Contribution Guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) ### Contributor Code of Conduct -[Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CODE_OF_CONDUCT.md) +[Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md) ## License diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 0c2925c76e82f155a2d0fffd012f80b1ed4ee3cc..ed90b2d58a41d7278078baebb4ae6850b564fb89 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [package.metadata.wasm-pack.profile.release] # `wasm-opt` has some problems on Linux, see # https://github.com/rustwasm/wasm-pack/issues/781 etc. @@ -15,7 +18,8 @@ wasm-opt = false crate-type = ["cdylib", "rlib"] [dependencies] -clap = { version = "4.4.6", features = ["derive"], optional = true } +cfg-if = "1.0" +clap = { version = "4.4.11", features = ["derive"], optional = true } log = "0.4.17" thiserror = "1.0.48" futures = "0.3.21" @@ -43,8 +47,8 @@ sc-storage-monitor = { path = "../../substrate/client/storage-monitor" } substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" } [features] -default = [ "cli", "db", "full-node" ] -db = [ "service/db" ] +default = ["cli", "db", "full-node"] +db = ["service/db"] cli = [ "clap", "frame-benchmarking-cli", @@ -60,19 +64,17 @@ runtime-benchmarks = [ "sc-service?/runtime-benchmarks", "service/runtime-benchmarks", ] -full-node = [ "service/full-node" ] -try-runtime = [ "service/try-runtime", "try-runtime-cli/try-runtime" ] -fast-runtime = [ "service/fast-runtime" ] -pyroscope = [ "pyro", "pyroscope_pprofrs" ] +full-node = ["service/full-node"] +try-runtime = ["service/try-runtime", "try-runtime-cli/try-runtime"] +fast-runtime = ["service/fast-runtime"] +pyroscope = ["pyro", "pyroscope_pprofrs"] # Configure the native runtimes to use. -westend-native = [ "service/westend-native" ] -rococo-native = [ "service/rococo-native" ] +westend-native = ["service/westend-native"] +rococo-native = ["service/rococo-native"] -malus = [ "full-node", "service/malus" ] +malus = ["full-node", "service/malus"] runtime-metrics = [ "polkadot-node-metrics/runtime-metrics", "service/runtime-metrics", ] - -network-protocol-staging = [ "service/network-protocol-staging" ] diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index e20e35c9103813e7884c2a56a9d9d82d061b8768..30f35ebcb6ffa95f0f2384821e168353d95df94b 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -82,22 +82,22 @@ pub struct RunCmd { #[arg(long = "force-rococo")] pub force_rococo: bool, - /// Setup a GRANDPA scheduled voting pause. - /// - /// This parameter takes two values, namely a block number and a delay (in blocks). - /// - /// After the given block number is finalized the GRANDPA voter will temporarily - /// stop voting for new blocks until the given delay has elapsed (i.e. until a - /// block at height `pause_block + delay` is imported). - #[arg(long = "grandpa-pause", num_args = 2)] - pub grandpa_pause: Vec, - /// Disable the BEEFY gadget. 
/// /// Currently enabled by default on 'Rococo', 'Wococo' and 'Versi'. #[arg(long)] pub no_beefy: bool, + /// Allows a validator to run insecurely outside of Secure Validator Mode. Security features + /// are still enabled on a best-effort basis, but missing features are no longer required. For + /// more information see . + #[arg(long = "insecure-validator-i-know-what-i-do", requires = "validator")] + pub insecure_validator: bool, + + /// Enable the block authoring backoff that is triggered when finality is lagging. + #[arg(long)] + pub force_authoring_backoff: bool, + /// Add the destination address to the 'Jaeger' agent. /// /// Must be valid socket address, of format `IP:Port` (commonly `127.0.0.1:6831`). diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index 2dcf5e0e8d7bf616d5c1eef3a775ddfa25d44721..018400fbcf8bf0513ebd3cae3c6ae6e6a0230cd3 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -215,12 +215,6 @@ where set_default_ss58_version(chain_spec); - let grandpa_pause = if cli.run.grandpa_pause.is_empty() { - None - } else { - Some((cli.run.grandpa_pause[0], cli.run.grandpa_pause[1])) - }; - if chain_spec.is_kusama() { info!("----------------------------"); info!("This chain is not in any way"); @@ -244,6 +238,8 @@ where let node_version = if cli.run.disable_worker_version_check { None } else { Some(NODE_VERSION.to_string()) }; + let secure_validator_mode = cli.run.base.validator && !cli.run.insecure_validator; + runner.run_node_until_exit(move |config| async move { let hwbench = (!cli.run.no_hardware_benchmarks) .then_some(config.database.path().map(|database_path| { @@ -257,11 +253,12 @@ where config, service::NewFullParams { is_parachain_node: service::IsParachainNode::No, - grandpa_pause, enable_beefy, + force_authoring_backoff: cli.run.force_authoring_backoff, jaeger_agent, telemetry_worker_handle: None, node_version, + secure_validator_mode, workers_path: cli.run.workers_path, workers_names: None, overseer_gen, diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index f843ec17943cbaddc886f73b33fc84bf53bb644e..32ee8d3ff3fbf149b452c7801edbeff876b400aa 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -6,15 +6,18 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] sp-core = { path = "../../substrate/primitives/core", default-features = false } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive" ] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } [features] -default = [ "std" ] +default = ["std"] std = [ "parity-scale-codec/std", "scale-info/std", diff --git a/polkadot/doc/testing.md b/polkadot/doc/testing.md index 1045303baf0df1e757cefa374ca2396b99984504..76703b1b4398a0cac8423183a5d2febfabab0e6c 100644 --- a/polkadot/doc/testing.md +++ b/polkadot/doc/testing.md @@ -1,6 +1,7 @@ # Testing -Automated testing is an essential tool to assure correctness. +Testing is an essential tool to assure correctness. This document describes how we test the Polkadot code, whether +locally, at scale, and/or automatically in CI. 
## Scopes @@ -8,27 +9,57 @@ The testing strategy for Polkadot is 4-fold: ### Unit testing (1) -Boring, small scale correctness tests of individual functions. +Boring, small scale correctness tests of individual functions. It is usually +enough to run `cargo test` in the crate you are testing. + +For full coverage you may have to pass some additional features. For example: + +```sh +cargo test --features ci-only-tests +``` ### Integration tests -There are two variants of integration tests: +There are the following variants of integration tests: #### Subsystem tests (2) One particular subsystem (subsystem under test) interacts with a mocked overseer that is made to assert incoming and -outgoing messages of the subsystem under test. This is largely present today, but has some fragmentation in the evolved -integration test implementation. A `proc-macro`/`macro_rules` would allow for more consistent implementation and -structure. +outgoing messages of the subsystem under test. See e.g. the `statement-distribution` tests. #### Behavior tests (3) -Launching small scale networks, with multiple adversarial nodes without any further tooling required. This should -include tests around the thresholds in order to evaluate the error handling once certain assumed invariants fail. +Launching small scale networks, with multiple adversarial nodes. This should include tests around the thresholds in +order to evaluate the error handling once certain assumed invariants fail. + +Currently, we commonly use **zombienet** to run mini test-networks, whether locally or in CI. To run on your machine: + +- First, make sure you have [zombienet][zombienet] installed. + +- Now, all the required binaries must be installed in your $PATH. You must run the following from the `polkadot/` +directory in order to test your changes. (Not `zombienet setup`, or you will get the released binaries without your +local changes!) -For this purpose based on `AllSubsystems` and `proc-macro` `AllSubsystemsGen`. +```sh +cargo install --path . --locked +``` + +- You will also need to install whatever binaries are required for your specific tests. For example, to install +`undying-collator`, from `polkadot/`, run: + +```sh +cargo install --path ./parachain/test-parachains/undying/collator --locked +``` -This assumes a simplistic test runtime. +- Finally, run the zombienet test from the `polkadot` directory: + +```sh +RUST_LOG=parachain::pvf=trace zombienet --provider=native spawn zombienet_tests/functional/0001-parachains-pvf.toml +``` + +- You can pick a validator node like `alice` from the output and view its logs +(`tail -f `) or metrics. Make sure there is nothing funny in the logs +(try `grep WARN `). #### Testing at scale (4) @@ -41,13 +72,27 @@ addition prometheus avoiding additional Polkadot source changes. _Behavior tests_ and _testing at scale_ have naturally soft boundary. The most significant difference is the presence of a real network and the number of nodes, since a single host often not capable to run multiple nodes at once. ---- +## Observing Logs + +To verify expected behavior it's often useful to observe logs. To avoid too many +logs at once, you can run one test at a time: + +1. Add `sp_tracing::try_init_simple();` to the beginning of a test +2. Specify `RUST_LOG=::=trace` before the cargo command. + +For example: + +```sh +RUST_LOG=parachain::pvf=trace cargo test execute_can_run_serially +``` + +For more info on how our logs work, check [the docs][logs]. 
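+As a minimal illustration of step 1 (the body below is only a placeholder, not the actual test
+code):
+
+```rust
+#[test]
+fn execute_can_run_serially() {
+    // Initialize tracing so that `RUST_LOG` filters apply to this test.
+    sp_tracing::try_init_simple();
+    // ... the rest of the original test body goes here ...
+}
+```
+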
## Coverage Coverage gives a _hint_ of the actually covered source lines by tests and test applications. -The state of the art is currently [tarpaulin][tarpaulin] which unfortunately yields a lot of false negatives. Lines that +The state of the art is currently tarpaulin which unfortunately yields a lot of false negatives. Lines that are in fact covered, marked as uncovered due to a mere linebreak in a statement can cause these artifacts. This leads to lower coverage percentages than there actually is. @@ -102,7 +147,7 @@ Fuzzing is an approach to verify correctness against arbitrary or partially stru Currently implemented fuzzing targets: -* `erasure-coding` +- `erasure-coding` The tooling of choice here is `honggfuzz-rs` as it allows _fastest_ coverage according to "some paper" which is a positive feature when run as part of PRs. @@ -113,16 +158,16 @@ hence simply not feasible due to the amount of state that is required. Other candidates to implement fuzzing are: -* `rpc` -* ... +- `rpc` +- ... ## Performance metrics There are various ways of performance metrics. -* timing with `criterion` -* cache hits/misses w/ `iai` harness or `criterion-perf` -* `coz` a performance based compiler +- timing with `criterion` +- cache hits/misses w/ `iai` harness or `criterion-perf` +- `coz` a performance based compiler Most of them are standard tools to aid in the creation of statistical tests regarding change in time of certain unit tests. @@ -140,10 +185,10 @@ pursued at the current time. Requirements: -* spawn nodes with preconfigured behaviors -* allow multiple types of configuration to be specified -* allow extendability via external crates -* ... +- spawn nodes with preconfigured behaviors +- allow multiple types of configuration to be specified +- allow extendability via external crates +- ... --- @@ -251,5 +296,7 @@ behavior_testcase!{ } ``` +[zombienet]: https://github.com/paritytech/zombienet [Gurke]: https://github.com/paritytech/gurke [simnet]: https://github.com/paritytech/simnet_scripts +[logs]: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/node/gum/src/lib.rs diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index ccfe7f14eb4647e114eebf5f4272b7a7cae4e664..f174f8ad0cf4b82e2dd48d4d9d602c6a024c4275 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -6,11 +6,14 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] polkadot-primitives = { path = "../primitives" } polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" } novelpoly = { package = "reed-solomon-novelpoly", version = "1.0.0" } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["std", "derive"] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "std"] } sp-core = { path = "../../substrate/primitives/core" } sp-trie = { path = "../../substrate/primitives/trie" } thiserror = "1.0.48" diff --git a/polkadot/erasure-coding/fuzzer/Cargo.toml b/polkadot/erasure-coding/fuzzer/Cargo.toml index 862b148cc5b136528af1bce8df5c5fccb903b0d9..4e5ef9d229d82db298f57cd4f853042079b5a1f8 100644 --- a/polkadot/erasure-coding/fuzzer/Cargo.toml +++ b/polkadot/erasure-coding/fuzzer/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] polkadot-erasure-coding = { path = ".." 
} honggfuzz = "0.5" diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index c1848f47fc69f0dba739fafed79279adbf31cd4b..366c08a6c6705dedf27d212b284b8b848769d0eb 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Collator-side subsystem that handles incoming candidate submissions from the parachain." +[lints] +workspace = true + [dependencies] futures = "0.3.21" gum = { package = "tracing-gum", path = "../gum" } @@ -15,7 +18,7 @@ polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-primitives = { path = "../../primitives" } sp-core = { path = "../../../substrate/primitives/core" } -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } +sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } thiserror = "1.0.48" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 59a6708f17e4701276a034c923a79933c8f32092..5fbcec50cd3d60bf13de66d3f0e3f8c9713ae196 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Approval Voting Subsystem of the Polkadot node" +[lints] +workspace = true + [dependencies] futures = "0.3.21" futures-timer = "3.0.2" @@ -13,8 +16,8 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ gum = { package = "tracing-gum", path = "../../gum" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } schnellru = "0.2.1" -merlin = "2.0" -schnorrkel = "0.9.1" +merlin = "3.0" +schnorrkel = "0.11.4" kvdb = "0.13.0" derive_more = "0.99.17" thiserror = "1.0.48" @@ -32,14 +35,14 @@ sp-consensus = { path = "../../../../substrate/primitives/consensus/common", def sp-consensus-slots = { path = "../../../../substrate/primitives/consensus/slots", default-features = false } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto", default-features = false, features = ["full_crypto"] } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -rand_core = "0.5.1" +# should match schnorrkel +rand_core = "0.6.2" rand_chacha = { version = "0.3.1" } rand = "0.8.5" [dev-dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" parking_lot = "0.12.0" -rand_core = "0.5.1" # should match schnorrkel sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs index 5d24ff164193de287893b5dc6bcb156f6bcda578..0aa6102fbd6d243b793e889fefb80297a7bb1d89 100644 --- a/polkadot/node/core/approval-voting/src/approval_checking.rs +++ b/polkadot/node/core/approval-voting/src/approval_checking.rs @@ -25,6 +25,15 @@ use crate::{ time::Tick, }; +/// Result of counting the necessary tranches needed for approving a block. 
+#[derive(Debug, PartialEq, Clone)] +pub struct TranchesToApproveResult { + /// The required tranches for approving this block + pub required_tranches: RequiredTranches, + /// The total number of no_shows at the moment we are doing the counting. + pub total_observed_no_shows: usize, +} + /// The required tranches of assignments needed to determine whether a candidate is approved. #[derive(Debug, PartialEq, Clone)] pub enum RequiredTranches { @@ -64,7 +73,7 @@ pub enum RequiredTranches { } /// The result of a check. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq)] pub enum Check { /// The candidate is unapproved. Unapproved, @@ -178,6 +187,7 @@ struct State { next_no_show: Option, /// The last tick at which a considered assignment was received. last_assignment_tick: Option, + total_observed_no_shows: usize, } impl State { @@ -187,41 +197,53 @@ impl State { needed_approvals: usize, n_validators: usize, no_show_duration: Tick, - ) -> RequiredTranches { + ) -> TranchesToApproveResult { let covering = if self.depth == 0 { 0 } else { self.covering }; if self.depth != 0 && self.assignments + covering + self.uncovered >= n_validators { - return RequiredTranches::All + return TranchesToApproveResult { + required_tranches: RequiredTranches::All, + total_observed_no_shows: self.total_observed_no_shows, + } } // If we have enough assignments and all no-shows are covered, we have reached the number // of tranches that we need to have. if self.assignments >= needed_approvals && (covering + self.uncovered) == 0 { - return RequiredTranches::Exact { - needed: tranche, - tolerated_missing: self.covered, - next_no_show: self.next_no_show, - last_assignment_tick: self.last_assignment_tick, + return TranchesToApproveResult { + required_tranches: RequiredTranches::Exact { + needed: tranche, + tolerated_missing: self.covered, + next_no_show: self.next_no_show, + last_assignment_tick: self.last_assignment_tick, + }, + total_observed_no_shows: self.total_observed_no_shows, } } // We're pending more assignments and should look at more tranches. let clock_drift = self.clock_drift(no_show_duration); if self.depth == 0 { - RequiredTranches::Pending { - considered: tranche, - next_no_show: self.next_no_show, - // during the initial assignment-gathering phase, we want to accept assignments - // from any tranche. Note that honest validators will still not broadcast their - // assignment until it is time to do so, regardless of this value. - maximum_broadcast: DelayTranche::max_value(), - clock_drift, + TranchesToApproveResult { + required_tranches: RequiredTranches::Pending { + considered: tranche, + next_no_show: self.next_no_show, + // during the initial assignment-gathering phase, we want to accept assignments + // from any tranche. Note that honest validators will still not broadcast their + // assignment until it is time to do so, regardless of this value. 
+ maximum_broadcast: DelayTranche::max_value(), + clock_drift, + }, + total_observed_no_shows: self.total_observed_no_shows, } } else { - RequiredTranches::Pending { - considered: tranche, - next_no_show: self.next_no_show, - maximum_broadcast: tranche + (covering + self.uncovered) as DelayTranche, - clock_drift, + TranchesToApproveResult { + required_tranches: RequiredTranches::Pending { + considered: tranche, + next_no_show: self.next_no_show, + maximum_broadcast: tranche + (covering + self.uncovered) as DelayTranche, + clock_drift, + }, + total_observed_no_shows: self.total_observed_no_shows, } } } @@ -276,6 +298,7 @@ impl State { uncovered, next_no_show, last_assignment_tick, + total_observed_no_shows: self.total_observed_no_shows + new_no_shows, } } } @@ -372,7 +395,7 @@ pub fn tranches_to_approve( block_tick: Tick, no_show_duration: Tick, needed_approvals: usize, -) -> RequiredTranches { +) -> TranchesToApproveResult { let tick_now = tranche_now as Tick + block_tick; let n_validators = approval_entry.n_validators(); @@ -384,6 +407,7 @@ pub fn tranches_to_approve( uncovered: 0, next_no_show: None, last_assignment_tick: None, + total_observed_no_shows: 0, }; // The `ApprovalEntry` doesn't have any data for empty tranches. We still want to iterate over @@ -434,7 +458,7 @@ pub fn tranches_to_approve( let s = s.advance(n_assignments, no_shows, next_no_show, last_assignment_tick); let output = s.output(tranche, needed_approvals, n_validators, no_show_duration); - *state = match output { + *state = match output.required_tranches { RequiredTranches::Exact { .. } | RequiredTranches::All => { // Wipe the state clean so the next iteration of this closure will terminate // the iterator. This guarantees that we can call `last` further down to see @@ -464,15 +488,17 @@ mod tests { #[test] fn pending_is_not_approved() { - let candidate = approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), - session: 0, - block_assignments: BTreeMap::default(), - approvals: BitVec::default(), - } - .into(); + let candidate = CandidateEntry::from_v1( + approval_db::v1::CandidateEntry { + candidate: dummy_candidate_receipt(dummy_hash()), + session: 0, + block_assignments: BTreeMap::default(), + approvals: BitVec::default(), + }, + 0, + ); - let approval_entry = approval_db::v2::ApprovalEntry { + let approval_entry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: BitVec::default(), our_assignment: None, @@ -497,29 +523,31 @@ mod tests { #[test] fn exact_takes_only_assignments_up_to() { - let mut candidate: CandidateEntry = approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), - session: 0, - block_assignments: BTreeMap::default(), - approvals: bitvec![u8, BitOrderLsb0; 0; 10], - } - .into(); + let mut candidate: CandidateEntry = CandidateEntry::from_v1( + approval_db::v1::CandidateEntry { + candidate: dummy_candidate_receipt(dummy_hash()), + session: 0, + block_assignments: BTreeMap::default(), + approvals: bitvec![u8, BitOrderLsb0; 0; 10], + }, + 0, + ); for i in 0..3 { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v2::ApprovalEntry { + let approval_entry = approval_db::v3::ApprovalEntry { tranches: vec![ - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 0, assignments: (0..2).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 1, assignments: (2..5).map(|i| (ValidatorIndex(i), 
1.into())).collect(), }, - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 2, assignments: (5..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, @@ -569,29 +597,31 @@ mod tests { #[test] fn one_honest_node_always_approves() { - let mut candidate: CandidateEntry = approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), - session: 0, - block_assignments: BTreeMap::default(), - approvals: bitvec![u8, BitOrderLsb0; 0; 10], - } - .into(); + let mut candidate: CandidateEntry = CandidateEntry::from_v1( + approval_db::v1::CandidateEntry { + candidate: dummy_candidate_receipt(dummy_hash()), + session: 0, + block_assignments: BTreeMap::default(), + approvals: bitvec![u8, BitOrderLsb0; 0; 10], + }, + 0, + ); for i in 0..3 { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v2::ApprovalEntry { + let approval_entry = approval_db::v3::ApprovalEntry { tranches: vec![ - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 0, assignments: (0..4).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 1, assignments: (4..6).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 2, assignments: (6..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, @@ -647,7 +677,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 5], our_assignment: None, @@ -675,7 +705,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 1, tolerated_missing: 0, @@ -691,7 +722,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, @@ -715,7 +746,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 2, next_no_show: Some(block_tick + no_show_duration), @@ -731,7 +763,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, @@ -759,7 +791,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 11, next_no_show: None, @@ -776,7 +809,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -807,7 +840,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 1, next_no_show: None, @@ -826,7 +860,8 @@ mod tests { block_tick, no_show_duration, 
needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 1, next_no_show: None, @@ -843,7 +878,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -879,7 +914,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 1, tolerated_missing: 0, @@ -898,7 +934,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 2, tolerated_missing: 1, @@ -917,7 +954,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 2, next_no_show: None, @@ -934,7 +972,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -970,7 +1008,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 2, tolerated_missing: 1, @@ -992,7 +1031,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 2, next_no_show: None, @@ -1013,7 +1053,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 3, tolerated_missing: 2, @@ -1029,22 +1070,24 @@ mod tests { let no_show_duration = 10; let needed_approvals = 3; - let mut candidate: CandidateEntry = approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), - session: 0, - block_assignments: BTreeMap::default(), - approvals: bitvec![u8, BitOrderLsb0; 0; 3], - } - .into(); + let mut candidate: CandidateEntry = CandidateEntry::from_v1( + approval_db::v1::CandidateEntry { + candidate: dummy_candidate_receipt(dummy_hash()), + session: 0, + block_assignments: BTreeMap::default(), + approvals: bitvec![u8, BitOrderLsb0; 0; 3], + }, + 0, + ); for i in 0..3 { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v2::ApprovalEntry { + let approval_entry = approval_db::v3::ApprovalEntry { tranches: vec![ // Assignments with invalid validator indexes. 
- approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 1, assignments: (2..5).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, @@ -1068,7 +1111,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 10, next_no_show: None, @@ -1094,7 +1138,7 @@ mod tests { ]; for test_tranche in test_tranches { - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), backing_group: GroupIndex(0), our_assignment: None, @@ -1345,10 +1389,11 @@ mod tests { uncovered: 0, next_no_show: None, last_assignment_tick: None, + total_observed_no_shows: 0, }; assert_eq!( - state.output(0, 10, 10, 20), + state.output(0, 10, 10, 20).required_tranches, RequiredTranches::Pending { considered: 0, next_no_show: None, @@ -1368,10 +1413,11 @@ mod tests { uncovered: 0, next_no_show: None, last_assignment_tick: None, + total_observed_no_shows: 0, }; assert_eq!( - state.output(0, 10, 10, 20), + state.output(0, 10, 10, 20).required_tranches, RequiredTranches::Exact { needed: 0, tolerated_missing: 0, diff --git a/polkadot/node/core/approval-voting/src/approval_db/common/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/common/migration_helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..747bbdb2064ef1467c92993ccb825dece0dfb262 --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/common/migration_helpers.rs @@ -0,0 +1,40 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; + +use polkadot_node_primitives::approval::{ + v1::{AssignmentCert, AssignmentCertKind, VrfProof, VrfSignature, RELAY_VRF_MODULO_CONTEXT}, + v2::VrfPreOutput, +}; + +pub fn make_bitvec(len: usize) -> BitVec { + bitvec::bitvec![u8, BitOrderLsb0; 0; len] +} + +pub fn dummy_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"test-garbage"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let preout = inout.to_preout(); + + AssignmentCert { + kind, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, + } +} diff --git a/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..249dcf912df50530e87f732edf2848664df1136e --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs @@ -0,0 +1,293 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Common helper functions for all versions of approval-voting database. +use std::sync::Arc; + +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; +use polkadot_node_subsystem_util::database::{DBTransaction, Database}; +use polkadot_primitives::{BlockNumber, CandidateHash, CandidateIndex, Hash}; + +use crate::{ + backend::{Backend, BackendWriteOp, V1ReadBackend, V2ReadBackend}, + persisted_entries, +}; + +use super::{ + v2::{load_block_entry_v1, load_candidate_entry_v1}, + v3::{load_block_entry_v2, load_candidate_entry_v2, BlockEntry, CandidateEntry}, +}; + +pub mod migration_helpers; + +const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; + +/// A range from earliest..last block number stored within the DB. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber); +/// The database config. +#[derive(Debug, Clone, Copy)] +pub struct Config { + /// The column family in the database where data is stored. + pub col_approval_data: u32, +} + +/// `DbBackend` is a concrete implementation of the higher-level Backend trait +pub struct DbBackend { + inner: Arc, + config: Config, +} + +impl DbBackend { + /// Create a new [`DbBackend`] with the supplied key-value store and + /// config. + pub fn new(db: Arc, config: Config) -> Self { + DbBackend { inner: db, config } + } +} + +/// Errors while accessing things from the DB. +#[derive(Debug, derive_more::From, derive_more::Display)] +pub enum Error { + Io(std::io::Error), + InvalidDecoding(parity_scale_codec::Error), + InternalError(SubsystemError), +} + +impl std::error::Error for Error {} + +/// Result alias for DB errors. +pub type Result = std::result::Result; + +impl Backend for DbBackend { + fn load_block_entry( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } + + fn load_candidate_entry( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into)) + } + + fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> { + load_blocks_at_height(&*self.inner, &self.config, block_height) + } + + fn load_all_blocks(&self) -> SubsystemResult> { + load_all_blocks(&*self.inner, &self.config) + } + + fn load_stored_blocks(&self) -> SubsystemResult> { + load_stored_blocks(&*self.inner, &self.config) + } + + /// Atomically write the list of operations, with later operations taking precedence over prior. 
+ fn write(&mut self, ops: I) -> SubsystemResult<()> + where + I: IntoIterator, + { + let mut tx = DBTransaction::new(); + for op in ops { + match op { + BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { + tx.put_vec( + self.config.col_approval_data, + &STORED_BLOCKS_KEY, + stored_block_range.encode(), + ); + }, + BackendWriteOp::DeleteStoredBlockRange => { + tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); + }, + BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { + tx.put_vec( + self.config.col_approval_data, + &blocks_at_height_key(h), + blocks.encode(), + ); + }, + BackendWriteOp::DeleteBlocksAtHeight(h) => { + tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); + }, + BackendWriteOp::WriteBlockEntry(block_entry) => { + let block_entry: BlockEntry = block_entry.into(); + tx.put_vec( + self.config.col_approval_data, + &block_entry_key(&block_entry.block_hash), + block_entry.encode(), + ); + }, + BackendWriteOp::DeleteBlockEntry(hash) => { + tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); + }, + BackendWriteOp::WriteCandidateEntry(candidate_entry) => { + let candidate_entry: CandidateEntry = candidate_entry.into(); + tx.put_vec( + self.config.col_approval_data, + &candidate_entry_key(&candidate_entry.candidate.hash()), + candidate_entry.encode(), + ); + }, + BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { + tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); + }, + } + } + + self.inner.write(tx).map_err(|e| e.into()) + } +} + +impl V1ReadBackend for DbBackend { + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + candidate_index: CandidateIndex, + ) -> SubsystemResult> { + load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash) + .map(|e| e.map(|e| persisted_entries::CandidateEntry::from_v1(e, candidate_index))) + } + + fn load_block_entry_v1( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry_v1(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } +} + +impl V2ReadBackend for DbBackend { + fn load_candidate_entry_v2( + &self, + candidate_hash: &CandidateHash, + candidate_index: CandidateIndex, + ) -> SubsystemResult> { + load_candidate_entry_v2(&*self.inner, &self.config, candidate_hash) + .map(|e| e.map(|e| persisted_entries::CandidateEntry::from_v2(e, candidate_index))) + } + + fn load_block_entry_v2( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry_v2(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } +} + +pub(crate) fn load_decode( + store: &dyn Database, + col_approval_data: u32, + key: &[u8], +) -> Result> { + match store.get(col_approval_data, key)? { + None => Ok(None), + Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), + } +} + +/// The key a given block entry is stored under. +pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] { + const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(block_hash.as_ref()); + + key +} + +/// The key a given candidate entry is stored under. 
+pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { + const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); + + key +} + +/// The key a set of block hashes corresponding to a block number is stored under. +pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { + const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; + + let mut key = [0u8; 12 + 4]; + key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); + block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); + + key +} + +/// Return all blocks which have entries in the DB, ascending, by height. +pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult> { + let mut hashes = Vec::new(); + if let Some(stored_blocks) = load_stored_blocks(store, config)? { + for height in stored_blocks.0..stored_blocks.1 { + let blocks = load_blocks_at_height(store, config, &height)?; + hashes.extend(blocks); + } + } + + Ok(hashes) +} + +/// Load the stored-blocks key from the state. +pub fn load_stored_blocks( + store: &dyn Database, + config: &Config, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a blocks-at-height entry for a given block number. +pub fn load_blocks_at_height( + store: &dyn Database, + config: &Config, + block_number: &BlockNumber, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) + .map(|x| x.unwrap_or_default()) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a block entry from the aux store. +pub fn load_block_entry( + store: &dyn Database, + config: &Config, + block_hash: &Hash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a candidate entry from the aux store in current version format. +pub fn load_candidate_entry( + store: &dyn Database, + config: &Config, + candidate_hash: &CandidateHash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} diff --git a/polkadot/node/core/approval-voting/src/approval_db/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/mod.rs index 20fb6aa82d8d902885e974d59990b29bf683f55d..78942a507f4b0fa334882d893168794443966932 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/mod.rs @@ -30,5 +30,7 @@ //! In the future, we may use a temporary DB which doesn't need to be wiped, but for the //! time being we share the same DB with the rest of Substrate. 
+pub mod common; pub mod v1; pub mod v2; +pub mod v3; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs index 07d8242b772ea9e107edf10a83cf1acd697d031f..b979cb7ef45f6bf6af02acd8fde1d04e6bee8206 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -40,10 +40,6 @@ fn make_db() -> (DbBackend, Arc) { (DbBackend::new(db_writer.clone(), TEST_CONFIG), db_writer) } -fn make_bitvec(len: usize) -> BitVec { - bitvec::bitvec![u8, BitOrderLsb0; 0; len] -} - fn make_block_entry( block_hash: Hash, parent_hash: Hash, diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index 74e997c7af8422d6075a4b67747aa66013dd1ed4..df6e4754dbd63ba81e736495297bfaf017a4e3dc 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -16,25 +16,19 @@ //! Approval DB migration helpers. use super::*; -use crate::backend::Backend; -use polkadot_node_primitives::approval::v1::{ - AssignmentCert, AssignmentCertKind, VrfOutput, VrfProof, VrfSignature, RELAY_VRF_MODULO_CONTEXT, +use crate::{ + approval_db::common::{ + migration_helpers::{dummy_assignment_cert, make_bitvec}, + Error, Result, StoredBlockRange, + }, + backend::Backend, }; + +use polkadot_node_primitives::approval::v1::AssignmentCertKind; use polkadot_node_subsystem_util::database::Database; use sp_application_crypto::sp_core::H256; use std::{collections::HashSet, sync::Arc}; -fn dummy_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { - let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); - let msg = b"test-garbage"; - let mut prng = rand_core::OsRng; - let keypair = schnorrkel::Keypair::generate_with(&mut prng); - let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); - let out = inout.to_output(); - - AssignmentCert { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } } -} - fn make_block_entry_v1( block_hash: Hash, parent_hash: Hash, @@ -54,14 +48,10 @@ fn make_block_entry_v1( } } -fn make_bitvec(len: usize) -> BitVec { - bitvec::bitvec![u8, BitOrderLsb0; 0; len] -} - /// Migrates `OurAssignment`, `CandidateEntry` and `ApprovalEntry` to version 2. /// Returns on any error. /// Must only be used in parachains DB migration code - `polkadot-service` crate. -pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { +pub fn v1_to_latest(db: Arc, config: Config) -> Result<()> { let mut backend = crate::DbBackend::new(db, config); let all_blocks = backend .load_all_blocks() @@ -85,11 +75,13 @@ pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { let mut counter = 0; // Get all candidate entries, approval entries and convert each of them. for block in all_blocks { - for (_core_index, candidate_hash) in block.candidates() { + for (candidate_index, (_core_index, candidate_hash)) in + block.candidates().iter().enumerate() + { // Loading the candidate will also perform the conversion to the updated format and // return that represantation. if let Some(candidate_entry) = backend - .load_candidate_entry_v1(&candidate_hash) + .load_candidate_entry_v1(&candidate_hash, candidate_index as CandidateIndex) .map_err(|e| Error::InternalError(e))? { // Write the updated representation. 
@@ -109,42 +101,8 @@ pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { Ok(()) } -// Checks if the migration doesn't leave the DB in an unsane state. -// This function is to be used in tests. -pub fn v1_to_v2_sanity_check( - db: Arc, - config: Config, - expected_candidates: HashSet, -) -> Result<()> { - let backend = crate::DbBackend::new(db, config); - - let all_blocks = backend - .load_all_blocks() - .unwrap() - .iter() - .map(|block_hash| backend.load_block_entry(block_hash).unwrap().unwrap()) - .collect::>(); - - let mut candidates = HashSet::new(); - - // Iterate all blocks and approval entries. - for block in all_blocks { - for (_core_index, candidate_hash) in block.candidates() { - // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. - if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { - candidates.insert(candidate_entry.candidate.hash()); - } - } - } - - assert_eq!(candidates, expected_candidates); - - Ok(()) -} - // Fills the db with dummy data in v1 scheme. -pub fn v1_to_v2_fill_test_data( +pub fn v1_fill_test_data( db: Arc, config: Config, dummy_candidate_create: F, diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs index 66df6ee8f653a992f09428a9c0b2a418aef2209d..da42fc5be485caabdd8f8428bdf15c9bf6eb08e9 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -21,145 +21,23 @@ use polkadot_node_primitives::approval::{v1::DelayTranche, v2::AssignmentCertV2} use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, - ValidatorIndex, ValidatorSignature, + BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, GroupIndex, Hash, + SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; -use crate::{ - backend::{Backend, BackendWriteOp, V1ReadBackend}, - persisted_entries, -}; +use crate::backend::V1ReadBackend; -const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; +use super::common::{block_entry_key, candidate_entry_key, load_decode, Config}; pub mod migration_helpers; #[cfg(test)] pub mod tests; -/// `DbBackend` is a concrete implementation of the higher-level Backend trait -pub struct DbBackend { - inner: Arc, - config: Config, -} - -impl DbBackend { - /// Create a new [`DbBackend`] with the supplied key-value store and - /// config. 
- pub fn new(db: Arc, config: Config) -> Self { - DbBackend { inner: db, config } - } -} - -impl V1ReadBackend for DbBackend { - fn load_candidate_entry_v1( - &self, - candidate_hash: &CandidateHash, - ) -> SubsystemResult> { - load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash) - .map(|e| e.map(Into::into)) - } - - fn load_block_entry_v1( - &self, - block_hash: &Hash, - ) -> SubsystemResult> { - load_block_entry_v1(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) - } -} - -impl Backend for DbBackend { - fn load_block_entry( - &self, - block_hash: &Hash, - ) -> SubsystemResult> { - load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) - } - - fn load_candidate_entry( - &self, - candidate_hash: &CandidateHash, - ) -> SubsystemResult> { - load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into)) - } - - fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> { - load_blocks_at_height(&*self.inner, &self.config, block_height) - } - - fn load_all_blocks(&self) -> SubsystemResult> { - load_all_blocks(&*self.inner, &self.config) - } - - fn load_stored_blocks(&self) -> SubsystemResult> { - load_stored_blocks(&*self.inner, &self.config) - } - - /// Atomically write the list of operations, with later operations taking precedence over prior. - fn write(&mut self, ops: I) -> SubsystemResult<()> - where - I: IntoIterator, - { - let mut tx = DBTransaction::new(); - for op in ops { - match op { - BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { - tx.put_vec( - self.config.col_approval_data, - &STORED_BLOCKS_KEY, - stored_block_range.encode(), - ); - }, - BackendWriteOp::DeleteStoredBlockRange => { - tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); - }, - BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { - tx.put_vec( - self.config.col_approval_data, - &blocks_at_height_key(h), - blocks.encode(), - ); - }, - BackendWriteOp::DeleteBlocksAtHeight(h) => { - tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); - }, - BackendWriteOp::WriteBlockEntry(block_entry) => { - let block_entry: BlockEntry = block_entry.into(); - tx.put_vec( - self.config.col_approval_data, - &block_entry_key(&block_entry.block_hash), - block_entry.encode(), - ); - }, - BackendWriteOp::DeleteBlockEntry(hash) => { - tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); - }, - BackendWriteOp::WriteCandidateEntry(candidate_entry) => { - let candidate_entry: CandidateEntry = candidate_entry.into(); - tx.put_vec( - self.config.col_approval_data, - &candidate_entry_key(&candidate_entry.candidate.hash()), - candidate_entry.encode(), - ); - }, - BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { - tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); - }, - } - } - - self.inner.write(tx).map_err(|e| e.into()) - } -} - -/// A range from earliest..last block number stored within the DB. -#[derive(Encode, Decode, Debug, Clone, PartialEq)] -pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber); - // slot_duration * 2 + DelayTranche gives the number of delay tranches since the // unix epoch. #[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)] @@ -168,13 +46,6 @@ pub struct Tick(u64); /// Convenience type definition pub type Bitfield = BitVec; -/// The database config. -#[derive(Debug, Clone, Copy)] -pub struct Config { - /// The column family in the database where data is stored. 
- pub col_approval_data: u32, -} - /// Details pertaining to our assignment on a block. #[derive(Encode, Decode, Debug, Clone, PartialEq)] pub struct OurAssignment { @@ -259,118 +130,6 @@ impl From for crate::Tick { } } -/// Errors while accessing things from the DB. -#[derive(Debug, derive_more::From, derive_more::Display)] -pub enum Error { - Io(std::io::Error), - InvalidDecoding(parity_scale_codec::Error), - InternalError(SubsystemError), -} - -impl std::error::Error for Error {} - -/// Result alias for DB errors. -pub type Result = std::result::Result; - -pub(crate) fn load_decode( - store: &dyn Database, - col_approval_data: u32, - key: &[u8], -) -> Result> { - match store.get(col_approval_data, key)? { - None => Ok(None), - Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), - } -} - -/// The key a given block entry is stored under. -pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] { - const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; - - let mut key = [0u8; 14 + 32]; - key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); - key[14..][..32].copy_from_slice(block_hash.as_ref()); - - key -} - -/// The key a given candidate entry is stored under. -pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { - const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; - - let mut key = [0u8; 14 + 32]; - key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); - key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); - - key -} - -/// The key a set of block hashes corresponding to a block number is stored under. -pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { - const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; - - let mut key = [0u8; 12 + 4]; - key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); - block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); - - key -} - -/// Return all blocks which have entries in the DB, ascending, by height. -pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult> { - let mut hashes = Vec::new(); - if let Some(stored_blocks) = load_stored_blocks(store, config)? { - for height in stored_blocks.0..stored_blocks.1 { - let blocks = load_blocks_at_height(store, config, &height)?; - hashes.extend(blocks); - } - } - - Ok(hashes) -} - -/// Load the stored-blocks key from the state. -pub fn load_stored_blocks( - store: &dyn Database, - config: &Config, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a blocks-at-height entry for a given block number. -pub fn load_blocks_at_height( - store: &dyn Database, - config: &Config, - block_number: &BlockNumber, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) - .map(|x| x.unwrap_or_default()) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a block entry from the aux store. -pub fn load_block_entry( - store: &dyn Database, - config: &Config, - block_hash: &Hash, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) - .map(|u: Option| u.map(|v| v.into())) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a candidate entry from the aux store in current version format. 
-pub fn load_candidate_entry( - store: &dyn Database, - config: &Config, - candidate_hash: &CandidateHash, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) - .map(|u: Option| u.map(|v| v.into())) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - /// Load a candidate entry from the aux store in v1 format. pub fn load_candidate_entry_v1( store: &dyn Database, diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs index 50a5a924ca8dba696e1f3e2d0465a91e837194ec..6021b44c2765ff12a03e1ad85bfb8117c9fcfb03 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -16,13 +16,22 @@ //! Tests for the aux-schema of approval voting. -use super::{DbBackend, StoredBlockRange, *}; use crate::{ + approval_db::{ + common::{migration_helpers::make_bitvec, DbBackend, StoredBlockRange, *}, + v2::*, + v3::{load_block_entry_v2, load_candidate_entry_v2}, + }, backend::{Backend, OverlayedBackend}, ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo}, }; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, +}; + use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::Id as ParaId; +use sp_consensus_slots::Slot; use std::{collections::HashMap, sync::Arc}; use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; @@ -60,10 +69,6 @@ fn make_block_entry( } } -fn make_bitvec(len: usize) -> BitVec { - bitvec::bitvec![u8, BitOrderLsb0; 0; len] -} - fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { let mut c = dummy_candidate_receipt(dummy_hash()); @@ -110,7 +115,10 @@ fn read_write() { overlay_db.write_stored_block_range(range.clone()); overlay_db.write_blocks_at_height(1, at_height.clone()); overlay_db.write_block_entry(block_entry.clone().into()); - overlay_db.write_candidate_entry(candidate_entry.clone().into()); + overlay_db.write_candidate_entry(crate::persisted_entries::CandidateEntry::from_v2( + candidate_entry.clone(), + 0, + )); let write_ops = overlay_db.into_write_ops(); db.write(write_ops).unwrap(); @@ -118,11 +126,11 @@ fn read_write() { assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range)); assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(), Some(block_entry.into()) ); assert_eq!( - load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(), + load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(), Some(candidate_entry.into()), ); @@ -134,8 +142,8 @@ fn read_write() { db.write(write_ops).unwrap(); assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty()); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none()); - assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none()); + assert!(load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash) .unwrap() .is_none()); } @@ -196,25 +204,27 @@ fn add_block_entry_works() { db.write(write_ops).unwrap(); assert_eq!( - 
load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), Some(block_entry_a.into()) ); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), Some(block_entry_b.into()) ); - let candidate_entry_a = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_a) - .unwrap() - .unwrap(); + let candidate_entry_a = + load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash_a) + .unwrap() + .unwrap(); assert_eq!( candidate_entry_a.block_assignments.keys().collect::>(), vec![&block_hash_a, &block_hash_b] ); - let candidate_entry_b = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_b) - .unwrap() - .unwrap(); + let candidate_entry_b = + load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash_b) + .unwrap() + .unwrap(); assert_eq!(candidate_entry_b.block_assignments.keys().collect::>(), vec![&block_hash_b]); } @@ -243,11 +253,11 @@ fn add_block_entry_adds_child() { block_entry_a.children.push(block_hash_b); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), Some(block_entry_a.into()) ); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), Some(block_entry_b.into()) ); } @@ -365,13 +375,15 @@ fn canonicalize_works() { for (c_hash, in_blocks) in expected { let (entry, in_blocks) = match in_blocks { None => { - assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash) + assert!(load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &c_hash) .unwrap() .is_none()); continue }, Some(i) => ( - load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash).unwrap().unwrap(), + load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &c_hash) + .unwrap() + .unwrap(), i, ), }; @@ -388,13 +400,13 @@ fn canonicalize_works() { for (hash, with_candidates) in expected { let (entry, with_candidates) = match with_candidates { None => { - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash) .unwrap() .is_none()); continue }, Some(i) => - (load_block_entry(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i), + (load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i), }; assert_eq!(entry.candidates.len(), with_candidates.len()); @@ -510,22 +522,22 @@ fn force_approve_works() { let write_ops = overlay_db.into_write_ops(); db.write(write_ops).unwrap(); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a,) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_a,) .unwrap() .unwrap() .approved_bitfield .all()); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b,) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_b,) .unwrap() .unwrap() .approved_bitfield .all()); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_c,) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_c,) .unwrap() .unwrap() .approved_bitfield .not_any()); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_d,) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_d,) .unwrap() .unwrap() .approved_bitfield diff --git 
a/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad5e89ef3de84035ff3e9c79533edfc07bf8d4c5 --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs @@ -0,0 +1,237 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Approval DB migration helpers. +use super::*; +use crate::{ + approval_db::common::{ + block_entry_key, candidate_entry_key, + migration_helpers::{dummy_assignment_cert, make_bitvec}, + Config, Error, Result, StoredBlockRange, + }, + backend::{Backend, V2ReadBackend}, +}; +use polkadot_node_primitives::approval::v1::AssignmentCertKind; +use polkadot_node_subsystem_util::database::Database; +use sp_application_crypto::sp_core::H256; +use std::{collections::HashSet, sync::Arc}; + +/// Migrates `BlockEntry`, `CandidateEntry`, `ApprovalEntry` and `OurApproval` to version 3. +/// Returns on any error. +/// Must only be used in parachains DB migration code - `polkadot-service` crate. +pub fn v2_to_latest(db: Arc, config: Config) -> Result<()> { + let mut backend = crate::DbBackend::new(db, config); + let all_blocks = backend + .load_all_blocks() + .map_err(|e| Error::InternalError(e))? + .iter() + .filter_map(|block_hash| { + backend + .load_block_entry_v2(block_hash) + .map_err(|e| Error::InternalError(e)) + .ok()? + }) + .collect::>(); + + gum::info!( + target: crate::LOG_TARGET, + "Migrating candidate entries on top of {} blocks", + all_blocks.len() + ); + + let mut overlay = crate::OverlayedBackend::new(&backend); + let mut counter = 0; + // Get all candidate entries, approval entries and convert each of them. + for block in all_blocks { + for (candidate_index, (_core_index, candidate_hash)) in + block.candidates().iter().enumerate() + { + // Loading the candidate will also perform the conversion to the updated format and + // return that represantation. + if let Some(candidate_entry) = backend + .load_candidate_entry_v2(&candidate_hash, candidate_index as CandidateIndex) + .map_err(|e| Error::InternalError(e))? + { + // Write the updated representation. + overlay.write_candidate_entry(candidate_entry); + counter += 1; + } + } + overlay.write_block_entry(block); + } + + gum::info!(target: crate::LOG_TARGET, "Migrated {} entries", counter); + + // Commit all changes to DB. + let write_ops = overlay.into_write_ops(); + backend.write(write_ops).unwrap(); + + Ok(()) +} + +// Checks if the migration doesn't leave the DB in an unsane state. +// This function is to be used in tests. 
+pub fn v1_to_latest_sanity_check( + db: Arc, + config: Config, + expected_candidates: HashSet, +) -> Result<()> { + let backend = crate::DbBackend::new(db, config); + + let all_blocks = backend + .load_all_blocks() + .unwrap() + .iter() + .map(|block_hash| backend.load_block_entry(block_hash).unwrap().unwrap()) + .collect::>(); + + let mut candidates = HashSet::new(); + + // Iterate all blocks and approval entries. + for block in all_blocks { + for (_core_index, candidate_hash) in block.candidates() { + // Loading the candidate will also perform the conversion to the updated format and + // return that represantation. + if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { + candidates.insert(candidate_entry.candidate.hash()); + } + } + } + + assert_eq!(candidates, expected_candidates); + + Ok(()) +} + +// Fills the db with dummy data in v2 scheme. +pub fn v2_fill_test_data( + db: Arc, + config: Config, + dummy_candidate_create: F, +) -> Result> +where + F: Fn(H256) -> CandidateReceipt, +{ + let mut backend = crate::DbBackend::new(db.clone(), config); + let mut overlay_db = crate::OverlayedBackend::new(&backend); + let mut expected_candidates = HashSet::new(); + + const RELAY_BLOCK_COUNT: u32 = 10; + + let range = StoredBlockRange(1, 11); + overlay_db.write_stored_block_range(range.clone()); + + for relay_number in 1..=RELAY_BLOCK_COUNT { + let relay_hash = Hash::repeat_byte(relay_number as u8); + let assignment_core_index = CoreIndex(relay_number); + let candidate = dummy_candidate_create(relay_hash); + let candidate_hash = candidate.hash(); + + let at_height = vec![relay_hash]; + + let block_entry = make_block_entry_v2( + relay_hash, + Default::default(), + relay_number, + vec![(assignment_core_index, candidate_hash)], + ); + + let dummy_assignment = crate::approval_db::v2::OurAssignment { + cert: dummy_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }).into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + }; + + let candidate_entry = crate::approval_db::v2::CandidateEntry { + candidate, + session: 123, + block_assignments: vec![( + relay_hash, + crate::approval_db::v2::ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + our_assignment: Some(dummy_assignment), + our_approval_sig: None, + approved: false, + assigned_validators: make_bitvec(1), + }, + )] + .into_iter() + .collect(), + approvals: Default::default(), + }; + + overlay_db.write_blocks_at_height(relay_number, at_height.clone()); + expected_candidates.insert(candidate_entry.candidate.hash()); + + db.write(write_candidate_entry_v2(candidate_entry, config)).unwrap(); + db.write(write_block_entry_v2(block_entry, config)).unwrap(); + } + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + Ok(expected_candidates) +} + +fn make_block_entry_v2( + block_hash: Hash, + parent_hash: Hash, + block_number: BlockNumber, + candidates: Vec<(CoreIndex, CandidateHash)>, +) -> crate::approval_db::v2::BlockEntry { + crate::approval_db::v2::BlockEntry { + block_hash, + parent_hash, + block_number, + session: 1, + slot: Slot::from(1), + relay_vrf_story: [0u8; 32], + approved_bitfield: make_bitvec(candidates.len()), + distributed_assignments: make_bitvec(candidates.len()), + candidates, + children: Vec::new(), + } +} + +// Low level DB helper to write a candidate entry in v1 scheme. 
+fn write_candidate_entry_v2(
+ candidate_entry: crate::approval_db::v2::CandidateEntry,
+ config: Config,
+) -> DBTransaction {
+ let mut tx = DBTransaction::new();
+ tx.put_vec(
+ config.col_approval_data,
+ &candidate_entry_key(&candidate_entry.candidate.hash()),
+ candidate_entry.encode(),
+ );
+ tx
+}
+
+// Low level DB helper to write a block entry in v2 scheme.
+fn write_block_entry_v2(
+ block_entry: crate::approval_db::v2::BlockEntry,
+ config: Config,
+) -> DBTransaction {
+ let mut tx = DBTransaction::new();
+ tx.put_vec(
+ config.col_approval_data,
+ &block_entry_key(&block_entry.block_hash),
+ block_entry.encode(),
+ );
+ tx
+}
diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3e4f4302195256205905d3f931039cbf9631824c
--- /dev/null
+++ b/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs
@@ -0,0 +1,137 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! Version 3 of the DB schema.
+//!
+//! Version 3 modifies the `our_approval` format of `ApprovalEntry`
+//! and adds a new field `candidates_pending_signature` to `BlockEntry`.
+
+use parity_scale_codec::{Decode, Encode};
+use polkadot_node_primitives::approval::v2::CandidateBitfield;
+use polkadot_node_subsystem::SubsystemResult;
+use polkadot_node_subsystem_util::database::{DBTransaction, Database};
+use polkadot_overseer::SubsystemError;
+use polkadot_primitives::{
+ BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, GroupIndex, Hash,
+ SessionIndex, ValidatorIndex, ValidatorSignature,
+};
+
+use sp_consensus_slots::Slot;
+
+use std::collections::BTreeMap;
+
+use super::common::{block_entry_key, candidate_entry_key, load_decode, Config};
+
+/// Re-export these structs as v3 since they did not change between v2 and v3.
+pub use super::v2::{Bitfield, OurAssignment, Tick, TrancheEntry};
+
+pub mod migration_helpers;
+
+#[cfg(test)]
+pub mod tests;
+
+/// Metadata about our approval signature.
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct OurApproval {
+ /// The signature for the candidate hashes pointed to by the indices.
+ pub signature: ValidatorSignature,
+ /// The indices of the candidates signed in this approval.
+ pub signed_candidates_indices: CandidateBitfield,
+}
+
+/// Metadata regarding approval of a particular candidate within the context of some
+/// particular block.
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct ApprovalEntry {
+ pub tranches: Vec,
+ pub backing_group: GroupIndex,
+ pub our_assignment: Option,
+ pub our_approval_sig: Option,
+ // `n_validators` bits.
+ pub assigned_validators: Bitfield,
+ pub approved: bool,
+}
+
+/// Metadata regarding approval of a particular candidate.
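+///
+/// Assignments and any approval we signed are tracked per relay block in `block_assignments`,
+/// while `approvals` records which validators have approved this candidate.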
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct CandidateEntry {
+ pub candidate: CandidateReceipt,
+ pub session: SessionIndex,
+ // Assignments are based on blocks, so we need to track assignments separately
+ // based on the block we are looking at.
+ pub block_assignments: BTreeMap,
+ pub approvals: Bitfield,
+}
+
+/// Metadata regarding approval of a particular block, by way of approval of the
+/// candidates contained within it.
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct BlockEntry {
+ pub block_hash: Hash,
+ pub block_number: BlockNumber,
+ pub parent_hash: Hash,
+ pub session: SessionIndex,
+ pub slot: Slot,
+ /// Random bytes derived from the VRF submitted within the block by the block
+ /// author as a credential and used as input to approval assignment criteria.
+ pub relay_vrf_story: [u8; 32],
+ // The candidates included as of this block and the index of the core they are
+ // leaving. Sorted ascending by core index.
+ pub candidates: Vec<(CoreIndex, CandidateHash)>,
+ // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
+ // The i'th bit is `true` iff the candidate has been approved in the context of this
+ // block. The block can be considered approved if the bitfield has all bits set to `true`.
+ pub approved_bitfield: Bitfield,
+ pub children: Vec,
+ // A list of candidates we have checked, but didn't sign and
+ // advertise the vote yet.
+ pub candidates_pending_signature: BTreeMap,
+ // Assignments we already distributed. A 1 bit means the candidate index for which
+ // we already have sent out an assignment. We need this to avoid distributing
+ // multiple core assignments more than once.
+ pub distributed_assignments: Bitfield,
+}
+
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+/// Context needed for creating an approval signature for a given candidate.
+pub struct CandidateSigningContext {
+ /// The candidate hash, to be included in the signature.
+ pub candidate_hash: CandidateHash,
+ /// The latest tick we have to create and send the approval.
+ pub sign_no_later_than_tick: Tick,
+}
+
+/// Load a candidate entry from the aux store in v2 format.
+pub fn load_candidate_entry_v2(
+ store: &dyn Database,
+ config: &Config,
+ candidate_hash: &CandidateHash,
+) -> SubsystemResult> {
+ load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash))
+ .map(|u: Option| u.map(|v| v.into()))
+ .map_err(|e| SubsystemError::with_origin("approval-voting", e))
+}
+
+/// Load a block entry from the aux store in v2 format.
+pub fn load_block_entry_v2(
+ store: &dyn Database,
+ config: &Config,
+ block_hash: &Hash,
+) -> SubsystemResult> {
+ load_decode(store, config.col_approval_data, &block_entry_key(block_hash))
+ .map(|u: Option| u.map(|v| v.into()))
+ .map_err(|e| SubsystemError::with_origin("approval-voting", e))
+}
diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..08c65461bca80aafb758e42c80b509bc37c47ece
--- /dev/null
+++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs
@@ -0,0 +1,575 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the aux-schema of approval voting. + +use crate::{ + approval_db::{ + common::{migration_helpers::make_bitvec, DbBackend, StoredBlockRange, *}, + v3::*, + }, + backend::{Backend, OverlayedBackend}, + ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo}, +}; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, +}; + +use polkadot_node_subsystem_util::database::Database; +use polkadot_primitives::Id as ParaId; +use sp_consensus_slots::Slot; +use std::{collections::HashMap, sync::Arc}; + +use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; + +const DATA_COL: u32 = 0; + +const NUM_COLUMNS: u32 = 1; + +const TEST_CONFIG: Config = Config { col_approval_data: DATA_COL }; + +fn make_db() -> (DbBackend, Arc) { + let db = kvdb_memorydb::create(NUM_COLUMNS); + let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); + let db_writer: Arc = Arc::new(db); + (DbBackend::new(db_writer.clone(), TEST_CONFIG), db_writer) +} + +fn make_block_entry( + block_hash: Hash, + parent_hash: Hash, + block_number: BlockNumber, + candidates: Vec<(CoreIndex, CandidateHash)>, +) -> BlockEntry { + BlockEntry { + block_hash, + parent_hash, + block_number, + session: 1, + slot: Slot::from(1), + relay_vrf_story: [0u8; 32], + approved_bitfield: make_bitvec(candidates.len()), + candidates, + children: Vec::new(), + candidates_pending_signature: Default::default(), + distributed_assignments: Default::default(), + } +} + +fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { + let mut c = dummy_candidate_receipt(dummy_hash()); + + c.descriptor.para_id = para_id; + c.descriptor.relay_parent = relay_parent; + + c +} + +#[test] +fn read_write() { + let (mut db, store) = make_db(); + + let hash_a = Hash::repeat_byte(1); + let hash_b = Hash::repeat_byte(2); + let candidate_hash = dummy_candidate_receipt_bad_sig(dummy_hash(), None).hash(); + + let range = StoredBlockRange(10, 20); + let at_height = vec![hash_a, hash_b]; + + let block_entry = + make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]); + + let candidate_entry = CandidateEntry { + candidate: dummy_candidate_receipt_bad_sig(dummy_hash(), None), + session: 5, + block_assignments: vec![( + hash_a, + ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + our_assignment: None, + our_approval_sig: None, + assigned_validators: Default::default(), + approved: false, + }, + )] + .into_iter() + .collect(), + approvals: Default::default(), + }; + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.write_stored_block_range(range.clone()); + overlay_db.write_blocks_at_height(1, at_height.clone()); + overlay_db.write_block_entry(block_entry.clone().into()); + overlay_db.write_candidate_entry(candidate_entry.clone().into()); + + let write_ops = 
overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range)); + assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height); + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(), + Some(block_entry.into()) + ); + assert_eq!( + load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(), + Some(candidate_entry.into()), + ); + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.delete_blocks_at_height(1); + overlay_db.delete_block_entry(&hash_a); + overlay_db.delete_candidate_entry(&candidate_hash); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none()); + assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash) + .unwrap() + .is_none()); +} + +#[test] +fn add_block_entry_works() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + + let candidate_receipt_a = make_candidate(ParaId::from(1_u32), parent_hash); + let candidate_receipt_b = make_candidate(ParaId::from(2_u32), parent_hash); + + let candidate_hash_a = candidate_receipt_a.hash(); + let candidate_hash_b = candidate_receipt_b.hash(); + + let block_number = 10; + + let block_entry_a = make_block_entry( + block_hash_a, + parent_hash, + block_number, + vec![(CoreIndex(0), candidate_hash_a)], + ); + + let block_entry_b = make_block_entry( + block_hash_b, + parent_hash, + block_number, + vec![(CoreIndex(0), candidate_hash_a), (CoreIndex(1), candidate_hash_b)], + ); + + let n_validators = 10; + + let mut new_candidate_info = HashMap::new(); + new_candidate_info + .insert(candidate_hash_a, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(0), None)); + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |h| { + new_candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + new_candidate_info + .insert(candidate_hash_b, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(1), None)); + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |h| { + new_candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + Some(block_entry_a.into()) + ); + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + Some(block_entry_b.into()) + ); + + let candidate_entry_a = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_a) + .unwrap() + .unwrap(); + assert_eq!( + candidate_entry_a.block_assignments.keys().collect::>(), + vec![&block_hash_a, &block_hash_b] + ); + + let candidate_entry_b = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_b) + .unwrap() + .unwrap(); + assert_eq!(candidate_entry_b.block_assignments.keys().collect::>(), vec![&block_hash_b]); +} + +#[test] +fn add_block_entry_adds_child() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let 
block_hash_a = Hash::repeat_byte(2);
+ let block_hash_b = Hash::repeat_byte(69);
+
+ let mut block_entry_a = make_block_entry(block_hash_a, parent_hash, 1, Vec::new());
+
+ let block_entry_b = make_block_entry(block_hash_b, block_hash_a, 2, Vec::new());
+
+ let n_validators = 10;
+
+ let mut overlay_db = OverlayedBackend::new(&db);
+ add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap();
+
+ add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap();
+
+ let write_ops = overlay_db.into_write_ops();
+ db.write(write_ops).unwrap();
+
+ block_entry_a.children.push(block_hash_b);
+
+ assert_eq!(
+ load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(),
+ Some(block_entry_a.into())
+ );
+ assert_eq!(
+ load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(),
+ Some(block_entry_b.into())
+ );
+}
+
+#[test]
+fn canonicalize_works() {
+ let (mut db, store) = make_db();
+
+ // -> B1 -> C1 -> D1
+ // A -> B2 -> C2 -> D2
+ //
+ // We'll canonicalize C1. Everything except D1 should disappear.
+ //
+ // Candidates:
+ // Cand1 in B2
+ // Cand2 in C2
+ // Cand3 in C2 and D1
+ // Cand4 in D1
+ // Cand5 in D2
+ // Only Cand3 and Cand4 should remain after canonicalize.
+
+ let n_validators = 10;
+
+ let mut overlay_db = OverlayedBackend::new(&db);
+ overlay_db.write_stored_block_range(StoredBlockRange(1, 5));
+ let write_ops = overlay_db.into_write_ops();
+ db.write(write_ops).unwrap();
+
+ let genesis = Hash::repeat_byte(0);
+
+ let block_hash_a = Hash::repeat_byte(1);
+ let block_hash_b1 = Hash::repeat_byte(2);
+ let block_hash_b2 = Hash::repeat_byte(3);
+ let block_hash_c1 = Hash::repeat_byte(4);
+ let block_hash_c2 = Hash::repeat_byte(5);
+ let block_hash_d1 = Hash::repeat_byte(6);
+ let block_hash_d2 = Hash::repeat_byte(7);
+
+ let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis);
+ let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a);
+ let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a);
+ let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1);
+ let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1);
+
+ let cand_hash_1 = candidate_receipt_genesis.hash();
+ let cand_hash_2 = candidate_receipt_a.hash();
+ let cand_hash_3 = candidate_receipt_b.hash();
+ let cand_hash_4 = candidate_receipt_b1.hash();
+ let cand_hash_5 = candidate_receipt_c1.hash();
+
+ let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new());
+ let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new());
+ let block_entry_b2 =
+ make_block_entry(block_hash_b2, block_hash_a, 2, vec![(CoreIndex(0), cand_hash_1)]);
+ let block_entry_c1 = make_block_entry(block_hash_c1, block_hash_b1, 3, Vec::new());
+ let block_entry_c2 = make_block_entry(
+ block_hash_c2,
+ block_hash_b2,
+ 3,
+ vec![(CoreIndex(0), cand_hash_2), (CoreIndex(1), cand_hash_3)],
+ );
+ let block_entry_d1 = make_block_entry(
+ block_hash_d1,
+ block_hash_c1,
+ 4,
+ vec![(CoreIndex(0), cand_hash_3), (CoreIndex(1), cand_hash_4)],
+ );
+ let block_entry_d2 =
+ make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]);
+
+ let candidate_info = {
+ let mut candidate_info = HashMap::new();
+ candidate_info.insert(
+ cand_hash_1,
+ NewCandidateInfo::new(candidate_receipt_genesis, GroupIndex(1), None),
+ );
+
+ candidate_info
+ .insert(cand_hash_2, NewCandidateInfo::new(candidate_receipt_a,
GroupIndex(2), None)); + + candidate_info + .insert(cand_hash_3, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(3), None)); + + candidate_info + .insert(cand_hash_4, NewCandidateInfo::new(candidate_receipt_b1, GroupIndex(4), None)); + + candidate_info + .insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None)); + + candidate_info + }; + + // now insert all the blocks. + let blocks = vec![ + block_entry_a.clone(), + block_entry_b1.clone(), + block_entry_b2.clone(), + block_entry_c1.clone(), + block_entry_c2.clone(), + block_entry_d1.clone(), + block_entry_d2.clone(), + ]; + + let mut overlay_db = OverlayedBackend::new(&db); + for block_entry in blocks { + add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| { + candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + } + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + let check_candidates_in_store = |expected: Vec<(CandidateHash, Option>)>| { + for (c_hash, in_blocks) in expected { + let (entry, in_blocks) = match in_blocks { + None => { + assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash) + .unwrap() + .is_none()); + continue + }, + Some(i) => ( + load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash).unwrap().unwrap(), + i, + ), + }; + + assert_eq!(entry.block_assignments.len(), in_blocks.len()); + + for x in in_blocks { + assert!(entry.block_assignments.contains_key(&x)); + } + } + }; + + let check_blocks_in_store = |expected: Vec<(Hash, Option>)>| { + for (hash, with_candidates) in expected { + let (entry, with_candidates) = match with_candidates { + None => { + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash) + .unwrap() + .is_none()); + continue + }, + Some(i) => + (load_block_entry(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i), + }; + + assert_eq!(entry.candidates.len(), with_candidates.len()); + + for x in with_candidates { + assert!(entry.candidates.iter().any(|(_, c)| c == &x)); + } + } + }; + + check_candidates_in_store(vec![ + (cand_hash_1, Some(vec![block_hash_b2])), + (cand_hash_2, Some(vec![block_hash_c2])), + (cand_hash_3, Some(vec![block_hash_c2, block_hash_d1])), + (cand_hash_4, Some(vec![block_hash_d1])), + (cand_hash_5, Some(vec![block_hash_d2])), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, Some(vec![])), + (block_hash_b1, Some(vec![])), + (block_hash_b2, Some(vec![cand_hash_1])), + (block_hash_c1, Some(vec![])), + (block_hash_c2, Some(vec![cand_hash_2, cand_hash_3])), + (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_d2, Some(vec![cand_hash_5])), + ]); + + let mut overlay_db = OverlayedBackend::new(&db); + canonicalize(&mut overlay_db, 3, block_hash_c1).unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), + StoredBlockRange(4, 5) + ); + + check_candidates_in_store(vec![ + (cand_hash_1, None), + (cand_hash_2, None), + (cand_hash_3, Some(vec![block_hash_d1])), + (cand_hash_4, Some(vec![block_hash_d1])), + (cand_hash_5, None), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, None), + (block_hash_b1, None), + (block_hash_b2, None), + (block_hash_c1, None), + (block_hash_c2, None), + (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_d2, None), + ]); +} + +#[test] +fn force_approve_works() { + let (mut db, store) = make_db(); + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + 
overlay_db.write_stored_block_range(StoredBlockRange(1, 4)); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let single_candidate_vec = vec![(CoreIndex(0), candidate_hash)]; + let candidate_info = { + let mut candidate_info = HashMap::new(); + candidate_info.insert( + candidate_hash, + NewCandidateInfo::new( + make_candidate(ParaId::from(1_u32), Default::default()), + GroupIndex(1), + None, + ), + ); + + candidate_info + }; + + let block_hash_a = Hash::repeat_byte(1); // 1 + let block_hash_b = Hash::repeat_byte(2); + let block_hash_c = Hash::repeat_byte(3); + let block_hash_d = Hash::repeat_byte(4); // 4 + + let block_entry_a = + make_block_entry(block_hash_a, Default::default(), 1, single_candidate_vec.clone()); + let block_entry_b = + make_block_entry(block_hash_b, block_hash_a, 2, single_candidate_vec.clone()); + let block_entry_c = + make_block_entry(block_hash_c, block_hash_b, 3, single_candidate_vec.clone()); + let block_entry_d = + make_block_entry(block_hash_d, block_hash_c, 4, single_candidate_vec.clone()); + + let blocks = vec![ + block_entry_a.clone(), + block_entry_b.clone(), + block_entry_c.clone(), + block_entry_d.clone(), + ]; + + let mut overlay_db = OverlayedBackend::new(&db); + for block_entry in blocks { + add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| { + candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + } + let approved_hashes = force_approve(&mut overlay_db, block_hash_d, 2).unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a,) + .unwrap() + .unwrap() + .approved_bitfield + .all()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b,) + .unwrap() + .unwrap() + .approved_bitfield + .all()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_c,) + .unwrap() + .unwrap() + .approved_bitfield + .not_any()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_d,) + .unwrap() + .unwrap() + .approved_bitfield + .not_any()); + assert_eq!(approved_hashes, vec![block_hash_b, block_hash_a]); +} + +#[test] +fn load_all_blocks_works() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + let block_hash_c = Hash::repeat_byte(42); + + let block_number = 10; + + let block_entry_a = make_block_entry(block_hash_a, parent_hash, block_number, vec![]); + + let block_entry_b = make_block_entry(block_hash_b, parent_hash, block_number, vec![]); + + let block_entry_c = make_block_entry(block_hash_c, block_hash_a, block_number + 1, vec![]); + + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap(); + + // add C before B to test sorting. 
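+ // `load_all_blocks` is expected to return hashes ordered by block number (A and B at
+ // `block_number`, C at `block_number + 1`), regardless of insertion order.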
+ add_block_entry(&mut overlay_db, block_entry_c.clone().into(), n_validators, |_| None).unwrap();
+
+ add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap();
+
+ let write_ops = overlay_db.into_write_ops();
+ db.write(write_ops).unwrap();
+
+ assert_eq!(
+ load_all_blocks(store.as_ref(), &TEST_CONFIG).unwrap(),
+ vec![block_hash_a, block_hash_b, block_hash_c],
+ )
+}
diff --git a/polkadot/node/core/approval-voting/src/backend.rs b/polkadot/node/core/approval-voting/src/backend.rs
index d98f3c5fd202eaf94371210f6366c61aa98017c5..9ce25334c0fadf526162b3735d42e03c87762bc5 100644
--- a/polkadot/node/core/approval-voting/src/backend.rs
+++ b/polkadot/node/core/approval-voting/src/backend.rs
@@ -22,12 +22,12 @@
//! before any commit to the underlying storage is made.
use polkadot_node_subsystem::SubsystemResult;
-use polkadot_primitives::{BlockNumber, CandidateHash, Hash};
+use polkadot_primitives::{BlockNumber, CandidateHash, CandidateIndex, Hash};
use std::collections::HashMap;
use super::{
- approval_db::v2::StoredBlockRange,
+ approval_db::common::StoredBlockRange,
 persisted_entries::{BlockEntry, CandidateEntry},
};
@@ -72,12 +72,26 @@ pub trait V1ReadBackend: Backend {
 fn load_candidate_entry_v1(
 &self,
 candidate_hash: &CandidateHash,
+ candidate_index: CandidateIndex,
 ) -> SubsystemResult>;
 /// Load a block entry from the DB with scheme version 1.
 fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult>;
}
+/// A read-only backend to enable db migration from version 2 of DB.
+pub trait V2ReadBackend: Backend {
+ /// Load a candidate entry from the DB with scheme version 2.
+ fn load_candidate_entry_v2(
+ &self,
+ candidate_hash: &CandidateHash,
+ candidate_index: CandidateIndex,
+ ) -> SubsystemResult>;
+
+ /// Load a block entry from the DB with scheme version 2.
+ fn load_block_entry_v2(&self, block_hash: &Hash) -> SubsystemResult>;
+}
+
// Status of block range in the `OverlayedBackend`.
#[derive(PartialEq)] enum BlockRangeStatus { diff --git a/polkadot/node/core/approval-voting/src/criteria.rs b/polkadot/node/core/approval-voting/src/criteria.rs index 2bb5a151fe23bfc22478623b59d30af9f372dea4..1af61e72d7affa81744d8fcdb48ed5b1e80ae4d6 100644 --- a/polkadot/node/core/approval-voting/src/criteria.rs +++ b/polkadot/node/core/approval-voting/src/criteria.rs @@ -21,7 +21,9 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ self as approval_types, v1::{AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory}, - v2::{AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, VrfOutput, VrfProof, VrfSignature}, + v2::{ + AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, VrfPreOutput, VrfProof, VrfSignature, + }, }; use polkadot_primitives::{ AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, IndexedVec, SessionInfo, @@ -259,6 +261,7 @@ pub(crate) trait AssignmentCriteria { relay_vrf_story: RelayVRFStory, config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, + enable_v2_assignments: bool, ) -> HashMap; fn check_assignment_cert( @@ -282,8 +285,9 @@ impl AssignmentCriteria for RealAssignmentCriteria { relay_vrf_story: RelayVRFStory, config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, + enable_v2_assignments: bool, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, enable_v2_assignments) } fn check_assignment_cert( @@ -459,7 +463,7 @@ fn compute_relay_vrf_modulo_assignments_v1( let cert = AssignmentCert { kind: AssignmentCertKind::RelayVRFModulo { sample: rvm_sample }, vrf: VrfSignature { - output: VrfOutput(vrf_in_out.to_output()), + pre_output: VrfPreOutput(vrf_in_out.to_preout()), proof: VrfProof(vrf_proof), }, }; @@ -539,7 +543,7 @@ fn compute_relay_vrf_modulo_assignments_v2( core_bitfield: assignment_bitfield.clone(), }, vrf: VrfSignature { - output: VrfOutput(vrf_in_out.to_output()), + pre_output: VrfPreOutput(vrf_in_out.to_preout()), proof: VrfProof(vrf_proof), }, }; @@ -574,7 +578,7 @@ fn compute_relay_vrf_delay_assignments( let cert = AssignmentCertV2 { kind: AssignmentCertKindV2::RelayVRFDelay { core_index: core }, vrf: VrfSignature { - output: VrfOutput(vrf_in_out.to_output()), + pre_output: VrfPreOutput(vrf_in_out.to_preout()), proof: VrfProof(vrf_proof), }, }; @@ -689,7 +693,7 @@ pub(crate) fn check_assignment_cert( } } - let vrf_output = &assignment.vrf.output; + let vrf_pre_output = &assignment.vrf.pre_output; let vrf_proof = &assignment.vrf.proof; let first_claimed_core_index = claimed_core_indices.first_one().expect("Checked above; qed") as u32; @@ -704,7 +708,7 @@ pub(crate) fn check_assignment_cert( let (vrf_in_out, _) = public .vrf_verify_extra( relay_vrf_modulo_transcript_v2(relay_vrf_story), - &vrf_output.0, + &vrf_pre_output.0, &vrf_proof.0, assigned_cores_transcript(core_bitfield), ) @@ -753,7 +757,7 @@ pub(crate) fn check_assignment_cert( let (vrf_in_out, _) = public .vrf_verify_extra( relay_vrf_modulo_transcript_v1(relay_vrf_story, *sample), - &vrf_output.0, + &vrf_pre_output.0, &vrf_proof.0, assigned_core_transcript(CoreIndex(first_claimed_core_index)), ) @@ -791,7 +795,7 @@ pub(crate) fn check_assignment_cert( let (vrf_in_out, _) = public .vrf_verify( relay_vrf_delay_transcript(relay_vrf_story, *core_index), - &vrf_output.0, + &vrf_pre_output.0, &vrf_proof.0, ) .map_err(|_| InvalidAssignment(Reason::VRFDelayOutputMismatch))?; diff --git 
a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index d7667e8e405a4cd8c5f437ff3374b4da0ebc4ec2..7a56e9fd11293d1a96debd5e98b719a19c48045f 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -45,8 +45,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{determine_new_blocks, runtime::RuntimeInfo}; use polkadot_primitives::{ - BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ConsensusLog, CoreIndex, - GroupIndex, Hash, Header, SessionIndex, + vstaging::node_features, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, + ConsensusLog, CoreIndex, GroupIndex, Hash, Header, SessionIndex, }; use sc_keystore::LocalKeystore; use sp_consensus_slots::Slot; @@ -56,11 +56,11 @@ use futures::{channel::oneshot, prelude::*}; use std::collections::HashMap; -use super::approval_db::v2; +use super::approval_db::v3; use crate::{ backend::{Backend, OverlayedBackend}, criteria::{AssignmentCriteria, OurAssignment}, - get_session_info, + get_extended_session_info, get_session_info, persisted_entries::CandidateEntry, time::{slot_number_to_tick, Tick}, }; @@ -214,10 +214,21 @@ async fn imported_block_info( } }; + let extended_session_info = + get_extended_session_info(env.runtime_info, ctx.sender(), block_hash, session_index).await; + let enable_v2_assignments = extended_session_info.map_or(false, |extended_session_info| { + *extended_session_info + .node_features + .get(node_features::FeatureIndex::EnableAssignmentsV2 as usize) + .as_deref() + .unwrap_or(&false) + }); + let session_info = get_session_info(env.runtime_info, ctx.sender(), block_hash, session_index) .await .ok_or(ImportedBlockInfoError::SessionInfoUnavailable)?; + gum::debug!(target: LOG_TARGET, ?enable_v2_assignments, "V2 assignments"); let (assignments, slot, relay_vrf_story) = { let unsafe_vrf = approval_types::v1::babe_unsafe_vrf_info(&block_header); @@ -239,6 +250,7 @@ async fn imported_block_info( .iter() .map(|(c_hash, _, core, group)| (*c_hash, *core, *group)) .collect(), + enable_v2_assignments, ); (assignments, slot, relay_vrf) @@ -500,7 +512,7 @@ pub(crate) async fn handle_new_head( ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await; } - let block_entry = v2::BlockEntry { + let block_entry = v3::BlockEntry { block_hash, parent_hash: block_header.parent_hash, block_number: block_header.number, @@ -513,6 +525,7 @@ pub(crate) async fn handle_new_head( .collect(), approved_bitfield, children: Vec::new(), + candidates_pending_signature: Default::default(), distributed_assignments: Default::default(), }; @@ -592,7 +605,10 @@ pub(crate) async fn handle_new_head( #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{approval_db::v2::DbBackend, RuntimeInfo, RuntimeInfoConfig}; + use crate::{ + approval_db::common::{load_block_entry, DbBackend}, + RuntimeInfo, RuntimeInfoConfig, + }; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; use polkadot_node_primitives::{ @@ -603,6 +619,7 @@ pub(crate) mod tests { use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::{ + vstaging::{node_features::FeatureIndex, NodeFeatures}, ExecutorParams, Id as ParaId, IndexedVec, SessionInfo, ValidatorId, ValidatorIndex, }; pub(crate) use sp_consensus_babe::{ @@ -614,7 +631,7 @@ pub(crate) mod tests { pub(crate) use sp_runtime::{Digest, DigestItem}; 
use std::{pin::Pin, sync::Arc}; - use crate::{approval_db::v2::Config as DatabaseConfig, criteria, BlockEntry}; + use crate::{approval_db::common::Config as DatabaseConfig, criteria, BlockEntry}; const DATA_COL: u32 = 0; @@ -639,7 +656,7 @@ pub(crate) mod tests { keystore: Arc::new(LocalKeystore::in_memory()), slot_duration_millis: 6_000, clock: Box::new(MockClock::default()), - assignment_criteria: Box::new(MockAssignmentCriteria), + assignment_criteria: Box::new(MockAssignmentCriteria::default()), spans: HashMap::new(), } } @@ -654,7 +671,10 @@ pub(crate) mod tests { ) } - struct MockAssignmentCriteria; + #[derive(Default)] + struct MockAssignmentCriteria { + enable_v2: bool, + } impl AssignmentCriteria for MockAssignmentCriteria { fn compute_assignments( @@ -667,7 +687,9 @@ pub(crate) mod tests { polkadot_primitives::CoreIndex, polkadot_primitives::GroupIndex, )>, + enable_assignments_v2: bool, ) -> HashMap { + assert_eq!(enable_assignments_v2, self.enable_v2); HashMap::new() } @@ -711,154 +733,164 @@ pub(crate) mod tests { #[test] fn imported_block_info_is_good() { - let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = - make_subsystem_context::(pool.clone()); - - let session = 5; - let session_info = dummy_session_info(session); - - let slot = Slot::from(10); - - let header = Header { - digest: { - let mut d = Digest::default(); - let vrf_signature = garbage_vrf_signature(); - d.push(DigestItem::babe_pre_digest(PreDigest::SecondaryVRF( - SecondaryVRFPreDigest { authority_index: 0, slot, vrf_signature }, - ))); - - d - }, - extrinsics_root: Default::default(), - number: 5, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let hash = header.hash(); - let make_candidate = |para_id| { - let mut r = dummy_candidate_receipt(dummy_hash()); - r.descriptor.para_id = para_id; - r.descriptor.relay_parent = hash; - r - }; - let candidates = vec![ - (make_candidate(1.into()), CoreIndex(0), GroupIndex(2)), - (make_candidate(2.into()), CoreIndex(1), GroupIndex(3)), - ]; + for enable_v2 in [false, true] { + let pool = TaskExecutor::new(); + let (mut ctx, mut handle) = + make_subsystem_context::(pool.clone()); + + let session = 5; + let session_info = dummy_session_info(session); + + let slot = Slot::from(10); + let header = Header { + digest: { + let mut d = Digest::default(); + let vrf_signature = garbage_vrf_signature(); + d.push(DigestItem::babe_pre_digest(PreDigest::SecondaryVRF( + SecondaryVRFPreDigest { authority_index: 0, slot, vrf_signature }, + ))); + + d + }, + extrinsics_root: Default::default(), + number: 5, + state_root: Default::default(), + parent_hash: Default::default(), + }; - let inclusion_events = candidates - .iter() - .cloned() - .map(|(r, c, g)| CandidateEvent::CandidateIncluded(r, Vec::new().into(), c, g)) - .collect::>(); + let hash = header.hash(); + let make_candidate = |para_id| { + let mut r = dummy_candidate_receipt(dummy_hash()); + r.descriptor.para_id = para_id; + r.descriptor.relay_parent = hash; + r + }; + let candidates = vec![ + (make_candidate(1.into()), CoreIndex(0), GroupIndex(2)), + (make_candidate(2.into()), CoreIndex(1), GroupIndex(3)), + ]; - let test_fut = { - let included_candidates = candidates + let inclusion_events = candidates .iter() - .map(|(r, c, g)| (r.hash(), r.clone(), *c, *g)) + .cloned() + .map(|(r, c, g)| CandidateEvent::CandidateIncluded(r, Vec::new().into(), c, g)) .collect::>(); - let mut runtime_info = RuntimeInfo::new_with_config(RuntimeInfoConfig { - keystore: None, - session_cache_lru_size: 
DISPUTE_WINDOW.get(), - }); + let test_fut = { + let included_candidates = candidates + .iter() + .map(|(r, c, g)| (r.hash(), r.clone(), *c, *g)) + .collect::>(); + + let mut runtime_info = RuntimeInfo::new_with_config(RuntimeInfoConfig { + keystore: None, + session_cache_lru_size: DISPUTE_WINDOW.get(), + }); + + let header = header.clone(); + Box::pin(async move { + let env = ImportedBlockInfoEnv { + runtime_info: &mut runtime_info, + assignment_criteria: &MockAssignmentCriteria { enable_v2 }, + keystore: &LocalKeystore::in_memory(), + }; - let header = header.clone(); - Box::pin(async move { - let env = ImportedBlockInfoEnv { - runtime_info: &mut runtime_info, - assignment_criteria: &MockAssignmentCriteria, - keystore: &LocalKeystore::in_memory(), - }; + let info = + imported_block_info(&mut ctx, env, hash, &header, &Some(4)).await.unwrap(); - let info = - imported_block_info(&mut ctx, env, hash, &header, &Some(4)).await.unwrap(); + assert_eq!(info.included_candidates, included_candidates); + assert_eq!(info.session_index, session); + assert!(info.assignments.is_empty()); + assert_eq!(info.n_validators, 0); + assert_eq!(info.slot, slot); + assert!(info.force_approve.is_none()); + }) + }; - assert_eq!(info.included_candidates, included_candidates); - assert_eq!(info.session_index, session); - assert!(info.assignments.is_empty()); - assert_eq!(info.n_validators, 0); - assert_eq!(info.slot, slot); - assert!(info.force_approve.is_none()); - }) - }; + let aux_fut = Box::pin(async move { + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::CandidateEvents(c_tx), + )) => { + assert_eq!(h, hash); + let _ = c_tx.send(Ok(inclusion_events)); + } + ); - let aux_fut = Box::pin(async move { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::CandidateEvents(c_tx), - )) => { - assert_eq!(h, hash); - let _ = c_tx.send(Ok(inclusion_events)); - } - ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionIndexForChild(c_tx), + )) => { + assert_eq!(h, header.parent_hash); + let _ = c_tx.send(Ok(session)); + } + ); - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(c_tx), - )) => { - assert_eq!(h, header.parent_hash); - let _ = c_tx.send(Ok(session)); - } - ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::CurrentBabeEpoch(c_tx), + )) => { + assert_eq!(h, hash); + let _ = c_tx.send(Ok(BabeEpoch { + epoch_index: session as _, + start_slot: Slot::from(0), + duration: 200, + authorities: vec![(Sr25519Keyring::Alice.public().into(), 1)], + randomness: [0u8; 32], + config: BabeEpochConfiguration { + c: (1, 4), + allowed_slots: AllowedSlots::PrimarySlots, + }, + })); + } + ); - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::CurrentBabeEpoch(c_tx), - )) => { - assert_eq!(h, hash); - let _ = c_tx.send(Ok(BabeEpoch { - epoch_index: session as _, - start_slot: Slot::from(0), - duration: 200, - authorities: vec![(Sr25519Keyring::Alice.public().into(), 1)], - randomness: [0u8; 32], - config: BabeEpochConfiguration { - c: (1, 4), - allowed_slots: AllowedSlots::PrimarySlots, - }, - })); - } - ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + 
RuntimeApiMessage::Request( + req_block_hash, + RuntimeApiRequest::SessionInfo(idx, si_tx), + ) + ) => { + assert_eq!(session, idx); + assert_eq!(req_block_hash, hash); + si_tx.send(Ok(Some(session_info.clone()))).unwrap(); + } + ); - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request( - req_block_hash, - RuntimeApiRequest::SessionInfo(idx, si_tx), - ) - ) => { - assert_eq!(session, idx); - assert_eq!(req_block_hash, hash); - si_tx.send(Ok(Some(session_info.clone()))).unwrap(); - } - ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + req_block_hash, + RuntimeApiRequest::SessionExecutorParams(idx, si_tx), + ) + ) => { + assert_eq!(session, idx); + assert_eq!(req_block_hash, hash); + si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request( - req_block_hash, - RuntimeApiRequest::SessionExecutorParams(idx, si_tx), - ) - ) => { - assert_eq!(session, idx); - assert_eq!(req_block_hash, hash); - si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); - } - ); - }); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::repeat(enable_v2, FeatureIndex::EnableAssignmentsV2 as usize + 1))).unwrap(); + } + ); + }); - futures::executor::block_on(futures::future::join(test_fut, aux_fut)); + futures::executor::block_on(futures::future::join(test_fut, aux_fut)); + } } #[test] @@ -906,7 +938,7 @@ pub(crate) mod tests { Box::pin(async move { let env = ImportedBlockInfoEnv { runtime_info: &mut runtime_info, - assignment_criteria: &MockAssignmentCriteria, + assignment_criteria: &MockAssignmentCriteria::default(), keystore: &LocalKeystore::in_memory(), }; @@ -987,6 +1019,15 @@ pub(crate) mod tests { si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); } ); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); }); futures::executor::block_on(futures::future::join(test_fut, aux_fut)); @@ -1036,7 +1077,7 @@ pub(crate) mod tests { Box::pin(async move { let env = ImportedBlockInfoEnv { runtime_info: &mut runtime_info, - assignment_criteria: &MockAssignmentCriteria, + assignment_criteria: &MockAssignmentCriteria::default(), keystore: &LocalKeystore::in_memory(), }; @@ -1134,7 +1175,7 @@ pub(crate) mod tests { Box::pin(async move { let env = ImportedBlockInfoEnv { runtime_info: &mut runtime_info, - assignment_criteria: &MockAssignmentCriteria, + assignment_criteria: &MockAssignmentCriteria::default(), keystore: &LocalKeystore::in_memory(), }; @@ -1221,6 +1262,15 @@ pub(crate) mod tests { si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); } ); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); }); futures::executor::block_on(futures::future::join(test_fut, aux_fut)); @@ -1301,7 +1351,7 @@ pub(crate) mod tests { let (state, mut session_info_provider) = single_session_state(); overlay_db.write_block_entry( - v2::BlockEntry { + v3::BlockEntry { block_hash: parent_hash, parent_hash: Default::default(), block_number: 4, @@ -1311,6 +1361,7 @@ pub(crate) mod tests 
{ candidates: Vec::new(), approved_bitfield: Default::default(), children: Vec::new(), + candidates_pending_signature: Default::default(), distributed_assignments: Default::default(), } .into(), @@ -1343,11 +1394,10 @@ pub(crate) mod tests { assert_eq!(candidates[1].1.approvals().len(), 6); // the first candidate should be insta-approved // the second should not - let entry: BlockEntry = - v2::load_block_entry(db_writer.as_ref(), &TEST_CONFIG, &hash) - .unwrap() - .unwrap() - .into(); + let entry: BlockEntry = load_block_entry(db_writer.as_ref(), &TEST_CONFIG, &hash) + .unwrap() + .unwrap() + .into(); assert!(entry.is_candidate_approved(&candidates[0].0)); assert!(!entry.is_candidate_approved(&candidates[1].0)); }) @@ -1438,6 +1488,15 @@ pub(crate) mod tests { } ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + assert_matches!( handle.recv().await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 94f7fcaf9411e3122c15ba4b706d4333afb5dea2..af76b576d7cab9c0f9c9242973d33e0beba6c422 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -21,14 +21,15 @@ //! of others. It uses this information to determine when candidates and blocks have //! been sufficiently approved to finalize. +use itertools::Itertools; use jaeger::{hash_to_trace_identifier, PerLeafSpan}; use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ - v1::{BlockApprovalMeta, DelayTranche, IndirectSignedApprovalVote}, + v1::{BlockApprovalMeta, DelayTranche}, v2::{ AssignmentCertKindV2, BitfieldError, CandidateBitfield, CoreBitfield, - IndirectAssignmentCertV2, + IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2, }, }, ValidationResult, DISPUTE_WINDOW, @@ -53,9 +54,10 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, - ExecutorParams, GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + vstaging::{ApprovalVoteMultipleCandidates, ApprovalVotingParams}, + BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, ExecutorParams, + GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, + ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -67,9 +69,11 @@ use futures::{ future::{BoxFuture, RemoteHandle}, prelude::*, stream::FuturesUnordered, + StreamExt, }; use std::{ + cmp::min, collections::{ btree_map::Entry as BTMEntry, hash_map::Entry as HMEntry, BTreeMap, HashMap, HashSet, }, @@ -83,7 +87,7 @@ use approval_checking::RequiredTranches; use bitvec::{order::Lsb0, vec::BitVec}; use criteria::{AssignmentCriteria, RealAssignmentCriteria}; use persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}; -use time::{slot_number_to_tick, Clock, ClockExt, SystemClock, Tick}; +use time::{slot_number_to_tick, Clock, ClockExt, DelayedApprovalTimer, SystemClock, Tick}; mod approval_checking; pub mod approval_db; @@ -95,9 +99,11 @@ mod persisted_entries; mod time; use crate::{ - 
approval_db::v2::{Config as DatabaseConfig, DbBackend}, + approval_checking::{Check, TranchesToApproveResult}, + approval_db::common::{Config as DatabaseConfig, DbBackend}, backend::{Backend, OverlayedBackend}, criteria::InvalidAssignmentReason, + persisted_entries::OurApproval, }; #[cfg(test)] @@ -115,6 +121,9 @@ const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds. const APPROVAL_DELAY: Tick = 2; pub(crate) const LOG_TARGET: &str = "parachain::approval-voting"; +// The max number of ticks we delay sending the approval after we are ready to issue the approval +const MAX_APPROVAL_COALESCE_WAIT_TICKS: Tick = 12; + /// Configuration for the approval voting subsystem #[derive(Debug, Clone)] pub struct Config { @@ -158,7 +167,14 @@ struct MetricsInner { assignments_produced: prometheus::Histogram, approvals_produced_total: prometheus::CounterVec, no_shows_total: prometheus::Counter, + // The difference from `no_shows_total` is that this counts all observed no-shows at any + // moment in time. While `no_shows_total` catches that the no-shows at the moment the candidate + // is approved, approvals might arrive late and `no_shows_total` wouldn't catch that number. + observed_no_shows: prometheus::Counter, + approved_by_one_third: prometheus::Counter, wakeups_triggered_total: prometheus::Counter, + coalesced_approvals_buckets: prometheus::Histogram, + coalesced_approvals_delay: prometheus::Histogram, candidate_approval_time_ticks: prometheus::Histogram, block_approval_time_ticks: prometheus::Histogram, time_db_transaction: prometheus::Histogram, @@ -184,6 +200,22 @@ impl Metrics { } } + fn on_approval_coalesce(&self, num_coalesced: u32) { + if let Some(metrics) = &self.0 { + // Count how many candidates we covered with this coalesced approvals, + // so that the heat-map really gives a good understanding of the scales. 
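+ // For example, an approval covering 3 candidates records the value 3 three times, so the
+ // histogram is weighted by candidates rather than by approval messages.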
+ for _ in 0..num_coalesced { + metrics.coalesced_approvals_buckets.observe(num_coalesced as f64) + } + } + } + + fn on_delayed_approval(&self, delayed_ticks: u64) { + if let Some(metrics) = &self.0 { + metrics.coalesced_approvals_delay.observe(delayed_ticks as f64) + } + } + fn on_approval_stale(&self) { if let Some(metrics) = &self.0 { metrics.approvals_produced_total.with_label_values(&["stale"]).inc() @@ -220,6 +252,18 @@ impl Metrics { } } + fn on_observed_no_shows(&self, n: usize) { + if let Some(metrics) = &self.0 { + metrics.observed_no_shows.inc_by(n as u64); + } + } + + fn on_approved_by_one_third(&self) { + if let Some(metrics) = &self.0 { + metrics.approved_by_one_third.inc(); + } + } + fn on_wakeup(&self) { if let Some(metrics) = &self.0 { metrics.wakeups_triggered_total.inc(); @@ -297,6 +341,13 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + observed_no_shows: prometheus::register( + prometheus::Counter::new( + "polkadot_parachain_approvals_observed_no_shows_total", + "Number of observed no shows at any moment in time", + )?, + registry, + )?, wakeups_triggered_total: prometheus::register( prometheus::Counter::new( "polkadot_parachain_approvals_wakeups_total", @@ -313,6 +364,31 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + coalesced_approvals_buckets: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_approvals_coalesced_approvals_buckets", + "Number of coalesced approvals.", + ).buckets(vec![1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5]), + )?, + registry, + )?, + coalesced_approvals_delay: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_approvals_coalescing_delay", + "Number of ticks we delay the sending of a candidate approval", + ).buckets(vec![1.1, 2.1, 3.1, 4.1, 6.1, 8.1, 12.1, 20.1, 32.1]), + )?, + registry, + )?, + approved_by_one_third: prometheus::register( + prometheus::Counter::new( + "polkadot_parachain_approved_by_one_third", + "Number of candidates where more than one third had to vote ", + )?, + registry, + )?, block_approval_time_ticks: prometheus::register( prometheus::Histogram::with_opts( prometheus::HistogramOpts::new( @@ -383,8 +459,8 @@ impl ApprovalVotingSubsystem { /// The operation is not allowed for blocks older than the last finalized one. 
pub fn revert_to(&self, hash: Hash) -> Result<(), SubsystemError> { let config = - approval_db::v2::Config { col_approval_data: self.db_config.col_approval_data }; - let mut backend = approval_db::v2::DbBackend::new(self.db.clone(), config); + approval_db::common::Config { col_approval_data: self.db_config.col_approval_data }; + let mut backend = approval_db::common::DbBackend::new(self.db.clone(), config); let mut overlay = OverlayedBackend::new(&backend); ops::revert_to(&mut overlay, hash)?; @@ -559,6 +635,7 @@ struct ApprovalStatus { required_tranches: RequiredTranches, tranche_now: DelayTranche, block_tick: Tick, + last_no_shows: usize, } #[derive(Copy, Clone)] @@ -733,22 +810,73 @@ impl State { ); if let Some(approval_entry) = candidate_entry.approval_entry(&block_hash) { - let required_tranches = approval_checking::tranches_to_approve( - approval_entry, - candidate_entry.approvals(), - tranche_now, - block_tick, - no_show_duration, - session_info.needed_approvals as _, - ); + let TranchesToApproveResult { required_tranches, total_observed_no_shows } = + approval_checking::tranches_to_approve( + approval_entry, + candidate_entry.approvals(), + tranche_now, + block_tick, + no_show_duration, + session_info.needed_approvals as _, + ); - let status = ApprovalStatus { required_tranches, block_tick, tranche_now }; + let status = ApprovalStatus { + required_tranches, + block_tick, + tranche_now, + last_no_shows: total_observed_no_shows, + }; Some((approval_entry, status)) } else { None } } + + // Returns the approval voting params from the RuntimeApi. + #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] + async fn get_approval_voting_params_or_default( + &self, + ctx: &mut Context, + session_index: SessionIndex, + block_hash: Hash, + ) -> Option { + let (s_tx, s_rx) = oneshot::channel(); + + ctx.send_message(RuntimeApiMessage::Request( + block_hash, + RuntimeApiRequest::ApprovalVotingParams(session_index, s_tx), + )) + .await; + + match s_rx.await { + Ok(Ok(params)) => { + gum::trace!( + target: LOG_TARGET, + approval_voting_params = ?params, + session = ?session_index, + "Using the following subsystem params" + ); + Some(params) + }, + Ok(Err(err)) => { + gum::debug!( + target: LOG_TARGET, + ?err, + "Could not request approval voting params from runtime" + ); + None + }, + Err(err) => { + gum::debug!( + target: LOG_TARGET, + ?err, + "Could not request approval voting params from runtime" + ); + None + }, + } + } } #[derive(Debug, Clone)] @@ -807,6 +935,7 @@ where }); let mut wakeups = Wakeups::default(); let mut currently_checking_set = CurrentlyCheckingSet::default(); + let mut delayed_approvals_timers = DelayedApprovalTimer::default(); let mut approvals_cache = LruMap::new(ByLength::new(APPROVAL_CACHE_SIZE)); let mut last_finalized_height: Option = { @@ -885,17 +1014,49 @@ where } actions + }, + (block_hash, validator_index) = delayed_approvals_timers.select_next_some() => { + gum::debug!( + target: LOG_TARGET, + ?block_hash, + ?validator_index, + "Sign approval for multiple candidates", + ); + + match maybe_create_signature( + &mut overlayed_db, + &mut session_info_provider, + &state, + &mut ctx, + block_hash, + validator_index, + &subsystem.metrics, + ).await { + Ok(Some(next_wakeup)) => { + delayed_approvals_timers.maybe_arm_timer(next_wakeup, state.clock.as_ref(), block_hash, validator_index); + }, + Ok(None) => {} + Err(err) => { + gum::error!( + target: LOG_TARGET, + ?err, + "Failed to create signature", + ); + } + } + vec![] } }; if handle_actions( &mut ctx, - 
&state, + &mut state, &mut overlayed_db, &mut session_info_provider, &subsystem.metrics, &mut wakeups, &mut currently_checking_set, + &mut delayed_approvals_timers, &mut approvals_cache, &mut subsystem.mode, actions, @@ -937,12 +1098,13 @@ where #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] async fn handle_actions( ctx: &mut Context, - state: &State, + state: &mut State, overlayed_db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, metrics: &Metrics, wakeups: &mut Wakeups, currently_checking_set: &mut CurrentlyCheckingSet, + delayed_approvals_timers: &mut DelayedApprovalTimer, approvals_cache: &mut LruMap, mode: &mut Mode, actions: Vec, @@ -973,6 +1135,7 @@ async fn handle_actions( session_info_provider, metrics, candidate_hash, + delayed_approvals_timers, approval_request, ) .await? @@ -1075,7 +1238,11 @@ async fn handle_actions( Action::BecomeActive => { *mode = Mode::Active; - let messages = distribution_messages_for_activation(overlayed_db, state)?; + let messages = distribution_messages_for_activation( + overlayed_db, + state, + delayed_approvals_timers, + )?; ctx.send_messages(messages.into_iter()).await; }, @@ -1101,7 +1268,7 @@ fn cores_to_candidate_indices( .iter() .position(|(core_index, _)| core_index.0 == claimed_core_index as u32) { - candidate_indices.push(candidate_index as CandidateIndex); + candidate_indices.push(candidate_index as _); } } @@ -1134,6 +1301,7 @@ fn get_assignment_core_indices( fn distribution_messages_for_activation( db: &OverlayedBackend<'_, impl Backend>, state: &State, + delayed_approvals_timers: &mut DelayedApprovalTimer, ) -> SubsystemResult> { let all_blocks: Vec = db.load_all_blocks()?; @@ -1172,8 +1340,8 @@ fn distribution_messages_for_activation( slot: block_entry.slot(), session: block_entry.session(), }); - - for (i, (_, candidate_hash)) in block_entry.candidates().iter().enumerate() { + let mut signatures_queued = HashSet::new(); + for (_, candidate_hash) in block_entry.candidates() { let _candidate_span = distribution_message_span.child("candidate").with_candidate(*candidate_hash); let candidate_entry = match db.load_candidate_entry(&candidate_hash)? 
{ @@ -1200,6 +1368,15 @@ fn distribution_messages_for_activation( &candidate_hash, &block_entry, ) { + if block_entry.has_candidates_pending_signature() { + delayed_approvals_timers.maybe_arm_timer( + state.clock.tick_now(), + state.clock.as_ref(), + block_entry.block_hash(), + assignment.validator_index(), + ) + } + match cores_to_candidate_indices( &claimed_core_indices, &block_entry, @@ -1267,15 +1444,19 @@ fn distribution_messages_for_activation( continue }, } - - messages.push(ApprovalDistributionMessage::DistributeApproval( - IndirectSignedApprovalVote { - block_hash, - candidate_index: i as _, - validator: assignment.validator_index(), - signature: approval_sig, - }, - )); + if signatures_queued + .insert(approval_sig.signed_candidates_indices.clone()) + { + messages.push(ApprovalDistributionMessage::DistributeApproval( + IndirectSignedApprovalVoteV2 { + block_hash, + candidate_indices: approval_sig + .signed_candidates_indices, + validator: assignment.validator_index(), + signature: approval_sig.signature, + }, + )) + }; } else { gum::warn!( target: LOG_TARGET, @@ -1481,7 +1662,7 @@ async fn get_approval_signatures_for_candidate( ctx: &mut Context, db: &OverlayedBackend<'_, impl Backend>, candidate_hash: CandidateHash, - tx: oneshot::Sender>, + tx: oneshot::Sender, ValidatorSignature)>>, ) -> SubsystemResult<()> { let send_votes = |votes| { if let Err(_) = tx.send(votes) { @@ -1507,6 +1688,11 @@ async fn get_approval_signatures_for_candidate( let relay_hashes = entry.block_assignments.keys(); let mut candidate_indices = HashSet::new(); + let mut candidate_indices_to_candidate_hashes: HashMap< + Hash, + HashMap, + > = HashMap::new(); + // Retrieve `CoreIndices`/`CandidateIndices` as required by approval-distribution: for hash in relay_hashes { let entry = match db.load_block_entry(hash)? { @@ -1524,8 +1710,11 @@ async fn get_approval_signatures_for_candidate( for (candidate_index, (_core_index, c_hash)) in entry.candidates().iter().enumerate() { if c_hash == &candidate_hash { candidate_indices.insert((*hash, candidate_index as u32)); - break } + candidate_indices_to_candidate_hashes + .entry(*hash) + .or_default() + .insert(candidate_index as _, *c_hash); } } @@ -1550,7 +1739,55 @@ async fn get_approval_signatures_for_candidate( target: LOG_TARGET, "Request for approval signatures got cancelled by `approval-distribution`." ), - Some(Ok(votes)) => send_votes(votes), + Some(Ok(votes)) => { + let votes = votes + .into_iter() + .filter_map(|(validator_index, (hash, signed_candidates_indices, signature))| { + let candidates_hashes = candidate_indices_to_candidate_hashes.get(&hash); + + if candidates_hashes.is_none() { + gum::warn!( + target: LOG_TARGET, + ?hash, + "Possible bug! Could not find map of candidate_hashes for block hash received from approval-distribution" + ); + } + + let num_signed_candidates = signed_candidates_indices.len(); + + let signed_candidates_hashes: Vec = + signed_candidates_indices + .into_iter() + .filter_map(|candidate_index| { + candidates_hashes.and_then(|candidate_hashes| { + if let Some(candidate_hash) = + candidate_hashes.get(&candidate_index) + { + Some(*candidate_hash) + } else { + gum::warn!( + target: LOG_TARGET, + ?candidate_index, + "Possible bug! 
Could not find candidate hash for candidate_index coming from approval-distribution" + ); + None + } + }) + }) + .collect(); + if num_signed_candidates == signed_candidates_hashes.len() { + Some((validator_index, (signed_candidates_hashes, signature))) + } else { + gum::warn!( + target: LOG_TARGET, + "Possible bug! Could not find all hashes for candidates coming from approval-distribution" + ); + None + } + }) + .collect(); + send_votes(votes) + }, } }; @@ -2184,7 +2421,7 @@ async fn check_and_import_approval( db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, metrics: &Metrics, - approval: IndirectSignedApprovalVote, + approval: IndirectSignedApprovalVoteV2, with_response: impl FnOnce(ApprovalCheckResult) -> T, ) -> SubsystemResult<(Vec, T)> where @@ -2196,13 +2433,12 @@ where return Ok((Vec::new(), t)) }}; } - let mut span = state .spans .get(&approval.block_hash) .map(|span| span.child("check-and-import-approval")) .unwrap_or_else(|| jaeger::Span::new(approval.block_hash, "check-and-import-approval")) - .with_uint_tag("candidate-index", approval.candidate_index as u64) + .with_string_fmt_debug_tag("candidate-index", approval.candidate_indices.clone()) .with_relay_parent(approval.block_hash) .with_stage(jaeger::Stage::ApprovalChecking); @@ -2215,105 +2451,163 @@ where }, }; - let session_info = match get_session_info( - session_info_provider, - sender, - approval.block_hash, - block_entry.session(), - ) - .await - { - Some(s) => s, - None => { - respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::UnknownSessionIndex( - block_entry.session() - ),)) - }, - }; + let approved_candidates_info: Result, ApprovalCheckError> = + approval + .candidate_indices + .iter_ones() + .map(|candidate_index| { + block_entry + .candidate(candidate_index) + .ok_or(ApprovalCheckError::InvalidCandidateIndex(candidate_index as _)) + .map(|candidate| (candidate_index as _, candidate.1)) + }) + .collect(); - let approved_candidate_hash = match block_entry.candidate(approval.candidate_index as usize) { - Some((_, h)) => *h, - None => respond_early!(ApprovalCheckResult::Bad( - ApprovalCheckError::InvalidCandidateIndex(approval.candidate_index), - )), + let approved_candidates_info = match approved_candidates_info { + Ok(approved_candidates_info) => approved_candidates_info, + Err(err) => { + respond_early!(ApprovalCheckResult::Bad(err)) + }, }; - span.add_string_tag("candidate-hash", format!("{:?}", approved_candidate_hash)); + span.add_string_tag("candidate-hashes", format!("{:?}", approved_candidates_info)); span.add_string_tag( - "traceID", - format!("{:?}", hash_to_trace_identifier(approved_candidate_hash.0)), + "traceIDs", + format!( + "{:?}", + approved_candidates_info + .iter() + .map(|(_, approved_candidate_hash)| hash_to_trace_identifier( + approved_candidate_hash.0 + )) + .collect_vec() + ), ); - let pubkey = match session_info.validators.get(approval.validator) { - Some(k) => k, - None => respond_early!(ApprovalCheckResult::Bad( - ApprovalCheckError::InvalidValidatorIndex(approval.validator), - )), - }; + { + let session_info = match get_session_info( + session_info_provider, + sender, + approval.block_hash, + block_entry.session(), + ) + .await + { + Some(s) => s, + None => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::UnknownSessionIndex( + block_entry.session() + ),)) + }, + }; - // Signature check: - match DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking).check_signature( - &pubkey, - approved_candidate_hash, - 
block_entry.session(), - &approval.signature, - ) { - Err(_) => respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidSignature( - approval.validator - ),)), - Ok(()) => {}, - }; + let pubkey = match session_info.validators.get(approval.validator) { + Some(k) => k, + None => respond_early!(ApprovalCheckResult::Bad( + ApprovalCheckError::InvalidValidatorIndex(approval.validator), + )), + }; - let candidate_entry = match db.load_candidate_entry(&approved_candidate_hash)? { - Some(c) => c, - None => { - respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidCandidate( - approval.candidate_index, - approved_candidate_hash - ),)) - }, - }; + gum::trace!( + target: LOG_TARGET, + "Received approval for num_candidates {:}", + approval.candidate_indices.count_ones() + ); - // Don't accept approvals until assignment. - match candidate_entry.approval_entry(&approval.block_hash) { - None => { - respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::Internal( - approval.block_hash, - approved_candidate_hash - ),)) - }, - Some(e) if !e.is_assigned(approval.validator) => { - respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::NoAssignment( - approval.validator - ),)) - }, - _ => {}, + let candidate_hashes: Vec = + approved_candidates_info.iter().map(|candidate| candidate.1).collect(); + // Signature check: + match DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes.clone()), + ) + .check_signature( + &pubkey, + if let Some(candidate_hash) = candidate_hashes.first() { + *candidate_hash + } else { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidValidatorIndex( + approval.validator + ),)) + }, + block_entry.session(), + &approval.signature, + ) { + Err(_) => { + gum::error!( + target: LOG_TARGET, + "Error while checking signature {:}", + approval.candidate_indices.count_ones() + ); + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidSignature( + approval.validator + ),)) + }, + Ok(()) => {}, + }; } - // importing the approval can be heavy as it may trigger acceptance for a series of blocks. - let t = with_response(ApprovalCheckResult::Accepted); + let mut actions = Vec::new(); + for (approval_candidate_index, approved_candidate_hash) in approved_candidates_info { + let block_entry = match db.load_block_entry(&approval.block_hash)? { + Some(b) => b, + None => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::UnknownBlock( + approval.block_hash + ),)) + }, + }; - gum::trace!( - target: LOG_TARGET, - validator_index = approval.validator.0, - validator = ?pubkey, - candidate_hash = ?approved_candidate_hash, - para_id = ?candidate_entry.candidate_receipt().descriptor.para_id, - "Importing approval vote", - ); + let candidate_entry = match db.load_candidate_entry(&approved_candidate_hash)? { + Some(c) => c, + None => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidCandidate( + approval_candidate_index, + approved_candidate_hash + ),)) + }, + }; - let actions = advance_approval_state( - sender, - state, - db, - session_info_provider, - &metrics, - block_entry, - approved_candidate_hash, - candidate_entry, - ApprovalStateTransition::RemoteApproval(approval.validator), - ) - .await; + // Don't accept approvals until assignment. 
+ match candidate_entry.approval_entry(&approval.block_hash) { + None => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::Internal( + approval.block_hash, + approved_candidate_hash + ),)) + }, + Some(e) if !e.is_assigned(approval.validator) => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::NoAssignment( + approval.validator + ),)) + }, + _ => {}, + } + + gum::debug!( + target: LOG_TARGET, + validator_index = approval.validator.0, + candidate_hash = ?approved_candidate_hash, + para_id = ?candidate_entry.candidate_receipt().descriptor.para_id, + "Importing approval vote", + ); + + let new_actions = advance_approval_state( + sender, + state, + db, + session_info_provider, + &metrics, + block_entry, + approved_candidate_hash, + candidate_entry, + ApprovalStateTransition::RemoteApproval(approval.validator), + ) + .await; + actions.extend(new_actions); + } + + // importing the approval can be heavy as it may trigger acceptance for a series of blocks. + let t = with_response(ApprovalCheckResult::Accepted); Ok((actions, t)) } @@ -2321,7 +2615,7 @@ where #[derive(Debug)] enum ApprovalStateTransition { RemoteApproval(ValidatorIndex), - LocalApproval(ValidatorIndex, ValidatorSignature), + LocalApproval(ValidatorIndex), WakeupProcessed, } @@ -2329,7 +2623,7 @@ impl ApprovalStateTransition { fn validator_index(&self) -> Option { match *self { ApprovalStateTransition::RemoteApproval(v) | - ApprovalStateTransition::LocalApproval(v, _) => Some(v), + ApprovalStateTransition::LocalApproval(v) => Some(v), ApprovalStateTransition::WakeupProcessed => None, } } @@ -2337,7 +2631,7 @@ impl ApprovalStateTransition { fn is_local_approval(&self) -> bool { match *self { ApprovalStateTransition::RemoteApproval(_) => false, - ApprovalStateTransition::LocalApproval(_, _) => true, + ApprovalStateTransition::LocalApproval(_) => true, ApprovalStateTransition::WakeupProcessed => false, } } @@ -2404,7 +2698,16 @@ where // assignment tick of `now - APPROVAL_DELAY` - that is, that // all counted assignments are at least `APPROVAL_DELAY` ticks old. let is_approved = check.is_approved(tick_now.saturating_sub(APPROVAL_DELAY)); - + if status.last_no_shows != 0 { + metrics.on_observed_no_shows(status.last_no_shows); + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?block_hash, + last_no_shows = ?status.last_no_shows, + "Observed no_shows", + ); + } if is_approved { gum::trace!( target: LOG_TARGET, @@ -2422,6 +2725,12 @@ where if no_shows != 0 { metrics.on_no_shows(no_shows); } + if check == Check::ApprovedOneThird { + // No-shows are not counted when more than one third of validators approve a + // candidate, so count candidates where more than one third of validators had to + // approve it, this is indicative of something breaking. + metrics.on_approved_by_one_third() + } metrics.on_candidate_approved(status.tranche_now as _); @@ -2430,6 +2739,10 @@ where actions.push(Action::NoteApprovedInChainSelection(block_hash)); } + db.write_block_entry(block_entry.into()); + } else if transition.is_local_approval() { + // Local approvals always update the block_entry, so we need to flush it to + // the database. 
db.write_block_entry(block_entry.into()); } @@ -2458,10 +2771,6 @@ where approval_entry.mark_approved(); } - if let ApprovalStateTransition::LocalApproval(_, ref sig) = transition { - approval_entry.import_approval_sig(sig.clone()); - } - actions.extend(schedule_wakeup_action( &approval_entry, block_hash, @@ -2599,7 +2908,7 @@ async fn process_wakeup( let should_trigger = should_trigger_assignment( &approval_entry, &candidate_entry, - tranches_to_approve, + tranches_to_approve.required_tranches, tranche_now, ); @@ -2867,7 +3176,7 @@ async fn launch_approval( candidate_receipt: candidate.clone(), pov: available_data.pov, executor_params, - exec_timeout_kind: PvfExecTimeoutKind::Approval, + exec_kind: PvfExecKind::Approval, response_sender: val_tx, }) .await; @@ -2924,11 +3233,12 @@ async fn launch_approval( #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] async fn issue_approval( ctx: &mut Context, - state: &State, + state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, metrics: &Metrics, candidate_hash: CandidateHash, + delayed_approvals_timers: &mut DelayedApprovalTimer, ApprovalVoteRequest { validator_index, block_hash }: ApprovalVoteRequest, ) -> SubsystemResult> { let mut issue_approval_span = state @@ -2942,7 +3252,7 @@ async fn issue_approval( .with_validator_index(validator_index) .with_stage(jaeger::Stage::ApprovalChecking); - let block_entry = match db.load_block_entry(&block_hash)? { + let mut block_entry = match db.load_block_entry(&block_hash)? { Some(b) => b, None => { // not a cause for alarm - just lost a race with pruning, most likely. @@ -2968,21 +3278,6 @@ async fn issue_approval( }; issue_approval_span.add_int_tag("candidate_index", candidate_index as i64); - let session_info = match get_session_info( - session_info_provider, - ctx.sender(), - block_entry.parent_hash(), - block_entry.session(), - ) - .await - { - Some(s) => s, - None => { - metrics.on_approval_error(); - return Ok(Vec::new()) - }, - }; - let candidate_hash = match block_entry.candidate(candidate_index as usize) { Some((_, h)) => *h, None => { @@ -3013,10 +3308,149 @@ async fn issue_approval( }, }; + let session_info = match get_session_info( + session_info_provider, + ctx.sender(), + block_entry.parent_hash(), + block_entry.session(), + ) + .await + { + Some(s) => s, + None => return Ok(Vec::new()), + }; + + if block_entry + .defer_candidate_signature( + candidate_index as _, + candidate_hash, + compute_delayed_approval_sending_tick( + state, + &block_entry, + &candidate_entry, + session_info, + &metrics, + ), + ) + .is_some() + { + gum::error!( + target: LOG_TARGET, + ?candidate_hash, + ?block_hash, + validator_index = validator_index.0, + "Possible bug, we shouldn't have to defer a candidate more than once", + ); + } + + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + ?block_hash, + validator_index = validator_index.0, + "Ready to issue approval vote", + ); + + let actions = advance_approval_state( + ctx.sender(), + state, + db, + session_info_provider, + metrics, + block_entry, + candidate_hash, + candidate_entry, + ApprovalStateTransition::LocalApproval(validator_index as _), + ) + .await; + + if let Some(next_wakeup) = maybe_create_signature( + db, + session_info_provider, + state, + ctx, + block_hash, + validator_index, + metrics, + ) + .await? 
+ { + delayed_approvals_timers.maybe_arm_timer( + next_wakeup, + state.clock.as_ref(), + block_hash, + validator_index, + ); + } + Ok(actions) +} + +// Create signature for the approved candidates pending signatures +#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] +async fn maybe_create_signature( + db: &mut OverlayedBackend<'_, impl Backend>, + session_info_provider: &mut RuntimeInfo, + state: &State, + ctx: &mut Context, + block_hash: Hash, + validator_index: ValidatorIndex, + metrics: &Metrics, +) -> SubsystemResult> { + let mut block_entry = match db.load_block_entry(&block_hash)? { + Some(b) => b, + None => { + // not a cause for alarm - just lost a race with pruning, most likely. + metrics.on_approval_stale(); + gum::debug!( + target: LOG_TARGET, + "Could not find block that needs signature {:}", block_hash + ); + return Ok(None) + }, + }; + + let approval_params = state + .get_approval_voting_params_or_default(ctx, block_entry.session(), block_hash) + .await + .unwrap_or_default(); + + gum::trace!( + target: LOG_TARGET, + "Candidates pending signatures {:}", block_entry.num_candidates_pending_signature() + ); + let tick_now = state.clock.tick_now(); + + let (candidates_to_sign, sign_no_later_then) = block_entry + .get_candidates_that_need_signature(tick_now, approval_params.max_approval_coalesce_count); + + let (candidates_hashes, candidates_indices) = match candidates_to_sign { + Some(candidates_to_sign) => candidates_to_sign, + None => return Ok(sign_no_later_then), + }; + + let session_info = match get_session_info( + session_info_provider, + ctx.sender(), + block_entry.parent_hash(), + block_entry.session(), + ) + .await + { + Some(s) => s, + None => { + metrics.on_approval_error(); + gum::error!( + target: LOG_TARGET, + "Could not retrieve the session" + ); + return Ok(None) + }, + }; + let validator_pubkey = match session_info.validators.get(validator_index) { Some(p) => p, None => { - gum::warn!( + gum::error!( target: LOG_TARGET, "Validator index {} out of bounds in session {}", validator_index.0, @@ -3024,72 +3458,89 @@ async fn issue_approval( ); metrics.on_approval_error(); - return Ok(Vec::new()) + return Ok(None) }, }; - let session = block_entry.session(); - let sig = match sign_approval(&state.keystore, &validator_pubkey, candidate_hash, session) { + let signature = match sign_approval( + &state.keystore, + &validator_pubkey, + &candidates_hashes, + block_entry.session(), + ) { Some(sig) => sig, None => { - gum::warn!( + gum::error!( target: LOG_TARGET, validator_index = ?validator_index, - session, + session = ?block_entry.session(), "Could not issue approval signature. 
Assignment key present but not validator key?", ); metrics.on_approval_error(); - return Ok(Vec::new()) + return Ok(None) }, }; + metrics.on_approval_coalesce(candidates_hashes.len() as u32); - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - ?block_hash, - validator_index = validator_index.0, - "Issuing approval vote", - ); + let candidate_entries = candidates_hashes + .iter() + .map(|candidate_hash| db.load_candidate_entry(candidate_hash)) + .collect::>>>()?; - let actions = advance_approval_state( - ctx.sender(), - state, - db, - session_info_provider, - metrics, - block_entry, - candidate_hash, - candidate_entry, - ApprovalStateTransition::LocalApproval(validator_index as _, sig.clone()), - ) - .await; + for mut candidate_entry in candidate_entries { + let approval_entry = candidate_entry.as_mut().and_then(|candidate_entry| { + candidate_entry.approval_entry_mut(&block_entry.block_hash()) + }); + + match approval_entry { + Some(approval_entry) => approval_entry.import_approval_sig(OurApproval { + signature: signature.clone(), + signed_candidates_indices: candidates_indices.clone(), + }), + None => { + gum::error!( + target: LOG_TARGET, + candidate_entry = ?candidate_entry, + "Candidate scheduled for signing approval entry should not be None" + ); + }, + }; + candidate_entry.map(|candidate_entry| db.write_candidate_entry(candidate_entry)); + } metrics.on_approval_produced(); - // dispatch to approval distribution. ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeApproval( - IndirectSignedApprovalVote { - block_hash, - candidate_index: candidate_index as _, + IndirectSignedApprovalVoteV2 { + block_hash: block_entry.block_hash(), + candidate_indices: candidates_indices, validator: validator_index, - signature: sig, + signature, }, )); - Ok(actions) + gum::trace!( + target: LOG_TARGET, + ?block_hash, + signed_candidates = ?block_entry.num_candidates_pending_signature(), + "Issue approval votes", + ); + block_entry.issued_approval(); + db.write_block_entry(block_entry.into()); + Ok(None) } // Sign an approval vote. Fails if the key isn't present in the store. 
fn sign_approval( keystore: &LocalKeystore, public: &ValidatorId, - candidate_hash: CandidateHash, + candidate_hashes: &[CandidateHash], session_index: SessionIndex, ) -> Option { let key = keystore.key_pair::(public).ok().flatten()?; - let payload = ApprovalVote(candidate_hash).signing_payload(session_index); + let payload = ApprovalVoteMultipleCandidates(candidate_hashes).signing_payload(session_index); Some(key.sign(&payload[..])) } @@ -3119,3 +3570,38 @@ fn issue_local_invalid_statement( false, )); } + +// Computes what is the latest tick we can send an approval +fn compute_delayed_approval_sending_tick( + state: &State, + block_entry: &BlockEntry, + candidate_entry: &CandidateEntry, + session_info: &SessionInfo, + metrics: &Metrics, +) -> Tick { + let current_block_tick = slot_number_to_tick(state.slot_duration_millis, block_entry.slot()); + let assignment_tranche = candidate_entry + .approval_entry(&block_entry.block_hash()) + .and_then(|approval_entry| approval_entry.our_assignment()) + .map(|our_assignment| our_assignment.tranche()) + .unwrap_or_default(); + + let assignment_triggered_tick = current_block_tick + assignment_tranche as Tick; + + let no_show_duration_ticks = slot_number_to_tick( + state.slot_duration_millis, + Slot::from(u64::from(session_info.no_show_slots)), + ); + let tick_now = state.clock.tick_now(); + + let sign_no_later_than = min( + tick_now + MAX_APPROVAL_COALESCE_WAIT_TICKS as Tick, + // We don't want to accidentally cause no-shows, so if we are past + // the second half of the no show time, force the sending of the + // approval immediately. + assignment_triggered_tick + no_show_duration_ticks / 2, + ); + + metrics.on_delayed_approval(sign_no_later_than.checked_sub(tick_now).unwrap_or_default()); + sign_no_later_than +} diff --git a/polkadot/node/core/approval-voting/src/ops.rs b/polkadot/node/core/approval-voting/src/ops.rs index a6f0ecf9d1f027ee4f4ef1d0d5480fe26f175607..2a8fdba5aa3642f5b702e72bc2641d58106faa7a 100644 --- a/polkadot/node/core/approval-voting/src/ops.rs +++ b/polkadot/node/core/approval-voting/src/ops.rs @@ -25,7 +25,7 @@ use polkadot_primitives::{BlockNumber, CandidateHash, CandidateReceipt, GroupInd use std::collections::{hash_map::Entry, BTreeMap, HashMap}; use super::{ - approval_db::v2::{OurAssignment, StoredBlockRange}, + approval_db::{common::StoredBlockRange, v2::OurAssignment}, backend::{Backend, OverlayedBackend}, persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}, LOG_TARGET, diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index 9cfe1c4cf8da9bb62ec16822ef1bb419f71eb96f..ef47bdb2213a153dc7223c1018ba8ec9b341a5aa 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -20,13 +20,14 @@ //! Within that context, things are plain-old-data. Within this module, //! data and logic are intertwined. 
+use itertools::Itertools;
 use polkadot_node_primitives::approval::{
 	v1::{DelayTranche, RelayVRFStory},
 	v2::{AssignmentCertV2, CandidateBitfield},
 };
 use polkadot_primitives::{
-	BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex,
-	ValidatorIndex, ValidatorSignature,
+	BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, GroupIndex, Hash,
+	SessionIndex, ValidatorIndex, ValidatorSignature,
 };
 use sp_consensus_slots::Slot;
@@ -76,6 +77,45 @@ impl From for crate::approval_db::v2::TrancheEntry {
 	}
 }
+impl From for OurApproval {
+	fn from(approval: crate::approval_db::v3::OurApproval) -> Self {
+		Self {
+			signature: approval.signature,
+			signed_candidates_indices: approval.signed_candidates_indices,
+		}
+	}
+}
+impl From for crate::approval_db::v3::OurApproval {
+	fn from(approval: OurApproval) -> Self {
+		Self {
+			signature: approval.signature,
+			signed_candidates_indices: approval.signed_candidates_indices,
+		}
+	}
+}
+
+/// Metadata about our approval signature.
+#[derive(Debug, Clone, PartialEq)]
+pub struct OurApproval {
+	/// The signature for the candidate hashes pointed to by the indices.
+	pub signature: ValidatorSignature,
+	/// The indices of the candidates signed in this approval.
+	pub signed_candidates_indices: CandidateBitfield,
+}
+
+impl OurApproval {
+	/// Converts a ValidatorSignature to an OurApproval.
+	/// It is used when converting the database from v1 to the latest version.
+	pub fn from_v1(value: ValidatorSignature, candidate_index: CandidateIndex) -> Self {
+		Self { signature: value, signed_candidates_indices: candidate_index.into() }
+	}
+
+	/// Converts a ValidatorSignature to an OurApproval.
+	/// It is used when converting the database from v2 to the latest version.
+	pub fn from_v2(value: ValidatorSignature, candidate_index: CandidateIndex) -> Self {
+		Self::from_v1(value, candidate_index)
+	}
+}
 /// Metadata regarding approval of a particular candidate within the context of some
 /// particular block.
 #[derive(Debug, Clone, PartialEq)]
@@ -83,7 +123,7 @@ pub struct ApprovalEntry {
 	tranches: Vec,
 	backing_group: GroupIndex,
 	our_assignment: Option,
-	our_approval_sig: Option,
+	our_approval_sig: Option,
 	// `n_validators` bits.
 	assigned_validators: Bitfield,
 	approved: bool,
@@ -95,7 +135,7 @@ impl ApprovalEntry {
 		tranches: Vec,
 		backing_group: GroupIndex,
 		our_assignment: Option,
-		our_approval_sig: Option,
+		our_approval_sig: Option,
 		// `n_validators` bits.
 		assigned_validators: Bitfield,
 		approved: bool,
@@ -137,7 +177,7 @@ impl ApprovalEntry {
 	}
 	/// Import our local approval vote signature for this candidate.
-	pub fn import_approval_sig(&mut self, approval_sig: ValidatorSignature) {
+	pub fn import_approval_sig(&mut self, approval_sig: OurApproval) {
 		self.our_approval_sig = Some(approval_sig);
 	}
@@ -224,7 +264,7 @@ impl ApprovalEntry {
 	/// Get the assignment cert & approval signature.
 	///
 	/// The approval signature will only be `Some` if the assignment is too.
- pub fn local_statements(&self) -> (Option, Option) { + pub fn local_statements(&self) -> (Option, Option) { let approval_sig = self.our_approval_sig.clone(); if let Some(our_assignment) = self.our_assignment.as_ref().filter(|a| a.triggered()) { (Some(our_assignment.clone()), approval_sig) @@ -232,10 +272,44 @@ impl ApprovalEntry { (None, None) } } + + // Convert an ApprovalEntry from v1 version to latest version + pub fn from_v1( + value: crate::approval_db::v1::ApprovalEntry, + candidate_index: CandidateIndex, + ) -> Self { + ApprovalEntry { + tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(), + backing_group: value.backing_group, + our_assignment: value.our_assignment.map(|assignment| assignment.into()), + our_approval_sig: value + .our_approval_sig + .map(|sig| OurApproval::from_v1(sig, candidate_index)), + assigned_validators: value.assignments, + approved: value.approved, + } + } + + // Convert an ApprovalEntry from v1 version to latest version + pub fn from_v2( + value: crate::approval_db::v2::ApprovalEntry, + candidate_index: CandidateIndex, + ) -> Self { + ApprovalEntry { + tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(), + backing_group: value.backing_group, + our_assignment: value.our_assignment.map(|assignment| assignment.into()), + our_approval_sig: value + .our_approval_sig + .map(|sig| OurApproval::from_v2(sig, candidate_index)), + assigned_validators: value.assigned_validators, + approved: value.approved, + } + } } -impl From for ApprovalEntry { - fn from(entry: crate::approval_db::v2::ApprovalEntry) -> Self { +impl From for ApprovalEntry { + fn from(entry: crate::approval_db::v3::ApprovalEntry) -> Self { ApprovalEntry { tranches: entry.tranches.into_iter().map(Into::into).collect(), backing_group: entry.backing_group, @@ -247,7 +321,7 @@ impl From for ApprovalEntry { } } -impl From for crate::approval_db::v2::ApprovalEntry { +impl From for crate::approval_db::v3::ApprovalEntry { fn from(entry: ApprovalEntry) -> Self { Self { tranches: entry.tranches.into_iter().map(Into::into).collect(), @@ -303,10 +377,44 @@ impl CandidateEntry { pub fn approval_entry(&self, block_hash: &Hash) -> Option<&ApprovalEntry> { self.block_assignments.get(block_hash) } + + /// Convert a CandidateEntry from a v1 to its latest equivalent. + pub fn from_v1( + value: crate::approval_db::v1::CandidateEntry, + candidate_index: CandidateIndex, + ) -> Self { + Self { + approvals: value.approvals, + block_assignments: value + .block_assignments + .into_iter() + .map(|(h, ae)| (h, ApprovalEntry::from_v1(ae, candidate_index))) + .collect(), + candidate: value.candidate, + session: value.session, + } + } + + /// Convert a CandidateEntry from a v2 to its latest equivalent. 
+	pub fn from_v2(
+		value: crate::approval_db::v2::CandidateEntry,
+		candidate_index: CandidateIndex,
+	) -> Self {
+		Self {
+			approvals: value.approvals,
+			block_assignments: value
+				.block_assignments
+				.into_iter()
+				.map(|(h, ae)| (h, ApprovalEntry::from_v2(ae, candidate_index)))
+				.collect(),
+			candidate: value.candidate,
+			session: value.session,
+		}
+	}
 }
-impl From for CandidateEntry {
-	fn from(entry: crate::approval_db::v2::CandidateEntry) -> Self {
+impl From for CandidateEntry {
+	fn from(entry: crate::approval_db::v3::CandidateEntry) -> Self {
 		CandidateEntry {
 			candidate: entry.candidate,
 			session: entry.session,
@@ -320,7 +428,7 @@ impl From for CandidateEntry {
 	}
 }
-impl From for crate::approval_db::v2::CandidateEntry {
+impl From for crate::approval_db::v3::CandidateEntry {
 	fn from(entry: CandidateEntry) -> Self {
 		Self {
 			candidate: entry.candidate,
@@ -353,12 +461,21 @@ pub struct BlockEntry {
 	// block. The block can be considered approved if the bitfield has all bits set to `true`.
 	pub approved_bitfield: Bitfield,
 	pub children: Vec,
+	// A list of candidates we have checked, but have not yet signed and
+	// advertised the vote for.
+	candidates_pending_signature: BTreeMap,
 	// A list of assignments for which we already distributed the assignment.
 	// We use this to ensure we don't distribute multiple core assignments twice as we track
 	// individual wakeups for each core.
 	distributed_assignments: Bitfield,
 }
+#[derive(Debug, Clone, PartialEq)]
+pub struct CandidateSigningContext {
+	pub candidate_hash: CandidateHash,
+	pub sign_no_later_than_tick: Tick,
+}
+
 impl BlockEntry {
 	/// Mark a candidate as fully approved in the bitfield.
 	pub fn mark_approved_by_hash(&mut self, candidate_hash: &CandidateHash) {
@@ -447,10 +564,97 @@ impl BlockEntry {
 		distributed
 	}
+
+	/// Defer signing and issuing an approval for a candidate no later than the specified tick.
+	pub fn defer_candidate_signature(
+		&mut self,
+		candidate_index: CandidateIndex,
+		candidate_hash: CandidateHash,
+		sign_no_later_than_tick: Tick,
+	) -> Option {
+		self.candidates_pending_signature.insert(
+			candidate_index,
+			CandidateSigningContext { candidate_hash, sign_no_later_than_tick },
+		)
+	}
+
+	/// Returns the number of candidates waiting for an approval to be issued.
+	pub fn num_candidates_pending_signature(&self) -> usize {
+		self.candidates_pending_signature.len()
+	}
+
+	/// Returns true if we have candidates waiting for a signature to be issued.
+	pub fn has_candidates_pending_signature(&self) -> bool {
+		!self.candidates_pending_signature.is_empty()
+	}
+
+	/// Candidate hashes for candidates pending signatures.
+	fn candidate_hashes_pending_signature(&self) -> Vec {
+		self.candidates_pending_signature
+			.values()
+			.map(|unsigned_approval| unsigned_approval.candidate_hash)
+			.collect()
+	}
+
+	/// Candidate indices for candidates pending signature.
+	fn candidate_indices_pending_signature(&self) -> Option {
+		self.candidates_pending_signature
+			.keys()
+			.map(|val| *val)
+			.collect_vec()
+			.try_into()
+			.ok()
+	}
+
+	/// Returns a list of candidate hashes that need a signature created at the current tick.
+	/// This can happen for either of two reasons:
+	/// 1. We queued more than `max_approval_coalesce_count` candidates.
+	/// 2. We have candidates that have been waiting in the queue past their `sign_no_later_than_tick`.
+	///
+	/// Additionally, we also return the first tick when we will have to create a signature,
+	/// so that the caller can arm the timer if it is not already armed.
+ pub fn get_candidates_that_need_signature( + &self, + tick_now: Tick, + max_approval_coalesce_count: u32, + ) -> (Option<(Vec, CandidateBitfield)>, Option) { + let sign_no_later_than_tick = self + .candidates_pending_signature + .values() + .min_by(|a, b| a.sign_no_later_than_tick.cmp(&b.sign_no_later_than_tick)) + .map(|val| val.sign_no_later_than_tick); + + if let Some(sign_no_later_than_tick) = sign_no_later_than_tick { + if sign_no_later_than_tick <= tick_now || + self.num_candidates_pending_signature() >= max_approval_coalesce_count as usize + { + ( + self.candidate_indices_pending_signature().and_then(|candidate_indices| { + Some((self.candidate_hashes_pending_signature(), candidate_indices)) + }), + Some(sign_no_later_than_tick), + ) + } else { + // We can still wait for other candidates to queue in, so just make sure + // we wake up at the tick we have to sign the longest waiting candidate. + (Default::default(), Some(sign_no_later_than_tick)) + } + } else { + // No cached candidates, nothing to do here, this just means the timer fired, + // but the signatures were already sent because we gathered more than + // max_approval_coalesce_count. + (Default::default(), sign_no_later_than_tick) + } + } + + /// Clears the candidates pending signature because the approval was issued. + pub fn issued_approval(&mut self) { + self.candidates_pending_signature.clear(); + } } -impl From for BlockEntry { - fn from(entry: crate::approval_db::v2::BlockEntry) -> Self { +impl From for BlockEntry { + fn from(entry: crate::approval_db::v3::BlockEntry) -> Self { BlockEntry { block_hash: entry.block_hash, parent_hash: entry.parent_hash, @@ -461,6 +665,11 @@ impl From for BlockEntry { candidates: entry.candidates, approved_bitfield: entry.approved_bitfield, children: entry.children, + candidates_pending_signature: entry + .candidates_pending_signature + .into_iter() + .map(|(candidate_index, signing_context)| (candidate_index, signing_context.into())) + .collect(), distributed_assignments: entry.distributed_assignments, } } @@ -479,11 +688,30 @@ impl From for BlockEntry { approved_bitfield: entry.approved_bitfield, children: entry.children, distributed_assignments: Default::default(), + candidates_pending_signature: Default::default(), } } } -impl From for crate::approval_db::v2::BlockEntry { +impl From for BlockEntry { + fn from(entry: crate::approval_db::v2::BlockEntry) -> Self { + BlockEntry { + block_hash: entry.block_hash, + parent_hash: entry.parent_hash, + block_number: entry.block_number, + session: entry.session, + slot: entry.slot, + relay_vrf_story: RelayVRFStory(entry.relay_vrf_story), + candidates: entry.candidates, + approved_bitfield: entry.approved_bitfield, + children: entry.children, + distributed_assignments: entry.distributed_assignments, + candidates_pending_signature: Default::default(), + } + } +} + +impl From for crate::approval_db::v3::BlockEntry { fn from(entry: BlockEntry) -> Self { Self { block_hash: entry.block_hash, @@ -495,36 +723,30 @@ impl From for crate::approval_db::v2::BlockEntry { candidates: entry.candidates, approved_bitfield: entry.approved_bitfield, children: entry.children, + candidates_pending_signature: entry + .candidates_pending_signature + .into_iter() + .map(|(candidate_index, signing_context)| (candidate_index, signing_context.into())) + .collect(), distributed_assignments: entry.distributed_assignments, } } } -/// Migration helpers. 
-impl From for CandidateEntry { - fn from(value: crate::approval_db::v1::CandidateEntry) -> Self { +impl From for CandidateSigningContext { + fn from(signing_context: crate::approval_db::v3::CandidateSigningContext) -> Self { Self { - approvals: value.approvals, - block_assignments: value - .block_assignments - .into_iter() - .map(|(h, ae)| (h, ae.into())) - .collect(), - candidate: value.candidate, - session: value.session, + candidate_hash: signing_context.candidate_hash, + sign_no_later_than_tick: signing_context.sign_no_later_than_tick.into(), } } } -impl From for ApprovalEntry { - fn from(value: crate::approval_db::v1::ApprovalEntry) -> Self { - ApprovalEntry { - tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(), - backing_group: value.backing_group, - our_assignment: value.our_assignment.map(|assignment| assignment.into()), - our_approval_sig: value.our_approval_sig, - assigned_validators: value.assignments, - approved: value.approved, +impl From for crate::approval_db::v3::CandidateSigningContext { + fn from(signing_context: CandidateSigningContext) -> Self { + Self { + candidate_hash: signing_context.candidate_hash, + sign_no_later_than_tick: signing_context.sign_no_later_than_tick.into(), } } } diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 0c0dcfde9b6660e181646e6689e1ab26735554d9..7a0bde6a55e28135036dbc9b8ee8137a0485aa28 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -20,7 +20,7 @@ use crate::backend::V1ReadBackend; use polkadot_node_primitives::{ approval::{ v1::{ - AssignmentCert, AssignmentCertKind, DelayTranche, VrfOutput, VrfProof, VrfSignature, + AssignmentCert, AssignmentCertKind, DelayTranche, VrfPreOutput, VrfProof, VrfSignature, RELAY_VRF_MODULO_CONTEXT, }, v2::{AssignmentCertKindV2, AssignmentCertV2}, @@ -37,8 +37,8 @@ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_overseer::HeadSupportsParachains; use polkadot_primitives::{ - CandidateCommitments, CandidateEvent, CoreIndex, GroupIndex, Header, Id as ParaId, IndexedVec, - ValidationCode, ValidatorSignature, + vstaging::NodeFeatures, ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, + GroupIndex, Header, Id as ParaId, IndexedVec, ValidationCode, ValidatorSignature, }; use std::time::Duration; @@ -56,7 +56,7 @@ use std::{ }; use super::{ - approval_db::v2::StoredBlockRange, + approval_db::common::StoredBlockRange, backend::BackendWriteOp, import::tests::{ garbage_vrf_signature, AllowedSlots, BabeEpoch, BabeEpochConfiguration, @@ -116,7 +116,7 @@ fn make_sync_oracle(val: bool) -> (Box, TestSyncOracleHan #[cfg(test)] pub mod test_constants { - use crate::approval_db::v2::Config as DatabaseConfig; + use crate::approval_db::common::Config as DatabaseConfig; const DATA_COL: u32 = 0; pub(crate) const NUM_COLUMNS: u32 = 1; @@ -243,6 +243,7 @@ where polkadot_primitives::CoreIndex, polkadot_primitives::GroupIndex, )>, + _enable_assignments_v2: bool, ) -> HashMap { self.0() } @@ -280,6 +281,7 @@ impl V1ReadBackend for TestStoreInner { fn load_candidate_entry_v1( &self, candidate_hash: &CandidateHash, + _candidate_index: CandidateIndex, ) -> SubsystemResult> { self.load_candidate_entry(candidate_hash) } @@ -363,6 +365,7 @@ impl V1ReadBackend for TestStore { fn load_candidate_entry_v1( &self, candidate_hash: &CandidateHash, + _candidate_index: CandidateIndex, ) -> SubsystemResult> { 
self.load_candidate_entry(candidate_hash) } @@ -415,9 +418,12 @@ fn garbage_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { let mut prng = rand_core::OsRng; let keypair = schnorrkel::Keypair::generate_with(&mut prng); let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); - let out = inout.to_output(); + let preout = inout.to_preout(); - AssignmentCert { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } } + AssignmentCert { + kind, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, + } } fn garbage_assignment_cert_v2(kind: AssignmentCertKindV2) -> AssignmentCertV2 { @@ -426,9 +432,12 @@ fn garbage_assignment_cert_v2(kind: AssignmentCertKindV2) -> AssignmentCertV2 { let mut prng = rand_core::OsRng; let keypair = schnorrkel::Keypair::generate_with(&mut prng); let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); - let out = inout.to_output(); + let preout = inout.to_preout(); - AssignmentCertV2 { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } } + AssignmentCertV2 { + kind, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, + } } fn sign_approval( @@ -439,6 +448,15 @@ fn sign_approval( key.sign(&ApprovalVote(candidate_hash).signing_payload(session_index)).into() } +fn sign_approval_multiple_candidates( + key: Sr25519Keyring, + candidate_hashes: Vec, + session_index: SessionIndex, +) -> ValidatorSignature { + key.sign(&ApprovalVoteMultipleCandidates(&candidate_hashes).signing_payload(session_index)) + .into() +} + type VirtualOverseer = test_helpers::TestSubsystemContextHandle; #[derive(Default)] @@ -634,7 +652,12 @@ async fn check_and_import_approval( overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportApproval( - IndirectSignedApprovalVote { block_hash, candidate_index, validator, signature }, + IndirectSignedApprovalVoteV2 { + block_hash, + candidate_indices: candidate_index.into(), + validator, + signature, + }, tx, ), }, @@ -997,6 +1020,15 @@ async fn import_block( si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); } ); + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); } assert_matches!( @@ -1998,6 +2030,91 @@ fn forkful_import_at_same_height_act_on_leaf() { }); } +#[test] +fn test_signing_a_single_candidate_is_backwards_compatible() { + let session_index = 1; + let block_hash = Hash::repeat_byte(0x01); + let candidate_descriptors = (1..10) + .into_iter() + .map(|val| make_candidate(ParaId::from(val as u32), &block_hash)) + .collect::>(); + + let candidate_hashes = candidate_descriptors + .iter() + .map(|candidate_descriptor| candidate_descriptor.hash()) + .collect_vec(); + + let first_descriptor = candidate_descriptors.first().unwrap(); + + let candidate_hash = first_descriptor.hash(); + + let sig_a = sign_approval(Sr25519Keyring::Alice, candidate_hash, session_index); + + let sig_b = sign_approval(Sr25519Keyring::Alice, candidate_hash, session_index); + + assert!(DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) + .check_signature( + &Sr25519Keyring::Alice.public().into(), + candidate_hash, + session_index, + &sig_a, + ) + .is_ok()); + + assert!(DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) + .check_signature( + &Sr25519Keyring::Alice.public().into(), + candidate_hash, + session_index, + &sig_b, + ) + 
.is_ok()); + + let sig_c = sign_approval_multiple_candidates( + Sr25519Keyring::Alice, + vec![candidate_hash], + session_index, + ); + + assert!(DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(vec![candidate_hash]) + ) + .check_signature(&Sr25519Keyring::Alice.public().into(), candidate_hash, session_index, &sig_c,) + .is_ok()); + + assert!(DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) + .check_signature( + &Sr25519Keyring::Alice.public().into(), + candidate_hash, + session_index, + &sig_c, + ) + .is_ok()); + + assert!(DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(vec![candidate_hash]) + ) + .check_signature(&Sr25519Keyring::Alice.public().into(), candidate_hash, session_index, &sig_a,) + .is_ok()); + + let sig_all = sign_approval_multiple_candidates( + Sr25519Keyring::Alice, + candidate_hashes.clone(), + session_index, + ); + + assert!(DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes.clone()) + ) + .check_signature( + &Sr25519Keyring::Alice.public().into(), + *candidate_hashes.first().expect("test"), + session_index, + &sig_all, + ) + .is_ok()); +} + #[test] fn import_checked_approval_updates_entries_and_schedules() { let config = HarnessConfig::default(); @@ -2705,20 +2822,38 @@ async fn handle_double_assignment_import( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { - exec_timeout_kind, + exec_kind, response_sender, .. - }) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + }) if exec_kind == PvfExecKind::Approval => { response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) .unwrap(); } ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) @@ -3453,3 +3588,455 @@ fn waits_until_approving_assignments_are_old_enough() { virtual_overseer }); } + +#[test] +fn test_approval_is_sent_on_max_approval_coalesce_count() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let assignments_cert = + garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: assignments_cert.clone(), + tranche: 0, + validator_index: 
ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert: assignments_cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _sync_oracle_handle } = + test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_commitments = CandidateCommitments::default(); + + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt.commitments_hash = candidate_commitments.hash(); + receipt + }; + + let candidate_hash1 = candidate_receipt1.hash(); + + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt.commitments_hash = candidate_commitments.hash(); + receipt + }; + + let slot = Slot::from(1); + let candidate_index1 = 0; + let candidate_index2 = 1; + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some(vec![ + (candidate_receipt1.clone(), CoreIndex(0), GroupIndex(0)), + (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), + ]); + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + }, + ) + .build(&mut virtual_overseer) + .await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash1).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + handle_approval_on_max_coalesce_count( + &mut virtual_overseer, + vec![candidate_index1, candidate_index2], + ) + .await; + + virtual_overseer + }); +} + +async fn handle_approval_on_max_coalesce_count( + virtual_overseer: &mut VirtualOverseer, + candidate_indicies: Vec, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(TryInto::::try_into(candidate_indicies.clone()).unwrap(), c_indices); + } + ); + + for _ in &candidate_indicies { + 
recover_available_data(virtual_overseer).await; + fetch_validation_code(virtual_overseer).await; + } + + for _ in &candidate_indicies { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + } + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 2, + })); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 2, + })); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { + assert_eq!(TryInto::::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); +} + +async fn handle_approval_on_max_wait_time( + virtual_overseer: &mut VirtualOverseer, + candidate_indicies: Vec, + clock: Box, +) { + const TICK_NOW_BEGIN: u64 = 1; + const MAX_COALESCE_COUNT: u32 = 3; + + clock.inner.lock().set_tick(TICK_NOW_BEGIN); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(TryInto::::try_into(candidate_indicies.clone()).unwrap(), c_indices); + } + ); + + for _ in &candidate_indicies { + recover_available_data(virtual_overseer).await; + fetch_validation_code(virtual_overseer).await; + } + + for _ in &candidate_indicies { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + } + + // First time we fetch the configuration when we are ready to approve the first candidate + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: MAX_COALESCE_COUNT, + })); + } + ); + + // Second time we fetch the configuration when we are ready to approve the second candidate + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: MAX_COALESCE_COUNT, + })); + } + ); + + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + // Move the clock just before we should send the approval + clock + .inner + .lock() + .set_tick(MAX_APPROVAL_COALESCE_WAIT_TICKS as Tick + TICK_NOW_BEGIN - 1); + + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 
2).await.is_none()); + + // Move the clock tick, so we can trigger a force sending of the approvals + clock + .inner + .lock() + .set_tick(MAX_APPROVAL_COALESCE_WAIT_TICKS as Tick + TICK_NOW_BEGIN); + + // Third time we fetch the configuration when timer expires and we are ready to sent the + // approval + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 3, + })); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { + assert_eq!(TryInto::::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); +} + +#[test] +fn test_approval_is_sent_on_max_approval_coalesce_wait() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let assignments_cert = + garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: assignments_cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert: assignments_cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _sync_oracle_handle } = + test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_commitments = CandidateCommitments::default(); + + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt.commitments_hash = candidate_commitments.hash(); + receipt + }; + + let candidate_hash1 = candidate_receipt1.hash(); + + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt.commitments_hash = candidate_commitments.hash(); + receipt + }; + + let slot = Slot::from(1); + let candidate_index1 = 0; + let candidate_index2 = 1; + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + 
vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some(vec![ + (candidate_receipt1.clone(), CoreIndex(0), GroupIndex(0)), + (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), + ]); + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + }, + ) + .build(&mut virtual_overseer) + .await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash1).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + handle_approval_on_max_wait_time( + &mut virtual_overseer, + vec![candidate_index1, candidate_index2], + clock, + ) + .await; + + virtual_overseer + }); +} diff --git a/polkadot/node/core/approval-voting/src/time.rs b/polkadot/node/core/approval-voting/src/time.rs index a45866402c827e7e91ad6fd44bc7561be35e5d30..61091f3c34cdab4aed00c24bcbc8a40d6a77a116 100644 --- a/polkadot/node/core/approval-voting/src/time.rs +++ b/polkadot/node/core/approval-voting/src/time.rs @@ -16,14 +16,23 @@ //! Time utilities for approval voting. -use futures::prelude::*; +use futures::{ + future::BoxFuture, + prelude::*, + stream::{FusedStream, FuturesUnordered}, + Stream, StreamExt, +}; + use polkadot_node_primitives::approval::v1::DelayTranche; use sp_consensus_slots::Slot; use std::{ + collections::HashSet, pin::Pin, + task::Poll, time::{Duration, SystemTime}, }; +use polkadot_primitives::{Hash, ValidatorIndex}; const TICK_DURATION_MILLIS: u64 = 500; /// A base unit of time, starting from the Unix epoch, split into half-second intervals. @@ -88,3 +97,157 @@ pub(crate) fn slot_number_to_tick(slot_duration_millis: u64, slot: Slot) -> Tick let ticks_per_slot = slot_duration_millis / TICK_DURATION_MILLIS; u64::from(slot) * ticks_per_slot } + +/// A list of delayed futures that gets triggered when the waiting time has expired and it is +/// time to sign the candidate. +/// We have a timer per relay-chain block. +#[derive(Default)] +pub struct DelayedApprovalTimer { + timers: FuturesUnordered<BoxFuture<'static, (Hash, ValidatorIndex)>>, + blocks: HashSet<Hash>, +} + +impl DelayedApprovalTimer { + /// Starts a single timer per block hash. + /// + /// Guarantees that if a timer already exists for the given block hash, + /// no additional timer is started.
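// A minimal, self-contained sketch (illustrative names, not the subsystem code) of the
// "one timer per key" pattern that `DelayedApprovalTimer` uses: a `HashSet` prevents
// double-arming a key, and a `FuturesUnordered` yields the key back once its delay
// elapses, after which the key may be armed again. `futures` and `futures_timer` are
// assumed as dependencies, matching the imports above; `KeyedTimers` and the `u64`
// keys are stand-ins for the real block hashes and validator indices.
use std::{collections::HashSet, time::Duration};

use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt};

#[derive(Default)]
struct KeyedTimers {
    timers: FuturesUnordered<BoxFuture<'static, u64>>,
    armed: HashSet<u64>,
}

impl KeyedTimers {
    /// Arm a timer for `key` unless one is already pending (mirrors `maybe_arm_timer`).
    fn maybe_arm(&mut self, key: u64, delay: Duration) {
        if self.armed.insert(key) {
            self.timers.push(
                async move {
                    futures_timer::Delay::new(delay).await;
                    key
                }
                .boxed(),
            );
        }
    }
}

fn main() {
    futures::executor::block_on(async {
        let mut timers = KeyedTimers::default();
        timers.maybe_arm(1, Duration::from_millis(10));
        timers.maybe_arm(1, Duration::from_millis(10)); // no-op: already armed
        // Exactly one timer fires; removing the key afterwards allows re-arming,
        // which is what the `Stream` impl above does in `poll_next`.
        let fired = timers.timers.next().await.expect("one timer was armed");
        timers.armed.remove(&fired);
        assert_eq!(fired, 1);
    });
}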
+ pub(crate) fn maybe_arm_timer( + &mut self, + wait_untill: Tick, + clock: &dyn Clock, + block_hash: Hash, + validator_index: ValidatorIndex, + ) { + if self.blocks.insert(block_hash) { + let clock_wait = clock.wait(wait_untill); + self.timers.push(Box::pin(async move { + clock_wait.await; + (block_hash, validator_index) + })); + } + } +} + +impl Stream for DelayedApprovalTimer { + type Item = (Hash, ValidatorIndex); + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let poll_result = self.timers.poll_next_unpin(cx); + match poll_result { + Poll::Ready(Some(result)) => { + self.blocks.remove(&result.0); + Poll::Ready(Some(result)) + }, + _ => poll_result, + } + } +} + +impl FusedStream for DelayedApprovalTimer { + fn is_terminated(&self) -> bool { + self.timers.is_terminated() + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use futures::{executor::block_on, FutureExt, StreamExt}; + use futures_timer::Delay; + use polkadot_primitives::{Hash, ValidatorIndex}; + + use crate::time::{Clock, SystemClock}; + + use super::DelayedApprovalTimer; + + #[test] + fn test_select_empty_timer() { + block_on(async move { + let mut timer = DelayedApprovalTimer::default(); + + for _ in 1..10 { + let result = futures::select!( + _ = timer.select_next_some() => { + 0 + } + // Only this arm should fire + _ = Delay::new(Duration::from_millis(100)).fuse() => { + 1 + } + ); + + assert_eq!(result, 1); + } + }); + } + + #[test] + fn test_timer_functionality() { + block_on(async move { + let mut timer = DelayedApprovalTimer::default(); + let test_hashes = + vec![Hash::repeat_byte(0x01), Hash::repeat_byte(0x02), Hash::repeat_byte(0x03)]; + for (index, hash) in test_hashes.iter().enumerate() { + timer.maybe_arm_timer( + SystemClock.tick_now() + index as u64, + &SystemClock, + *hash, + ValidatorIndex::from(2), + ); + timer.maybe_arm_timer( + SystemClock.tick_now() + index as u64, + &SystemClock, + *hash, + ValidatorIndex::from(2), + ); + } + let timeout_hash = Hash::repeat_byte(0x02); + for i in 0..test_hashes.len() * 2 { + let result = futures::select!( + (hash, _) = timer.select_next_some() => { + hash + } + // Timers should fire only once, so for the rest of the iterations we should timeout through here. + _ = Delay::new(Duration::from_secs(2)).fuse() => { + timeout_hash + } + ); + assert_eq!(test_hashes.get(i).cloned().unwrap_or(timeout_hash), result); + } + + // Now check timer can be restarted if already fired + for (index, hash) in test_hashes.iter().enumerate() { + timer.maybe_arm_timer( + SystemClock.tick_now() + index as u64, + &SystemClock, + *hash, + ValidatorIndex::from(2), + ); + timer.maybe_arm_timer( + SystemClock.tick_now() + index as u64, + &SystemClock, + *hash, + ValidatorIndex::from(2), + ); + } + + for i in 0..test_hashes.len() * 2 { + let result = futures::select!( + (hash, _) = timer.select_next_some() => { + hash + } + // Timers should fire only once, so for the rest of the iterations we should timeout through here. 
+ _ = Delay::new(Duration::from_secs(2)).fuse() => { + timeout_hash + } + ); + assert_eq!(test_hashes.get(i).cloned().unwrap_or(timeout_hash), result); + } + }); + } +} diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 3fa81d064a883608b556343b5c07f4e55af1b8c9..4b2baf3fc55421a0d3f07fc3996972e1bd7dc2ac 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" futures-timer = "3.0.2" diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index 7a6ce5de8cb18777c8832c561327aa15d127b9cc..16ed11e7eec9a2aa7a97ad9a55585ebbef95c98c 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as the issuance of statements about candidates." +[lints] +workspace = true + [dependencies] futures = "0.3.21" sp-keystore = { path = "../../../../substrate/primitives/keystore" } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index a91eefe5e04f8790896dfe953481ab81597c1d09..434051f1b00f490504f1384cda6ecb03f4fc7703 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -106,7 +106,7 @@ use polkadot_node_subsystem_util::{ use polkadot_primitives::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, Hash, Id as ParaId, - PersistedValidationData, PvfExecTimeoutKind, SigningContext, ValidationCode, ValidatorId, + PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; @@ -566,7 +566,7 @@ async fn request_candidate_validation( candidate_receipt, pov, executor_params, - exec_timeout_kind: PvfExecTimeoutKind::Backing, + exec_kind: PvfExecKind::Backing, response_sender: tx, }) .await; diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index caa85c12989c395fbdcd4af5253cbb15275debe9..c12be72556e36dfe62af71056bea379aa934de35 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,7 +33,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecTimeoutKind, + CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use sp_application_crypto::AppCrypto; @@ -344,14 +344,14 @@ async fn assert_validate_from_exhaustive( validation_data, validation_code, candidate_receipt, - exec_timeout_kind, + exec_kind, response_sender, .. 
}, ) if validation_data == *assert_pvd && validation_code == *assert_validation_code && *pov == *assert_pov && &candidate_receipt.descriptor == assert_candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_receipt.commitments_hash == assert_candidate.commitments.hash() => { response_sender.send(Ok(ValidationResult::Valid( @@ -550,14 +550,14 @@ fn backing_works() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_ab && validation_code == validation_code_ab && *pov == pov_ab && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_receipt.commitments_hash == candidate_a_commitments_hash => { response_sender.send(Ok( @@ -729,14 +729,14 @@ fn backing_works_while_validation_ongoing() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_abc && validation_code == validation_code_abc && *pov == pov_abc && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { // we never validate the candidate. our local node @@ -890,14 +890,14 @@ fn backing_misbehavior_works() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_a && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { response_sender.send(Ok( @@ -1057,14 +1057,14 @@ fn backing_dont_second_invalid() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_a.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); @@ -1097,14 +1097,14 @@ fn backing_dont_second_invalid() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_b && validation_code == validation_code_b && *pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_b.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok( @@ -1224,14 +1224,14 @@ fn backing_second_after_first_fails_works() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. 
}, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); @@ -1368,14 +1368,14 @@ fn backing_works_after_failed_validation() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); @@ -1634,13 +1634,13 @@ fn retry_works() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, .. }, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash ); virtual_overseer diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index fc4bd7d98e7d229bc3738e60273a484349c2ee79..e7c29e11bb4702a1af46d414af0620af39910e1f 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -232,14 +232,14 @@ async fn assert_validate_seconded_candidate( validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. 
}) if &validation_data == assert_pvd && &validation_code == assert_validation_code && &*pov == assert_pov && &candidate_receipt.descriptor == candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok(ValidationResult::Valid( diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 712a01b46b1cf9df13220accaad7ca1dde6cbee2..880273c0e7f3cc4a30d979edaad0d57dbb6ac523 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Bitfield signing subsystem for the Polkadot node" +[lints] +workspace = true + [dependencies] futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index a2e88778532f754cd429c5c156fe27cf5dd88d75..4f0ad67dbf1c84dd07d170c0ed00b7255ed275fa 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -6,8 +6,11 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 89ea02728840eb540b60e0856cb93d16be403ff6..5c4e449b2c9025ec1b22ee4fda36331c245d3551 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -24,8 +24,8 @@ #![warn(missing_docs)] use polkadot_node_core_pvf::{ - InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PrepareError, - PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost, + InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PossiblyInvalidError, + PrepareError, PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, }; use polkadot_node_primitives::{ BlockData, InvalidCandidate, PoV, ValidationResult, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT, @@ -49,8 +49,8 @@ use polkadot_primitives::{ DEFAULT_LENIENT_PREPARATION_TIMEOUT, DEFAULT_PRECHECK_PREPARATION_TIMEOUT, }, CandidateCommitments, CandidateDescriptor, CandidateReceipt, ExecutorParams, Hash, - OccupiedCoreAssumption, PersistedValidationData, PvfExecTimeoutKind, PvfPrepTimeoutKind, - ValidationCode, ValidationCodeHash, + OccupiedCoreAssumption, PersistedValidationData, PvfExecKind, PvfPrepKind, ValidationCode, + ValidationCodeHash, }; use parity_scale_codec::Encode; @@ -73,12 +73,6 @@ mod tests; const LOG_TARGET: &'static str = "parachain::candidate-validation"; -/// The amount of time to wait before retrying after a retry-able backing validation error. We use a -/// lower value for the backing case, to fit within the lower backing timeout. -#[cfg(not(test))] -const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(500); -#[cfg(test)] -const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200); /// The amount of time to wait before retrying after a retry-able approval validation error. 
We use /// a higher value for the approval case since we have more time, and if we wait longer it is more /// likely that transient conditions will resolve. @@ -94,6 +88,8 @@ pub struct Config { pub artifacts_cache_path: PathBuf, /// The version of the node. `None` can be passed to skip the version check (only for tests). pub node_version: Option, + /// Whether the node is attempting to run as a secure validator. + pub secure_validator_mode: bool, /// Path to the preparation worker binary pub prep_worker_path: PathBuf, /// Path to the execution worker binary @@ -139,18 +135,25 @@ async fn run( mut ctx: Context, metrics: Metrics, pvf_metrics: polkadot_node_core_pvf::Metrics, - Config { artifacts_cache_path, node_version, prep_worker_path, exec_worker_path }: Config, + Config { + artifacts_cache_path, + node_version, + secure_validator_mode, + prep_worker_path, + exec_worker_path, + }: Config, ) -> SubsystemResult<()> { let (validation_host, task) = polkadot_node_core_pvf::start( polkadot_node_core_pvf::Config::new( artifacts_cache_path, node_version, + secure_validator_mode, prep_worker_path, exec_worker_path, ), pvf_metrics, ) - .await; + .await?; ctx.spawn_blocking("pvf-validation-host", task.boxed())?; loop { @@ -163,7 +166,7 @@ async fn run( candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, .. } => { @@ -180,7 +183,7 @@ async fn run( candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, &metrics, ) .await; @@ -198,7 +201,7 @@ async fn run( candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, .. } => { @@ -215,7 +218,7 @@ async fn run( candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, &metrics, ) .await; @@ -357,7 +360,7 @@ where return PreCheckOutcome::Invalid }; - let timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Precheck); + let timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Precheck); let pvf = match sp_maybe_compressed_blob::decompress( &validation_code.0, @@ -501,7 +504,7 @@ async fn validate_from_chain_state( candidate_receipt: CandidateReceipt, pov: Arc, executor_params: ExecutorParams, - exec_timeout_kind: PvfExecTimeoutKind, + exec_kind: PvfExecKind, metrics: &Metrics, ) -> Result where @@ -521,7 +524,7 @@ where candidate_receipt.clone(), pov, executor_params, - exec_timeout_kind, + exec_kind, metrics, ) .await; @@ -557,7 +560,7 @@ async fn validate_candidate_exhaustive( candidate_receipt: CandidateReceipt, pov: Arc, executor_params: ExecutorParams, - exec_timeout_kind: PvfExecTimeoutKind, + exec_kind: PvfExecKind, metrics: &Metrics, ) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); @@ -616,22 +619,39 @@ async fn validate_candidate_exhaustive( relay_parent_storage_root: persisted_validation_data.relay_parent_storage_root, }; - let result = validation_backend - .validate_candidate_with_retry( - raw_validation_code.to_vec(), - pvf_exec_timeout(&executor_params, exec_timeout_kind), - exec_timeout_kind, - params, - executor_params, - ) - .await; + let result = match exec_kind { + // Retry is disabled to reduce the chance of nondeterministic blocks getting backed and + // honest backers getting slashed. 
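// A hedged, standalone sketch of the policy this match implements: backing gets a
// single validation attempt, while approval-time execution may retry one transient
// failure after a delay. `ExecPolicy` and `run_with_policy` are invented names for
// illustration only; the real dispatch on `PvfExecKind` continues just below.
#[derive(Clone, Copy)]
enum ExecPolicy { Backing, Approval }

fn run_with_policy(
    policy: ExecPolicy,
    mut attempt: impl FnMut() -> Result<(), String>,
    retry_delay: std::time::Duration,
) -> Result<(), String> {
    match policy {
        // A transient failure during backing only means this candidate is not backed;
        // never retrying reduces the chance of backing a nondeterministic block.
        ExecPolicy::Backing => attempt(),
        // Approval checks affect finality, so one transient failure is retried.
        ExecPolicy::Approval => attempt().or_else(|_| {
            std::thread::sleep(retry_delay);
            attempt()
        }),
    }
}

fn main() {
    let mut calls = 0;
    let res = run_with_policy(
        ExecPolicy::Approval,
        || {
            calls += 1;
            if calls == 1 { Err("transient".into()) } else { Ok(()) }
        },
        std::time::Duration::from_millis(5),
    );
    assert!(res.is_ok() && calls == 2);
}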
+ PvfExecKind::Backing => { + let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); + let exec_timeout = pvf_exec_timeout(&executor_params, exec_kind); + let pvf = PvfPrepData::from_code( + raw_validation_code.to_vec(), + executor_params, + prep_timeout, + PrepareJobKind::Compilation, + ); + + validation_backend.validate_candidate(pvf, exec_timeout, params.encode()).await + }, + PvfExecKind::Approval => + validation_backend + .validate_candidate_with_retry( + raw_validation_code.to_vec(), + pvf_exec_timeout(&executor_params, exec_kind), + params, + executor_params, + PVF_APPROVAL_EXECUTION_RETRY_DELAY, + ) + .await, + }; if let Err(ref error) = result { gum::info!(target: LOG_TARGET, ?para_id, ?error, "Failed to validate candidate"); } match result { - Err(ValidationError::InternalError(e)) => { + Err(ValidationError::Internal(e)) => { gum::warn!( target: LOG_TARGET, ?para_id, @@ -640,29 +660,29 @@ async fn validate_candidate_exhaustive( ); Err(ValidationFailed(e.to_string())) }, - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout)) => + Err(ValidationError::Invalid(WasmInvalidCandidate::HardTimeout)) => Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WorkerReportedError(e))) => + Err(ValidationError::Invalid(WasmInvalidCandidate::WorkerReportedInvalid(e))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)) => + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError( "ambiguous worker death".to_string(), ))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(err))) => + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::PrepareError(e))) => { - // In principle if preparation of the `WASM` fails, the current candidate can not be the - // reason for that. So we can't say whether it is invalid or not. In addition, with - // pre-checking enabled only valid runtimes should ever get enacted, so we can be - // reasonably sure that this is some local problem on the current node. However, as this - // particular error *seems* to indicate a deterministic error, we raise a warning. + + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(format!( + "ambiguous job death: {err}" + )))), + Err(ValidationError::Preparation(e)) => { gum::warn!( target: LOG_TARGET, ?para_id, ?e, "Deterministic error occurred during preparation (should have been ruled out by pre-checking phase)", ); - Err(ValidationFailed(e)) + Err(ValidationFailed(e.to_string())) }, Ok(res) => if res.head_data.hash() != candidate_receipt.descriptor.para_head { @@ -704,8 +724,8 @@ trait ValidationBackend { encoded_params: Vec, ) -> Result; - /// Tries executing a PVF. Will retry once if an error is encountered that may have been - /// transient. + /// Tries executing a PVF for the approval subsystem. Will retry once if an error is encountered + /// that may have been transient. /// /// NOTE: Should retry only on errors that are a result of execution itself, and not of /// preparation. 
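// A self-contained sketch of the retry shape that `validate_candidate_with_retry`
// (below) gives approval-time execution: each possibly-transient error class carries a
// one-retry budget, and retrying stops once another delayed attempt could no longer
// fit inside the execution timeout. `AttemptError` and `retry_validation` are
// illustrative stand-ins, not the candidate-validation types.
use std::time::{Duration, Instant};

#[derive(Clone, Copy, Debug, PartialEq)]
enum AttemptError { AmbiguousDeath, JobError, Internal, Invalid }

fn retry_validation(
    mut run_attempt: impl FnMut() -> Result<(), AttemptError>,
    exec_timeout: Duration,
    retry_delay: Duration,
) -> Result<(), AttemptError> {
    let started = Instant::now();
    // One retry per "possibly transient" class, mirroring the budgets in the real loop.
    let (mut death_left, mut job_left, mut internal_left) = (1u8, 1u8, 1u8);
    let mut result = run_attempt();
    loop {
        // Stop retrying once another delayed attempt would exceed the timeout.
        if started.elapsed() + retry_delay > exec_timeout {
            return result;
        }
        let budget = match result {
            Err(AttemptError::AmbiguousDeath) => &mut death_left,
            Err(AttemptError::JobError) => &mut job_left,
            Err(AttemptError::Internal) => &mut internal_left,
            // Success and deterministic rejections are final.
            Ok(()) | Err(AttemptError::Invalid) => return result,
        };
        if *budget == 0 {
            return result;
        }
        *budget -= 1;
        std::thread::sleep(retry_delay);
        result = run_attempt();
    }
}

fn main() {
    let mut attempts = 0;
    let outcome = retry_validation(
        || {
            attempts += 1;
            if attempts == 1 { Err(AttemptError::AmbiguousDeath) } else { Ok(()) }
        },
        Duration::from_secs(2),
        Duration::from_millis(10),
    );
    assert_eq!(outcome, Ok(()));
    assert_eq!(attempts, 2);

    // Deterministic rejections are returned immediately, without retry.
    let rejected = retry_validation(
        || Err(AttemptError::Invalid),
        Duration::from_secs(2),
        Duration::from_millis(10),
    );
    assert_eq!(rejected, Err(AttemptError::Invalid));
}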
@@ -713,11 +733,11 @@ trait ValidationBackend { &mut self, raw_validation_code: Vec, exec_timeout: Duration, - exec_timeout_kind: PvfExecTimeoutKind, params: ValidationParams, executor_params: ExecutorParams, + retry_delay: Duration, ) -> Result { - let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Lenient); + let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); // Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap. let pvf = PvfPrepData::from_code( raw_validation_code, @@ -735,15 +755,10 @@ trait ValidationBackend { return validation_result } - let retry_delay = match exec_timeout_kind { - PvfExecTimeoutKind::Backing => PVF_BACKING_EXECUTION_RETRY_DELAY, - PvfExecTimeoutKind::Approval => PVF_APPROVAL_EXECUTION_RETRY_DELAY, - }; - // Allow limited retries for each kind of error. + let mut num_death_retries_left = 1; + let mut num_job_error_retries_left = 1; let mut num_internal_retries_left = 1; - let mut num_awd_retries_left = 1; - let mut num_panic_retries_left = 1; loop { // Stop retrying if we exceeded the timeout. if total_time_start.elapsed() + retry_delay > exec_timeout { @@ -751,15 +766,31 @@ trait ValidationBackend { } match validation_result { - Err(ValidationError::InvalidCandidate( - WasmInvalidCandidate::AmbiguousWorkerDeath, - )) if num_awd_retries_left > 0 => num_awd_retries_left -= 1, - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(_))) - if num_panic_retries_left > 0 => - num_panic_retries_left -= 1, - Err(ValidationError::InternalError(_)) if num_internal_retries_left > 0 => - num_internal_retries_left -= 1, - _ => break, + Err(ValidationError::PossiblyInvalid( + PossiblyInvalidError::AmbiguousWorkerDeath | + PossiblyInvalidError::AmbiguousJobDeath(_), + )) => + if num_death_retries_left > 0 { + num_death_retries_left -= 1; + } else { + break; + }, + + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(_))) => + if num_job_error_retries_left > 0 { + num_job_error_retries_left -= 1; + } else { + break; + }, + + Err(ValidationError::Internal(_)) => + if num_internal_retries_left > 0 { + num_internal_retries_left -= 1; + } else { + break; + }, + + Ok(_) | Err(ValidationError::Invalid(_) | ValidationError::Preparation(_)) => break, } // If we got a possibly transient error, retry once after a brief delay, on the @@ -788,7 +819,7 @@ trait ValidationBackend { validation_result } - async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result; + async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError>; } #[async_trait] @@ -818,7 +849,7 @@ impl ValidationBackend for ValidationHost { })? } - async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError> { let (tx, rx) = oneshot::channel(); if let Err(err) = self.precheck_pvf(pvf, tx).await { // Return an IO error if there was an error communicating with the host. @@ -861,22 +892,41 @@ fn perform_basic_checks( Ok(()) } -fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepTimeoutKind) -> Duration { +/// To determine the amount of timeout time for the pvf execution. +/// +/// Precheck +/// The time period after which the preparation worker is considered +/// unresponsive and will be killed. +/// +/// Prepare +///The time period after which the preparation worker is considered +/// unresponsive and will be killed. 
+fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepKind) -> Duration { if let Some(timeout) = executor_params.pvf_prep_timeout(kind) { return timeout } match kind { - PvfPrepTimeoutKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT, - PvfPrepTimeoutKind::Lenient => DEFAULT_LENIENT_PREPARATION_TIMEOUT, + PvfPrepKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT, + PvfPrepKind::Prepare => DEFAULT_LENIENT_PREPARATION_TIMEOUT, } } -fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecTimeoutKind) -> Duration { +/// To determine the amount of timeout time for the pvf execution. +/// +/// Backing subsystem +/// The amount of time to spend on execution during backing. +/// +/// Approval subsystem +/// The amount of time to spend on execution during approval or disputes. +/// This should be much longer than the backing execution timeout to ensure that in the +/// absence of extremely large disparities between hardware, blocks that pass backing are +/// considered executable by approval checkers or dispute participants. +fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecKind) -> Duration { if let Some(timeout) = executor_params.pvf_exec_timeout(kind) { return timeout } match kind { - PvfExecTimeoutKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT, - PvfExecTimeoutKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT, + PvfExecKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT, + PvfExecKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT, } } diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index af530a20c4e0ca61fcf44f4ce0e3121ffeab1363..11078580465263a35588fc860b6f4f7a53161cd3 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -377,7 +377,7 @@ impl ValidationBackend for MockValidateCandidateBackend { result } - async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<(), PrepareError> { unreachable!() } } @@ -436,7 +436,7 @@ fn candidate_validation_ok_is_ok() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -480,15 +480,15 @@ fn candidate_validation_bad_return_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -496,23 +496,20 @@ fn candidate_validation_bad_return_is_invalid() { assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::Timeout)); } -// Test that we vote valid if we get `AmbiguousWorkerDeath`, retry, and then succeed. 
-#[test] -fn candidate_validation_one_ambiguous_error_is_valid() { - let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - - let pov = PoV { block_data: BlockData(vec![1; 32]) }; - let head_data = HeadData(vec![1, 1, 1]); - let validation_code = ValidationCode(vec![2; 16]); - +fn perform_basic_checks_on_valid_candidate( + pov: &PoV, + validation_code: &ValidationCode, + validation_data: &PersistedValidationData, + head_data_hash: Hash, +) -> CandidateDescriptor { let descriptor = make_valid_candidate_descriptor( ParaId::from(1_u32), dummy_hash(), validation_data.hash(), pov.hash(), validation_code.hash(), - head_data.hash(), - dummy_hash(), + head_data_hash, + head_data_hash, Sr25519Keyring::Alice, ); @@ -523,6 +520,24 @@ fn candidate_validation_one_ambiguous_error_is_valid() { &validation_code.hash(), ); assert!(check.is_ok()); + descriptor +} + +// Test that we vote valid if we get `AmbiguousWorkerDeath`, retry, and then succeed. +#[test] +fn candidate_validation_one_ambiguous_error_is_valid() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); + let validation_code = ValidationCode(vec![2; 16]); + + let descriptor = perform_basic_checks_on_valid_candidate( + &pov, + &validation_code, + &validation_data, + head_data.hash(), + ); let validation_result = WasmValidationResult { head_data, @@ -546,7 +561,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Ok(validation_result), ]), validation_data.clone(), @@ -554,7 +569,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Approval, &Default::default(), )) .unwrap(); @@ -576,38 +591,26 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { let pov = PoV { block_data: BlockData(vec![1; 32]) }; let validation_code = ValidationCode(vec![2; 16]); - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - validation_code.hash(), - dummy_hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ); - - let check = perform_basic_checks( - &descriptor, - validation_data.max_pov_size, + let descriptor = perform_basic_checks_on_valid_candidate( &pov, - &validation_code.hash(), + &validation_code, + &validation_data, + dummy_hash(), ); - assert!(check.is_ok()); let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), ]), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Approval, &Default::default(), )) 
.unwrap(); @@ -615,58 +618,79 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::ExecutionError(_))); } -// Test that we retry on internal errors. +// Test that we retry for approval on internal errors. #[test] fn candidate_validation_retry_internal_errors() { - let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - - let pov = PoV { block_data: BlockData(vec![1; 32]) }; - let validation_code = ValidationCode(vec![2; 16]); - - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - validation_code.hash(), - dummy_hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ); - - let check = perform_basic_checks( - &descriptor, - validation_data.max_pov_size, - &pov, - &validation_code.hash(), + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Approval, + vec![ + Err(InternalValidationError::HostCommunication("foo".into()).into()), + // Throw an AJD error, we should still retry again. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath( + "baz".into(), + ))), + // Throw another internal error. + Err(InternalValidationError::HostCommunication("bar".into()).into()), + ], ); - assert!(check.is_ok()); - - let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; + assert_matches!(v, Err(ValidationFailed(s)) if s.contains("bar")); +} - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result_list(vec![ +// Test that we don't retry for backing on internal errors. +#[test] +fn candidate_validation_dont_retry_internal_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Backing, + vec![ Err(InternalValidationError::HostCommunication("foo".into()).into()), // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), // Throw another internal error. Err(InternalValidationError::HostCommunication("bar".into()).into()), - ]), - validation_data, - validation_code, - candidate_receipt, - Arc::new(pov), - ExecutorParams::default(), - PvfExecTimeoutKind::Backing, - &Default::default(), - )); + ], + ); - assert_matches!(v, Err(ValidationFailed(s)) if s.contains("bar")); + assert_matches!(v, Err(ValidationFailed(s)) if s.contains("foo")); } -// Test that we retry on panic errors. +// Test that we retry for approval on panic errors. #[test] fn candidate_validation_retry_panic_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Approval, + vec![ + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("foo".into()))), + // Throw an AWD error, we should still retry again. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), + // Throw another panic error. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("bar".into()))), + ], + ); + + assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "bar".to_string()); +} + +// Test that we don't retry for backing on panic errors. 
+#[test] +fn candidate_validation_dont_retry_panic_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Backing, + vec![ + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("foo".into()))), + // Throw an AWD error, we should still retry again. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), + // Throw another panic error. + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("bar".into()))), + ], + ); + + assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "foo".to_string()); +} + +fn candidate_validation_retry_on_error_helper( + exec_kind: PvfExecKind, + mock_errors: Vec>, +) -> Result { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; @@ -693,24 +717,16 @@ fn candidate_validation_retry_panic_errors() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic("foo".into()))), - // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), - // Throw another panic error. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic("bar".into()))), - ]), + return executor::block_on(validate_candidate_exhaustive( + MockValidateCandidateBackend::with_hardcoded_result_list(mock_errors), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + exec_kind, &Default::default(), )); - - assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "bar".to_string()); } #[test] @@ -742,15 +758,15 @@ fn candidate_validation_timeout_is_internal_error() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -795,7 +811,7 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -836,15 +852,15 @@ fn candidate_validation_code_mismatch_is_invalid() { let (_ctx, _ctx_handle) = test_helpers::make_subsystem_context::(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -901,7 +917,7 @@ fn compressed_code_works() { 
candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -952,7 +968,7 @@ fn code_decompression_failure_is_error() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -1004,7 +1020,7 @@ fn pov_decompression_failure_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -1012,11 +1028,11 @@ fn pov_decompression_failure_is_invalid() { } struct MockPreCheckBackend { - result: Result, + result: Result<(), PrepareError>, } impl MockPreCheckBackend { - fn with_hardcoded_result(result: Result) -> Self { + fn with_hardcoded_result(result: Result<(), PrepareError>) -> Self { Self { result } } } @@ -1032,7 +1048,7 @@ impl ValidationBackend for MockPreCheckBackend { unreachable!() } - async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<(), PrepareError> { self.result.clone() } } @@ -1049,7 +1065,7 @@ fn precheck_works() { let (check_fut, check_result) = precheck_pvf( ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(PrepareStats::default())), + MockPreCheckBackend::with_hardcoded_result(Ok(())), relay_parent, validation_code_hash, ) @@ -1111,7 +1127,7 @@ fn precheck_invalid_pvf_blob_compression() { let (check_fut, check_result) = precheck_pvf( ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(PrepareStats::default())), + MockPreCheckBackend::with_hardcoded_result(Ok(())), relay_parent, validation_code_hash, ) @@ -1216,7 +1232,7 @@ fn precheck_properly_classifies_outcomes() { inner(Err(PrepareError::Prevalidation("foo".to_owned())), PreCheckOutcome::Invalid); inner(Err(PrepareError::Preparation("bar".to_owned())), PreCheckOutcome::Invalid); - inner(Err(PrepareError::Panic("baz".to_owned())), PreCheckOutcome::Invalid); + inner(Err(PrepareError::JobError("baz".to_owned())), PreCheckOutcome::Invalid); inner(Err(PrepareError::TimedOut), PreCheckOutcome::Failed); inner(Err(PrepareError::IoErr("fizz".to_owned())), PreCheckOutcome::Failed); diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index 154fa20e75d0b1016a82758663bba27ff98dcbb9..32962c9bda43f0c0f0709712930b90d8573ac92c 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -6,13 +6,15 @@ edition.workspace = true license.workspace = true description = "The Chain API subsystem provides access to chain related utility functions like block number to hash conversions." 
+[lints] +workspace = true + [dependencies] futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -polkadot-primitives = { path = "../../../primitives" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-node-subsystem-types = { path = "../../subsystem-types" } sc-client-api = { path = "../../../../substrate/client/api" } sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } @@ -21,5 +23,7 @@ futures = { version = "0.3.21", features = ["thread-pool"] } maplit = "1.0.2" parity-scale-codec = "3.6.1" polkadot-node-primitives = { path = "../../primitives" } +polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-core = { path = "../../../../substrate/primitives/core" } +sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } diff --git a/polkadot/node/core/chain-api/src/lib.rs b/polkadot/node/core/chain-api/src/lib.rs index 9b25481d71860bbc76f19840397360b8e3ee102b..7fd5166310fec2641af2edaeb8a7580caf9512ad 100644 --- a/polkadot/node/core/chain-api/src/lib.rs +++ b/polkadot/node/core/chain-api/src/lib.rs @@ -35,13 +35,13 @@ use std::sync::Arc; use futures::prelude::*; use sc_client_api::AuxStore; -use sp_blockchain::HeaderBackend; +use futures::stream::StreamExt; use polkadot_node_subsystem::{ messages::ChainApiMessage, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemResult, }; -use polkadot_primitives::Block; +use polkadot_node_subsystem_types::ChainApiBackend; mod metrics; use self::metrics::Metrics; @@ -67,7 +67,7 @@ impl ChainApiSubsystem { #[overseer::subsystem(ChainApi, error = SubsystemError, prefix = self::overseer)] impl ChainApiSubsystem where - Client: HeaderBackend + AuxStore + 'static, + Client: ChainApiBackend + AuxStore + 'static, { fn start(self, ctx: Context) -> SpawnedSubsystem { let future = run::(ctx, self) @@ -83,7 +83,7 @@ async fn run( subsystem: ChainApiSubsystem, ) -> SubsystemResult<()> where - Client: HeaderBackend + AuxStore, + Client: ChainApiBackend + AuxStore, { loop { match ctx.recv().await? 
{ @@ -93,13 +93,15 @@ where FromOrchestra::Communication { msg } => match msg { ChainApiMessage::BlockNumber(hash, response_channel) => { let _timer = subsystem.metrics.time_block_number(); - let result = subsystem.client.number(hash).map_err(|e| e.to_string().into()); + let result = + subsystem.client.number(hash).await.map_err(|e| e.to_string().into()); subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, ChainApiMessage::BlockHeader(hash, response_channel) => { let _timer = subsystem.metrics.time_block_header(); - let result = subsystem.client.header(hash).map_err(|e| e.to_string().into()); + let result = + subsystem.client.header(hash).await.map_err(|e| e.to_string().into()); subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, @@ -113,46 +115,51 @@ where ChainApiMessage::FinalizedBlockHash(number, response_channel) => { let _timer = subsystem.metrics.time_finalized_block_hash(); // Note: we don't verify it's finalized - let result = subsystem.client.hash(number).map_err(|e| e.to_string().into()); + let result = + subsystem.client.hash(number).await.map_err(|e| e.to_string().into()); subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, ChainApiMessage::FinalizedBlockNumber(response_channel) => { let _timer = subsystem.metrics.time_finalized_block_number(); - let result = subsystem.client.info().finalized_number; - // always succeeds - subsystem.metrics.on_request(true); - let _ = response_channel.send(Ok(result)); + let result = subsystem + .client + .info() + .await + .map_err(|e| e.to_string().into()) + .map(|info| info.finalized_number); + subsystem.metrics.on_request(result.is_ok()); + let _ = response_channel.send(result); }, ChainApiMessage::Ancestors { hash, k, response_channel } => { let _timer = subsystem.metrics.time_ancestors(); gum::trace!(target: LOG_TARGET, hash=%hash, k=k, "ChainApiMessage::Ancestors"); - let mut hash = hash; - - let next_parent = core::iter::from_fn(|| { - let maybe_header = subsystem.client.header(hash); - match maybe_header { - // propagate the error - Err(e) => { - let e = e.to_string().into(); - Some(Err(e)) - }, - // fewer than `k` ancestors are available - Ok(None) => None, - Ok(Some(header)) => { - // stop at the genesis header. - if header.number == 0 { - None - } else { - hash = header.parent_hash; - Some(Ok(hash)) - } - }, - } - }); - - let result = next_parent.take(k).collect::, _>>(); + let next_parent_stream = futures::stream::unfold( + (hash, subsystem.client.clone()), + |(hash, client)| async move { + let maybe_header = client.header(hash).await; + match maybe_header { + // propagate the error + Err(e) => { + let e = e.to_string().into(); + Some((Err(e), (hash, client))) + }, + // fewer than `k` ancestors are available + Ok(None) => None, + Ok(Some(header)) => { + // stop at the genesis header. 
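// A toy, self-contained illustration of the ancestry walk performed by the `Ancestors`
// handler here: `stream::unfold` yields parent hashes lazily, `take(k)` bounds the
// walk, and `try_collect` stops at the first error. The in-memory `headers` map and
// `u8` "hashes" are stand-ins for the real `ChainApiBackend` client.
use std::collections::HashMap;

use futures::{stream, StreamExt, TryStreamExt};

fn main() {
    // hash -> (block number, parent hash); genesis is block 0.
    let headers: HashMap<u8, (u32, u8)> =
        [(3, (3, 2)), (2, (2, 1)), (1, (1, 0)), (0, (0, 0))].into_iter().collect();

    let ancestors = futures::executor::block_on(async {
        stream::unfold(3u8, |hash| {
            let headers = headers.clone();
            async move {
                match headers.get(&hash) {
                    // Propagate lookup errors as stream items; `try_collect` stops here.
                    None => Some((Err::<u8, String>(format!("unknown block {hash}")), hash)),
                    // Stop at the genesis header.
                    Some((0, _)) => None,
                    Some((_, parent)) => Some((Ok(*parent), *parent)),
                }
            }
        })
        .take(10)
        .try_collect::<Vec<_>>()
        .await
    });

    assert_eq!(ancestors, Ok(vec![2, 1, 0]));
}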
+ if header.number == 0 { + None + } else { + Some((Ok(header.parent_hash), (header.parent_hash, client))) + } + }, + } + }, + ); + + let result = next_parent_stream.take(k).try_collect().await; subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, diff --git a/polkadot/node/core/chain-api/src/tests.rs b/polkadot/node/core/chain-api/src/tests.rs index 331a4f9ba820a85307659494e3c2a14f4fee52f9..eae8f6fa4ac591c565930047e4dcf590c914947f 100644 --- a/polkadot/node/core/chain-api/src/tests.rs +++ b/polkadot/node/core/chain-api/src/tests.rs @@ -22,7 +22,8 @@ use std::collections::BTreeMap; use polkadot_node_primitives::BlockWeight; use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle}; -use polkadot_primitives::{BlockNumber, Hash, Header}; +use polkadot_node_subsystem_types::ChainApiBackend; +use polkadot_primitives::{Block, BlockNumber, Hash, Header}; use sp_blockchain::Info as BlockInfo; use sp_core::testing::TaskExecutor; @@ -110,7 +111,7 @@ fn last_key_value(map: &BTreeMap) -> (K, V) { map.iter().last().map(|(k, v)| (k.clone(), v.clone())).unwrap() } -impl HeaderBackend for TestClient { +impl sp_blockchain::HeaderBackend for TestClient { fn info(&self) -> BlockInfo { let genesis_hash = self.blocks.iter().next().map(|(h, _)| *h).unwrap(); let (best_hash, best_number) = last_key_value(&self.blocks); @@ -191,8 +192,8 @@ fn request_block_number() { async move { let zero = Hash::zero(); let test_cases = [ - (TWO, client.number(TWO).unwrap()), - (zero, client.number(zero).unwrap()), // not here + (TWO, client.number(TWO).await.unwrap()), + (zero, client.number(zero).await.unwrap()), // not here ]; for (hash, expected) in &test_cases { let (tx, rx) = oneshot::channel(); @@ -217,8 +218,10 @@ fn request_block_header() { test_harness(|client, mut sender| { async move { const NOT_HERE: Hash = Hash::repeat_byte(0x5); - let test_cases = - [(TWO, client.header(TWO).unwrap()), (NOT_HERE, client.header(NOT_HERE).unwrap())]; + let test_cases = [ + (TWO, client.header(TWO).await.unwrap()), + (NOT_HERE, client.header(NOT_HERE).await.unwrap()), + ]; for (hash, expected) in &test_cases { let (tx, rx) = oneshot::channel(); @@ -270,8 +273,8 @@ fn request_finalized_hash() { test_harness(|client, mut sender| { async move { let test_cases = [ - (1, client.hash(1).unwrap()), // not here - (2, client.hash(2).unwrap()), + (1, client.hash(1).await.unwrap()), // not here + (2, client.hash(2).await.unwrap()), ]; for (number, expected) in &test_cases { let (tx, rx) = oneshot::channel(); @@ -297,7 +300,7 @@ fn request_last_finalized_number() { async move { let (tx, rx) = oneshot::channel(); - let expected = client.info().finalized_number; + let expected = client.info().await.unwrap().finalized_number; sender .send(FromOrchestra::Communication { msg: ChainApiMessage::FinalizedBlockNumber(tx), diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 7678379870e0371de28eeb3d3807b90820504b87..6056ddd41cd710e5fdbb5f4ecbfd7f6819124f5e 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" futures-timer = "3" diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 
e2086db708f9a616ee348738fcf706aef86ccd92..8ec9bcbe07070cf01b077bf717a98cf835aa3176 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs index 837ad7856e735c47f38b90d85a98dbf3a39f5122..98c12bd509b4b13d0a49380baf55e98e1bd4ef1f 100644 --- a/polkadot/node/core/dispute-coordinator/src/import.rs +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -34,9 +34,9 @@ use polkadot_node_primitives::{ use polkadot_node_subsystem::overseer; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - CandidateReceipt, DisputeStatement, ExecutorParams, Hash, IndexedVec, SessionIndex, - SessionInfo, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, - ValidatorSignature, + CandidateHash, CandidateReceipt, DisputeStatement, ExecutorParams, Hash, IndexedVec, + SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, + ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; @@ -126,7 +126,9 @@ impl OwnVoteState { let our_valid_votes = controlled_indices .iter() .filter_map(|i| votes.valid.raw().get_key_value(i)) - .map(|(index, (kind, sig))| (*index, (DisputeStatement::Valid(*kind), sig.clone()))); + .map(|(index, (kind, sig))| { + (*index, (DisputeStatement::Valid(kind.clone()), sig.clone())) + }); let our_invalid_votes = controlled_indices .iter() .filter_map(|i| votes.invalid.get_key_value(i)) @@ -305,7 +307,7 @@ impl CandidateVoteState { DisputeStatement::Valid(valid_kind) => { let fresh = votes.valid.insert_vote( val_index, - *valid_kind, + valid_kind.clone(), statement.into_validator_signature(), ); if fresh { @@ -511,7 +513,7 @@ impl ImportResult { pub fn import_approval_votes( self, env: &CandidateEnvironment, - approval_votes: HashMap, + approval_votes: HashMap, ValidatorSignature)>, now: Timestamp, ) -> Self { let Self { @@ -525,19 +527,33 @@ impl ImportResult { let (mut votes, _) = new_state.into_old_state(); - for (index, sig) in approval_votes.into_iter() { + for (index, (candidate_hashes, sig)) in approval_votes.into_iter() { debug_assert!( { let pub_key = &env.session_info().validators.get(index).expect("indices are validated by approval-voting subsystem; qed"); - let candidate_hash = votes.candidate_receipt.hash(); let session_index = env.session_index(); - DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) - .check_signature(pub_key, candidate_hash, session_index, &sig) + candidate_hashes.contains(&votes.candidate_receipt.hash()) && DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes.clone())) + .check_signature(pub_key, *candidate_hashes.first().expect("Valid votes have at least one candidate; qed"), session_index, &sig) .is_ok() }, "Signature check for imported approval votes failed! This is a serious bug. Session: {:?}, candidate hash: {:?}, validator index: {:?}", env.session_index(), votes.candidate_receipt.hash(), index ); - if votes.valid.insert_vote(index, ValidDisputeStatementKind::ApprovalChecking, sig) { + if votes.valid.insert_vote( + index, + // There is a hidden dependency here between approval-voting and this subsystem. 
+ // We should be able to start emitting + // ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates only after: + // 1. Runtime have been upgraded to know about the new format. + // 2. All nodes have been upgraded to know about the new format. + // Once those two requirements have been met we should be able to increase + // max_approval_coalesce_count to values greater than 1. + if candidate_hashes.len() > 1 { + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes) + } else { + ValidDisputeStatementKind::ApprovalChecking + }, + sig, + ) { imported_valid_votes += 1; imported_approval_votes += 1; } diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index e44530b3f1bbab0995f850af7e951984bd1dc55b..d9cd4e39d3cb30c7ccb06bc9e9b28704a3a24c73 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -642,7 +642,7 @@ impl Initialized { }; debug_assert!( SignedDisputeStatement::new_checked( - DisputeStatement::Valid(valid_statement_kind), + DisputeStatement::Valid(valid_statement_kind.clone()), candidate_hash, session, validator_public.clone(), @@ -656,7 +656,7 @@ impl Initialized { ); let signed_dispute_statement = SignedDisputeStatement::new_unchecked_from_trusted_source( - DisputeStatement::Valid(valid_statement_kind), + DisputeStatement::Valid(valid_statement_kind.clone()), candidate_hash, session, validator_public, diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index e96fee8124099bf000b09863b1a46da98256c5e6..5067d3673da9b23c79edff0ef83a449359d78a2a 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -576,7 +576,7 @@ pub fn make_dispute_message( .next() .ok_or(DisputeMessageCreationError::NoOppositeVote)?; let other_vote = SignedDisputeStatement::new_checked( - DisputeStatement::Valid(*statement_kind), + DisputeStatement::Valid(statement_kind.clone()), *our_vote.candidate_hash(), our_vote.session_index(), validators diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index 90268516e9df01b021ca914ffe59a19b0893bc80..05ea7323af1419d770cb7b68e15ee7887ceeaab8 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -32,7 +32,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecTimeoutKind, SessionIndex, + BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecKind, SessionIndex, }; use crate::LOG_TARGET; @@ -386,7 +386,7 @@ async fn participate( candidate_receipt: req.candidate_receipt().clone(), pov: available_data.pov, executor_params: req.executor_params(), - exec_timeout_kind: PvfExecTimeoutKind::Approval, + exec_kind: PvfExecKind::Approval, response_sender: validation_tx, }) .await; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index 0aa0d772005112eac9d3580eab8bcab1985013cd..012df51d0cd3eac291e17354a45ae18e347b121d 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ 
b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -115,8 +115,8 @@ pub async fn participation_full_happy_path( assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_timeout_kind, response_sender, .. } - ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { if expected_commitments_hash != candidate_receipt.commitments_hash { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); } else { @@ -450,8 +450,8 @@ fn cast_invalid_vote_if_validation_fails_or_is_invalid() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. } - ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); }, "overseer did not receive candidate validation message", @@ -487,8 +487,8 @@ fn cast_invalid_vote_if_commitments_dont_match() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. } - ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); }, "overseer did not receive candidate validation message", @@ -524,8 +524,8 @@ fn cast_valid_vote_if_validation_passes() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. } - ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. 
} + ) if exec_kind == PvfExecKind::Approval => { response_sender.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); }, "overseer did not receive candidate validation message", diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 9254c2a851cea502ed8daccf434a68f48863de94..da449773fe8ff4bc1cbfac5d33a923f8d1b0426f 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -61,10 +61,11 @@ use polkadot_node_subsystem_test_helpers::{ make_buffered_subsystem_context, mock::new_leaf, TestSubsystemContextHandle, }; use polkadot_primitives::{ - ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, HeadData, - Header, IndexedVec, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SessionInfo, - SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + vstaging::NodeFeatures, ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, + CandidateHash, CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, + HeadData, Header, IndexedVec, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, + SessionInfo, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, + ValidatorSignature, }; use crate::{ @@ -352,6 +353,15 @@ impl TestState { let _ = tx.send(Ok(Some(ExecutorParams::default()))); } ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); } } @@ -651,7 +661,7 @@ fn make_candidate_included_event(candidate_receipt: CandidateReceipt) -> Candida pub async fn handle_approval_vote_request( ctx_handle: &mut VirtualOverseer, expected_hash: &CandidateHash, - votes_to_send: HashMap, + votes_to_send: HashMap, ValidatorSignature)>, ) { assert_matches!( ctx_handle.recv().await, @@ -858,9 +868,12 @@ fn approval_vote_import_works() { .await; gum::trace!("After sending `ImportStatements`"); - let approval_votes = [(ValidatorIndex(4), approval_vote.into_validator_signature())] - .into_iter() - .collect(); + let approval_votes = [( + ValidatorIndex(4), + (vec![candidate_receipt1.hash()], approval_vote.into_validator_signature()), + )] + .into_iter() + .collect(); handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, approval_votes) .await; @@ -3492,6 +3505,14 @@ fn session_info_is_requested_only_once() { let _ = tx.send(Ok(Some(ExecutorParams::default()))); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); test_state }) }); @@ -3552,6 +3573,15 @@ fn session_info_big_jump_works() { let _ = tx.send(Ok(Some(ExecutorParams::default()))); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); } test_state }) @@ -3612,6 +3642,14 @@ fn session_info_small_jump_works() { let _ = tx.send(Ok(Some(ExecutorParams::default()))); } ); + assert_matches!( + virtual_overseer.recv().await, + 
AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); } test_state }) diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index c783f21e24df335838259952d6cc0831193e6184..2384020025181d9dd0cae04c54246193fb6278d3 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -6,12 +6,15 @@ edition.workspace = true license.workspace = true description = "Parachains inherent data provider for Polkadot node" +[lints] +workspace = true + [dependencies] futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } thiserror = "1.0.48" -async-trait = "0.1.57" +async-trait = "0.1.74" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 9db1259e61d0105d1156f68fe1702f2505bf30c6..e6b6aa5e15d72e758a5acd30a6771756e6ee78a9 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments." +[lints] +workspace = true + [dependencies] futures = "0.3.19" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index d2cd23fe95fc1d8638238c8e30b5227da7e883a0..7e369245c0e1587b405eb4516343610aa8c9a320 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -101,11 +101,8 @@ fn test_harness>( let mut view = View::new(); let subsystem = async move { - loop { - match run_iteration(&mut context, &mut view, &Metrics(None)).await { - Ok(()) => break, - Err(e) => panic!("{:?}", e), - } + if let Err(e) = run_iteration(&mut context, &mut view, &Metrics(None)).await { + panic!("{:?}", e); } view diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 13ecb356f2c467fc8dad85ade54f71cf981d8e38..2d18bd29c1c097cccf5a94515a55d232b9263032 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -1,11 +1,14 @@ [package] name = "polkadot-node-core-provisioner" version = "1.0.0" -description="Responsible for assembling a relay chain block from a set of available parachain candidates" +description = "Responsible for assembling a relay chain block from a set of available parachain candidates" authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } futures = "0.3.21" diff --git a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index 096b73d271a8de637856b85836761c90593d96ee..cb55ce39bc89f3eeee3d1cb319351f659eace478 100644 --- a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -221,7 
+221,7 @@ where votes.valid.retain(|validator_idx, (statement_kind, _)| { is_vote_worth_to_keep( validator_idx, - DisputeStatement::Valid(*statement_kind), + DisputeStatement::Valid(statement_kind.clone()), &onchain_state, ) }); diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 0326a20e5a52e7a07fe96a8c721af1c6ac86e5ef..274d8ee43bf1338094306aa78bb1991da5ff7477 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" thiserror = "1.0.48" diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 430f7cd5e8ef1679b77c94f5b1037e55a8cf172c..2642377b6e6266c4804ba75cfe15044a1cb2e4e4 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -6,8 +6,12 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] always-assert = "0.1" +blake3 = "1.5" cfg-if = "1.0" futures = "0.3.21" futures-timer = "3.0.2" @@ -18,6 +22,7 @@ pin-project = "1.0.9" rand = "0.8.5" slotmap = "1.0" tempfile = "3.3.0" +thiserror = "1.0.31" tokio = { version = "1.24.2", features = ["fs", "process"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -27,6 +32,7 @@ polkadot-core-primitives = { path = "../../../core-primitives" } polkadot-node-core-pvf-common = { path = "common" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-primitives = { path = "../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } polkadot-primitives = { path = "../../../primitives" } sp-core = { path = "../../../../substrate/primitives/core" } @@ -37,7 +43,7 @@ polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = tr [dev-dependencies] assert_matches = "1.4.0" -criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support", "async_tokio"] } +criterion = { version = "0.4.0", default-features = false, features = ["async_tokio", "cargo_bench_support"] } hex-literal = "0.4.1" polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } @@ -52,6 +58,7 @@ halt = { package = "test-parachain-halt", path = "../../../parachain/test-parach [target.'cfg(target_os = "linux")'.dev-dependencies] procfs = "0.16.0" rusty-fork = "0.3.0" +sc-sysinfo = { path = "../../../../substrate/client/sysinfo" } [[bench]] name = "host_prepare_rococo_runtime" @@ -59,7 +66,7 @@ harness = false [features] ci-only-tests = [] -jemalloc-allocator = [ "polkadot-node-core-pvf-common/jemalloc-allocator" ] +jemalloc-allocator = ["polkadot-node-core-pvf-common/jemalloc-allocator"] # This feature is used to export test code to other crates without putting it in the production build. test-utils = [ "polkadot-node-core-pvf-execute-worker", diff --git a/polkadot/node/core/pvf/README.md b/polkadot/node/core/pvf/README.md new file mode 100644 index 0000000000000000000000000000000000000000..796e17c05faa47ceec455125ae29f7943ffa5740 --- /dev/null +++ b/polkadot/node/core/pvf/README.md @@ -0,0 +1,47 @@ +# PVF Host + +This is the PVF host, responsible for responding to requests from Candidate +Validation and spawning worker tasks to fulfill those requests. 
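Editor's note: a minimal sketch of how a caller can drive this host, loosely following the `TestHost` benchmark harness further down in this diff. `Config::new`, `start`, `PvfPrepData::from_code` and the oneshot-channel result passing are taken from that benchmark code; the method name `precheck_pvf` on the host handle, the `PrepareJobKind::Prechecking` variant, the timeout, and the worker paths are assumptions for illustration and may not match the crate exactly.

```rust
// Illustrative sketch only: mirrors the benchmark harness in this diff, not a supported API.
use std::time::Duration;

use polkadot_node_core_pvf::{start, Config, Metrics, PrepareJobKind, PvfPrepData};
use polkadot_primitives::ExecutorParams;

async fn precheck_example(code: Vec<u8>) {
    // The host keeps compiled artifacts in a cache directory.
    let cache_dir = tempfile::tempdir().unwrap();

    // `false` here disables Secure Validator Mode, as in the benchmark setup.
    // Worker binary paths are placeholders.
    let config = Config::new(
        cache_dir.path().to_owned(),
        None,
        false,
        "path/to/prepare-worker".into(),
        "path/to/execute-worker".into(),
    );

    // `start` returns the host handle plus a background task that must be spawned.
    let (mut host, task) = start(config, Metrics::default()).await.unwrap();
    tokio::spawn(task);

    // Results come back over a oneshot channel, as in `TestHost::precheck_pvf`.
    let (result_tx, result_rx) = futures::channel::oneshot::channel();
    host.precheck_pvf(
        PvfPrepData::from_code(
            code,
            ExecutorParams::default(),
            Duration::from_secs(60),
            PrepareJobKind::Prechecking,
        ),
        result_tx,
    )
    .await
    .unwrap();

    // A precheck result indicates whether preparation succeeded.
    let _result = result_rx.await.unwrap();
}
```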
+ +See also: + +- for more information: [the Implementer's Guide][impl-guide] +- for an explanation of terminology: [the Glossary][glossary] + +## Running basic tests + +Running `cargo test` in the `pvf/` directory will run unit and integration +tests. + +**Note:** some tests run only under Linux, amd64, and/or with the +`ci-only-tests` feature enabled. + +See the general [Testing][testing] instructions for more information on +**running tests** and **observing logs**. + +## Running a test-network with zombienet + +Since this crate is consensus-critical, for major changes it is highly +recommended to run a test-network. See the "Behavior tests" section of the +[Testing][testing] docs for full instructions. + +To run the PVF-specific zombienet test: + +```sh +RUST_LOG=parachain::pvf=trace zombienet --provider=native spawn zombienet_tests/functional/0001-parachains-pvf.toml +``` + +## Testing on Linux + +Some of the PVF functionality, especially related to security, is Linux-only, +and some is amd64-only. If you touch anything security-related, make sure to +test on Linux amd64! If you're on a Mac, you can either run a VM or you can hire +a VPS and use the open-source tool [EternalTerminal][et] to connect to it.[^et] + +[^et]: Unlike ssh, ET preserves your session across disconnects, and unlike +another popular persistent shell, mosh, it allows scrollback. + +[impl-guide]: https://paritytech.github.io/polkadot-sdk/book/pvf-prechecking.html#summary +[glossary]: https://paritytech.github.io/polkadot-sdk/book/glossary.html +[testing]: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/doc/testing.md +[et]: https://github.com/MisterTea/EternalTerminal diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index acd80526262c00862d3d64081ed1d6d588e2c458..2aea21361a3e8fcb4114eb533539f09b1d660fc7 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -18,8 +18,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion, SamplingMode}; use polkadot_node_core_pvf::{ - start, testing, Config, Metrics, PrepareError, PrepareJobKind, PrepareStats, PvfPrepData, - ValidationHost, + start, testing, Config, Metrics, PrepareError, PrepareJobKind, PvfPrepData, ValidationHost, }; use polkadot_primitives::ExecutorParams; use rococo_runtime::WASM_BINARY; @@ -29,6 +28,9 @@ use tokio::{runtime::Handle, sync::Mutex}; const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); struct TestHost { + // Keep a reference to the tempdir otherwise it gets deleted on drop. 
+ #[allow(dead_code)] + cache_dir: tempfile::TempDir, host: Mutex, } @@ -37,26 +39,27 @@ impl TestHost { where F: FnOnce(&mut Config), { - let (prepare_worker_path, execute_worker_path) = testing::get_and_check_worker_paths(); + let (prepare_worker_path, execute_worker_path) = testing::build_workers_and_get_paths(); let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( cache_dir.path().to_owned(), None, + false, prepare_worker_path, execute_worker_path, ); f(&mut config); - let (host, task) = start(config, Metrics::default()).await; + let (host, task) = start(config, Metrics::default()).await.unwrap(); let _ = handle.spawn(task); - Self { host: Mutex::new(host) } + Self { host: Mutex::new(host), cache_dir } } async fn precheck_pvf( &self, code: &[u8], executor_params: ExecutorParams, - ) -> Result { + ) -> Result<(), PrepareError> { let (result_tx, result_rx) = futures::channel::oneshot::channel(); let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) diff --git a/polkadot/node/core/pvf/build.rs b/polkadot/node/core/pvf/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..e01cc6deecc22c574a929c96f0684d777764149e --- /dev/null +++ b/polkadot/node/core/pvf/build.rs @@ -0,0 +1,21 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +fn main() { + if let Ok(profile) = std::env::var("PROFILE") { + println!(r#"cargo:rustc-cfg=build_type="{}""#, profile); + } +} diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 7dc8d307026e09f80e8ce0c4d168aa3146714cd7..c5c09300e8af951513ab798cf60312800b309bbf 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -6,12 +6,16 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] cfg-if = "1.0" cpu-time = "1.0.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" +thiserror = "1.0.31" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -30,12 +34,14 @@ sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.3.0" seccompiler = "0.4.0" -thiserror = "1.0.31" [dev-dependencies] assert_matches = "1.4.0" tempfile = "3.3.0" +[build-dependencies] +substrate-build-script-utils = { path = "../../../../../substrate/utils/build-script-utils" } + [features] # This feature is used to export test code to other crates without putting it in the production build. 
test-utils = [] diff --git a/polkadot/node/core/pvf/common/build.rs b/polkadot/node/core/pvf/common/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..5531ad411da80ebb51cec8e84f675495edf22bdd --- /dev/null +++ b/polkadot/node/core/pvf/common/build.rs @@ -0,0 +1,19 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +fn main() { + substrate_build_script_utils::generate_wasmtime_version(); +} diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index 82b56562d8cc38463e8c029aa21ef3e1a68e992c..7db7f9a5945179e16733c6e50b157d36369b61e1 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -14,16 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::prepare::PrepareStats; +use crate::prepare::{PrepareSuccess, PrepareWorkerSuccess}; use parity_scale_codec::{Decode, Encode}; use std::fmt; -/// Result of PVF preparation performed by the validation host. Contains stats about the preparation -/// if successful -pub type PrepareResult = Result; +/// Result of PVF preparation from a worker, with checksum of the compiled PVF and stats of the +/// preparation if successful. +pub type PrepareWorkerResult = Result; + +/// Result of PVF preparation propagated all the way back to the host, with path to the concluded +/// artifact and stats of the preparation if successful. +pub type PrepareResult = Result; + +/// Result of prechecking PVF performed by the validation host. Contains stats about the preparation +/// if successful. +pub type PrecheckResult = Result<(), PrepareError>; /// An error that occurred during the prepare part of the PVF pipeline. -// Codec indexes are intended to stabilize pre-encoded payloads (see `OOM_PAYLOAD` below) +// Codec indexes are intended to stabilize pre-encoded payloads (see `OOM_PAYLOAD`) #[derive(Debug, Clone, Encode, Decode)] pub enum PrepareError { /// During the prevalidation stage of preparation an issue was found with the PVF. @@ -35,9 +43,9 @@ pub enum PrepareError { /// Instantiation of the WASM module instance failed. #[codec(index = 2)] RuntimeConstruction(String), - /// An unexpected panic has occurred in the preparation worker. + /// An unexpected error has occurred in the preparation job. #[codec(index = 3)] - Panic(String), + JobError(String), /// Failed to prepare the PVF due to the time limit. #[codec(index = 4)] TimedOut, @@ -48,12 +56,12 @@ pub enum PrepareError { /// The temporary file for the artifact could not be created at the given cache path. This /// state is reported by the validation host (not by the worker). #[codec(index = 6)] - CreateTmpFileErr(String), + CreateTmpFile(String), /// The response from the worker is received, but the file cannot be renamed (moved) to the /// final destination location. 
This state is reported by the validation host (not by the /// worker). #[codec(index = 7)] - RenameTmpFileErr { + RenameTmpFile { err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. @@ -68,11 +76,14 @@ pub enum PrepareError { /// reported by the validation host (not by the worker). #[codec(index = 9)] ClearWorkerDir(String), + /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. + JobDied { err: String, job_pid: i32 }, + #[codec(index = 10)] + /// Some error occurred when interfacing with the kernel. + #[codec(index = 11)] + Kernel(String), } -/// Pre-encoded length-prefixed `PrepareResult::Err(PrepareError::OutOfMemory)` -pub const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08"; - impl PrepareError { /// Returns whether this is a deterministic error, i.e. one that should trigger reliably. Those /// errors depend on the PVF itself and the sc-executor/wasmtime logic. @@ -83,12 +94,15 @@ impl PrepareError { pub fn is_deterministic(&self) -> bool { use PrepareError::*; match self { - Prevalidation(_) | Preparation(_) | Panic(_) | OutOfMemory => true, - TimedOut | + Prevalidation(_) | Preparation(_) | JobError(_) | OutOfMemory => true, IoErr(_) | - CreateTmpFileErr(_) | - RenameTmpFileErr { .. } | - ClearWorkerDir(_) => false, + JobDied { .. } | + CreateTmpFile(_) | + RenameTmpFile { .. } | + ClearWorkerDir(_) | + Kernel(_) => false, + // Can occur due to issues with the PVF, but also due to factors like local load. + TimedOut => false, // Can occur due to issues with the PVF, but also due to local errors. RuntimeConstruction(_) => false, } @@ -102,14 +116,17 @@ impl fmt::Display for PrepareError { Prevalidation(err) => write!(f, "prevalidation: {}", err), Preparation(err) => write!(f, "preparation: {}", err), RuntimeConstruction(err) => write!(f, "runtime construction: {}", err), - Panic(err) => write!(f, "panic: {}", err), + JobError(err) => write!(f, "panic: {}", err), TimedOut => write!(f, "prepare: timeout"), IoErr(err) => write!(f, "prepare: io error while receiving response: {}", err), - CreateTmpFileErr(err) => write!(f, "prepare: error creating tmp file: {}", err), - RenameTmpFileErr { err, src, dest } => + JobDied { err, job_pid } => + write!(f, "prepare: prepare job with pid {job_pid} died: {err}"), + CreateTmpFile(err) => write!(f, "prepare: error creating tmp file: {}", err), + RenameTmpFile { err, src, dest } => write!(f, "prepare: error renaming tmp file ({:?} -> {:?}): {}", src, dest, err), OutOfMemory => write!(f, "prepare: out of memory"), ClearWorkerDir(err) => write!(f, "prepare: error clearing worker cache: {}", err), + Kernel(err) => write!(f, "prepare: error interfacing with the kernel: {}", err), } } } @@ -133,9 +150,9 @@ pub enum InternalValidationError { // conversion to `Option`. path: Option, }, - /// An error occurred in the CPU time monitor thread. Should be totally unrelated to - /// validation. - CpuTimeMonitorThread(String), + /// Some error occurred when interfacing with the kernel. + Kernel(String), + /// Some non-deterministic preparation error occurred. 
NonDeterministicPrepareError(PrepareError), } @@ -158,17 +175,8 @@ impl fmt::Display for InternalValidationError { "validation: host could not clear the worker cache ({:?}) after a job: {}", path, err ), - CpuTimeMonitorThread(err) => - write!(f, "validation: an error occurred in the CPU time monitor thread: {}", err), + Kernel(err) => write!(f, "validation: error interfacing with the kernel: {}", err), NonDeterministicPrepareError(err) => write!(f, "validation: prepare: {}", err), } } } - -#[test] -fn pre_encoded_payloads() { - let oom_enc = PrepareResult::Err(PrepareError::OutOfMemory).encode(); - let mut oom_payload = oom_enc.len().to_le_bytes().to_vec(); - oom_payload.extend(oom_enc); - assert_eq!(oom_payload, OOM_PAYLOAD); -} diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index b89ab089af1c02eba401517e8248c292dd7040f8..aa1c1c5396823c5f313f007724522adb67ae44cb 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -28,9 +28,9 @@ pub struct Handshake { pub executor_params: ExecutorParams, } -/// The response from an execution job on the worker. +/// The response from the execution worker. #[derive(Debug, Encode, Decode)] -pub enum Response { +pub enum WorkerResponse { /// The job completed successfully. Ok { /// The result of parachain validation. @@ -41,14 +41,38 @@ pub enum Response { /// The candidate is invalid. InvalidCandidate(String), /// The job timed out. - TimedOut, - /// An unexpected panic has occurred in the execution worker. - Panic(String), + JobTimedOut, + /// The job process has died. We must kill the worker just in case. + /// + /// We cannot treat this as an internal error because malicious code may have killed the job. + /// We still retry it, because in the non-malicious case it is likely spurious. + JobDied { err: String, job_pid: i32 }, + /// An unexpected error occurred in the job process, e.g. failing to spawn a thread, panic, + /// etc. + /// + /// Because malicious code can cause a job error, we must not treat it as an internal error. We + /// still retry it, because in the non-malicious case it is likely spurious. + JobError(String), + /// Some internal error occurred. InternalError(InternalValidationError), } -impl Response { +/// The result of a job on the execution worker. +pub type JobResult = Result; + +/// The successful response from a job on the execution worker. +#[derive(Debug, Encode, Decode)] +pub enum JobResponse { + Ok { + /// The result of parachain validation. + result_descriptor: ValidationResult, + }, + /// The candidate is invalid. + InvalidCandidate(String), +} + +impl JobResponse { /// Creates an invalid response from a context `ctx` and a message `msg` (which can be empty). pub fn format_invalid(ctx: &'static str, msg: &str) -> Self { if msg.is_empty() { @@ -58,3 +82,18 @@ impl Response { } } } + +/// An unexpected error occurred in the execution job process. Because this comes from the job, +/// which executes untrusted code, this error must likewise be treated as untrusted. That is, we +/// cannot raise an internal error based on this. 
+#[derive(thiserror::Error, Debug, Encode, Decode)] +pub enum JobError { + #[error("The job timed out")] + TimedOut, + #[error("An unexpected panic has occurred in the execution job: {0}")] + Panic(String), + #[error("Could not spawn the requested thread: {0}")] + CouldNotSpawnThread(String), + #[error("An error occurred in the CPU time monitor thread: {0}")] + CpuTimeMonitorThread(String), +} diff --git a/polkadot/node/core/pvf/common/src/executor_intf.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs similarity index 95% rename from polkadot/node/core/pvf/common/src/executor_intf.rs rename to polkadot/node/core/pvf/common/src/executor_interface.rs index 3a1d3ac1ba07154fe025a65340c714a5a7396e94..e634940dbe65458d08d143978c9e45fd087f2e32 100644 --- a/polkadot/node/core/pvf/common/src/executor_intf.rs +++ b/polkadot/node/core/pvf/common/src/executor_interface.rs @@ -140,8 +140,7 @@ pub unsafe fn create_runtime_from_artifact_bytes( executor_params: &ExecutorParams, ) -> Result { let mut config = DEFAULT_CONFIG.clone(); - config.semantics = - params_to_wasmtime_semantics(executor_params).map_err(|err| WasmError::Other(err))?; + config.semantics = params_to_wasmtime_semantics(executor_params); sc_executor_wasmtime::create_runtime_from_artifact_bytes::( compiled_artifact_blob, @@ -149,13 +148,12 @@ pub unsafe fn create_runtime_from_artifact_bytes( ) } -pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result { +pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Semantics { let mut sem = DEFAULT_CONFIG.semantics.clone(); - let mut stack_limit = if let Some(stack_limit) = sem.deterministic_stack_limit.clone() { - stack_limit - } else { - return Err("No default stack limit set".to_owned()) - }; + let mut stack_limit = sem + .deterministic_stack_limit + .expect("There is a comment to not change the default stack limit; it should always be available; qed") + .clone(); for p in par.iter() { match p { @@ -172,16 +170,14 @@ pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result Result { + // Construct the runtime blob and do some basic checks for consistency. let blob = RuntimeBlob::new(code)?; - // It's assumed this function will take care of any prevalidation logic - // that needs to be done. - // - // Do nothing for now. + // In the future this function should take care of any further prevalidation logic. 
Ok(blob) } @@ -191,8 +187,7 @@ pub fn prepare( blob: RuntimeBlob, executor_params: &ExecutorParams, ) -> Result, sc_executor_common::error::WasmError> { - let semantics = params_to_wasmtime_semantics(executor_params) - .map_err(|e| sc_executor_common::error::WasmError::Other(e))?; + let semantics = params_to_wasmtime_semantics(executor_params); sc_executor_wasmtime::prepare_runtime_artifact(blob, &semantics) } diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index e2211b97d87b3063c56c767dffb10b365f753d25..abebd06f71a45738402909a53f795a75867e58d7 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -18,7 +18,7 @@ pub mod error; pub mod execute; -pub mod executor_intf; +pub mod executor_interface; pub mod prepare; pub mod pvf; pub mod worker; @@ -31,6 +31,9 @@ pub use sp_tracing; const LOG_TARGET: &str = "parachain::pvf-common"; +pub const RUNTIME_VERSION: &str = env!("SUBSTRATE_WASMTIME_VERSION"); + +use parity_scale_codec::{Decode, Encode}; use std::{ io::{self, Read, Write}, mem, @@ -45,16 +48,25 @@ pub mod tests { } /// Status of security features on the current system. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq, Eq, Encode, Decode)] pub struct SecurityStatus { + /// Whether Secure Validator Mode is enabled. This mode enforces that all required security + /// features are present. All features are enabled on a best-effort basis regardless. + pub secure_validator_mode: bool, /// Whether the landlock features we use are fully available on this system. pub can_enable_landlock: bool, /// Whether the seccomp features we use are fully available on this system. pub can_enable_seccomp: bool, - // Whether we are able to unshare the user namespace and change the filesystem root. + /// Whether we are able to unshare the user namespace and change the filesystem root. pub can_unshare_user_namespace_and_change_root: bool, } +/// A handshake with information for the worker. +#[derive(Debug, Encode, Decode)] +pub struct WorkerHandshake { + pub security_status: SecurityStatus, +} + /// Write some data prefixed by its length into `w`. Sync version of `framed_send` to avoid /// dependency on tokio. pub fn framed_send_blocking(w: &mut (impl Write + Unpin), buf: &[u8]) -> io::Result<()> { diff --git a/polkadot/node/core/pvf/common/src/prepare.rs b/polkadot/node/core/pvf/common/src/prepare.rs index 4436ebe4861e3ae095c9b6494316c885dfc50cd0..28ab682ec136d962364019f2623f80d8e7ad6194 100644 --- a/polkadot/node/core/pvf/common/src/prepare.rs +++ b/polkadot/node/core/pvf/common/src/prepare.rs @@ -15,6 +15,25 @@ // along with Polkadot. If not, see . use parity_scale_codec::{Decode, Encode}; +use std::path::PathBuf; + +/// Result from prepare worker if successful. +#[derive(Debug, Clone, Default, Encode, Decode)] +pub struct PrepareWorkerSuccess { + /// Checksum of the compiled PVF. + pub checksum: String, + /// Stats of the current preparation run. + pub stats: PrepareStats, +} + +/// Result of PVF preparation if successful. +#[derive(Debug, Clone, Default)] +pub struct PrepareSuccess { + /// Canonical path to the compiled artifact. + pub path: PathBuf, + /// Stats of the current preparation run. + pub stats: PrepareStats, +} /// Preparation statistics, including the CPU time and memory taken. 
#[derive(Debug, Clone, Default, Encode, Decode)] diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 0cc86434c19526396b67565bee30c3a38dae9ac6..2d8f6430187b2505e82517a0b93daf30b4e3a504 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -115,7 +115,7 @@ impl fmt::Debug for PvfPrepData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "Pvf {{ code, code_hash: {:?}, executor_params: {:?}, prep_timeout: {:?} }}", + "Pvf {{ code: [...], code_hash: {:?}, executor_params: {:?}, prep_timeout: {:?} }}", self.code_hash, self.executor_params, self.prep_timeout ) } diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index 274a2fc80397bdeff1b66c790210b57c42f7f645..5e7deb5ca782e91ad19dd492e013c43fd12a9237 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -18,9 +18,10 @@ pub mod security; -use crate::{SecurityStatus, LOG_TARGET}; +use crate::{framed_recv_blocking, WorkerHandshake, LOG_TARGET}; use cpu_time::ProcessTime; use futures::never::Never; +use parity_scale_codec::Decode; use std::{ any::Any, fmt, io, @@ -50,8 +51,6 @@ macro_rules! decl_worker_main { #[cfg(target_os = "linux")] use $crate::worker::security; - // TODO: Remove this dependency, and `pub use sp_tracing` in `lib.rs`. - // See . $crate::sp_tracing::try_init_simple(); let worker_pid = std::process::id(); @@ -79,27 +78,37 @@ macro_rules! decl_worker_main { "--check-can-enable-landlock" => { #[cfg(target_os = "linux")] - let status = if security::landlock::check_is_fully_enabled() { 0 } else { -1 }; + let status = if let Err(err) = security::landlock::check_is_fully_enabled() { + // Write the error to stderr, log it on the host-side. + eprintln!("{}", err); + -1 + } else { + 0 + }; #[cfg(not(target_os = "linux"))] let status = -1; std::process::exit(status) }, "--check-can-enable-seccomp" => { #[cfg(all(target_os = "linux", target_arch = "x86_64"))] - let status = if security::seccomp::check_is_fully_enabled() { 0 } else { -1 }; + let status = if let Err(err) = security::seccomp::check_is_fully_enabled() { + // Write the error to stderr, log it on the host-side. + eprintln!("{}", err); + -1 + } else { + 0 + }; #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))] let status = -1; std::process::exit(status) }, "--check-can-unshare-user-namespace-and-change-root" => { #[cfg(target_os = "linux")] - let status = if let Err(err) = security::unshare_user_namespace_and_change_root( - $crate::worker::WorkerKind::CheckPivotRoot, - worker_pid, - // We're not accessing any files, so we can try to pivot_root in the temp - // dir without conflicts with other processes. - &std::env::temp_dir(), - ) { + let cache_path_tempdir = std::path::Path::new(&args[2]); + #[cfg(target_os = "linux")] + let status = if let Err(err) = + security::change_root::check_is_fully_enabled(&cache_path_tempdir) + { // Write the error to stderr, log it on the host-side. eprintln!("{}", err); -1 @@ -107,11 +116,7 @@ macro_rules! decl_worker_main { 0 }; #[cfg(not(target_os = "linux"))] - let status = { - // Write the error to stderr, log it on the host-side. - eprintln!("not available on macos"); - -1 - }; + let status = -1; std::process::exit(status) }, @@ -134,9 +139,6 @@ macro_rules! 
decl_worker_main { let mut socket_path = None; let mut worker_dir_path = None; let mut node_version = None; - let mut can_enable_landlock = false; - let mut can_enable_seccomp = false; - let mut can_unshare_user_namespace_and_change_root = false; let mut i = 2; while i < args.len() { @@ -153,10 +155,6 @@ macro_rules! decl_worker_main { node_version = Some(args[i + 1].as_str()); i += 1 }, - "--can-enable-landlock" => can_enable_landlock = true, - "--can-enable-seccomp" => can_enable_seccomp = true, - "--can-unshare-user-namespace-and-change-root" => - can_unshare_user_namespace_and_change_root = true, arg => panic!("Unexpected argument found: {}", arg), } i += 1; @@ -167,19 +165,8 @@ macro_rules! decl_worker_main { let socket_path = std::path::Path::new(socket_path).to_owned(); let worker_dir_path = std::path::Path::new(worker_dir_path).to_owned(); - let security_status = $crate::SecurityStatus { - can_enable_landlock, - can_enable_seccomp, - can_unshare_user_namespace_and_change_root, - }; - - $entrypoint( - socket_path, - worker_dir_path, - node_version, - Some($worker_version), - security_status, - ); + + $entrypoint(socket_path, worker_dir_path, node_version, Some($worker_version)); } }; } @@ -205,67 +192,75 @@ impl fmt::Display for WorkerKind { } } -// The worker version must be passed in so that we accurately get the version of the worker, and not -// the version that this crate was compiled with. -pub fn worker_event_loop( +// Some fields are only used for logging, and dead-code analysis ignores Debug. +#[allow(dead_code)] +#[derive(Debug)] +pub struct WorkerInfo { + pid: u32, + kind: WorkerKind, + version: Option, + worker_dir_path: PathBuf, +} + +// NOTE: The worker version must be passed in so that we accurately get the version of the worker, +// and not the version that this crate was compiled with. +// +// NOTE: This must not spawn any threads due to safety requirements in `event_loop` and to avoid +// errors in [`security::change_root::try_restrict`]. +// +/// Initializes the worker process, then runs the given event loop, which spawns a new job process +/// to securely handle each incoming request. +pub fn run_worker( worker_kind: WorkerKind, socket_path: PathBuf, - #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] mut worker_dir_path: PathBuf, + worker_dir_path: PathBuf, node_version: Option<&str>, worker_version: Option<&str>, - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] security_status: &SecurityStatus, mut event_loop: F, ) where F: FnMut(UnixStream, PathBuf) -> io::Result, { - let worker_pid = std::process::id(); + #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] + let mut worker_info = WorkerInfo { + pid: std::process::id(), + kind: worker_kind, + version: worker_version.map(|v| v.to_string()), + worker_dir_path, + }; gum::debug!( target: LOG_TARGET, - %worker_pid, + ?worker_info, ?socket_path, - ?worker_dir_path, - ?security_status, "starting pvf worker ({})", - worker_kind + worker_info.kind ); // Check for a mismatch between the node and worker versions. 
- if let (Some(node_version), Some(worker_version)) = (node_version, worker_version) { + if let (Some(node_version), Some(worker_version)) = (node_version, &worker_info.version) { if node_version != worker_version { gum::error!( target: LOG_TARGET, - %worker_kind, - %worker_pid, + ?worker_info, %node_version, - %worker_version, "Node and worker version mismatch, node needs restarting, forcing shutdown", ); kill_parent_node_in_emergency(); - worker_shutdown_message(worker_kind, worker_pid, "Version mismatch"); - return + worker_shutdown(worker_info, "Version mismatch"); } } // Make sure that we can read the worker dir path, and log its contents. let entries = || -> Result, io::Error> { - std::fs::read_dir(&worker_dir_path)? + std::fs::read_dir(&worker_info.worker_dir_path)? .map(|res| res.map(|e| e.file_name())) .collect() }(); match entries { Ok(entries) => - gum::trace!(target: LOG_TARGET, %worker_pid, ?worker_dir_path, "content of worker dir: {:?}", entries), + gum::trace!(target: LOG_TARGET, ?worker_info, "content of worker dir: {:?}", entries), Err(err) => { - gum::error!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, - "Could not read worker dir: {}", - err.to_string() - ); - worker_shutdown_message(worker_kind, worker_pid, &err.to_string()); - return + let err = format!("Could not read worker dir: {}", err.to_string()); + worker_shutdown_error(worker_info, &err); }, } @@ -275,23 +270,20 @@ pub fn worker_event_loop( let _ = std::fs::remove_file(&socket_path); Ok(stream) }(); - let stream = match stream { - Ok(s) => s, - Err(err) => { - gum::error!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - "{}", - err - ); - worker_shutdown_message(worker_kind, worker_pid, &err.to_string()); - return - }, + let mut stream = match stream { + Ok(ok) => ok, + Err(err) => worker_shutdown_error(worker_info, &err.to_string()), + }; + + let WorkerHandshake { security_status } = match recv_worker_handshake(&mut stream) { + Ok(ok) => ok, + Err(err) => worker_shutdown_error(worker_info, &err.to_string()), }; // Enable some security features. { + gum::trace!(target: LOG_TARGET, ?security_status, "Enabling security features"); + // Call based on whether we can change root. Error out if it should work but fails. // // NOTE: This should not be called in a multi-threaded context (i.e. inside the tokio @@ -300,39 +292,29 @@ pub fn worker_event_loop( // > CLONE_NEWUSER requires that the calling process is not threaded. #[cfg(target_os = "linux")] if security_status.can_unshare_user_namespace_and_change_root { - if let Err(err) = security::unshare_user_namespace_and_change_root( - worker_kind, - worker_pid, - &worker_dir_path, - ) { - // The filesystem may be in an inconsistent state, bail out. - gum::error!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, - "Could not change root to be the worker cache path: {}", - err - ); - worker_shutdown_message(worker_kind, worker_pid, &err); - return + if let Err(err) = security::change_root::enable_for_worker(&worker_info) { + // The filesystem may be in an inconsistent state, always bail out. 
+ let err = format!("Could not change root to be the worker cache path: {}", err); + worker_shutdown_error(worker_info, &err); } - worker_dir_path = std::path::Path::new("/").to_owned(); + worker_info.worker_dir_path = std::path::Path::new("/").to_owned(); } #[cfg(target_os = "linux")] if security_status.can_enable_landlock { - let landlock_status = - security::landlock::enable_for_worker(worker_kind, worker_pid, &worker_dir_path); - if !matches!(landlock_status, Ok(landlock::RulesetStatus::FullyEnforced)) { - // We previously were able to enable, so this should never happen. + if let Err(err) = security::landlock::enable_for_worker(&worker_info) { + // We previously were able to enable, so this should never happen. Shutdown if + // running in secure mode. + let err = format!("could not fully enable landlock: {:?}", err); gum::error!( target: LOG_TARGET, - %worker_kind, - %worker_pid, - "could not fully enable landlock: {:?}. This should not happen, please report an issue", - landlock_status + ?worker_info, + "{}. This should not happen, please report an issue", + err ); + if security_status.secure_validator_mode { + worker_shutdown(worker_info, &err); + } } } @@ -340,48 +322,54 @@ pub fn worker_event_loop( // job to catch regressions. See . #[cfg(all(target_os = "linux", target_arch = "x86_64"))] if security_status.can_enable_seccomp { - let seccomp_status = - security::seccomp::enable_for_worker(worker_kind, worker_pid, &worker_dir_path); - if !matches!(seccomp_status, Ok(())) { - // We previously were able to enable, so this should never happen. - // - // TODO: Make this a real error in secure-mode. See: - // + if let Err(err) = security::seccomp::enable_for_worker(&worker_info) { + // We previously were able to enable, so this should never happen. Shutdown if + // running in secure mode. + let err = format!("could not fully enable seccomp: {:?}", err); gum::error!( target: LOG_TARGET, - %worker_kind, - %worker_pid, - "could not fully enable seccomp: {:?}. This should not happen, please report an issue", - seccomp_status + ?worker_info, + "{}. This should not happen, please report an issue", + err ); + if security_status.secure_validator_mode { + worker_shutdown(worker_info, &err); + } } } - if !security::check_env_vars_were_cleared(worker_kind, worker_pid) { + if !security::check_env_vars_were_cleared(&worker_info) { let err = "not all env vars were cleared when spawning the process"; gum::error!( target: LOG_TARGET, - %worker_kind, - %worker_pid, + ?worker_info, "{}", err ); - worker_shutdown_message(worker_kind, worker_pid, err); - return + if security_status.secure_validator_mode { + worker_shutdown(worker_info, err); + } } } // Run the main worker loop. - let err = event_loop(stream, worker_dir_path) + let err = event_loop(stream, worker_info.worker_dir_path.clone()) // It's never `Ok` because it's `Ok(Never)`. .unwrap_err(); - worker_shutdown_message(worker_kind, worker_pid, &err.to_string()); + worker_shutdown(worker_info, &err.to_string()); +} + +/// Provide a consistent message on unexpected worker shutdown. +fn worker_shutdown(worker_info: WorkerInfo, err: &str) -> ! { + gum::warn!(target: LOG_TARGET, ?worker_info, "quitting pvf worker ({}): {}", worker_info.kind, err); + std::process::exit(1); } -/// Provide a consistent message on worker shutdown. 
-fn worker_shutdown_message(worker_kind: WorkerKind, worker_pid: u32, err: &str) { - gum::debug!(target: LOG_TARGET, %worker_pid, "quitting pvf worker ({}): {}", worker_kind, err); +/// Provide a consistent error on unexpected worker shutdown. +fn worker_shutdown_error(worker_info: WorkerInfo, err: &str) -> ! { + gum::error!(target: LOG_TARGET, ?worker_info, "quitting pvf worker ({}): {}", worker_info.kind, err); + std::process::exit(1); } /// Loop that runs in the CPU time monitor thread on prepare and execute jobs. Continuously wakes up @@ -452,6 +440,18 @@ fn kill_parent_node_in_emergency() { } } +/// Receives a handshake with information for the worker. +fn recv_worker_handshake(stream: &mut UnixStream) -> io::Result { + let worker_handshake = framed_recv_blocking(stream)?; + let worker_handshake = WorkerHandshake::decode(&mut &worker_handshake[..]).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("recv_worker_handshake: failed to decode WorkerHandshake: {}", e), + ) + })?; + Ok(worker_handshake) +} + /// Functionality related to threads spawned by the workers. /// /// The motivation for this module is to coordinate worker threads without using async Rust. diff --git a/polkadot/node/core/pvf/common/src/worker/security/change_root.rs b/polkadot/node/core/pvf/common/src/worker/security/change_root.rs new file mode 100644 index 0000000000000000000000000000000000000000..375cc8ff6f28e5ff10d33fd9f1cac35fa16de7b1 --- /dev/null +++ b/polkadot/node/core/pvf/common/src/worker/security/change_root.rs @@ -0,0 +1,173 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Functionality for securing workers by unsharing some namespaces from other processes and +//! changing the root. + +use crate::{ + worker::{WorkerInfo, WorkerKind}, + LOG_TARGET, +}; +use std::{env, ffi::CString, io, os::unix::ffi::OsStrExt, path::Path, ptr}; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("{0}")] + OsErrWithContext(String), + #[error(transparent)] + Io(#[from] io::Error), + #[error("assertion failed: {0}")] + AssertionFailed(String), +} + +pub type Result = std::result::Result; + +/// Try to enable for the given kind of worker. +/// +/// NOTE: This should not be called in a multi-threaded context. `unshare(2)`: +/// "CLONE_NEWUSER requires that the calling process is not threaded." +pub fn enable_for_worker(worker_info: &WorkerInfo) -> Result<()> { + gum::trace!( + target: LOG_TARGET, + ?worker_info, + "enabling change-root", + ); + + try_restrict(worker_info) +} + +/// Runs a check for unshare-and-change-root and returns an error indicating whether it can be fully +/// enabled on the current Linux environment. +/// +/// NOTE: This should not be called in a multi-threaded context. `unshare(2)`: +/// "CLONE_NEWUSER requires that the calling process is not threaded." 
+#[cfg(target_os = "linux")] +pub fn check_is_fully_enabled(tempdir: &Path) -> Result<()> { + let worker_dir_path = tempdir.to_owned(); + try_restrict(&WorkerInfo { + pid: std::process::id(), + kind: WorkerKind::CheckPivotRoot, + version: None, + worker_dir_path, + }) +} + +/// Unshare the user namespace and change root to be the worker directory. +/// +/// NOTE: This should not be called in a multi-threaded context. `unshare(2)`: +/// "CLONE_NEWUSER requires that the calling process is not threaded." +#[cfg(target_os = "linux")] +fn try_restrict(worker_info: &WorkerInfo) -> Result<()> { + // TODO: Remove this once this is stable: https://github.com/rust-lang/rust/issues/105723 + macro_rules! cstr_ptr { + ($e:expr) => { + concat!($e, "\0").as_ptr().cast::() + }; + } + + gum::trace!( + target: LOG_TARGET, + ?worker_info, + "unsharing the user namespace and calling pivot_root", + ); + + let worker_dir_path_c = CString::new(worker_info.worker_dir_path.as_os_str().as_bytes()) + .expect("on unix; the path will never contain 0 bytes; qed"); + + // Wrapper around all the work to prevent repetitive error handling. + // + // # Errors + // + // It's the caller's responsibility to call `Error::last_os_error`. Note that that alone does + // not give the context of which call failed, so we return a &str error. + || -> std::result::Result<(), &'static str> { + // SAFETY: We pass null-terminated C strings and use the APIs as documented. In fact, steps + // (2) and (3) are adapted from the example in pivot_root(2), with the additional + // change described in the `pivot_root(".", ".")` section. + unsafe { + // 1. `unshare` the user and the mount namespaces. + if libc::unshare(libc::CLONE_NEWUSER | libc::CLONE_NEWNS) < 0 { + return Err("unshare user and mount namespaces") + } + + // 2. Setup mounts. + // + // Ensure that new root and its parent mount don't have shared propagation (which would + // cause pivot_root() to return an error), and prevent propagation of mount events to + // the initial mount namespace. + if libc::mount( + ptr::null(), + cstr_ptr!("/"), + ptr::null(), + libc::MS_REC | libc::MS_PRIVATE, + ptr::null(), + ) < 0 + { + return Err("mount MS_PRIVATE") + } + // Ensure that the new root is a mount point. + let additional_flags = + if let WorkerKind::Execute | WorkerKind::CheckPivotRoot = worker_info.kind { + libc::MS_RDONLY + } else { + 0 + }; + if libc::mount( + worker_dir_path_c.as_ptr(), + worker_dir_path_c.as_ptr(), + ptr::null(), // ignored when MS_BIND is used + libc::MS_BIND | + libc::MS_REC | libc::MS_NOEXEC | + libc::MS_NODEV | libc::MS_NOSUID | + libc::MS_NOATIME | additional_flags, + ptr::null(), // ignored when MS_BIND is used + ) < 0 + { + return Err("mount MS_BIND") + } + + // 3. `pivot_root` to the artifact directory. + if libc::chdir(worker_dir_path_c.as_ptr()) < 0 { + return Err("chdir to worker dir path") + } + if libc::syscall(libc::SYS_pivot_root, cstr_ptr!("."), cstr_ptr!(".")) < 0 { + return Err("pivot_root") + } + if libc::umount2(cstr_ptr!("."), libc::MNT_DETACH) < 0 { + return Err("umount the old root mount point") + } + } + + Ok(()) + }() + .map_err(|err_ctx| { + let err = io::Error::last_os_error(); + Error::OsErrWithContext(format!("{}: {}", err_ctx, err)) + })?; + + // Do some assertions. + if env::current_dir()? != Path::new("/") { + return Err(Error::AssertionFailed("expected current dir after pivot_root to be `/`".into())) + } + env::set_current_dir("..")?; + if env::current_dir()? 
!= Path::new("/") { + return Err(Error::AssertionFailed( + "expected not to be able to break out of new root by doing `..`".into(), + )) + } + + Ok(()) +} diff --git a/polkadot/node/core/pvf/common/src/worker/security/landlock.rs b/polkadot/node/core/pvf/common/src/worker/security/landlock.rs index 51500c733b8cea52805f0f6acdfdeb99ef4d7b68..211d12c2e443aacd6b11b6ef9e4cfddf5aa9bf26 100644 --- a/polkadot/node/core/pvf/common/src/worker/security/landlock.rs +++ b/polkadot/node/core/pvf/common/src/worker/security/landlock.rs @@ -28,7 +28,7 @@ pub use landlock::RulesetStatus; use crate::{ - worker::{stringify_panic_payload, WorkerKind}, + worker::{stringify_panic_payload, WorkerInfo, WorkerKind}, LOG_TARGET, }; use landlock::*; @@ -74,6 +74,8 @@ pub const LANDLOCK_ABI: ABI = ABI::V1; #[derive(thiserror::Error, Debug)] pub enum Error { + #[error("Could not fully enable: {0:?}")] + NotFullyEnabled(RulesetStatus), #[error("Invalid exception path: {0:?}")] InvalidExceptionPath(PathBuf), #[error(transparent)] @@ -85,17 +87,13 @@ pub enum Error { pub type Result = std::result::Result; /// Try to enable landlock for the given kind of worker. -pub fn enable_for_worker( - worker_kind: WorkerKind, - worker_pid: u32, - worker_dir_path: &Path, -) -> Result { - let exceptions: Vec<(PathBuf, BitFlags)> = match worker_kind { +pub fn enable_for_worker(worker_info: &WorkerInfo) -> Result<()> { + let exceptions: Vec<(PathBuf, BitFlags)> = match worker_info.kind { WorkerKind::Prepare => { - vec![(worker_dir_path.to_owned(), AccessFs::WriteFile.into())] + vec![(worker_info.worker_dir_path.to_owned(), AccessFs::WriteFile.into())] }, WorkerKind::Execute => { - vec![(worker_dir_path.to_owned(), AccessFs::ReadFile.into())] + vec![(worker_info.worker_dir_path.to_owned(), AccessFs::ReadFile.into())] }, WorkerKind::CheckPivotRoot => panic!("this should only be passed for checking pivot_root; qed"), @@ -103,9 +101,7 @@ pub fn enable_for_worker( gum::trace!( target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, + ?worker_info, "enabling landlock with exceptions: {:?}", exceptions, ); @@ -114,18 +110,14 @@ pub fn enable_for_worker( } // TODO: -/// Runs a check for landlock and returns a single bool indicating whether the given landlock -/// ABI is fully enabled on the current Linux environment. -pub fn check_is_fully_enabled() -> bool { - let status_from_thread: Result = - match std::thread::spawn(|| try_restrict(std::iter::empty::<(PathBuf, AccessFs)>())).join() - { - Ok(Ok(status)) => Ok(status), - Ok(Err(ruleset_err)) => Err(ruleset_err.into()), - Err(err) => Err(Error::Panic(stringify_panic_payload(err))), - }; - - matches!(status_from_thread, Ok(RulesetStatus::FullyEnforced)) +/// Runs a check for landlock in its own thread, and returns an error indicating whether the given +/// landlock ABI is fully enabled on the current Linux environment. +pub fn check_is_fully_enabled() -> Result<()> { + match std::thread::spawn(|| try_restrict(std::iter::empty::<(PathBuf, AccessFs)>())).join() { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(err), + Err(err) => Err(Error::Panic(stringify_panic_payload(err))), + } } /// Tries to restrict the current thread (should only be called in a process' main thread) with @@ -139,7 +131,7 @@ pub fn check_is_fully_enabled() -> bool { /// # Returns /// /// The status of the restriction (whether it was fully, partially, or not-at-all enforced). 
-fn try_restrict(fs_exceptions: I) -> Result +fn try_restrict(fs_exceptions: I) -> Result<()> where I: IntoIterator, P: AsRef, @@ -156,8 +148,13 @@ where } ruleset = ruleset.add_rules(rules)?; } + let status = ruleset.restrict_self()?; - Ok(status.ruleset) + if !matches!(status.ruleset, RulesetStatus::FullyEnforced) { + return Err(Error::NotFullyEnabled(status.ruleset)) + } + + Ok(()) } #[cfg(test)] @@ -168,7 +165,7 @@ mod tests { #[test] fn restricted_thread_cannot_read_file() { // TODO: This would be nice: . - if !check_is_fully_enabled() { + if check_is_fully_enabled().is_err() { return } @@ -191,7 +188,7 @@ mod tests { // Apply Landlock with a read exception for only one of the files. let status = try_restrict(vec![(path1, AccessFs::ReadFile)]); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + if !matches!(status, Ok(())) { panic!( "Ruleset should be enforced since we checked if landlock is enabled: {:?}", status @@ -212,7 +209,7 @@ mod tests { // Apply Landlock for all files. let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + if !matches!(status, Ok(())) { panic!( "Ruleset should be enforced since we checked if landlock is enabled: {:?}", status @@ -233,7 +230,7 @@ mod tests { #[test] fn restricted_thread_cannot_write_file() { // TODO: This would be nice: . - if !check_is_fully_enabled() { + if check_is_fully_enabled().is_err() { return } @@ -252,7 +249,7 @@ mod tests { // Apply Landlock with a write exception for only one of the files. let status = try_restrict(vec![(path1, AccessFs::WriteFile)]); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + if !matches!(status, Ok(())) { panic!( "Ruleset should be enforced since we checked if landlock is enabled: {:?}", status @@ -270,7 +267,7 @@ mod tests { // Apply Landlock for all files. let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + if !matches!(status, Ok(())) { panic!( "Ruleset should be enforced since we checked if landlock is enabled: {:?}", status @@ -292,7 +289,7 @@ mod tests { #[test] fn restricted_thread_can_truncate_file() { // TODO: This would be nice: . - if !check_is_fully_enabled() { + if check_is_fully_enabled().is_err() { return } @@ -308,7 +305,7 @@ mod tests { // Apply Landlock with all exceptions under the current ABI. let status = try_restrict(vec![(path, AccessFs::from_all(LANDLOCK_ABI))]); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + if !matches!(status, Ok(())) { panic!( "Ruleset should be enforced since we checked if landlock is enabled: {:?}", status diff --git a/polkadot/node/core/pvf/common/src/worker/security/mod.rs b/polkadot/node/core/pvf/common/src/worker/security/mod.rs index 9a38ed172773dff10f533434c1c928d8ed99868d..ff4c712f6bdca1351dcc7da79aee3558b8121a44 100644 --- a/polkadot/node/core/pvf/common/src/worker/security/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/security/mod.rs @@ -27,134 +27,21 @@ //! - Restrict networking by blocking socket creation and io_uring. //! - Remove env vars -use crate::{worker::WorkerKind, LOG_TARGET}; +use crate::{worker::WorkerInfo, LOG_TARGET}; +#[cfg(target_os = "linux")] +pub mod change_root; #[cfg(target_os = "linux")] pub mod landlock; - #[cfg(all(target_os = "linux", target_arch = "x86_64"))] pub mod seccomp; -/// Unshare the user namespace and change root to be the artifact directory. -/// -/// NOTE: This should not be called in a multi-threaded context. 
`unshare(2)`: -/// "CLONE_NEWUSER requires that the calling process is not threaded." -#[cfg(target_os = "linux")] -pub fn unshare_user_namespace_and_change_root( - worker_kind: WorkerKind, - worker_pid: u32, - worker_dir_path: &std::path::Path, -) -> Result<(), String> { - use std::{env, ffi::CString, os::unix::ffi::OsStrExt, path::Path, ptr}; - - // TODO: Remove this once this is stable: https://github.com/rust-lang/rust/issues/105723 - macro_rules! cstr_ptr { - ($e:expr) => { - concat!($e, "\0").as_ptr().cast::() - }; - } - - gum::trace!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, - "unsharing the user namespace and calling pivot_root", - ); - - let worker_dir_path_c = CString::new(worker_dir_path.as_os_str().as_bytes()) - .expect("on unix; the path will never contain 0 bytes; qed"); - - // Wrapper around all the work to prevent repetitive error handling. - // - // # Errors - // - // It's the caller's responsibility to call `Error::last_os_error`. Note that that alone does - // not give the context of which call failed, so we return a &str error. - || -> Result<(), &'static str> { - // SAFETY: We pass null-terminated C strings and use the APIs as documented. In fact, steps - // (2) and (3) are adapted from the example in pivot_root(2), with the additional - // change described in the `pivot_root(".", ".")` section. - unsafe { - // 1. `unshare` the user and the mount namespaces. - if libc::unshare(libc::CLONE_NEWUSER | libc::CLONE_NEWNS) < 0 { - return Err("unshare user and mount namespaces") - } - - // 2. Setup mounts. - // - // Ensure that new root and its parent mount don't have shared propagation (which would - // cause pivot_root() to return an error), and prevent propagation of mount events to - // the initial mount namespace. - if libc::mount( - ptr::null(), - cstr_ptr!("/"), - ptr::null(), - libc::MS_REC | libc::MS_PRIVATE, - ptr::null(), - ) < 0 - { - return Err("mount MS_PRIVATE") - } - // Ensure that the new root is a mount point. - let additional_flags = - if let WorkerKind::Execute | WorkerKind::CheckPivotRoot = worker_kind { - libc::MS_RDONLY - } else { - 0 - }; - if libc::mount( - worker_dir_path_c.as_ptr(), - worker_dir_path_c.as_ptr(), - ptr::null(), // ignored when MS_BIND is used - libc::MS_BIND | - libc::MS_REC | libc::MS_NOEXEC | - libc::MS_NODEV | libc::MS_NOSUID | - libc::MS_NOATIME | additional_flags, - ptr::null(), // ignored when MS_BIND is used - ) < 0 - { - return Err("mount MS_BIND") - } - - // 3. `pivot_root` to the artifact directory. - if libc::chdir(worker_dir_path_c.as_ptr()) < 0 { - return Err("chdir to worker dir path") - } - if libc::syscall(libc::SYS_pivot_root, cstr_ptr!("."), cstr_ptr!(".")) < 0 { - return Err("pivot_root") - } - if libc::umount2(cstr_ptr!("."), libc::MNT_DETACH) < 0 { - return Err("umount the old root mount point") - } - } - - Ok(()) - }() - .map_err(|err_ctx| { - let err = std::io::Error::last_os_error(); - format!("{}: {}", err_ctx, err) - })?; - - // Do some assertions. - if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { - return Err("expected current dir after pivot_root to be `/`".into()) - } - env::set_current_dir("..").map_err(|err| err.to_string())?; - if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { - return Err("expected not to be able to break out of new root by doing `..`".into()) - } - - Ok(()) -} - /// Require env vars to have been removed when spawning the process, to prevent malicious code from /// accessing them. 
-pub fn check_env_vars_were_cleared(worker_kind: WorkerKind, worker_pid: u32) -> bool { +pub fn check_env_vars_were_cleared(worker_info: &WorkerInfo) -> bool { gum::trace!( target: LOG_TARGET, - %worker_kind, - %worker_pid, + ?worker_info, "clearing env vars in worker", ); @@ -162,8 +49,8 @@ pub fn check_env_vars_were_cleared(worker_kind: WorkerKind, worker_pid: u32) -> for (key, value) in std::env::vars_os() { // TODO: *theoretically* the value (or mere presence) of `RUST_LOG` can be a source of - // randomness for malicious code. In the future we can remove it also and log in the host; - // see . + // randomness for malicious code. It should be removed in the job process, which does no + // logging. if key == "RUST_LOG" { continue } @@ -175,8 +62,7 @@ pub fn check_env_vars_were_cleared(worker_kind: WorkerKind, worker_pid: u32) -> gum::error!( target: LOG_TARGET, - %worker_kind, - %worker_pid, + ?worker_info, ?key, ?value, "env var was present that should have been removed", diff --git a/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs b/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs index 5539ad284400b93ae4ed2643ed5b9a987f31baac..4f270f75b345c96fc1118f5373b3fdca229e8e52 100644 --- a/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs +++ b/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs @@ -67,22 +67,20 @@ //! //! # Action on syscall violations //! -//! On syscall violations we currently only log, to make sure this works correctly before enforcing. -//! -//! In the future, when a forbidden syscall is attempted we immediately kill the process in order to -//! prevent the attacker from doing anything else. In execution, this will result in voting against -//! the candidate. +//! When a forbidden syscall is attempted we immediately kill the process in order to prevent the +//! attacker from doing anything else. In execution, this will result in voting against the +//! candidate. use crate::{ - worker::{stringify_panic_payload, WorkerKind}, + worker::{stringify_panic_payload, WorkerInfo}, LOG_TARGET, }; use seccompiler::*; -use std::{collections::BTreeMap, path::Path}; +use std::collections::BTreeMap; /// The action to take on caught syscalls. #[cfg(not(test))] -const CAUGHT_ACTION: SeccompAction = SeccompAction::Log; +const CAUGHT_ACTION: SeccompAction = SeccompAction::KillProcess; /// Don't kill the process when testing. #[cfg(test)] const CAUGHT_ACTION: SeccompAction = SeccompAction::Errno(libc::EACCES as u32); @@ -100,36 +98,28 @@ pub enum Error { pub type Result = std::result::Result; /// Try to enable seccomp for the given kind of worker. -pub fn enable_for_worker( - worker_kind: WorkerKind, - worker_pid: u32, - worker_dir_path: &Path, -) -> Result<()> { +pub fn enable_for_worker(worker_info: &WorkerInfo) -> Result<()> { gum::trace!( target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, + ?worker_info, "enabling seccomp", ); try_restrict() } -/// Runs a check for seccomp and returns a single bool indicating whether seccomp with our rules is -/// fully enabled on the current Linux environment. -pub fn check_is_fully_enabled() -> bool { - let status_from_thread: Result<()> = match std::thread::spawn(|| try_restrict()).join() { +/// Runs a check for seccomp in its own thread, and returns an error indicating whether seccomp with +/// our rules is fully enabled on the current Linux environment. 
+pub fn check_is_fully_enabled() -> Result<()> { + match std::thread::spawn(|| try_restrict()).join() { Ok(Ok(())) => Ok(()), - Ok(Err(err)) => Err(err.into()), + Ok(Err(err)) => Err(err), Err(err) => Err(Error::Panic(stringify_panic_payload(err))), - }; - - matches!(status_from_thread, Ok(())) + } } /// Applies a `seccomp` filter to disable networking for the PVF threads. -pub fn try_restrict() -> Result<()> { +fn try_restrict() -> Result<()> { // Build a `seccomp` filter which by default allows all syscalls except those blocked in the // blacklist. let mut blacklisted_rules = BTreeMap::default(); @@ -171,7 +161,7 @@ mod tests { #[test] fn sandboxed_thread_cannot_use_sockets() { // TODO: This would be nice: . - if !check_is_fully_enabled() { + if check_is_fully_enabled().is_err() { return } diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 77a9420961c00a41c5cd3cf3470e57a9a39be2f0..97dde59ebc2e471a411119c34988a9498c13de7b 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -6,9 +6,15 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] cpu-time = "1.0.0" gum = { package = "tracing-gum", path = "../../../gum" } +os_pipe = "1.1.4" +nix = { version = "0.27.1", features = ["process", "resource"] } +libc = "0.2.139" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 8872f9bc8dd302bba29bd054224c3cecd2fb0d8f..b33a9d5069dffaa0d4264897022e1e7709577991 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -16,32 +16,40 @@ //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary. -pub use polkadot_node_core_pvf_common::{ - executor_intf::execute_artifact, worker_dir, SecurityStatus, -}; +pub use polkadot_node_core_pvf_common::{executor_interface::execute_artifact, worker_dir}; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-execute-worker=trace`. const LOG_TARGET: &str = "parachain::pvf-execute-worker"; use cpu_time::ProcessTime; +use nix::{ + errno::Errno, + sys::{ + resource::{Usage, UsageWho}, + wait::WaitStatus, + }, + unistd::{ForkResult, Pid}, +}; +use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, Response}, + execute::{Handshake, JobError, JobResponse, JobResult, WorkerResponse}, framed_recv_blocking, framed_send_blocking, worker::{ - cpu_time_monitor_loop, stringify_panic_payload, + cpu_time_monitor_loop, run_worker, stringify_panic_payload, thread::{self, WaitOutcome}, - worker_event_loop, WorkerKind, + WorkerKind, }, }; use polkadot_parachain_primitives::primitives::ValidationResult; use polkadot_primitives::{executor_params::DEFAULT_NATIVE_STACK_MAX, ExecutorParams}; use std::{ - io, + io::{self, Read}, os::unix::net::UnixStream, path::PathBuf, + process, sync::{mpsc::channel, Arc}, time::Duration, }; @@ -82,12 +90,13 @@ use std::{ /// The stack size for the execute thread. 
pub const EXECUTE_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024 + DEFAULT_NATIVE_STACK_MAX as usize; -fn recv_handshake(stream: &mut UnixStream) -> io::Result { +/// Receives a handshake with information specific to the execute worker. +fn recv_execute_handshake(stream: &mut UnixStream) -> io::Result { let handshake_enc = framed_recv_blocking(stream)?; let handshake = Handshake::decode(&mut &handshake_enc[..]).map_err(|_| { io::Error::new( io::ErrorKind::Other, - "execute pvf recv_handshake: failed to decode Handshake".to_owned(), + "execute pvf recv_execute_handshake: failed to decode Handshake".to_owned(), ) })?; Ok(handshake) @@ -105,7 +114,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, Duration)> { Ok((params, execution_timeout)) } -fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()> { +fn send_response(stream: &mut UnixStream, response: WorkerResponse) -> io::Result<()> { framed_send_blocking(stream, &response.encode()) } @@ -129,20 +138,18 @@ pub fn worker_entrypoint( worker_dir_path: PathBuf, node_version: Option<&str>, worker_version: Option<&str>, - security_status: SecurityStatus, ) { - worker_event_loop( + run_worker( WorkerKind::Execute, socket_path, worker_dir_path, node_version, worker_version, - &security_status, |mut stream, worker_dir_path| { - let worker_pid = std::process::id(); + let worker_pid = process::id(); let artifact_path = worker_dir::execute_artifact(&worker_dir_path); - let Handshake { executor_params } = recv_handshake(&mut stream)?; + let Handshake { executor_params } = recv_execute_handshake(&mut stream)?; loop { let (params, execution_timeout) = recv_request(&mut stream)?; @@ -157,7 +164,7 @@ pub fn worker_entrypoint( let compiled_artifact_blob = match std::fs::read(&artifact_path) { Ok(bytes) => bytes, Err(err) => { - let response = Response::InternalError( + let response = WorkerResponse::InternalError( InternalValidationError::CouldNotOpenFile(err.to_string()), ); send_response(&mut stream, response)?; @@ -165,90 +172,53 @@ pub fn worker_entrypoint( }, }; - // Conditional variable to notify us when a thread is done. - let condvar = thread::get_condvar(); + let (pipe_reader, pipe_writer) = os_pipe::pipe()?; + + let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => { + let response = internal_error_from_errno("getrusage before", errno); + send_response(&mut stream, response)?; + continue + }, + }; - let cpu_time_start = ProcessTime::now(); + // SAFETY: new process is spawned within a single threaded process. This invariant + // is enforced by tests. + let response = match unsafe { nix::unistd::fork() } { + Err(errno) => internal_error_from_errno("fork", errno), + Ok(ForkResult::Child) => { + // Dropping the stream closes the underlying socket. We want to make sure + // that the sandboxed child can't get any kind of information from the + // outside world. The only IPC it should be able to do is sending its + // response over the pipe. + drop(stream); + // Drop the read end so we don't have too many FDs open. + drop(pipe_reader); - // Spawn a new thread that runs the CPU time monitor. 
- let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); - let cpu_time_monitor_thread = thread::spawn_worker_thread( - "cpu time monitor thread", - move || { - cpu_time_monitor_loop( - cpu_time_start, + handle_child_process( + pipe_writer, + compiled_artifact_blob, + executor_params, + params, execution_timeout, - cpu_time_monitor_rx, - ) - }, - Arc::clone(&condvar), - WaitOutcome::TimedOut, - )?; - - let executor_params_2 = executor_params.clone(); - let execute_thread = thread::spawn_worker_thread_with_stack_size( - "execute thread", - move || { - validate_using_artifact( - &compiled_artifact_blob, - &executor_params_2, - ¶ms, - cpu_time_start, ) }, - Arc::clone(&condvar), - WaitOutcome::Finished, - EXECUTE_THREAD_STACK_SIZE, - )?; - - let outcome = thread::wait_for_threads(condvar); - - let response = match outcome { - WaitOutcome::Finished => { - let _ = cpu_time_monitor_tx.send(()); - execute_thread - .join() - .unwrap_or_else(|e| Response::Panic(stringify_panic_payload(e))) - }, - // If the CPU thread is not selected, we signal it to end, the join handle is - // dropped and the thread will finish in the background. - WaitOutcome::TimedOut => { - match cpu_time_monitor_thread.join() { - Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't - // finished. - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "execute job took {}ms cpu time, exceeded execute timeout {}ms", - cpu_time_elapsed.as_millis(), - execution_timeout.as_millis(), - ); - Response::TimedOut - }, - Ok(None) => Response::InternalError( - InternalValidationError::CpuTimeMonitorThread( - "error communicating over finished channel".into(), - ), - ), - Err(e) => Response::InternalError( - InternalValidationError::CpuTimeMonitorThread( - stringify_panic_payload(e), - ), - ), - } + Ok(ForkResult::Parent { child }) => { + // the read end will wait until all write ends have been closed, + // this drop is necessary to avoid deadlock + drop(pipe_writer); + + handle_parent_process( + pipe_reader, + child, + worker_pid, + usage_before, + execution_timeout, + )? }, - WaitOutcome::Pending => unreachable!( - "we run wait_while until the outcome is no longer pending; qed" - ), }; - gum::trace!( - target: LOG_TARGET, - %worker_pid, - "worker: sending response to host: {:?}", - response - ); send_response(&mut stream, response)?; } }, @@ -259,27 +229,281 @@ fn validate_using_artifact( compiled_artifact_blob: &[u8], executor_params: &ExecutorParams, params: &[u8], - cpu_time_start: ProcessTime, -) -> Response { +) -> JobResponse { let descriptor_bytes = match unsafe { // SAFETY: this should be safe since the compiled artifact passed here comes from the // file created by the prepare workers. These files are obtained by calling - // [`executor_intf::prepare`]. + // [`executor_interface::prepare`]. execute_artifact(compiled_artifact_blob, executor_params, params) } { - Err(err) => return Response::format_invalid("execute", &err), + Err(err) => return JobResponse::format_invalid("execute", &err), Ok(d) => d, }; let result_descriptor = match ValidationResult::decode(&mut &descriptor_bytes[..]) { Err(err) => - return Response::format_invalid("validation result decoding failed", &err.to_string()), + return JobResponse::format_invalid( + "validation result decoding failed", + &err.to_string(), + ), Ok(r) => r, }; - // Include the decoding in the measured time, to prevent any potential attacks exploiting some - // bug in decoding. 
-	let duration = cpu_time_start.elapsed();
+	JobResponse::Ok { result_descriptor }
+}
+
+/// This is used to handle the child process of the PVF execute worker.
+/// It executes the artifact and pipes the response back to the parent process.
+///
+/// # Arguments
+///
+/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe.
+///
+/// - `compiled_artifact_blob`: The artifact bytes compiled by the prepare worker.
+///
+/// - `executor_params`: Deterministically serialized execution environment semantics.
+///
+/// - `params`: Validation parameters.
+///
+/// - `execution_timeout`: The timeout in `Duration`.
+///
+/// # Returns
+///
+/// - Pipes the `JobResponse` back to the parent process (this function never returns).
+fn handle_child_process(
+	mut pipe_write: PipeWriter,
+	compiled_artifact_blob: Vec<u8>,
+	executor_params: ExecutorParams,
+	params: Vec<u8>,
+	execution_timeout: Duration,
+) -> ! {
+	gum::debug!(
+		target: LOG_TARGET,
+		worker_job_pid = %process::id(),
+		"worker job: executing artifact",
+	);
+
+	// Conditional variable to notify us when a thread is done.
+	let condvar = thread::get_condvar();
+	let cpu_time_start = ProcessTime::now();
+
+	// Spawn a new thread that runs the CPU time monitor.
+	let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>();
+	let cpu_time_monitor_thread = thread::spawn_worker_thread(
+		"cpu time monitor thread",
+		move || cpu_time_monitor_loop(cpu_time_start, execution_timeout, cpu_time_monitor_rx),
+		Arc::clone(&condvar),
+		WaitOutcome::TimedOut,
+	)
+	.unwrap_or_else(|err| {
+		send_child_response(&mut pipe_write, Err(JobError::CouldNotSpawnThread(err.to_string())))
+	});
+
+	let executor_params_2 = executor_params.clone();
+	let execute_thread = thread::spawn_worker_thread_with_stack_size(
+		"execute thread",
+		move || validate_using_artifact(&compiled_artifact_blob, &executor_params_2, &params),
+		Arc::clone(&condvar),
+		WaitOutcome::Finished,
+		EXECUTE_THREAD_STACK_SIZE,
+	)
+	.unwrap_or_else(|err| {
+		send_child_response(&mut pipe_write, Err(JobError::CouldNotSpawnThread(err.to_string())))
+	});
+
+	let outcome = thread::wait_for_threads(condvar);
+
+	let response = match outcome {
+		WaitOutcome::Finished => {
+			let _ = cpu_time_monitor_tx.send(());
+			execute_thread.join().map_err(|e| JobError::Panic(stringify_panic_payload(e)))
+		},
+		// If the CPU thread is not selected, we signal it to end, the join handle is
+		// dropped and the thread will finish in the background.
+		WaitOutcome::TimedOut => match cpu_time_monitor_thread.join() {
+			Ok(Some(_cpu_time_elapsed)) => Err(JobError::TimedOut),
+			Ok(None) => Err(JobError::CpuTimeMonitorThread(
+				"error communicating over finished channel".into(),
+			)),
+			Err(e) => Err(JobError::CpuTimeMonitorThread(stringify_panic_payload(e))),
+		},
+		WaitOutcome::Pending =>
+			unreachable!("we run wait_while until the outcome is no longer pending; qed"),
+	};
+
+	send_child_response(&mut pipe_write, response);
+}
+
+/// Waits for the child process to finish and handles the child's response from the pipe.
+///
+/// # Arguments
+///
+/// - `pipe_read`: A `PipeReader` used to read data from the child process.
+///
+/// - `job_pid`: The PID of the child (job) process.
+///
+/// - `worker_pid`: The PID of this worker process, used for logging.
+///
+/// - `usage_before`: Resource usage statistics before executing the child process.
+///
+/// - `timeout`: The maximum allowed time for the child process to finish, in `Duration`.
+///
+/// # Returns
+///
+/// - The response, either `Ok` or some error state.
+fn handle_parent_process( + mut pipe_read: PipeReader, + job_pid: Pid, + worker_pid: u32, + usage_before: Usage, + timeout: Duration, +) -> io::Result { + // Read from the child. Don't decode unless the process exited normally, which we check later. + let mut received_data = Vec::new(); + pipe_read + .read_to_end(&mut received_data) + // Could not decode job response. There is either a bug or the job was hijacked. + // Should retry at any rate. + .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; + + let status = nix::sys::wait::waitpid(job_pid, None); + gum::trace!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "execute worker received wait status from job: {:?}", + status, + ); + + let usage_after = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => return Ok(internal_error_from_errno("getrusage after", errno)), + }; + + // Using `getrusage` is needed to check whether child has timedout since we cannot rely on + // child to report its own time. + // As `getrusage` returns resource usage from all terminated child processes, + // it is necessary to subtract the usage before the current child process to isolate its cpu + // time + let cpu_tv = get_total_cpu_usage(usage_after) - get_total_cpu_usage(usage_before); + if cpu_tv >= timeout { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "execute job took {}ms cpu time, exceeded execute timeout {}ms", + cpu_tv.as_millis(), + timeout.as_millis(), + ); + return Ok(WorkerResponse::JobTimedOut) + } + + match status { + Ok(WaitStatus::Exited(_, exit_status)) => { + let mut reader = io::BufReader::new(received_data.as_slice()); + let result = match recv_child_response(&mut reader) { + Ok(result) => result, + Err(err) => return Ok(WorkerResponse::JobError(err.to_string())), + }; + + match result { + Ok(JobResponse::Ok { result_descriptor }) => { + // The exit status should have been zero if no error occurred. + if exit_status != 0 { + return Ok(WorkerResponse::JobError(format!( + "unexpected exit status: {}", + exit_status + ))) + } + + Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv }) + }, + Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)), + Err(job_error) => { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "execute job error: {}", + job_error, + ); + if matches!(job_error, JobError::TimedOut) { + Ok(WorkerResponse::JobTimedOut) + } else { + Ok(WorkerResponse::JobError(job_error.to_string())) + } + }, + } + }, + // The job was killed by the given signal. + // + // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some + // other reason, so we still need to check for seccomp violations elsewhere. + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Ok(WorkerResponse::JobDied { + err: format!("received signal: {signal:?}"), + job_pid: job_pid.as_raw(), + }), + Err(errno) => Ok(internal_error_from_errno("waitpid", errno)), + + // It is within an attacker's power to send an unexpected exit status. So we cannot treat + // this as an internal error (which would make us abstain), but must vote against. 
+ Ok(unexpected_wait_status) => Ok(WorkerResponse::JobDied { + err: format!("unexpected status from wait: {unexpected_wait_status:?}"), + job_pid: job_pid.as_raw(), + }), + } +} + +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. +/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. +fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { + let response_bytes = framed_recv_blocking(received_data)?; + JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("execute pvf recv_child_response: decode error: {:?}", e), + ) + }) +} + +/// Write response to the pipe and exit process after. +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `response`: Child process response, or error. +fn send_child_response(pipe_write: &mut PipeWriter, response: JobResult) -> ! { + framed_send_blocking(pipe_write, response.encode().as_slice()) + .unwrap_or_else(|_| process::exit(libc::EXIT_FAILURE)); + + if response.is_ok() { + process::exit(libc::EXIT_SUCCESS) + } else { + process::exit(libc::EXIT_FAILURE) + } +} - Response::Ok { result_descriptor, duration } +fn internal_error_from_errno(context: &'static str, errno: Errno) -> WorkerResponse { + WorkerResponse::InternalError(InternalValidationError::Kernel(format!( + "{}: {}: {}", + context, + errno, + io::Error::last_os_error() + ))) } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index e21583ecc8b7595599fe775eb003b97f54bd31db..81e887afe4d0b864ede9184bbe48a366c22e1522 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -6,7 +6,11 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] +blake3 = "1.5" cfg-if = "1.0" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" @@ -14,6 +18,8 @@ rayon = "1.5.1" tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tikv-jemallocator = { version = "0.5.0", optional = true } +os_pipe = "1.1.4" +nix = { version = "0.27.1", features = ["process", "resource"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs index ba2568cd80cc634c16e43065a9ffd1fd31137e6c..d531c90b64b578e31f42a13f2399b4343469fa6d 100644 --- a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs @@ -16,7 +16,7 @@ use criterion::{criterion_group, criterion_main, Criterion, SamplingMode}; use polkadot_node_core_pvf_common::{ - 
executor_intf::{prepare, prevalidate}, + executor_interface::{prepare, prevalidate}, prepare::PrepareJobKind, pvf::PvfPrepData, }; diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index 37a4dd06075e9a8aa1b6d90bd80dce8830fb0415..af5ac8c5974900055a9623fe5ec44242d2d77a2d 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -18,7 +18,7 @@ mod memory_stats; -use polkadot_node_core_pvf_common::executor_intf::{prepare, prevalidate}; +use polkadot_node_core_pvf_common::executor_interface::{prepare, prevalidate}; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-prepare-worker=trace`. @@ -28,28 +28,40 @@ const LOG_TARGET: &str = "parachain::pvf-prepare-worker"; use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread}; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop}; +use libc; +use nix::{ + errno::Errno, + sys::{ + resource::{Usage, UsageWho}, + wait::WaitStatus, + }, + unistd::{ForkResult, Pid}, +}; +use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult, OOM_PAYLOAD}, - executor_intf::create_runtime_from_artifact_bytes, + error::{PrepareError, PrepareWorkerResult}, + executor_interface::create_runtime_from_artifact_bytes, framed_recv_blocking, framed_send_blocking, - prepare::{MemoryStats, PrepareJobKind, PrepareStats}, + prepare::{MemoryStats, PrepareJobKind, PrepareStats, PrepareWorkerSuccess}, pvf::PvfPrepData, worker::{ - cpu_time_monitor_loop, stringify_panic_payload, - thread::{self, WaitOutcome}, - worker_event_loop, WorkerKind, + cpu_time_monitor_loop, run_worker, stringify_panic_payload, + thread::{self, spawn_worker_thread, WaitOutcome}, + WorkerKind, }, - worker_dir, ProcessTime, SecurityStatus, + worker_dir, ProcessTime, }; use polkadot_primitives::ExecutorParams; use std::{ - fs, io, + fs, + io::{self, Read}, os::{ fd::{AsRawFd, RawFd}, unix::net::UnixStream, }, path::PathBuf, + process, sync::{mpsc::channel, Arc}, time::Duration, }; @@ -65,6 +77,7 @@ static ALLOC: TrackingAllocator = static ALLOC: TrackingAllocator = TrackingAllocator(std::alloc::System); /// Contains the bytes for a successfully compiled artifact. +#[derive(Encode, Decode)] pub struct CompiledArtifact(Vec); impl CompiledArtifact { @@ -80,6 +93,7 @@ impl AsRef<[u8]> for CompiledArtifact { } } +/// Get a worker request. fn recv_request(stream: &mut UnixStream) -> io::Result { let pvf = framed_recv_blocking(stream)?; let pvf = PvfPrepData::decode(&mut &pvf[..]).map_err(|e| { @@ -91,7 +105,8 @@ fn recv_request(stream: &mut UnixStream) -> io::Result { Ok(pvf) } -fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<()> { +/// Send a worker response. +fn send_response(stream: &mut UnixStream, result: PrepareWorkerResult) -> io::Result<()> { framed_send_blocking(stream, &result.encode()) } @@ -111,18 +126,22 @@ fn start_memory_tracking(fd: RawFd, limit: Option) { // Syscalls never allocate or deallocate, so this is safe. libc::syscall(libc::SYS_write, fd, OOM_PAYLOAD.as_ptr(), OOM_PAYLOAD.len()); libc::syscall(libc::SYS_close, fd); - libc::syscall(libc::SYS_exit, 1); + // Make sure we exit from all threads. 
Copied from glibc. + libc::syscall(libc::SYS_exit_group, 1); + loop { + libc::syscall(libc::SYS_exit, 1); + } } #[cfg(not(target_os = "linux"))] { // Syscalls are not available on MacOS, so we have to use `libc` wrappers. - // Technicaly, there may be allocations inside, although they shouldn't be + // Technically, there may be allocations inside, although they shouldn't be // there. In that case, we'll see deadlocks on MacOS after the OOM condition // triggered. As we consider running a validator on MacOS unsafe, and this // code is only run by a validator, it's a lesser evil. libc::write(fd, OOM_PAYLOAD.as_ptr().cast(), OOM_PAYLOAD.len()); libc::close(fd); - std::process::exit(1); + libc::_exit(1); } })), ); @@ -155,34 +174,34 @@ fn end_memory_tracking() -> isize { /// /// 1. Get the code and parameters for preparation from the host. /// -/// 2. Start a memory tracker in a separate thread. +/// 2. Start a new child process /// -/// 3. Start the CPU time monitor loop and the actual preparation in two separate threads. +/// 3. Start the memory tracker and the actual preparation in two separate threads. /// /// 4. Wait on the two threads created in step 3. /// /// 5. Stop the memory tracker and get the stats. /// -/// 6. If compilation succeeded, write the compiled artifact into a temporary file. +/// 6. Pipe the result back to the parent process and exit from child process. /// -/// 7. Send the result of preparation back to the host. If any error occurred in the above steps, we -/// send that in the `PrepareResult`. +/// 7. If compilation succeeded, write the compiled artifact into a temporary file. +/// +/// 8. Send the result of preparation back to the host, including the checksum of the artifact. If +/// any error occurred in the above steps, we send that in the `PrepareWorkerResult`. pub fn worker_entrypoint( socket_path: PathBuf, worker_dir_path: PathBuf, node_version: Option<&str>, worker_version: Option<&str>, - security_status: SecurityStatus, ) { - worker_event_loop( + run_worker( WorkerKind::Prepare, socket_path, worker_dir_path, node_version, worker_version, - &security_status, |mut stream, worker_dir_path| { - let worker_pid = std::process::id(); + let worker_pid = process::id(); let temp_artifact_dest = worker_dir::prepare_tmp_artifact(&worker_dir_path); loop { @@ -197,186 +216,58 @@ pub fn worker_entrypoint( let prepare_job_kind = pvf.prep_kind(); let executor_params = pvf.executor_params(); - // Conditional variable to notify us when a thread is done. - let condvar = thread::get_condvar(); - - // Run the memory tracker in a regular, non-worker thread. - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let condvar_memory = Arc::clone(&condvar); - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_thread = std::thread::spawn(|| memory_tracker_loop(condvar_memory)); - - let cpu_time_start = ProcessTime::now(); - - // Spawn a new thread that runs the CPU time monitor. 
- let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); - let cpu_time_monitor_thread = thread::spawn_worker_thread( - "cpu time monitor thread", - move || { - cpu_time_monitor_loop( - cpu_time_start, - preparation_timeout, - cpu_time_monitor_rx, - ) - }, - Arc::clone(&condvar), - WaitOutcome::TimedOut, - )?; - - start_memory_tracking( - stream.as_raw_fd(), - executor_params.prechecking_max_memory().map(|v| { - v.try_into().unwrap_or_else(|_| { - gum::warn!( - LOG_TARGET, - %worker_pid, - "Illegal pre-checking max memory value {} discarded", - v, - ); - 0 - }) - }), - ); - - // Spawn another thread for preparation. - let prepare_thread = thread::spawn_worker_thread( - "prepare thread", - move || { - #[allow(unused_mut)] - let mut result = prepare_artifact(pvf, cpu_time_start); - - // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. - #[cfg(target_os = "linux")] - let mut result = result - .map(|(artifact, elapsed)| (artifact, elapsed, get_max_rss_thread())); - - // If we are pre-checking, check for runtime construction errors. - // - // As pre-checking is more strict than just preparation in terms of memory - // and time, it is okay to do extra checks here. This takes negligible time - // anyway. - if let PrepareJobKind::Prechecking = prepare_job_kind { - result = result.and_then(|output| { - runtime_construction_check( - output.0.as_ref(), - executor_params.as_ref(), - )?; - Ok(output) - }); - } + let (pipe_reader, pipe_writer) = os_pipe::pipe()?; - result + let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => { + let result = Err(error_from_errno("getrusage before", errno)); + send_response(&mut stream, result)?; + continue }, - Arc::clone(&condvar), - WaitOutcome::Finished, - )?; - - let outcome = thread::wait_for_threads(condvar); - - let peak_alloc = { - let peak = end_memory_tracking(); - gum::debug!( - target: LOG_TARGET, - %worker_pid, - "prepare job peak allocation is {} bytes", - peak, - ); - peak }; - let result = match outcome { - WaitOutcome::Finished => { - let _ = cpu_time_monitor_tx.send(()); - - match prepare_thread.join().unwrap_or_else(|err| { - Err(PrepareError::Panic(stringify_panic_payload(err))) - }) { - Err(err) => { - // Serialized error will be written into the socket. - Err(err) - }, - Ok(ok) => { - cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - let (artifact, cpu_time_elapsed, max_rss) = ok; - } else { - let (artifact, cpu_time_elapsed) = ok; - } - } - - // Stop the memory stats worker and get its observed memory stats. - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, worker_pid); - let memory_stats = MemoryStats { - #[cfg(any( - target_os = "linux", - feature = "jemalloc-allocator" - ))] - memory_tracker_stats, - #[cfg(target_os = "linux")] - max_rss: extract_max_rss_stat(max_rss, worker_pid), - // Negative peak allocation values are legit; they are narrow - // corner cases and shouldn't affect overall statistics - // significantly - peak_tracked_alloc: if peak_alloc > 0 { - peak_alloc as u64 - } else { - 0u64 - }, - }; - - // Write the serialized artifact into a temp file. - // - // PVF host only keeps artifacts statuses in its memory, - // successfully compiled code gets stored on the disk (and - // consequently deserialized by execute-workers). The prepare worker - // is only required to send `Ok` to the pool to indicate the - // success. 
- - gum::debug!( - target: LOG_TARGET, - %worker_pid, - "worker: writing artifact to {}", - temp_artifact_dest.display(), - ); - fs::write(&temp_artifact_dest, &artifact)?; - - Ok(PrepareStats { cpu_time_elapsed, memory_stats }) - }, - } + // SAFETY: new process is spawned within a single threaded process. This invariant + // is enforced by tests. + let result = match unsafe { nix::unistd::fork() } { + Err(errno) => Err(error_from_errno("fork", errno)), + Ok(ForkResult::Child) => { + // Dropping the stream closes the underlying socket. We want to make sure + // that the sandboxed child can't get any kind of information from the + // outside world. The only IPC it should be able to do is sending its + // response over the pipe. + drop(stream); + // Drop the read end so we don't have too many FDs open. + drop(pipe_reader); + + handle_child_process( + pvf, + pipe_writer, + preparation_timeout, + prepare_job_kind, + executor_params, + ) }, - // If the CPU thread is not selected, we signal it to end, the join handle is - // dropped and the thread will finish in the background. - WaitOutcome::TimedOut => { - match cpu_time_monitor_thread.join() { - Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't - // finished. - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", - cpu_time_elapsed.as_millis(), - preparation_timeout.as_millis(), - ); - Err(PrepareError::TimedOut) - }, - Ok(None) => Err(PrepareError::IoErr( - "error communicating over closed channel".into(), - )), - // Errors in this thread are independent of the PVF. - Err(err) => Err(PrepareError::IoErr(stringify_panic_payload(err))), - } + Ok(ForkResult::Parent { child }) => { + // the read end will wait until all write ends have been closed, + // this drop is necessary to avoid deadlock + drop(pipe_writer); + + handle_parent_process( + pipe_reader, + worker_pid, + child, + temp_artifact_dest.clone(), + usage_before, + preparation_timeout, + ) }, - WaitOutcome::Pending => unreachable!( - "we run wait_while until the outcome is no longer pending; qed" - ), }; gum::trace!( target: LOG_TARGET, %worker_pid, - "worker: sending response to host: {:?}", + "worker: sending result to host: {:?}", result ); send_response(&mut stream, result)?; @@ -385,10 +276,7 @@ pub fn worker_entrypoint( ); } -fn prepare_artifact( - pvf: PvfPrepData, - cpu_time_start: ProcessTime, -) -> Result<(CompiledArtifact, Duration), PrepareError> { +fn prepare_artifact(pvf: PvfPrepData) -> Result { let blob = match prevalidate(&pvf.code()) { Err(err) => return Err(PrepareError::Prevalidation(format!("{:?}", err))), Ok(b) => b, @@ -398,7 +286,6 @@ fn prepare_artifact( Ok(compiled_artifact) => Ok(CompiledArtifact::new(compiled_artifact)), Err(err) => Err(PrepareError::Preparation(format!("{:?}", err))), } - .map(|artifact| (artifact, cpu_time_start.elapsed())) } /// Try constructing the runtime to catch any instantiation errors during pre-checking. @@ -412,3 +299,379 @@ fn runtime_construction_check( .map(|_runtime| ()) .map_err(|err| PrepareError::RuntimeConstruction(format!("{:?}", err))) } + +#[derive(Encode, Decode)] +struct JobResponse { + artifact: CompiledArtifact, + memory_stats: MemoryStats, +} + +/// This is used to handle child process during pvf prepare worker. 
+/// It prepares the artifact and tracks memory stats during preparation, then
+/// pipes the response back to the parent process.
+///
+/// # Arguments
+///
+/// - `pvf`: `PvfPrepData` structure, containing the data needed to prepare the artifact.
+///
+/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe.
+///
+/// - `preparation_timeout`: The timeout in `Duration`.
+///
+/// - `prepare_job_kind`: The kind of prepare job.
+///
+/// - `executor_params`: Deterministically serialized execution environment semantics.
+///
+/// # Returns
+///
+/// - If any error occurs, the error is piped back to the parent as a `PrepareError`.
+///
+/// - On success, a `JobResponse` is piped back to the parent.
+fn handle_child_process(
+	pvf: PvfPrepData,
+	mut pipe_write: PipeWriter,
+	preparation_timeout: Duration,
+	prepare_job_kind: PrepareJobKind,
+	executor_params: Arc<ExecutorParams>,
+) -> ! {
+	let worker_job_pid = process::id();
+	gum::debug!(
+		target: LOG_TARGET,
+		%worker_job_pid,
+		?prepare_job_kind,
+		?preparation_timeout,
+		"worker job: preparing artifact",
+	);
+
+	// Conditional variable to notify us when a thread is done.
+	let condvar = thread::get_condvar();
+
+	// Run the memory tracker in a regular, non-worker thread.
+	#[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))]
+	let condvar_memory = Arc::clone(&condvar);
+	#[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))]
+	let memory_tracker_thread = std::thread::spawn(|| memory_tracker_loop(condvar_memory));
+
+	start_memory_tracking(
+		pipe_write.as_raw_fd(),
+		executor_params.prechecking_max_memory().map(|v| {
+			v.try_into().unwrap_or_else(|_| {
+				gum::warn!(
+					LOG_TARGET,
+					%worker_job_pid,
+					"Illegal pre-checking max memory value {} discarded",
+					v,
+				);
+				0
+			})
+		}),
+	);
+
+	let cpu_time_start = ProcessTime::now();
+
+	// Spawn a new thread that runs the CPU time monitor.
+	let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>();
+	let cpu_time_monitor_thread = thread::spawn_worker_thread(
+		"cpu time monitor thread",
+		move || cpu_time_monitor_loop(cpu_time_start, preparation_timeout, cpu_time_monitor_rx),
+		Arc::clone(&condvar),
+		WaitOutcome::TimedOut,
+	)
+	.unwrap_or_else(|err| {
+		send_child_response(&mut pipe_write, Err(PrepareError::IoErr(err.to_string())))
+	});
+
+	let prepare_thread = spawn_worker_thread(
+		"prepare worker",
+		move || {
+			#[allow(unused_mut)]
+			let mut result = prepare_artifact(pvf);
+
+			// Get the `ru_maxrss` stat. If supported, call getrusage for the thread.
+			#[cfg(target_os = "linux")]
+			let mut result = result.map(|artifact| (artifact, get_max_rss_thread()));
+
+			// If we are pre-checking, check for runtime construction errors.
+			//
+			// As pre-checking is more strict than just preparation in terms of memory
+			// and time, it is okay to do extra checks here. This takes negligible time
+			// anyway.
+ if let PrepareJobKind::Prechecking = prepare_job_kind { + result = result.and_then(|output| { + runtime_construction_check(output.0.as_ref(), &executor_params)?; + Ok(output) + }); + } + result + }, + Arc::clone(&condvar), + WaitOutcome::Finished, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(PrepareError::IoErr(err.to_string()))) + }); + + let outcome = thread::wait_for_threads(condvar); + + let peak_alloc = { + let peak = end_memory_tracking(); + gum::debug!( + target: LOG_TARGET, + %worker_job_pid, + "prepare job peak allocation is {} bytes", + peak, + ); + peak + }; + + let result = match outcome { + WaitOutcome::Finished => { + let _ = cpu_time_monitor_tx.send(()); + + match prepare_thread.join().unwrap_or_else(|err| { + send_child_response( + &mut pipe_write, + Err(PrepareError::JobError(stringify_panic_payload(err))), + ) + }) { + Err(err) => Err(err), + Ok(ok) => { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + let (artifact, max_rss) = ok; + } else { + let artifact = ok; + } + } + + // Stop the memory stats worker and get its observed memory stats. + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, process::id()); + + let memory_stats = MemoryStats { + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + memory_tracker_stats, + #[cfg(target_os = "linux")] + max_rss: extract_max_rss_stat(max_rss, process::id()), + // Negative peak allocation values are legit; they are narrow + // corner cases and shouldn't affect overall statistics + // significantly + peak_tracked_alloc: if peak_alloc > 0 { peak_alloc as u64 } else { 0u64 }, + }; + + Ok(JobResponse { artifact, memory_stats }) + }, + } + }, + + // If the CPU thread is not selected, we signal it to end, the join handle is + // dropped and the thread will finish in the background. + WaitOutcome::TimedOut => match cpu_time_monitor_thread.join() { + Ok(Some(_cpu_time_elapsed)) => Err(PrepareError::TimedOut), + Ok(None) => Err(PrepareError::IoErr("error communicating over closed channel".into())), + Err(err) => Err(PrepareError::IoErr(stringify_panic_payload(err))), + }, + WaitOutcome::Pending => + unreachable!("we run wait_while until the outcome is no longer pending; qed"), + }; + + send_child_response(&mut pipe_write, result); +} + +/// Waits for child process to finish and handle child response from pipe. +/// +/// # Arguments +/// +/// - `pipe_read`: A `PipeReader` used to read data from the child process. +/// +/// - `child`: The child pid. +/// +/// - `temp_artifact_dest`: The destination `PathBuf` to write the temporary artifact file. +/// +/// - `worker_pid`: The PID of the child process. +/// +/// - `usage_before`: Resource usage statistics before executing the child process. +/// +/// - `timeout`: The maximum allowed time for the child process to finish, in `Duration`. +/// +/// # Returns +/// +/// - If the child send response without an error, this function returns `Ok(PrepareStats)` +/// containing memory and CPU usage statistics. +/// +/// - If the child send response with an error, it returns a `PrepareError` with that error. +/// +/// - If the child process timeout, it returns `PrepareError::TimedOut`. +fn handle_parent_process( + mut pipe_read: PipeReader, + worker_pid: u32, + job_pid: Pid, + temp_artifact_dest: PathBuf, + usage_before: Usage, + timeout: Duration, +) -> Result { + // Read from the child. 
Don't decode unless the process exited normally, which we check later. + let mut received_data = Vec::new(); + pipe_read + .read_to_end(&mut received_data) + .map_err(|err| PrepareError::IoErr(err.to_string()))?; + + let status = nix::sys::wait::waitpid(job_pid, None); + gum::trace!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "prepare worker received wait status from job: {:?}", + status, + ); + + let usage_after = nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) + .map_err(|errno| error_from_errno("getrusage after", errno))?; + + // Using `getrusage` is needed to check whether child has timedout since we cannot rely on + // child to report its own time. + // As `getrusage` returns resource usage from all terminated child processes, + // it is necessary to subtract the usage before the current child process to isolate its cpu + // time + let cpu_tv = get_total_cpu_usage(usage_after) - get_total_cpu_usage(usage_before); + if cpu_tv >= timeout { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", + cpu_tv.as_millis(), + timeout.as_millis(), + ); + return Err(PrepareError::TimedOut) + } + + match status { + Ok(WaitStatus::Exited(_pid, exit_status)) => { + let mut reader = io::BufReader::new(received_data.as_slice()); + let result = recv_child_response(&mut reader) + .map_err(|err| PrepareError::JobError(err.to_string()))?; + + match result { + Err(err) => Err(err), + Ok(JobResponse { artifact, memory_stats }) => { + // The exit status should have been zero if no error occurred. + if exit_status != 0 { + return Err(PrepareError::JobError(format!( + "unexpected exit status: {}", + exit_status + ))) + } + + // Write the serialized artifact into a temp file. + // + // PVF host only keeps artifacts statuses in its memory, + // successfully compiled code gets stored on the disk (and + // consequently deserialized by execute-workers). The prepare worker + // is only required to send `Ok` to the pool to indicate the + // success. + gum::debug!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + "worker: writing artifact to {}", + temp_artifact_dest.display(), + ); + // Write to the temp file created by the host. + if let Err(err) = fs::write(&temp_artifact_dest, &artifact) { + return Err(PrepareError::IoErr(err.to_string())) + }; + + let checksum = blake3::hash(&artifact.as_ref()).to_hex().to_string(); + Ok(PrepareWorkerSuccess { + checksum, + stats: PrepareStats { memory_stats, cpu_time_elapsed: cpu_tv }, + }) + }, + } + }, + // The job was killed by the given signal. + // + // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some + // other reason, so we still need to check for seccomp violations elsewhere. + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Err(PrepareError::JobDied { + err: format!("received signal: {signal:?}"), + job_pid: job_pid.as_raw(), + }), + Err(errno) => Err(error_from_errno("waitpid", errno)), + + // An attacker can make the child process return any exit status it wants. So we can treat + // all unexpected cases the same way. + Ok(unexpected_wait_status) => Err(PrepareError::JobDied { + err: format!("unexpected status from wait: {unexpected_wait_status:?}"), + job_pid: job_pid.as_raw(), + }), + } +} + +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. 
+/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. +fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { + let response_bytes = framed_recv_blocking(received_data)?; + JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("prepare pvf recv_child_response: decode error: {:?}", e), + ) + }) +} + +/// Write a job response to the pipe and exit process after. +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `response`: Child process response +fn send_child_response(pipe_write: &mut PipeWriter, response: JobResult) -> ! { + framed_send_blocking(pipe_write, response.encode().as_slice()) + .unwrap_or_else(|_| process::exit(libc::EXIT_FAILURE)); + + if response.is_ok() { + process::exit(libc::EXIT_SUCCESS) + } else { + process::exit(libc::EXIT_FAILURE) + } +} + +fn error_from_errno(context: &'static str, errno: Errno) -> PrepareError { + PrepareError::Kernel(format!("{}: {}: {}", context, errno, io::Error::last_os_error())) +} + +type JobResult = Result; + +/// Pre-encoded length-prefixed `JobResult::Err(PrepareError::OutOfMemory)` +const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08"; + +#[test] +fn pre_encoded_payloads() { + // NOTE: This must match the type of `response` in `send_child_response`. + let oom_unencoded: JobResult = JobResult::Err(PrepareError::OutOfMemory); + let oom_encoded = oom_unencoded.encode(); + // The payload is prefixed with its length in `framed_send`. + let mut oom_payload = oom_encoded.len().to_le_bytes().to_vec(); + oom_payload.extend(oom_encoded); + assert_eq!(oom_payload, OOM_PAYLOAD); +} diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index dd83f76494ed6cc47dea8f06fc3929ffeabc995b..17ce5b443e3387600153a6d5fe5703650bf3a2d0 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -16,10 +16,10 @@ //! PVF artifacts (final compiled code blobs). //! -//! # Lifecycle of an artifact +//! # Lifecycle of an artifact //! -//! 1. During node start-up, the artifacts cache is cleaned up. This means that all local artifacts -//! stored on-disk are cleared, and we start with an empty [`Artifacts`] table. +//! 1. During node start-up, we will check the cached artifacts, if any. The stale and corrupted +//! ones are pruned. The valid ones are registered in the [`Artifacts`] table. //! //! 2. In order to be executed, a PVF should be prepared first. This means that artifacts should //! have an [`ArtifactState::Prepared`] entry for that artifact in the table. If not, the @@ -55,18 +55,30 @@ //! older by a predefined parameter. This process is run very rarely (say, once a day). Once the //! artifact is expired it is removed from disk eagerly atomically. 
-use crate::host::PrepareResultSender; +use crate::{host::PrecheckResultSender, LOG_TARGET}; use always_assert::always; -use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; +use polkadot_core_primitives::Hash; +use polkadot_node_core_pvf_common::{ + error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData, RUNTIME_VERSION, +}; use polkadot_node_primitives::NODE_VERSION; use polkadot_parachain_primitives::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParamsHash; use std::{ collections::HashMap, + io, path::{Path, PathBuf}, + str::FromStr as _, time::{Duration, SystemTime}, }; +const RUNTIME_PREFIX: &str = "wasmtime_v"; +const NODE_PREFIX: &str = "polkadot_v"; + +fn artifact_prefix() -> String { + format!("{}{}_{}{}", RUNTIME_PREFIX, RUNTIME_VERSION, NODE_PREFIX, NODE_VERSION) +} + /// Identifier of an artifact. Encodes a code hash of the PVF and a hash of executor parameter set. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ArtifactId { @@ -75,9 +87,6 @@ pub struct ArtifactId { } impl ArtifactId { - const PREFIX: &'static str = "wasmtime_"; - const NODE_VERSION_PREFIX: &'static str = "polkadot_v"; - /// Creates a new artifact ID with the given hash. pub fn new(code_hash: ValidationCodeHash, executor_params_hash: ExecutorParamsHash) -> Self { Self { code_hash, executor_params_hash } @@ -88,38 +97,34 @@ impl ArtifactId { Self::new(pvf.code_hash(), pvf.executor_params().hash()) } - /// Tries to recover the artifact id from the given file name. - #[cfg(test)] - pub fn from_file_name(file_name: &str) -> Option<Self> { - use polkadot_core_primitives::Hash; - use std::str::FromStr as _; - - let file_name = - file_name.strip_prefix(Self::PREFIX)?.strip_prefix(Self::NODE_VERSION_PREFIX)?; - - // [ node version | code hash | param hash ] - let parts: Vec<&str> = file_name.split('_').collect(); - let (_node_ver, code_hash_str, executor_params_hash_str) = (parts[0], parts[1], parts[2]); - - let code_hash = Hash::from_str(code_hash_str).ok()?.into(); - let executor_params_hash = - ExecutorParamsHash::from_hash(Hash::from_str(executor_params_hash_str).ok()?); - - Some(Self { code_hash, executor_params_hash }) - } - - /// Returns the expected path to this artifact given the root of the cache. - pub fn path(&self, cache_path: &Path) -> PathBuf { + /// Returns the canonical path to the concluded artifact. + pub(crate) fn path(&self, cache_path: &Path, checksum: &str) -> PathBuf { let file_name = format!( - "{}{}{}_{:#x}_{:#x}", - Self::PREFIX, - Self::NODE_VERSION_PREFIX, - NODE_VERSION, + "{}_{:#x}_{:#x}_0x{}", + artifact_prefix(), self.code_hash, - self.executor_params_hash + self.executor_params_hash, + checksum ); cache_path.join(file_name) } + + /// Tries to recover the artifact id from the given file name. + /// Return `None` if the given file name is invalid. + /// VALID_NAME := <prefix> _ <code_hash> _ <param_hash> _ <checksum> + fn from_file_name(file_name: &str) -> Option<Self> { + let file_name = file_name.strip_prefix(&artifact_prefix())?.strip_prefix('_')?; + let parts: Vec<&str> = file_name.split('_').collect(); + + if let [code_hash, param_hash, _checksum] = parts[..] { + let code_hash = Hash::from_str(code_hash).ok()?.into(); + let executor_params_hash = + ExecutorParamsHash::from_hash(Hash::from_str(param_hash).ok()?); + return Some(Self { code_hash, executor_params_hash }) + } + + None + } } /// A bundle of the artifact ID and the path.
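To make the naming scheme above concrete, here is an illustrative round trip (placeholder version numbers and shortened hashes, not real values) that assembles a concluded artifact name the way `path()` does and splits it back apart the way `from_file_name()` does:

fn main() {
    // Placeholder prefix; the real one is built from RUNTIME_VERSION and NODE_VERSION.
    let prefix = "wasmtime_v8.0.0_polkadot_v1.1.0".to_string();
    let (code_hash, param_hash, checksum) = ("0x11aa", "0x22bb", "0x33cc");

    // Assembly: <prefix>_<code_hash>_<param_hash>_<checksum>.
    let file_name = format!("{prefix}_{code_hash}_{param_hash}_{checksum}");

    // Parsing: strip the prefix plus the separating underscore, then expect
    // exactly three remaining `_`-separated parts.
    let rest = file_name
        .strip_prefix(prefix.as_str())
        .and_then(|rest| rest.strip_prefix('_'))
        .expect("the name was built with this prefix");
    let parts: Vec<&str> = rest.split('_').collect();
    assert!(matches!(parts[..], [c, p, s] if c == code_hash && p == param_hash && s == checksum));
}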
@@ -136,8 +141,8 @@ pub struct ArtifactPathId { } impl ArtifactPathId { - pub(crate) fn new(artifact_id: ArtifactId, cache_path: &Path) -> Self { - Self { path: artifact_id.path(cache_path), id: artifact_id } + pub(crate) fn new(artifact_id: ArtifactId, path: &Path) -> Self { + Self { id: artifact_id, path: path.to_owned() } } } @@ -148,6 +153,8 @@ pub enum ArtifactState { /// That means that the artifact should be accessible through the path obtained by the artifact /// id (unless, it was removed externally). Prepared { + /// The path of the compiled artifact. + path: PathBuf, /// The time when the artifact was last needed. /// /// This is updated when we get the heads up for this artifact or when we just discover @@ -159,7 +166,7 @@ pub enum ArtifactState { /// A task to prepare this artifact is scheduled. Preparing { /// List of result senders that are waiting for a response. - waiting_for_response: Vec, + waiting_for_response: Vec, /// The number of times this artifact has failed to prepare. num_failures: u32, }, @@ -177,32 +184,135 @@ pub enum ArtifactState { /// A container of all known artifact ids and their states. pub struct Artifacts { - artifacts: HashMap, + inner: HashMap, } impl Artifacts { - /// Initialize a blank cache at the given path. This will clear everything present at the - /// given path, to be populated over time. - /// - /// The recognized artifacts will be filled in the table and unrecognized will be removed. - pub async fn new(cache_path: &Path) -> Self { - // First delete the entire cache. This includes artifacts and any leftover worker dirs (see - // [`WorkerDir`]). Nodes are long-running so this should populate shortly. - let _ = tokio::fs::remove_dir_all(cache_path).await; - // Make sure that the cache path directory and all its parents are created. - let _ = tokio::fs::create_dir_all(cache_path).await; - - Self { artifacts: HashMap::new() } + #[cfg(test)] + pub(crate) fn empty() -> Self { + Self { inner: HashMap::new() } } #[cfg(test)] - pub(crate) fn empty() -> Self { - Self { artifacts: HashMap::new() } + pub(crate) fn len(&self) -> usize { + self.inner.len() + } + + /// Create an empty table and populate it with valid artifacts as [`ArtifactState::Prepared`], + /// if any. The existing caches will be checked by their file name to determine whether they are + /// valid, e.g., matching the current node version. The ones deemed invalid will be pruned. + /// + /// Create the cache directory on-disk if it doesn't exist. + pub async fn new_and_prune(cache_path: &Path) -> Self { + let mut artifacts = Self { inner: HashMap::new() }; + let _ = artifacts.insert_and_prune(cache_path).await.map_err(|err| { + gum::error!( + target: LOG_TARGET, + "could not initialize artifacts cache: {err}", + ) + }); + artifacts + } + + async fn insert_and_prune(&mut self, cache_path: &Path) -> Result<(), String> { + async fn is_corrupted(path: &Path) -> bool { + let checksum = match tokio::fs::read(path).await { + Ok(bytes) => blake3::hash(&bytes), + Err(err) => { + // just remove the file if we cannot read it + gum::warn!( + target: LOG_TARGET, + ?err, + "unable to read artifact {:?} when checking integrity, removing...", + path, + ); + return true + }, + }; + + if let Some(file_name) = path.file_name() { + if let Some(file_name) = file_name.to_str() { + return !file_name.ends_with(checksum.to_hex().as_str()) + } + } + true + } + + // Insert the entry into the artifacts table if it is valid. + // Otherwise, prune it. 
+ async fn insert_or_prune( + artifacts: &mut Artifacts, + entry: &tokio::fs::DirEntry, + cache_path: &Path, + ) -> Result<(), String> { + let file_type = entry.file_type().await; + let file_name = entry.file_name(); + + match file_type { + Ok(file_type) => + if !file_type.is_file() { + return Ok(()) + }, + Err(err) => return Err(format!("unable to get file type for {file_name:?}: {err}")), + } + + if let Some(file_name) = file_name.to_str() { + let id = ArtifactId::from_file_name(file_name); + let path = cache_path.join(file_name); + + if id.is_none() || is_corrupted(&path).await { + let _ = tokio::fs::remove_file(&path).await; + return Err(format!("invalid artifact {path:?}, file deleted")) + } + + let id = id.expect("checked is_none() above; qed"); + gum::debug!( + target: LOG_TARGET, + "reusing existing {:?} for node version v{}", + &path, + NODE_VERSION, + ); + artifacts.insert_prepared(id, path, SystemTime::now(), Default::default()); + + Ok(()) + } else { + Err(format!("non-Unicode file name {file_name:?} found in {cache_path:?}")) + } + } + + // Make sure that the cache path directory and all its parents are created. + if let Err(err) = tokio::fs::create_dir_all(cache_path).await { + if err.kind() != io::ErrorKind::AlreadyExists { + return Err(format!("failed to create dir {cache_path:?}: {err}")) + } + } + + let mut dir = tokio::fs::read_dir(cache_path) + .await + .map_err(|err| format!("failed to read dir {cache_path:?}: {err}"))?; + + loop { + match dir.next_entry().await { + Ok(Some(entry)) => + if let Err(err) = insert_or_prune(self, &entry, cache_path).await { + gum::warn!( + target: LOG_TARGET, + ?cache_path, + "could not insert entry {:?} into the artifact cache: {}", + entry, + err, + ) + }, + Ok(None) => return Ok(()), + Err(err) => + return Err(format!("error processing artifacts in {cache_path:?}: {err}")), + } + } } /// Returns the state of the given artifact by its ID. pub fn artifact_state_mut(&mut self, artifact_id: &ArtifactId) -> Option<&mut ArtifactState> { - self.artifacts.get_mut(artifact_id) + self.inner.get_mut(artifact_id) } /// Inform the table about the artifact with the given ID. The state will be set to "preparing". @@ -212,53 +322,52 @@ impl Artifacts { pub fn insert_preparing( &mut self, artifact_id: ArtifactId, - waiting_for_response: Vec, + waiting_for_response: Vec, ) { // See the precondition. always!(self - .artifacts + .inner .insert(artifact_id, ArtifactState::Preparing { waiting_for_response, num_failures: 0 }) .is_none()); } /// Insert an artifact with the given ID as "prepared". /// - /// This function must be used only for brand-new artifacts and should never be used for - /// replacing existing ones. - #[cfg(test)] - pub fn insert_prepared( + /// This function should only be used to build the artifact table at startup with valid + /// artifact caches. + pub(crate) fn insert_prepared( &mut self, artifact_id: ArtifactId, + path: PathBuf, last_time_needed: SystemTime, prepare_stats: PrepareStats, ) { // See the precondition. always!(self - .artifacts - .insert(artifact_id, ArtifactState::Prepared { last_time_needed, prepare_stats }) + .inner + .insert(artifact_id, ArtifactState::Prepared { path, last_time_needed, prepare_stats }) .is_none()); } - /// Remove and retrieve the artifacts from the table that are older than the supplied - /// Time-To-Live. - pub fn prune(&mut self, artifact_ttl: Duration) -> Vec { + /// Remove artifacts older than the given TTL and return id and path of the removed ones. 
+ pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<(ArtifactId, PathBuf)> { let now = SystemTime::now(); let mut to_remove = vec![]; - for (k, v) in self.artifacts.iter() { - if let ArtifactState::Prepared { last_time_needed, .. } = *v { + for (k, v) in self.inner.iter() { + if let ArtifactState::Prepared { last_time_needed, ref path, .. } = *v { if now .duration_since(last_time_needed) .map(|age| age > artifact_ttl) .unwrap_or(false) { - to_remove.push(k.clone()); + to_remove.push((k.clone(), path.clone())); } } } for artifact in &to_remove { - self.artifacts.remove(artifact); + self.inner.remove(&artifact.0); } to_remove @@ -267,13 +376,72 @@ impl Artifacts { #[cfg(test)] mod tests { - use super::{ArtifactId, Artifacts, NODE_VERSION}; + use super::{artifact_prefix as prefix, ArtifactId, Artifacts, NODE_VERSION, RUNTIME_VERSION}; use polkadot_primitives::ExecutorParamsHash; + use rand::Rng; use sp_core::H256; - use std::{path::Path, str::FromStr}; + use std::{ + fs, + io::Write, + path::{Path, PathBuf}, + str::FromStr, + }; + + fn rand_hash(len: usize) -> String { + let mut rng = rand::thread_rng(); + let hex: Vec<_> = "0123456789abcdef".chars().collect(); + (0..len).map(|_| hex[rng.gen_range(0..hex.len())]).collect() + } + + fn file_name(code_hash: &str, param_hash: &str, checksum: &str) -> String { + format!("{}_0x{}_0x{}_0x{}", prefix(), code_hash, param_hash, checksum) + } + + fn create_artifact( + dir: impl AsRef, + prefix: &str, + code_hash: impl AsRef, + params_hash: impl AsRef, + ) -> (PathBuf, String) { + fn artifact_path_without_checksum( + dir: impl AsRef, + prefix: &str, + code_hash: impl AsRef, + params_hash: impl AsRef, + ) -> PathBuf { + let mut path = dir.as_ref().to_path_buf(); + let file_name = + format!("{}_0x{}_0x{}", prefix, code_hash.as_ref(), params_hash.as_ref(),); + path.push(file_name); + path + } + + let (code_hash, params_hash) = (code_hash.as_ref(), params_hash.as_ref()); + let path = artifact_path_without_checksum(dir, prefix, code_hash, params_hash); + let mut file = fs::File::create(&path).unwrap(); + + let content = format!("{}{}", code_hash, params_hash).into_bytes(); + file.write_all(&content).unwrap(); + let checksum = blake3::hash(&content).to_hex().to_string(); + + (path, checksum) + } + + fn create_rand_artifact(dir: impl AsRef, prefix: &str) -> (PathBuf, String) { + create_artifact(dir, prefix, rand_hash(64), rand_hash(64)) + } - fn file_name(code_hash: &str, param_hash: &str) -> String { - format!("wasmtime_polkadot_v{}_0x{}_0x{}", NODE_VERSION, code_hash, param_hash) + fn concluded_path(path: impl AsRef, checksum: &str) -> PathBuf { + let path = path.as_ref(); + let mut file_name = path.file_name().unwrap().to_os_string(); + file_name.push("_0x"); + file_name.push(checksum); + path.with_file_name(file_name) + } + + #[test] + fn artifact_prefix() { + assert_eq!(prefix(), format!("wasmtime_v{}_polkadot_v{}", RUNTIME_VERSION, NODE_VERSION)); } #[test] @@ -284,6 +452,7 @@ mod tests { let file_name = file_name( "0022800000000000000000000000000000000000000000000000000000000000", "0033900000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000", ); assert_eq!( @@ -305,40 +474,53 @@ mod tests { let dir = Path::new("/test"); let code_hash = "1234567890123456789012345678901234567890123456789012345678901234"; let params_hash = "4321098765432109876543210987654321098765432109876543210987654321"; - let file_name = file_name(code_hash, params_hash); + let checksum = "34567890123456789012345678901234"; + let file_name 
= file_name(code_hash, params_hash, checksum); let code_hash = H256::from_str(code_hash).unwrap(); let params_hash = H256::from_str(params_hash).unwrap(); + let path = ArtifactId::new(code_hash.into(), ExecutorParamsHash::from_hash(params_hash)) + .path(dir, checksum); - assert_eq!( - ArtifactId::new(code_hash.into(), ExecutorParamsHash::from_hash(params_hash)) - .path(dir) - .to_str(), - Some(format!("/test/{}", file_name).as_str()), - ); + assert_eq!(path.to_str().unwrap(), format!("/test/{}", file_name)); } #[tokio::test] - async fn artifacts_removes_cache_on_startup() { - let fake_cache_path = crate::worker_intf::tmppath("test-cache").await.unwrap(); - let fake_artifact_path = { - let mut p = fake_cache_path.clone(); - p.push("wasmtime_0x1234567890123456789012345678901234567890123456789012345678901234"); - p - }; + async fn remove_stale_cache_on_startup() { + let cache_dir = tempfile::Builder::new().prefix("test-cache-").tempdir().unwrap(); + + // invalid prefix + create_rand_artifact(&cache_dir, ""); + create_rand_artifact(&cache_dir, "wasmtime_polkadot_v"); + create_rand_artifact(&cache_dir, "wasmtime_v8.0.0_polkadot_v1.0.0"); + + let prefix = prefix(); + + // no checksum + create_rand_artifact(&cache_dir, &prefix); + + // invalid hashes + let (path, checksum) = create_artifact(&cache_dir, &prefix, "000", "000001"); + let new_path = concluded_path(&path, &checksum); + fs::rename(&path, &new_path).unwrap(); - // create a tmp cache with 1 artifact. + // checksum tampered + let (path, checksum) = create_rand_artifact(&cache_dir, &prefix); + let new_path = concluded_path(&path, checksum.chars().rev().collect::().as_str()); + fs::rename(&path, &new_path).unwrap(); - std::fs::create_dir_all(&fake_cache_path).unwrap(); - std::fs::File::create(fake_artifact_path).unwrap(); + // valid + let (path, checksum) = create_rand_artifact(&cache_dir, &prefix); + let new_path = concluded_path(&path, &checksum); + fs::rename(&path, &new_path).unwrap(); - // this should remove it and re-create. + assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 7); - let p = &fake_cache_path; - Artifacts::new(p).await; + let artifacts = Artifacts::new_and_prune(cache_dir.path()).await; - assert_eq!(std::fs::read_dir(&fake_cache_path).unwrap().count(), 0); + assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 1); + assert_eq!(artifacts.len(), 1); - std::fs::remove_dir_all(fake_cache_path).unwrap(); + fs::remove_dir_all(cache_dir).unwrap(); } } diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index 87ef0b54a0406776b865ea143185fd8012ef82e1..442443f326e9815fe3b9e56813356989748a8d79 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -19,55 +19,69 @@ use polkadot_node_core_pvf_common::error::{InternalValidationError, PrepareError /// A error raised during validation of the candidate. #[derive(Debug, Clone)] pub enum ValidationError { - /// The error was raised because the candidate is invalid. + /// Deterministic preparation issue. In practice, most of the problems should be caught by + /// prechecking, so this may be a sign of internal conditions. /// - /// Whenever we are unsure if the error was due to the candidate or not, we must vote invalid. - InvalidCandidate(InvalidCandidate), - /// Some internal error occurred. - InternalError(InternalValidationError), + /// In principle if preparation of the `WASM` fails, the current candidate cannot be the + /// reason for that. So we can't say whether it is invalid or not. 
In addition, with + /// pre-checking enabled only valid runtimes should ever get enacted, so we can be + /// reasonably sure that this is some local problem on the current node. However, as this + /// particular error *seems* to indicate a deterministic error, we raise a warning. + Preparation(PrepareError), + /// The error was raised because the candidate is invalid. Should vote against. + Invalid(InvalidCandidate), + /// Possibly transient issue that may resolve after retries. Should vote against when retries + /// fail. + PossiblyInvalid(PossiblyInvalidError), + /// Preparation or execution issue caused by an internal condition. Should not vote against. + Internal(InternalValidationError), } /// A description of an error raised during executing a PVF and can be attributed to the combination /// of the candidate [`polkadot_parachain_primitives::primitives::ValidationParams`] and the PVF. #[derive(Debug, Clone)] pub enum InvalidCandidate { - /// PVF preparation ended up with a deterministic error. - PrepareError(String), - /// The failure is reported by the execution worker. The string contains the error message. - WorkerReportedError(String), - /// The worker has died during validation of a candidate. That may fall in one of the following - /// categories, which we cannot distinguish programmatically: + /// The candidate is reported to be invalid by the execution worker. The string contains the + /// error message. + WorkerReportedInvalid(String), + /// PVF execution (compilation is not included) took more time than was allotted. + HardTimeout, +} + +/// Possibly transient issue that may resolve after retries. +#[derive(Debug, Clone)] +pub enum PossiblyInvalidError { + /// The worker process (not the job) has died during validation of a candidate. /// - /// (a) Some sort of transient glitch caused the worker process to abort. An example would be - /// that the host machine ran out of free memory and the OOM killer started killing the - /// processes, and in order to save the parent it will "sacrifice child" first. + /// It's unlikely that this is caused by malicious code since workers spawn separate job + /// processes, and those job processes are sandboxed. But, it is possible. We retry in this + /// case, and if the error persists, we assume it's caused by the candidate and vote against. + AmbiguousWorkerDeath, + /// The job process (not the worker) has died for one of the following reasons: /// - /// (b) The candidate triggered a code path that has lead to the process death. For example, - /// the PVF found a way to consume unbounded amount of resources and then it either - /// exceeded an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a - /// bug in wasmtime allowed the PVF to gain control over the execution worker. + /// (a) A seccomp violation occurred, most likely due to an attempt by malicious code to + /// execute arbitrary code. Note that there is no foolproof way to detect this if the operator + /// has seccomp auditing disabled. /// - /// We attribute such an event to an *invalid candidate* in either case. + /// (b) The host machine ran out of free memory and the OOM killer started killing the + /// processes, and in order to save the parent it will "sacrifice child" first. /// - /// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single - /// validator. 
If the glitch is somewhat more persistent the validator will reject all - /// candidate thrown at it and hopefully the operator notices it by decreased reward - /// performance of the validator. On the other hand, if the worker died because of (b) we would - /// have better chances to stop the attack. - AmbiguousWorkerDeath, - /// PVF execution (compilation is not included) took more time than was allotted. - HardTimeout, - /// A panic occurred and we can't be sure whether the candidate is really invalid or some - /// internal glitch occurred. Whenever we are unsure, we can never treat an error as internal - /// as we would abstain from voting. This is bad because if the issue was due to the candidate, - /// then all validators would abstain, stalling finality on the chain. So we will first retry - /// the candidate, and if the issue persists we are forced to vote invalid. - Panic(String), + /// (c) Some other reason, perhaps transient or perhaps caused by malicious code. + /// + /// We cannot treat this as an internal error because malicious code may have caused this. + AmbiguousJobDeath(String), + /// An unexpected error occurred in the job process and we can't be sure whether the candidate + /// is really invalid or some internal glitch occurred. Whenever we are unsure, we can never + /// treat an error as internal as we would abstain from voting. This is bad because if the + /// issue was due to the candidate, then all validators would abstain, stalling finality on the + /// chain. So we will first retry the candidate, and if the issue persists we are forced to + /// vote invalid. + JobError(String), } impl From for ValidationError { fn from(error: InternalValidationError) -> Self { - Self::InternalError(error) + Self::Internal(error) } } @@ -76,9 +90,9 @@ impl From for ValidationError { // Here we need to classify the errors into two errors: deterministic and non-deterministic. // See [`PrepareError::is_deterministic`]. if error.is_deterministic() { - Self::InvalidCandidate(InvalidCandidate::PrepareError(error.to_string())) + Self::Preparation(error) } else { - Self::InternalError(InternalValidationError::NonDeterministicPrepareError(error)) + Self::Internal(InternalValidationError::NonDeterministicPrepareError(error)) } } } diff --git a/polkadot/node/core/pvf/src/execute/mod.rs b/polkadot/node/core/pvf/src/execute/mod.rs index 669b9dc04d7c518d06791536462b2c8bdbcd91ea..c6d9cf90fa289601f89b0aad307483a002f89427 100644 --- a/polkadot/node/core/pvf/src/execute/mod.rs +++ b/polkadot/node/core/pvf/src/execute/mod.rs @@ -21,6 +21,6 @@ //! `polkadot_node_core_pvf_worker::execute_worker_entrypoint`. mod queue; -mod worker_intf; +mod worker_interface; pub use queue::{start, PendingExecutionRequest, ToQueue}; diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index aca604f0de21dd805e10c68c1a8d80c202010e9b..be607fe1c20b06659765776e90b29bf939f4aed7 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -16,13 +16,13 @@ //! A queue that handles requests for PVF execution. 
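The reworked error taxonomy above spells out, in its doc comments, how a validator is expected to react to each class of failure: vote against for `Invalid`, retry and only then vote against for `PossiblyInvalid`, and never vote against for `Internal` (for `Preparation` the comments only note that a warning is raised). A loose, illustrative sketch of that decision logic, using simplified stand-in types rather than the real enums:

// Stand-ins for the error classes documented above (not the real types).
enum MockError {
    Invalid,         // e.g. WorkerReportedInvalid, HardTimeout
    PossiblyInvalid, // e.g. AmbiguousWorkerDeath, AmbiguousJobDeath, JobError
    Internal,        // internal/local condition
    Preparation,     // deterministic preparation issue
}

#[derive(Debug)]
enum Reaction {
    VoteInvalid,          // definitely attributable to the candidate
    RetryThenVoteInvalid, // possibly transient; vote against only if retries keep failing
    Abstain,              // never blame the candidate for a local problem
}

fn react(err: &MockError) -> Option<Reaction> {
    match err {
        MockError::Invalid => Some(Reaction::VoteInvalid),
        MockError::PossiblyInvalid => Some(Reaction::RetryThenVoteInvalid),
        MockError::Internal => Some(Reaction::Abstain),
        // The doc comments above only prescribe a warning here; the final
        // reaction is left to the calling subsystem.
        MockError::Preparation => None,
    }
}

fn main() {
    for err in [MockError::Invalid, MockError::PossiblyInvalid, MockError::Internal, MockError::Preparation] {
        println!("{:?}", react(&err));
    }
}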
-use super::worker_intf::Outcome; +use super::worker_interface::Outcome; use crate::{ artifacts::{ArtifactId, ArtifactPathId}, host::ResultSender, metrics::Metrics, - worker_intf::{IdleWorker, WorkerHandle}, - InvalidCandidate, ValidationError, LOG_TARGET, + worker_interface::{IdleWorker, WorkerHandle}, + InvalidCandidate, PossiblyInvalidError, ValidationError, LOG_TARGET, }; use futures::{ channel::mpsc, @@ -342,20 +342,27 @@ fn handle_job_finish( }, Outcome::InvalidCandidate { err, idle_worker } => ( Some(idle_worker), - Err(ValidationError::InvalidCandidate(InvalidCandidate::WorkerReportedError(err))), + Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))), None, ), - Outcome::InternalError { err } => (None, Err(ValidationError::InternalError(err)), None), + Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None), + // Either the worker or the job timed out. Kill the worker in either case. Treated as + // definitely-invalid, because if we timed out, there's no time left for a retry. Outcome::HardTimeout => - (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)), None), + (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None), // "Maybe invalid" errors (will retry). - Outcome::IoErr => ( + Outcome::WorkerIntfErr => ( None, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), None, ), - Outcome::Panic { err } => - (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::Panic(err))), None), + Outcome::JobDied { err } => ( + None, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))), + None, + ), + Outcome::JobError { err } => + (None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), None), }; queue.metrics.execute_finished(); @@ -441,7 +448,7 @@ async fn spawn_worker_task( use futures_timer::Delay; loop { - match super::worker_intf::spawn( + match super::worker_interface::spawn( &program_path, &cache_path, job.executor_params.clone(), @@ -493,7 +500,7 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { queue.mux.push( async move { let _timer = execution_timer; - let outcome = super::worker_intf::start_work( + let outcome = super::worker_interface::start_work( idle, job.artifact.clone(), job.exec_timeout, diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs similarity index 72% rename from polkadot/node/core/pvf/src/execute/worker_intf.rs rename to polkadot/node/core/pvf/src/execute/worker_interface.rs index 61264f7d517d8fe93273b806f0ca7f8c4b942222..9f7738f00e699ab981d7fa4396fcd09d5e1a4abe 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs @@ -18,8 +18,7 @@ use crate::{ artifacts::ArtifactPathId, - security, - worker_intf::{ + worker_interface::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, @@ -30,7 +29,7 @@ use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, Response}, + execute::{Handshake, WorkerResponse}, worker_dir, SecurityStatus, }; use polkadot_parachain_primitives::primitives::ValidationResult; @@ -63,16 +62,16 @@ pub async fn spawn( 
security_status, ) .await?; - send_handshake(&mut idle_worker.stream, Handshake { executor_params }) + send_execute_handshake(&mut idle_worker.stream, Handshake { executor_params }) .await .map_err(|error| { + let err = SpawnErr::Handshake { err: error.to_string() }; gum::warn!( target: LOG_TARGET, worker_pid = %idle_worker.pid, - ?error, - "failed to send a handshake to the spawned worker", + %err ); - SpawnErr::Handshake + err })?; Ok((idle_worker, worker_handle)) } @@ -88,19 +87,26 @@ pub enum Outcome { /// a trap. Errors related to the preparation process are not expected to be encountered by the /// execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, + /// The execution time exceeded the hard limit. The worker is terminated. + HardTimeout, + /// An I/O error happened during communication with the worker. This may mean that the worker + /// process already died. The token is not returned in any case. + WorkerIntfErr, + /// The job process has died. We must kill the worker just in case. + /// + /// We cannot treat this as an internal error because malicious code may have caused this. + JobDied { err: String }, + /// An unexpected error occurred in the job process. + /// + /// Because malicious code can cause a job error, we must not treat it as an internal error. + JobError { err: String }, + /// An internal error happened during the validation. Such an error is most likely related to /// some transient glitch. /// /// Should only ever be used for errors independent of the candidate and PVF. Therefore it may /// be a problem with the worker, so we terminate it. InternalError { err: InternalValidationError }, - /// The execution time exceeded the hard limit. The worker is terminated. - HardTimeout, - /// An I/O error happened during communication with the worker. This may mean that the worker - /// process already died. The token is not returned in any case. - IoErr, - /// An unexpected panic has occurred in the execution worker. - Panic { err: String }, } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -125,10 +131,7 @@ pub async fn start_work( artifact.path.display(), ); - let artifact_path = artifact.path.clone(); with_worker_dir_setup(worker_dir, pid, &artifact.path, |worker_dir| async move { - let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; - if let Err(error) = send_request(&mut stream, &validation_params, execution_timeout).await { gum::warn!( target: LOG_TARGET, @@ -137,7 +140,7 @@ pub async fn start_work( ?error, "failed to send an execute request", ); - return Outcome::IoErr + return Outcome::WorkerIntfErr } // We use a generous timeout here. This is in addition to the one in the child process, in @@ -149,6 +152,13 @@ pub async fn start_work( let response = futures::select! { response = recv_response(&mut stream).fuse() => { match response { + Ok(response) => + handle_response( + response, + pid, + execution_timeout, + ) + .await, Err(error) => { gum::warn!( target: LOG_TARGET, @@ -157,55 +167,8 @@ pub async fn start_work( ?error, "failed to recv an execute response", ); - // The worker died. Check if it was due to a seccomp violation. - // - // NOTE: Log, but don't change the outcome. Not all validators may have - // auditing enabled, so we don't want attackers to abuse a non-deterministic - // outcome. 
- for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - validation_code_hash = ?artifact.id.code_hash, - ?artifact_path, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - return Outcome::IoErr - }, - Ok(response) => { - // Check if any syscall violations occurred during the job. For now this is - // only informative, as we are not enforcing the seccomp policy yet. - for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - validation_code_hash = ?artifact.id.code_hash, - ?artifact_path, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - - if let Response::Ok{duration, ..} = response { - if duration > execution_timeout { - // The job didn't complete within the timeout. - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "execute job took {}ms cpu time, exceeded execution timeout {}ms.", - duration.as_millis(), - execution_timeout.as_millis(), - ); - - // Return a timeout error. - return Outcome::HardTimeout; - } - } - - response + return Outcome::WorkerIntfErr }, } }, @@ -216,28 +179,58 @@ pub async fn start_work( validation_code_hash = ?artifact.id.code_hash, "execution worker exceeded lenient timeout for execution, child worker likely stalled", ); - Response::TimedOut + WorkerResponse::JobTimedOut }, }; match response { - Response::Ok { result_descriptor, duration } => Outcome::Ok { + WorkerResponse::Ok { result_descriptor, duration } => Outcome::Ok { result_descriptor, duration, idle_worker: IdleWorker { stream, pid, worker_dir }, }, - Response::InvalidCandidate(err) => Outcome::InvalidCandidate { + WorkerResponse::InvalidCandidate(err) => Outcome::InvalidCandidate { err, idle_worker: IdleWorker { stream, pid, worker_dir }, }, - Response::TimedOut => Outcome::HardTimeout, - Response::Panic(err) => Outcome::Panic { err }, - Response::InternalError(err) => Outcome::InternalError { err }, + WorkerResponse::JobTimedOut => Outcome::HardTimeout, + WorkerResponse::JobDied { err, job_pid: _ } => Outcome::JobDied { err }, + WorkerResponse::JobError(err) => Outcome::JobError { err }, + + WorkerResponse::InternalError(err) => Outcome::InternalError { err }, } }) .await } +/// Handles the case where we successfully received response bytes on the host from the child. +/// +/// Here we know the artifact exists, but is still located in a temporary file which will be cleared +/// by [`with_worker_dir_setup`]. +async fn handle_response( + response: WorkerResponse, + worker_pid: u32, + execution_timeout: Duration, +) -> WorkerResponse { + if let WorkerResponse::Ok { duration, .. } = response { + if duration > execution_timeout { + // The job didn't complete within the timeout. + gum::warn!( + target: LOG_TARGET, + worker_pid, + "execute job took {}ms cpu time, exceeded execution timeout {}ms.", + duration.as_millis(), + execution_timeout.as_millis(), + ); + + // Return a timeout error. + return WorkerResponse::JobTimedOut + } + } + + response +} + /// Create a temporary file for an artifact in the worker cache, execute the given future/closure /// passing the file path in, and clean up the worker cache. /// @@ -256,7 +249,7 @@ where // Cheaply create a hard link to the artifact. 
The artifact is always at a known location in the // worker cache, and the child can't access any other artifacts or gain any information from the // original filename. - let link_path = worker_dir::execute_artifact(&worker_dir.path); + let link_path = worker_dir::execute_artifact(worker_dir.path()); if let Err(err) = tokio::fs::hard_link(artifact_path, link_path).await { gum::warn!( target: LOG_TARGET, @@ -270,7 +263,7 @@ where } } - let worker_dir_path = worker_dir.path.clone(); + let worker_dir_path = worker_dir.path().to_owned(); let outcome = f(worker_dir).await; // Try to clear the worker dir. @@ -293,7 +286,8 @@ where outcome } -async fn send_handshake(stream: &mut UnixStream, handshake: Handshake) -> io::Result<()> { +/// Sends a handshake with information specific to the execute worker. +async fn send_execute_handshake(stream: &mut UnixStream, handshake: Handshake) -> io::Result<()> { framed_send(stream, &handshake.encode()).await } @@ -306,9 +300,9 @@ async fn send_request( framed_send(stream, &execution_timeout.encode()).await } -async fn recv_response(stream: &mut UnixStream) -> io::Result { +async fn recv_response(stream: &mut UnixStream) -> io::Result { let response_bytes = framed_recv(stream).await?; - Response::decode(&mut &response_bytes[..]).map_err(|e| { + WorkerResponse::decode(&mut response_bytes.as_slice()).map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("execute pvf recv_response: decode error: {:?}", e), diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index dd0bd85819853ed856a4518fb17062a5de5aa14b..d17a4d918e00469405b2185aec2ac3467f864c0c 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -24,22 +24,23 @@ use crate::{ artifacts::{ArtifactId, ArtifactPathId, ArtifactState, Artifacts}, execute::{self, PendingExecutionRequest}, metrics::Metrics, - prepare, security, Priority, ValidationError, LOG_TARGET, + prepare, security, Priority, SecurityStatus, ValidationError, LOG_TARGET, }; use always_assert::never; use futures::{ channel::{mpsc, oneshot}, - join, Future, FutureExt, SinkExt, StreamExt, + Future, FutureExt, SinkExt, StreamExt, }; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult}, + error::{PrecheckResult, PrepareError}, + prepare::PrepareSuccess, pvf::PvfPrepData, - SecurityStatus, }; +use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ collections::HashMap, - path::{Path, PathBuf}, + path::PathBuf, time::{Duration, SystemTime}, }; @@ -63,12 +64,14 @@ pub const EXECUTE_BINARY_NAME: &str = "polkadot-execute-worker"; pub(crate) type ResultSender = oneshot::Sender>; /// Transmission end used for sending the PVF preparation result. -pub(crate) type PrepareResultSender = oneshot::Sender; +pub(crate) type PrecheckResultSender = oneshot::Sender; /// A handle to the async process serving the validation host requests. #[derive(Clone)] pub struct ValidationHost { to_host_tx: mpsc::Sender, + /// Available security features, detected by the host during startup. 
+ pub security_status: SecurityStatus, } impl ValidationHost { @@ -83,7 +86,7 @@ impl ValidationHost { pub async fn precheck_pvf( &mut self, pvf: PvfPrepData, - result_tx: PrepareResultSender, + result_tx: PrecheckResultSender, ) -> Result<(), String> { self.to_host_tx .send(ToHost::PrecheckPvf { pvf, result_tx }) @@ -133,7 +136,7 @@ impl ValidationHost { } enum ToHost { - PrecheckPvf { pvf: PvfPrepData, result_tx: PrepareResultSender }, + PrecheckPvf { pvf: PvfPrepData, result_tx: PrecheckResultSender }, ExecutePvf(ExecutePvfInputs), HeadsUp { active_pvfs: Vec }, } @@ -153,6 +156,8 @@ pub struct Config { pub cache_path: PathBuf, /// The version of the node. `None` can be passed to skip the version check (only for tests). pub node_version: Option, + /// Whether the node is attempting to run as a secure validator. + pub secure_validator_mode: bool, /// The path to the program that can be used to spawn the prepare workers. pub prepare_worker_program_path: PathBuf, @@ -177,12 +182,14 @@ impl Config { pub fn new( cache_path: PathBuf, node_version: Option, + secure_validator_mode: bool, prepare_worker_program_path: PathBuf, execute_worker_program_path: PathBuf, ) -> Self { Self { cache_path, node_version, + secure_validator_mode, prepare_worker_program_path, prepare_worker_spawn_timeout: Duration::from_secs(3), @@ -204,29 +211,25 @@ impl Config { /// The future should not return normally but if it does then that indicates an unrecoverable error. /// In that case all pending requests will be canceled, dropping the result senders and new ones /// will be rejected. -pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future) { +pub async fn start( + config: Config, + metrics: Metrics, +) -> SubsystemResult<(ValidationHost, impl Future)> { gum::debug!(target: LOG_TARGET, ?config, "starting PVF validation host"); - // Run checks for supported security features once per host startup. Warn here if not enabled. - let security_status = { - // TODO: add check that syslog is available and that seccomp violations are logged? - let (can_enable_landlock, can_enable_seccomp, can_unshare_user_namespace_and_change_root) = join!( - security::check_landlock(&config.prepare_worker_program_path), - security::check_seccomp(&config.prepare_worker_program_path), - security::check_can_unshare_user_namespace_and_change_root( - &config.prepare_worker_program_path - ) - ); - SecurityStatus { - can_enable_landlock, - can_enable_seccomp, - can_unshare_user_namespace_and_change_root, - } + // Make sure the cache is initialized before doing anything else. + let artifacts = Artifacts::new_and_prune(&config.cache_path).await; + + // Run checks for supported security features once per host startup. If some checks fail, warn + // if Secure Validator Mode is disabled and return an error otherwise. 
+ let security_status = match security::check_security_status(&config).await { + Ok(ok) => ok, + Err(err) => return Err(SubsystemError::Context(err)), }; let (to_host_tx, to_host_rx) = mpsc::channel(10); - let validation_host = ValidationHost { to_host_tx }; + let validation_host = ValidationHost { to_host_tx, security_status: security_status.clone() }; let (to_prepare_pool, from_prepare_pool, run_prepare_pool) = prepare::start_pool( metrics.clone(), @@ -260,10 +263,7 @@ pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Fu let run_sweeper = sweeper_task(to_sweeper_rx); let run_host = async move { - let artifacts = Artifacts::new(&config.cache_path).await; - run(Inner { - cache_path: config.cache_path, cleanup_pulse_interval: Duration::from_secs(3600), artifact_ttl: Duration::from_secs(3600 * 24), artifacts, @@ -288,7 +288,7 @@ pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Fu }; }; - (validation_host, task) + Ok((validation_host, task)) } /// A mapping from an artifact ID which is in preparation state to the list of pending execution @@ -307,7 +307,6 @@ impl AwaitingPrepare { } struct Inner { - cache_path: PathBuf, cleanup_pulse_interval: Duration, artifact_ttl: Duration, artifacts: Artifacts, @@ -328,7 +327,6 @@ struct Fatal; async fn run( Inner { - cache_path, cleanup_pulse_interval, artifact_ttl, mut artifacts, @@ -372,7 +370,6 @@ async fn run( // will notice it. break_if_fatal!(handle_cleanup_pulse( - &cache_path, &mut to_sweeper_tx, &mut artifacts, artifact_ttl, @@ -391,7 +388,6 @@ async fn run( // If the artifact failed before, it could be re-scheduled for preparation here if // the preparation failure cooldown has elapsed. break_if_fatal!(handle_to_host( - &cache_path, &mut artifacts, &mut to_prepare_queue_tx, &mut to_execute_queue_tx, @@ -413,7 +409,6 @@ async fn run( // We could be eager in terms of reporting and plumb the result from the preparation // worker but we don't for the sake of simplicity. break_if_fatal!(handle_prepare_done( - &cache_path, &mut artifacts, &mut to_execute_queue_tx, &mut awaiting_prepare, @@ -425,7 +420,6 @@ async fn run( } async fn handle_to_host( - cache_path: &Path, artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, execute_queue: &mut mpsc::Sender, @@ -437,15 +431,8 @@ async fn handle_to_host( handle_precheck_pvf(artifacts, prepare_queue, pvf, result_tx).await?; }, ToHost::ExecutePvf(inputs) => { - handle_execute_pvf( - cache_path, - artifacts, - prepare_queue, - execute_queue, - awaiting_prepare, - inputs, - ) - .await?; + handle_execute_pvf(artifacts, prepare_queue, execute_queue, awaiting_prepare, inputs) + .await?; }, ToHost::HeadsUp { active_pvfs } => handle_heads_up(artifacts, prepare_queue, active_pvfs).await?, @@ -465,21 +452,21 @@ async fn handle_precheck_pvf( artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, pvf: PvfPrepData, - result_sender: PrepareResultSender, + result_sender: PrecheckResultSender, ) -> Result<(), Fatal> { let artifact_id = ArtifactId::from_pvf_prep_data(&pvf); if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { - ArtifactState::Prepared { last_time_needed, prepare_stats } => { + ArtifactState::Prepared { last_time_needed, .. 
} => { *last_time_needed = SystemTime::now(); - let _ = result_sender.send(Ok(prepare_stats.clone())); + let _ = result_sender.send(Ok(())); }, ArtifactState::Preparing { waiting_for_response, num_failures: _ } => waiting_for_response.push(result_sender), ArtifactState::FailedToProcess { error, .. } => { // Do not retry an artifact that previously failed preparation. - let _ = result_sender.send(PrepareResult::Err(error.clone())); + let _ = result_sender.send(PrecheckResult::Err(error.clone())); }, } } else { @@ -502,7 +489,6 @@ async fn handle_precheck_pvf( /// When preparing for execution, we use a more lenient timeout ([`LENIENT_PREPARATION_TIMEOUT`]) /// than when prechecking. async fn handle_execute_pvf( - cache_path: &Path, artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, execute_queue: &mut mpsc::Sender, @@ -515,8 +501,8 @@ async fn handle_execute_pvf( if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { - ArtifactState::Prepared { last_time_needed, .. } => { - let file_metadata = std::fs::metadata(artifact_id.path(cache_path)); + ArtifactState::Prepared { ref path, last_time_needed, .. } => { + let file_metadata = std::fs::metadata(path); if file_metadata.is_ok() { *last_time_needed = SystemTime::now(); @@ -525,7 +511,7 @@ async fn handle_execute_pvf( send_execute( execute_queue, execute::ToQueue::Enqueue { - artifact: ArtifactPathId::new(artifact_id, cache_path), + artifact: ArtifactPathId::new(artifact_id, path), pending_execution_request: PendingExecutionRequest { exec_timeout, params, @@ -688,7 +674,6 @@ async fn handle_heads_up( } async fn handle_prepare_done( - cache_path: &Path, artifacts: &mut Artifacts, execute_queue: &mut mpsc::Sender, awaiting_prepare: &mut AwaitingPrepare, @@ -729,7 +714,8 @@ async fn handle_prepare_done( state { for result_sender in waiting_for_response.drain(..) { - let _ = result_sender.send(result.clone()); + let result = result.clone().map(|_| ()); + let _ = result_sender.send(result); } num_failures } else { @@ -749,16 +735,18 @@ async fn handle_prepare_done( continue } - // Don't send failed artifacts to the execution's queue. 
- if let Err(ref error) = result { - let _ = result_tx.send(Err(ValidationError::from(error.clone()))); - continue - } + let path = match &result { + Ok(success) => success.path.clone(), + Err(error) => { + let _ = result_tx.send(Err(ValidationError::from(error.clone()))); + continue + }, + }; send_execute( execute_queue, execute::ToQueue::Enqueue { - artifact: ArtifactPathId::new(artifact_id.clone(), cache_path), + artifact: ArtifactPathId::new(artifact_id.clone(), &path), pending_execution_request: PendingExecutionRequest { exec_timeout, params, @@ -771,8 +759,8 @@ async fn handle_prepare_done( } *state = match result { - Ok(prepare_stats) => - ArtifactState::Prepared { last_time_needed: SystemTime::now(), prepare_stats }, + Ok(PrepareSuccess { path, stats: prepare_stats }) => + ArtifactState::Prepared { path, last_time_needed: SystemTime::now(), prepare_stats }, Err(error) => { let last_time_failed = SystemTime::now(); let num_failures = *num_failures + 1; @@ -825,7 +813,6 @@ async fn enqueue_prepare_for_execute( } async fn handle_cleanup_pulse( - cache_path: &Path, sweeper_tx: &mut mpsc::Sender, artifacts: &mut Artifacts, artifact_ttl: Duration, @@ -836,14 +823,13 @@ async fn handle_cleanup_pulse( "PVF pruning: {} artifacts reached their end of life", to_remove.len(), ); - for artifact_id in to_remove { + for (artifact_id, path) in to_remove { gum::debug!( target: LOG_TARGET, validation_code_hash = ?artifact_id.code_hash, "pruning artifact", ); - let artifact_path = artifact_id.path(cache_path); - sweeper_tx.send(artifact_path).await.map_err(|_| Fatal)?; + sweeper_tx.send(path).await.map_err(|_| Fatal)?; } Ok(()) @@ -898,10 +884,14 @@ fn pulse_every(interval: std::time::Duration) -> impl futures::Stream #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::InvalidCandidate; + use crate::PossiblyInvalidError; use assert_matches::assert_matches; use futures::future::BoxFuture; - use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats}; + use polkadot_node_core_pvf_common::{ + error::PrepareError, + prepare::{PrepareStats, PrepareSuccess}, + }; + use sp_core::hexdisplay::AsBytesRef; const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); pub(crate) const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); @@ -921,12 +911,16 @@ pub(crate) mod tests { } /// Creates a new PVF which artifact id can be uniquely identified by the given number. 
- fn artifact_id(descriminator: u32) -> ArtifactId { - ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(descriminator)) + fn artifact_id(discriminator: u32) -> ArtifactId { + ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(discriminator)) } - fn artifact_path(descriminator: u32) -> PathBuf { - artifact_id(descriminator).path(&PathBuf::from(std::env::temp_dir())).to_owned() + fn artifact_path(discriminator: u32) -> PathBuf { + let pvf = PvfPrepData::from_discriminator(discriminator); + let checksum = blake3::hash(pvf.code().as_bytes_ref()); + artifact_id(discriminator) + .path(&PathBuf::from(std::env::temp_dir()), checksum.to_hex().as_str()) + .to_owned() } struct Builder { @@ -964,8 +958,6 @@ pub(crate) mod tests { impl Test { fn new(Builder { cleanup_pulse_interval, artifact_ttl, artifacts }: Builder) -> Self { - let cache_path = PathBuf::from(std::env::temp_dir()); - let (to_host_tx, to_host_rx) = mpsc::channel(10); let (to_prepare_queue_tx, to_prepare_queue_rx) = mpsc::channel(10); let (from_prepare_queue_tx, from_prepare_queue_rx) = mpsc::unbounded(); @@ -973,7 +965,6 @@ pub(crate) mod tests { let (to_sweeper_tx, to_sweeper_rx) = mpsc::channel(10); let run = run(Inner { - cache_path, cleanup_pulse_interval, artifact_ttl, artifacts, @@ -998,7 +989,8 @@ pub(crate) mod tests { fn host_handle(&mut self) -> ValidationHost { let to_host_tx = self.to_host_tx.take().unwrap(); - ValidationHost { to_host_tx } + let security_status = Default::default(); + ValidationHost { to_host_tx, security_status } } async fn poll_and_recv_result(&mut self, result_rx: oneshot::Receiver) -> T @@ -1122,12 +1114,18 @@ pub(crate) mod tests { let mut builder = Builder::default(); builder.cleanup_pulse_interval = Duration::from_millis(100); builder.artifact_ttl = Duration::from_millis(500); - builder - .artifacts - .insert_prepared(artifact_id(1), mock_now, PrepareStats::default()); - builder - .artifacts - .insert_prepared(artifact_id(2), mock_now, PrepareStats::default()); + builder.artifacts.insert_prepared( + artifact_id(1), + artifact_path(1), + mock_now, + PrepareStats::default(), + ); + builder.artifacts.insert_prepared( + artifact_id(2), + artifact_path(2), + mock_now, + PrepareStats::default(), + ); let mut test = builder.build(); let mut host = test.host_handle(); @@ -1199,7 +1197,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1215,7 +1213,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(2), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1225,27 +1223,27 @@ pub(crate) mod tests { ); result_tx_pvf_1_1 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_1_1.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); result_tx_pvf_1_2 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_1_2.now_or_never().unwrap().unwrap(), - 
Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); result_tx_pvf_2 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_2.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); } @@ -1269,7 +1267,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1351,7 +1349,7 @@ pub(crate) mod tests { assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Err(PrepareError::TimedOut)); assert_matches!( result_rx_execute.now_or_never().unwrap().unwrap(), - Err(ValidationError::InternalError(_)) + Err(ValidationError::Internal(_)) ); // Reversed case: first send multiple precheck requests, then ask for an execution. @@ -1382,7 +1380,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(2), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1493,7 +1491,7 @@ pub(crate) mod tests { // The result should contain the error. let result = test.poll_and_recv_result(result_rx).await; - assert_matches!(result, Err(ValidationError::InternalError(_))); + assert_matches!(result, Err(ValidationError::Internal(_))); // Submit another execute request. We shouldn't try to prepare again, yet. let (result_tx_2, result_rx_2) = oneshot::channel(); @@ -1512,7 +1510,7 @@ pub(crate) mod tests { // The result should contain the original error. let result = test.poll_and_recv_result(result_rx_2).await; - assert_matches!(result, Err(ValidationError::InternalError(_))); + assert_matches!(result, Err(ValidationError::Internal(_))); // Pause for enough time to reset the cooldown for this failed prepare request. futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; @@ -1538,7 +1536,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1552,11 +1550,11 @@ pub(crate) mod tests { // Send an error for the execution here, just so we can check the result receiver is still // alive. result_tx_3 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_3.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); } @@ -1595,10 +1593,7 @@ pub(crate) mod tests { // The result should contain the error. let result = test.poll_and_recv_result(result_rx).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); // Submit another execute request. 
let (result_tx_2, result_rx_2) = oneshot::channel(); @@ -1617,10 +1612,7 @@ pub(crate) mod tests { // The result should contain the original error. let result = test.poll_and_recv_result(result_rx_2).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); // Pause for enough time to reset the cooldown for this failed prepare request. futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; @@ -1642,10 +1634,7 @@ pub(crate) mod tests { // The result should still contain the original error. let result = test.poll_and_recv_result(result_rx_3).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); } // Test that multiple heads-up requests trigger preparation retries if the first one failed. @@ -1714,7 +1703,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs index 102a91dbdad7e8bd2172b864979837cf9710e161..79391630b2d324e40f4f42bd802997392cb69b9d 100644 --- a/polkadot/node/core/pvf/src/lib.rs +++ b/polkadot/node/core/pvf/src/lib.rs @@ -84,7 +84,7 @@ //! A pruning task will run at a fixed interval of time. This task will remove all artifacts that //! weren't used or received a heads up signal for a while. //! -//! ## Execution +//! ## Execution //! //! The execute workers will be fed by the requests from the execution queue, which is basically a //! combination of a path to the compiled artifact and the @@ -98,16 +98,16 @@ mod metrics; mod prepare; mod priority; mod security; -mod worker_intf; +mod worker_interface; #[cfg(feature = "test-utils")] pub mod testing; -pub use error::{InvalidCandidate, ValidationError}; +pub use error::{InvalidCandidate, PossiblyInvalidError, ValidationError}; pub use host::{start, Config, ValidationHost, EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}; pub use metrics::Metrics; pub use priority::Priority; -pub use worker_intf::{framed_recv, framed_send, JOB_TIMEOUT_WALL_CLOCK_FACTOR}; +pub use worker_interface::{framed_recv, framed_send, JOB_TIMEOUT_WALL_CLOCK_FACTOR}; // Re-export some common types. pub use polkadot_node_core_pvf_common::{ diff --git a/polkadot/node/core/pvf/src/prepare/mod.rs b/polkadot/node/core/pvf/src/prepare/mod.rs index 580f67f73fa0c126994395ce5a2129234fca51a3..eb88070c2bab253092c9f8a37637702546a6b686 100644 --- a/polkadot/node/core/pvf/src/prepare/mod.rs +++ b/polkadot/node/core/pvf/src/prepare/mod.rs @@ -24,7 +24,7 @@ mod pool; mod queue; -mod worker_intf; +mod worker_interface; pub use pool::start as start_pool; pub use queue::{start as start_queue, FromQueue, ToQueue}; diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 6bb6ca5b64453909c0b5de622dcb725de0006d32..4e11f977c9e7d82527111cc9b751e15d1dc6c7a8 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -14,10 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use super::worker_intf::{self, Outcome}; +use super::worker_interface::{self, Outcome}; use crate::{ metrics::Metrics, - worker_intf::{IdleWorker, WorkerHandle}, + worker_interface::{IdleWorker, WorkerHandle}, LOG_TARGET, }; use always_assert::never; @@ -68,7 +68,7 @@ pub enum ToPool { /// /// In either case, the worker is considered busy and no further `StartWork` messages should be /// sent until either `Concluded` or `Rip` message is received. - StartWork { worker: Worker, pvf: PvfPrepData, artifact_path: PathBuf }, + StartWork { worker: Worker, pvf: PvfPrepData, cache_path: PathBuf }, } /// A message sent from pool to its client. @@ -232,7 +232,7 @@ fn handle_to_pool( .boxed(), ); }, - ToPool::StartWork { worker, pvf, artifact_path } => { + ToPool::StartWork { worker, pvf, cache_path } => { if let Some(data) = spawned.get_mut(worker) { if let Some(idle) = data.idle.take() { let preparation_timer = metrics.time_preparation(); @@ -242,7 +242,7 @@ fn handle_to_pool( worker, idle, pvf, - artifact_path, + cache_path, preparation_timer, ) .boxed(), @@ -278,7 +278,7 @@ async fn spawn_worker_task( use futures_timer::Delay; loop { - match worker_intf::spawn( + match worker_interface::spawn( &program_path, &cache_path, spawn_timeout, @@ -303,10 +303,10 @@ async fn start_work_task( worker: Worker, idle: IdleWorker, pvf: PvfPrepData, - artifact_path: PathBuf, + cache_path: PathBuf, _preparation_timer: Option, ) -> PoolEvent { - let outcome = worker_intf::start_work(&metrics, idle, pvf, artifact_path).await; + let outcome = worker_interface::start_work(&metrics, idle, pvf, cache_path).await; PoolEvent::StartWork(worker, outcome) } @@ -339,17 +339,17 @@ fn handle_mux( spawned, worker, idle, - Err(PrepareError::CreateTmpFileErr(err)), + Err(PrepareError::CreateTmpFile(err)), ), // Return `Concluded`, but do not kill the worker since the error was on the host // side. - Outcome::RenameTmpFileErr { worker: idle, result: _, err, src, dest } => + Outcome::RenameTmpFile { worker: idle, result: _, err, src, dest } => handle_concluded_no_rip( from_pool, spawned, worker, idle, - Err(PrepareError::RenameTmpFileErr { err, src, dest }), + Err(PrepareError::RenameTmpFile { err, src, dest }), ), // Could not clear worker cache. Kill the worker so other jobs can't see the data. Outcome::ClearWorkerDir { err } => { @@ -387,6 +387,21 @@ fn handle_mux( Ok(()) }, + // The worker might still be usable, but we kill it just in case. 
+ Outcome::JobDied { err, job_pid } => { + if attempt_retire(metrics, spawned, worker) { + reply( + from_pool, + FromPool::Concluded { + worker, + rip: true, + result: Err(PrepareError::JobDied { err, job_pid }), + }, + )?; + } + + Ok(()) + }, Outcome::TimedOut => { if attempt_retire(metrics, spawned, worker) { reply( diff --git a/polkadot/node/core/pvf/src/prepare/queue.rs b/polkadot/node/core/pvf/src/prepare/queue.rs index c38012d745482e66fa4d74647e34663447b8f0d5..c140a6cafda08ed26aa9556935a591dfa0bd2f0e 100644 --- a/polkadot/node/core/pvf/src/prepare/queue.rs +++ b/polkadot/node/core/pvf/src/prepare/queue.rs @@ -268,12 +268,12 @@ fn find_idle_worker(queue: &mut Queue) -> Option { } async fn handle_from_pool(queue: &mut Queue, from_pool: pool::FromPool) -> Result<(), Fatal> { - use pool::FromPool::*; + use pool::FromPool; match from_pool { - Spawned(worker) => handle_worker_spawned(queue, worker).await?, - Concluded { worker, rip, result } => + FromPool::Spawned(worker) => handle_worker_spawned(queue, worker).await?, + FromPool::Concluded { worker, rip, result } => handle_worker_concluded(queue, worker, rip, result).await?, - Rip(worker) => handle_worker_rip(queue, worker).await?, + FromPool::Rip(worker) => handle_worker_rip(queue, worker).await?, } Ok(()) } @@ -424,17 +424,17 @@ async fn spawn_extra_worker(queue: &mut Queue, critical: bool) -> Result<(), Fat /// Attaches the work to the given worker telling the poll about the job. async fn assign(queue: &mut Queue, worker: Worker, job: Job) -> Result<(), Fatal> { let job_data = &mut queue.jobs[job]; - - let artifact_id = ArtifactId::from_pvf_prep_data(&job_data.pvf); - let artifact_path = artifact_id.path(&queue.cache_path); - job_data.worker = Some(worker); queue.workers[worker].job = Some(job); send_pool( &mut queue.to_pool_tx, - pool::ToPool::StartWork { worker, pvf: job_data.pvf.clone(), artifact_path }, + pool::ToPool::StartWork { + worker, + pvf: job_data.pvf.clone(), + cache_path: queue.cache_path.clone(), + }, ) .await?; @@ -491,7 +491,7 @@ mod tests { use crate::host::tests::TEST_PREPARATION_TIMEOUT; use assert_matches::assert_matches; use futures::{future::BoxFuture, FutureExt}; - use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats}; + use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareSuccess}; use slotmap::SlotMap; use std::task::Poll; @@ -612,7 +612,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_eq!( @@ -651,7 +651,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. 
}); @@ -697,7 +697,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Kill(w1)); } @@ -731,7 +731,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: true, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); // Since there is still work, the queue requested one extra worker to spawn to handle the diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_interface.rs similarity index 79% rename from polkadot/node/core/pvf/src/prepare/worker_intf.rs rename to polkadot/node/core/pvf/src/prepare/worker_interface.rs index 0e50caf1feb5c857b30df07681791d21ead4c497..984a87ce5c9bd745b43c60979a73c4c19c6fddb4 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_interface.rs @@ -17,9 +17,9 @@ //! Host interface to the prepare worker. use crate::{ + artifacts::ArtifactId, metrics::Metrics, - security, - worker_intf::{ + worker_interface::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, @@ -27,8 +27,8 @@ use crate::{ }; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult}, - prepare::PrepareStats, + error::{PrepareError, PrepareResult, PrepareWorkerResult}, + prepare::{PrepareStats, PrepareSuccess, PrepareWorkerSuccess}, pvf::PvfPrepData, worker_dir, SecurityStatus, }; @@ -79,9 +79,9 @@ pub enum Outcome { CreateTmpFileErr { worker: IdleWorker, err: String }, /// The response from the worker is received, but the tmp file cannot be renamed (moved) to the /// final destination location. - RenameTmpFileErr { + RenameTmpFile { worker: IdleWorker, - result: PrepareResult, + result: PrepareWorkerResult, err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. @@ -100,6 +100,10 @@ pub enum Outcome { IoErr(String), /// The worker ran out of memory and is aborting. The worker should be ripped. OutOfMemory, + /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. + /// + /// The worker might still be usable, but we kill it just in case. 
+ JobDied { err: String, job_pid: i32 }, } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -111,7 +115,7 @@ pub async fn start_work( metrics: &Metrics, worker: IdleWorker, pvf: PvfPrepData, - artifact_path: PathBuf, + cache_path: PathBuf, ) -> Outcome { let IdleWorker { stream, pid, worker_dir } = worker; @@ -119,8 +123,8 @@ pub async fn start_work( target: LOG_TARGET, worker_pid = %pid, ?worker_dir, - "starting prepare for {}", - artifact_path.display(), + "starting prepare for {:?}", + pvf, ); with_worker_dir_setup( @@ -129,9 +133,8 @@ pub async fn start_work( pid, |tmp_artifact_file, mut stream, worker_dir| async move { let preparation_timeout = pvf.prep_timeout(); - let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; - if let Err(err) = send_request(&mut stream, pvf.clone()).await { + if let Err(err) = send_request(&mut stream, &pvf).await { gum::warn!( target: LOG_TARGET, worker_pid = %pid, @@ -155,30 +158,18 @@ pub async fn start_work( match result { // Received bytes from worker within the time limit. - Ok(Ok(prepare_result)) => { - // Check if any syscall violations occurred during the job. For now this is only - // informative, as we are not enforcing the seccomp policy yet. - for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - ?pvf, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - + Ok(Ok(prepare_worker_result)) => handle_response( metrics, IdleWorker { stream, pid, worker_dir }, - prepare_result, + prepare_worker_result, pid, tmp_artifact_file, - artifact_path, + &pvf, + &cache_path, preparation_timeout, ) - .await - }, + .await, Ok(Err(err)) => { // Communication error within the time limit. gum::warn!( @@ -187,21 +178,6 @@ pub async fn start_work( "failed to recv a prepare response: {:?}", err, ); - - // The worker died. Check if it was due to a seccomp violation. - // - // NOTE: Log, but don't change the outcome. Not all validators may have auditing - // enabled, so we don't want attackers to abuse a non-deterministic outcome. - for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - ?pvf, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - Outcome::IoErr(err.to_string()) }, Err(_) => { @@ -226,19 +202,22 @@ pub async fn start_work( async fn handle_response( metrics: &Metrics, worker: IdleWorker, - result: PrepareResult, + result: PrepareWorkerResult, worker_pid: u32, tmp_file: PathBuf, - artifact_path: PathBuf, + pvf: &PvfPrepData, + cache_path: &Path, preparation_timeout: Duration, ) -> Outcome { - let PrepareStats { cpu_time_elapsed, memory_stats } = match result.clone() { - Ok(result) => result, - // Timed out on the child. This should already be logged by the child. - Err(PrepareError::TimedOut) => return Outcome::TimedOut, - Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, - Err(_) => return Outcome::Concluded { worker, result }, - }; + let PrepareWorkerSuccess { checksum, stats: PrepareStats { cpu_time_elapsed, memory_stats } } = + match result.clone() { + Ok(result) => result, + // Timed out on the child. This should already be logged by the child. 
+ Err(PrepareError::TimedOut) => return Outcome::TimedOut, + Err(PrepareError::JobDied { err, job_pid }) => return Outcome::JobDied { err, job_pid }, + Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, + Err(err) => return Outcome::Concluded { worker, result: Err(err) }, + }; if cpu_time_elapsed > preparation_timeout { // The job didn't complete within the timeout. @@ -253,6 +232,9 @@ async fn handle_response( return Outcome::TimedOut } + let artifact_id = ArtifactId::from_pvf_prep_data(pvf); + let artifact_path = artifact_id.path(cache_path, &checksum); + gum::debug!( target: LOG_TARGET, %worker_pid, @@ -262,7 +244,13 @@ async fn handle_response( ); let outcome = match tokio::fs::rename(&tmp_file, &artifact_path).await { - Ok(()) => Outcome::Concluded { worker, result }, + Ok(()) => Outcome::Concluded { + worker, + result: Ok(PrepareSuccess { + path: artifact_path, + stats: PrepareStats { cpu_time_elapsed, memory_stats: memory_stats.clone() }, + }), + }, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -272,7 +260,7 @@ async fn handle_response( artifact_path.display(), err, ); - Outcome::RenameTmpFileErr { + Outcome::RenameTmpFile { worker, result, err: format!("{:?}", err), @@ -306,7 +294,7 @@ where { // Create the tmp file here so that the child doesn't need any file creation rights. This will // be cleared at the end of this function. - let tmp_file = worker_dir::prepare_tmp_artifact(&worker_dir.path); + let tmp_file = worker_dir::prepare_tmp_artifact(worker_dir.path()); if let Err(err) = tokio::fs::File::create(&tmp_file).await { gum::warn!( target: LOG_TARGET, @@ -321,7 +309,7 @@ where } }; - let worker_dir_path = worker_dir.path.clone(); + let worker_dir_path = worker_dir.path().to_owned(); let outcome = f(tmp_file, stream, worker_dir).await; // Try to clear the worker dir. @@ -339,14 +327,14 @@ where outcome } -async fn send_request(stream: &mut UnixStream, pvf: PvfPrepData) -> io::Result<()> { +async fn send_request(stream: &mut UnixStream, pvf: &PvfPrepData) -> io::Result<()> { framed_send(stream, &pvf.encode()).await?; Ok(()) } -async fn recv_response(stream: &mut UnixStream, pid: u32) -> io::Result { +async fn recv_response(stream: &mut UnixStream, pid: u32) -> io::Result { let result = framed_recv(stream).await?; - let result = PrepareResult::decode(&mut &result[..]).map_err(|e| { + let result = PrepareWorkerResult::decode(&mut &result[..]).map_err(|e| { // We received invalid bytes from the worker. let bound_bytes = &result[..result.len().min(4)]; gum::warn!( diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs index decd321e415e806ac96139f6e6131412e1e912cc..9d0d4cf49afe940a3376097744ed28dcd71f5e7c 100644 --- a/polkadot/node/core/pvf/src/security.rs +++ b/polkadot/node/core/pvf/src/security.rs @@ -14,259 +14,338 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::LOG_TARGET; -use std::path::Path; -use tokio::{ - fs::{File, OpenOptions}, - io::{AsyncReadExt, AsyncSeekExt, SeekFrom}, -}; +use crate::{Config, SecurityStatus, LOG_TARGET}; +use futures::join; +use std::{fmt, path::Path}; -/// Check if we can sandbox the root and emit a warning if not. +/// Run checks for supported security features. +/// +/// # Returns +/// +/// Returns the set of security features that we were able to enable. If an error occurs while +/// enabling a security feature we set the corresponding status to `false`. 
+/// +/// # Errors +/// +/// Returns an error only if we could not fully enforce the security level required by the current +/// configuration. +pub async fn check_security_status(config: &Config) -> Result { + let Config { prepare_worker_program_path, secure_validator_mode, cache_path, .. } = config; + + let (landlock, seccomp, change_root) = join!( + check_landlock(prepare_worker_program_path), + check_seccomp(prepare_worker_program_path), + check_can_unshare_user_namespace_and_change_root(prepare_worker_program_path, cache_path) + ); + + let full_security_status = + FullSecurityStatus::new(*secure_validator_mode, landlock, seccomp, change_root); + let security_status = full_security_status.as_partial(); + + if full_security_status.err_occurred() { + print_secure_mode_error_or_warning(&full_security_status); + if !full_security_status.all_errs_allowed() { + return Err("could not enable Secure Validator Mode; check logs".into()) + } + } + + if security_status.secure_validator_mode { + gum::info!( + target: LOG_TARGET, + "👮‍♀️ Running in Secure Validator Mode. \ + It is highly recommended that you operate according to our security guidelines. \ + \nMore information: https://wiki.polkadot.network/docs/maintain-guides-secure-validator#secure-validator-mode" + ); + } + + Ok(security_status) +} + +/// Contains the full security status including error states. +struct FullSecurityStatus { + partial: SecurityStatus, + errs: Vec, +} + +impl FullSecurityStatus { + fn new( + secure_validator_mode: bool, + landlock: SecureModeResult, + seccomp: SecureModeResult, + change_root: SecureModeResult, + ) -> Self { + Self { + partial: SecurityStatus { + secure_validator_mode, + can_enable_landlock: landlock.is_ok(), + can_enable_seccomp: seccomp.is_ok(), + can_unshare_user_namespace_and_change_root: change_root.is_ok(), + }, + errs: [landlock, seccomp, change_root] + .into_iter() + .filter_map(|result| result.err()) + .collect(), + } + } + + fn as_partial(&self) -> SecurityStatus { + self.partial.clone() + } + + fn err_occurred(&self) -> bool { + !self.errs.is_empty() + } + + fn all_errs_allowed(&self) -> bool { + !self.partial.secure_validator_mode || + self.errs.iter().all(|err| err.is_allowed_in_secure_mode(&self.partial)) + } + + fn errs_string(&self) -> String { + self.errs + .iter() + .map(|err| { + format!( + "\n - {}{}", + if err.is_allowed_in_secure_mode(&self.partial) { "Optional: " } else { "" }, + err + ) + }) + .collect() + } +} + +type SecureModeResult = std::result::Result<(), SecureModeError>; + +/// Errors related to enabling Secure Validator Mode. +#[derive(Debug)] +enum SecureModeError { + CannotEnableLandlock(String), + CannotEnableSeccomp(String), + CannotUnshareUserNamespaceAndChangeRoot(String), +} + +impl SecureModeError { + /// Whether this error is allowed with Secure Validator Mode enabled. + fn is_allowed_in_secure_mode(&self, security_status: &SecurityStatus) -> bool { + use SecureModeError::*; + match self { + // Landlock is present on relatively recent Linuxes. This is optional if the unshare + // capability is present, providing FS sandboxing a different way. + CannotEnableLandlock(_) => security_status.can_unshare_user_namespace_and_change_root, + // seccomp should be present on all modern Linuxes unless it's been disabled. + CannotEnableSeccomp(_) => false, + // Should always be present on modern Linuxes. If not, Landlock also provides FS + // sandboxing, so don't enforce this. 
+ CannotUnshareUserNamespaceAndChangeRoot(_) => security_status.can_enable_landlock, + } + } +} + +impl fmt::Display for SecureModeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use SecureModeError::*; + match self { + CannotEnableLandlock(err) => write!(f, "Cannot enable landlock, a Linux 5.13+ kernel security feature: {err}"), + CannotEnableSeccomp(err) => write!(f, "Cannot enable seccomp, a Linux-specific kernel security feature: {err}"), + CannotUnshareUserNamespaceAndChangeRoot(err) => write!(f, "Cannot unshare user namespace and change root, which are Linux-specific kernel security features: {err}"), + } + } +} + +/// Print an error if Secure Validator Mode and some mandatory errors occurred, warn otherwise. +fn print_secure_mode_error_or_warning(security_status: &FullSecurityStatus) { + // Trying to run securely and some mandatory errors occurred. + const SECURE_MODE_ERROR: &'static str = "🚨 Your system cannot securely run a validator. \ + \nRunning validation of malicious PVF code has a higher risk of compromising this machine."; + // Some errors occurred when running insecurely, or some optional errors occurred when running + // securely. + const SECURE_MODE_WARNING: &'static str = "🚨 Some security issues have been detected. \ + \nRunning validation of malicious PVF code has a higher risk of compromising this machine."; + // Message to be printed only when running securely and mandatory errors occurred. + const IGNORE_SECURE_MODE_TIP: &'static str = + "\nYou can ignore this error with the `--insecure-validator-i-know-what-i-do` \ + command line argument if you understand and accept the risks of running insecurely. \ + With this flag, security features are enabled on a best-effort basis, but not mandatory. \ + \nMore information: https://wiki.polkadot.network/docs/maintain-guides-secure-validator#secure-validator-mode"; + + let all_errs_allowed = security_status.all_errs_allowed(); + let errs_string = security_status.errs_string(); + + if all_errs_allowed { + gum::warn!( + target: LOG_TARGET, + "{}{}", + SECURE_MODE_WARNING, + errs_string, + ); + } else { + gum::error!( + target: LOG_TARGET, + "{}{}{}", + SECURE_MODE_ERROR, + errs_string, + IGNORE_SECURE_MODE_TIP + ); + } +} + +/// Check if we can change root to a new, sandboxed root and return an error if not. /// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_can_unshare_user_namespace_and_change_root( +async fn check_can_unshare_user_namespace_and_change_root( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] cache_path: &Path, +) -> SecureModeResult { cfg_if::cfg_if! 
{ if #[cfg(target_os = "linux")] { + let cache_dir_tempdir = tempfile::Builder::new() + .prefix("check-can-unshare-") + .tempdir_in(cache_path) + .map_err(|err| SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("could not create a temporary directory in {:?}: {}", cache_path, err) + ))?; match tokio::process::Command::new(prepare_worker_program_path) .arg("--check-can-unshare-user-namespace-and-change-root") + .arg(cache_dir_tempdir.path()) .output() .await { - Ok(output) if output.status.success() => true, + Ok(output) if output.status.success() => Ok(()), Ok(output) => { let stderr = std::str::from_utf8(&output.stderr) .expect("child process writes a UTF-8 string to stderr; qed") .trim(); - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - // Docs say to always print status using `Display` implementation. - status = %output.status, - %stderr, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running with support for unsharing user namespaces for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false + if stderr.is_empty() { + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + "not available".into() + )) + } else { + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("not available: {}", stderr) + )) + } }, + Err(err) => + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("could not start child process: {}", err) + )), } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with support for unsharing user namespaces for maximum security." - ); - false + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + "only available on Linux".into() + )) } } } -/// Check if landlock is supported and emit a warning if not. +/// Check if landlock is supported and return an error if not. /// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_landlock( +async fn check_landlock( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { +) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { match tokio::process::Command::new(prepare_worker_program_path) .arg("--check-can-enable-landlock") - .status() + .output() .await { - Ok(status) if status.success() => true, - Ok(status) => { + Ok(output) if output.status.success() => Ok(()), + Ok(output) => { let abi = polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - ?status, - %abi, - "Cannot fully enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." 
- ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + if stderr.is_empty() { + Err(SecureModeError::CannotEnableLandlock( + format!("landlock ABI {} not available", abi) + )) + } else { + Err(SecureModeError::CannotEnableLandlock( + format!("not available: {}", stderr) + )) + } }, + Err(err) => + Err(SecureModeError::CannotEnableLandlock( + format!("could not start child process: {}", err) + )), } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with landlock support for maximum security." - ); - false + Err(SecureModeError::CannotEnableLandlock( + "only available on Linux".into() + )) } } } -/// Check if seccomp is supported and emit a warning if not. +/// Check if seccomp is supported and return an error if not. /// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_seccomp( - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] +async fn check_seccomp( + #[cfg_attr(not(all(target_os = "linux", target_arch = "x86_64")), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { +) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { - match tokio::process::Command::new(prepare_worker_program_path) - .arg("--check-can-enable-seccomp") - .status() - .await - { - Ok(status) if status.success() => true, - Ok(status) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - ?status, - "Cannot fully enable seccomp, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false - }, + cfg_if::cfg_if! { + if #[cfg(target_arch = "x86_64")] { + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-enable-seccomp") + .output() + .await + { + Ok(output) if output.status.success() => Ok(()), + Ok(output) => { + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + if stderr.is_empty() { + Err(SecureModeError::CannotEnableSeccomp( + "not available".into() + )) + } else { + Err(SecureModeError::CannotEnableSeccomp( + format!("not available: {}", stderr) + )) + } + }, + Err(err) => + Err(SecureModeError::CannotEnableSeccomp( + format!("could not start child process: {}", err) + )), + } + } else { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on CPUs from the x86_64 family (usually Intel or AMD)".into() + )) + } } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot enable seccomp, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with seccomp support for maximum security." 
- ); - false - } - } -} - -const AUDIT_LOG_PATH: &'static str = "/var/log/audit/audit.log"; -const SYSLOG_PATH: &'static str = "/var/log/syslog"; - -/// System audit log. -pub struct AuditLogFile { - file: File, - path: &'static str, -} - -impl AuditLogFile { - /// Looks for an audit log file on the system and opens it, seeking to the end to skip any - /// events from before this was called. - /// - /// A bit of a verbose name, but it should clue future refactorers not to move calls closer to - /// where the `AuditLogFile` is used. - pub async fn try_open_and_seek_to_end() -> Option { - let mut path = AUDIT_LOG_PATH; - let mut file = match OpenOptions::new().read(true).open(AUDIT_LOG_PATH).await { - Ok(file) => Ok(file), - Err(_) => { - path = SYSLOG_PATH; - OpenOptions::new().read(true).open(SYSLOG_PATH).await - }, - } - .ok()?; - - let _pos = file.seek(SeekFrom::End(0)).await; - - Some(Self { file, path }) - } - - async fn read_new_since_open(mut self) -> String { - let mut buf = String::new(); - let _len = self.file.read_to_string(&mut buf).await; - buf - } -} - -/// Check if a seccomp violation occurred for the given worker. As the syslog may be in a different -/// location, or seccomp auditing may be disabled, this function provides a best-effort attempt -/// only. -/// -/// The `audit_log_file` must have been obtained before the job started. It only allows reading -/// entries that were written since it was obtained, so that we do not consider events from previous -/// processes with the same pid. This can still be racy, but it's unlikely and fine for a -/// best-effort attempt. -pub async fn check_seccomp_violations_for_worker( - audit_log_file: Option, - worker_pid: u32, -) -> Vec { - let audit_event_pid_field = format!("pid={worker_pid}"); - - let audit_log_file = match audit_log_file { - Some(file) => { - gum::debug!( - target: LOG_TARGET, - %worker_pid, - audit_log_path = ?file.path, - "checking audit log for seccomp violations", - ); - file - }, - None => { - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "could not open either {AUDIT_LOG_PATH} or {SYSLOG_PATH} for reading audit logs" - ); - return vec![] - }, - }; - let events = audit_log_file.read_new_since_open().await; - - let mut violations = vec![]; - for event in events.lines() { - if let Some(syscall) = parse_audit_log_for_seccomp_event(event, &audit_event_pid_field) { - violations.push(syscall); - } - } - - violations -} - -fn parse_audit_log_for_seccomp_event(event: &str, audit_event_pid_field: &str) -> Option { - const SECCOMP_AUDIT_EVENT_TYPE: &'static str = "type=1326"; - - // Do a series of simple .contains instead of a regex, because I'm not sure if the fields are - // guaranteed to always be in the same order. - if !event.contains(SECCOMP_AUDIT_EVENT_TYPE) || !event.contains(&audit_event_pid_field) { - return None - } - - // Get the syscall. Let's avoid a dependency on regex just for this. - for field in event.split(" ") { - if let Some(syscall) = field.strip_prefix("syscall=") { - return syscall.parse::().ok() + cfg_if::cfg_if! 
{ + if #[cfg(target_arch = "x86_64")] { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on Linux".into() + )) + } else { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on Linux and on CPUs from the x86_64 family (usually Intel or AMD).".into() + )) + } + } } } - - None } #[cfg(test)] @@ -274,39 +353,47 @@ mod tests { use super::*; #[test] - fn test_parse_audit_log_for_seccomp_event() { - let audit_event_pid_field = "pid=2559058"; + fn test_secure_mode_error_optionality() { + let err = SecureModeError::CannotEnableLandlock(String::new()); + assert!(err.is_allowed_in_secure_mode(&SecurityStatus { + secure_validator_mode: true, + can_enable_landlock: false, + can_enable_seccomp: false, + can_unshare_user_namespace_and_change_root: true + })); + assert!(!err.is_allowed_in_secure_mode(&SecurityStatus { + secure_validator_mode: true, + can_enable_landlock: false, + can_enable_seccomp: true, + can_unshare_user_namespace_and_change_root: false + })); - assert_eq!( - parse_audit_log_for_seccomp_event( - r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559058 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, - audit_event_pid_field - ), - Some(53) - ); - // pid is wrong - assert_eq!( - parse_audit_log_for_seccomp_event( - r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, - audit_event_pid_field - ), - None - ); - // type is wrong - assert_eq!( - parse_audit_log_for_seccomp_event( - r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1327 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, - audit_event_pid_field - ), - None - ); - // no syscall field - assert_eq!( - parse_audit_log_for_seccomp_event( - r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1327 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e compat=0 ip=0x7f7542c80d5e code=0x80000000"#, - audit_event_pid_field - ), - None - ); + let err = SecureModeError::CannotEnableSeccomp(String::new()); + assert!(!err.is_allowed_in_secure_mode(&SecurityStatus { + secure_validator_mode: true, + can_enable_landlock: false, + can_enable_seccomp: false, + can_unshare_user_namespace_and_change_root: true + })); + assert!(!err.is_allowed_in_secure_mode(&SecurityStatus { + secure_validator_mode: true, + can_enable_landlock: false, + can_enable_seccomp: true, + can_unshare_user_namespace_and_change_root: false + })); + + let err = SecureModeError::CannotUnshareUserNamespaceAndChangeRoot(String::new()); + assert!(err.is_allowed_in_secure_mode(&SecurityStatus { + secure_validator_mode: true, + can_enable_landlock: true, + can_enable_seccomp: false, + can_unshare_user_namespace_and_change_root: false + })); + assert!(!err.is_allowed_in_secure_mode(&SecurityStatus { + 
secure_validator_mode: true, + can_enable_landlock: false, + can_enable_seccomp: true, + can_unshare_user_namespace_and_change_root: false + })); } } diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 4c038896f7f9e29aaed7f7325a445d144ba20457..60b0b4b8d3d0c49199800194570d8ef2783fc67f 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Various things for testing other crates. +//! Various utilities for testing. pub use crate::{ host::{EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}, - worker_intf::{spawn_with_program_path, SpawnErr}, + worker_interface::{spawn_with_program_path, SpawnErr}, }; use crate::get_worker_version; @@ -36,7 +36,7 @@ pub fn validate_candidate( code: &[u8], params: &[u8], ) -> Result, Box> { - use polkadot_node_core_pvf_common::executor_intf::{prepare, prevalidate}; + use polkadot_node_core_pvf_common::executor_interface::{prepare, prevalidate}; use polkadot_node_core_pvf_execute_worker::execute_artifact; let code = sp_maybe_compressed_blob::decompress(code, 10 * 1024 * 1024) @@ -59,27 +59,34 @@ pub fn validate_candidate( /// /// NOTE: This should only be called in dev code (tests, benchmarks) as it relies on the relative /// paths of the built workers. -pub fn get_and_check_worker_paths() -> (PathBuf, PathBuf) { +pub fn build_workers_and_get_paths() -> (PathBuf, PathBuf) { // Only needs to be called once for the current process. static WORKER_PATHS: OnceLock> = OnceLock::new(); fn build_workers() { - let build_args = vec![ + let mut build_args = vec![ "build", "--package=polkadot", "--bin=polkadot-prepare-worker", "--bin=polkadot-execute-worker", ]; - let exit_status = std::process::Command::new("cargo") + + if cfg!(build_type = "release") { + build_args.push("--release"); + } + + let mut cargo = std::process::Command::new("cargo"); + let cmd = cargo // wasm runtime not needed .env("SKIP_WASM_BUILD", "1") .args(build_args) - .stdout(std::process::Stdio::piped()) - .status() - .expect("Failed to run the build program"); + .stdout(std::process::Stdio::piped()); + + println!("INFO: calling `{cmd:?}`"); + let exit_status = cmd.status().expect("Failed to run the build program"); if !exit_status.success() { - eprintln!("Failed to build workers: {}", exit_status.code().unwrap()); + eprintln!("ERROR: Failed to build workers: {}", exit_status.code().unwrap()); std::process::exit(1); } } @@ -95,19 +102,19 @@ pub fn get_and_check_worker_paths() -> (PathBuf, PathBuf) { // explain why a build happens if !prepare_worker_path.is_executable() { - eprintln!("Prepare worker does not exist or is not executable. Workers directory: {:?}", workers_path); + println!("WARN: Prepare worker does not exist or is not executable. Workers directory: {:?}", workers_path); } if !execute_worker_path.is_executable() { - eprintln!("Execute worker does not exist or is not executable. Workers directory: {:?}", workers_path); + println!("WARN: Execute worker does not exist or is not executable. 
Workers directory: {:?}", workers_path); } if let Ok(ver) = get_worker_version(&prepare_worker_path) { if ver != NODE_VERSION { - eprintln!("Prepare worker version {ver} does not match node version {NODE_VERSION}; worker path: {prepare_worker_path:?}"); + println!("WARN: Prepare worker version {ver} does not match node version {NODE_VERSION}; worker path: {prepare_worker_path:?}"); } } if let Ok(ver) = get_worker_version(&execute_worker_path) { if ver != NODE_VERSION { - eprintln!("Execute worker version {ver} does not match node version {NODE_VERSION}; worker path: {execute_worker_path:?}"); + println!("WARN: Execute worker version {ver} does not match node version {NODE_VERSION}; worker path: {execute_worker_path:?}"); } } diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_interface.rs similarity index 71% rename from polkadot/node/core/pvf/src/worker_intf.rs rename to polkadot/node/core/pvf/src/worker_interface.rs index 8f9a7de354b89dd50262474aeef9f5184a83b50b..c68ff92b06eb35216a2d9f661d1b60d09847042c 100644 --- a/polkadot/node/core/pvf/src/worker_intf.rs +++ b/polkadot/node/core/pvf/src/worker_interface.rs @@ -19,8 +19,9 @@ use crate::LOG_TARGET; use futures::FutureExt as _; use futures_timer::Delay; +use parity_scale_codec::Encode; use pin_project::pin_project; -use polkadot_node_core_pvf_common::SecurityStatus; +use polkadot_node_core_pvf_common::{SecurityStatus, WorkerHandshake}; use rand::Rng; use std::{ fmt, mem, @@ -68,89 +69,95 @@ pub async fn spawn_with_program_path( let program_path = program_path.into(); let worker_dir = WorkerDir::new(debug_id, cache_path).await?; let extra_args: Vec = extra_args.iter().map(|arg| arg.to_string()).collect(); + // Hack the borrow-checker. + let program_path_clone = program_path.clone(); + let worker_dir_clone = worker_dir.path().to_owned(); + let extra_args_clone = extra_args.clone(); with_transient_socket_path(debug_id, |socket_path| { let socket_path = socket_path.to_owned(); async move { - let listener = UnixListener::bind(&socket_path).map_err(|err| { - gum::warn!( - target: LOG_TARGET, - %debug_id, - ?program_path, - ?extra_args, - ?worker_dir, - ?socket_path, - "cannot bind unix socket: {:?}", - err, - ); - SpawnErr::Bind - })?; - - let handle = WorkerHandle::spawn( - &program_path, - &extra_args, - &socket_path, - &worker_dir.path, - security_status, - ) - .map_err(|err| { - gum::warn!( - target: LOG_TARGET, - %debug_id, - ?program_path, - ?extra_args, - ?worker_dir.path, - ?socket_path, - "cannot spawn a worker: {:?}", - err, - ); - SpawnErr::ProcessSpawn - })?; + let listener = match UnixListener::bind(&socket_path) { + Ok(ok) => ok, + Err(err) => return Err(SpawnErr::Bind { socket_path, err: err.to_string() }), + }; + + let handle = + WorkerHandle::spawn(&program_path, &extra_args, &socket_path, &worker_dir.path()) + .map_err(|err| SpawnErr::ProcessSpawn { program_path, err: err.to_string() })?; - let worker_dir_path = worker_dir.path.clone(); futures::select! 
{ accept_result = listener.accept().fuse() => { - let (stream, _) = accept_result.map_err(|err| { - gum::warn!( - target: LOG_TARGET, - %debug_id, - ?program_path, - ?extra_args, - ?worker_dir_path, - ?socket_path, - "cannot accept a worker: {:?}", - err, - ); - SpawnErr::Accept - })?; + let (mut stream, _) = accept_result + .map_err(|err| SpawnErr::Accept { socket_path, err: err.to_string() })?; + send_worker_handshake(&mut stream, WorkerHandshake { security_status }) + .await + .map_err(|err| SpawnErr::Handshake { err: err.to_string() })?; Ok((IdleWorker { stream, pid: handle.id(), worker_dir }, handle)) } - _ = Delay::new(spawn_timeout).fuse() => { - gum::warn!( - target: LOG_TARGET, - %debug_id, - ?program_path, - ?extra_args, - ?worker_dir_path, - ?socket_path, - ?spawn_timeout, - "spawning and connecting to socket timed out", - ); - Err(SpawnErr::AcceptTimeout) - } + _ = Delay::new(spawn_timeout).fuse() => Err(SpawnErr::AcceptTimeout{spawn_timeout}), } } }) .await + .map_err(|err| { + gum::warn!( + target: LOG_TARGET, + %debug_id, + ?program_path_clone, + ?extra_args_clone, + ?worker_dir_clone, + "error spawning worker: {}", + err, + ); + err + }) } +/// A temporary, random, free path that is necessary only to establish socket communications. If a +/// directory exists at the path at the end of this function, it is removed then. async fn with_transient_socket_path(debug_id: &'static str, f: F) -> Result where F: FnOnce(&Path) -> Fut, Fut: futures::Future> + 'static, { - let socket_path = tmppath(&format!("pvf-host-{}", debug_id)) + /// Returns a path under [`std::env::temp_dir`]. The path name will start with the given prefix. + /// + /// There is only a certain number of retries. If exceeded this function will give up and return + /// an error. + pub async fn tmppath(prefix: &str) -> io::Result { + fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { + use rand::distributions::Alphanumeric; + + const DESCRIMINATOR_LEN: usize = 10; + + let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN); + buf.extend(prefix.as_bytes()); + buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN)); + + let s = std::str::from_utf8(&buf) + .expect("the string is collected from a valid utf-8 sequence; qed"); + + let mut path = dir.to_owned(); + path.push(s); + path + } + + const NUM_RETRIES: usize = 50; + + let dir = std::env::temp_dir(); + for _ in 0..NUM_RETRIES { + let tmp_path = make_tmppath(prefix, &dir); + if !tmp_path.exists() { + return Ok(tmp_path) + } + } + + Err(io::Error::new(io::ErrorKind::Other, "failed to create a temporary path")) + } + + let socket_path = tmppath(&format!("pvf-host-{}-", debug_id)) .await .map_err(|_| SpawnErr::TmpPath)?; let result = f(&socket_path).await; @@ -162,46 +169,6 @@ where result } -/// Returns a path under the given `dir`. The path name will start with the given prefix. -/// -/// There is only a certain number of retries. If exceeded this function will give up and return an -/// error. 
-pub async fn tmppath_in(prefix: &str, dir: &Path) -> io::Result { - fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { - use rand::distributions::Alphanumeric; - - const DESCRIMINATOR_LEN: usize = 10; - - let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN); - buf.extend(prefix.as_bytes()); - buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN)); - - let s = std::str::from_utf8(&buf) - .expect("the string is collected from a valid utf-8 sequence; qed"); - - let mut path = dir.to_owned(); - path.push(s); - path - } - - const NUM_RETRIES: usize = 50; - - for _ in 0..NUM_RETRIES { - let tmp_path = make_tmppath(prefix, dir); - if !tmp_path.exists() { - return Ok(tmp_path) - } - } - - Err(io::Error::new(io::ErrorKind::Other, "failed to create a temporary path")) -} - -/// The same as [`tmppath_in`], but uses [`std::env::temp_dir`] as the directory. -pub async fn tmppath(prefix: &str) -> io::Result { - let temp_dir = PathBuf::from(std::env::temp_dir()); - tmppath_in(prefix, &temp_dir).await -} - /// A struct that represents an idle worker. /// /// This struct is supposed to be used as a token that is passed by move into a subroutine that @@ -219,23 +186,26 @@ pub struct IdleWorker { pub worker_dir: WorkerDir, } +/// This is publicly exposed only for integration tests. +/// /// An error happened during spawning a worker process. -#[derive(Clone, Debug)] +#[derive(thiserror::Error, Clone, Debug)] +#[doc(hidden)] pub enum SpawnErr { - /// Cannot obtain a temporary path location. + #[error("cannot obtain a temporary path location")] TmpPath, - /// An FS error occurred. - Fs(String), - /// Cannot bind the socket to the given path. - Bind, - /// An error happened during accepting a connection to the socket. - Accept, - /// An error happened during spawning the process. - ProcessSpawn, - /// The deadline allotted for the worker spawning and connecting to the socket has elapsed. - AcceptTimeout, - /// Failed to send handshake after successful spawning was signaled - Handshake, + #[error("cannot bind the socket to the given path {socket_path:?}: {err}")] + Bind { socket_path: PathBuf, err: String }, + #[error( + "an error happened during accepting a connection to the socket {socket_path:?}: {err}" + )] + Accept { socket_path: PathBuf, err: String }, + #[error("an error happened during spawning the process at path {program_path:?}: {err}")] + ProcessSpawn { program_path: PathBuf, err: String }, + #[error("the deadline {}ms allotted for the worker spawning and connecting to the socket has elapsed", .spawn_timeout.as_millis())] + AcceptTimeout { spawn_timeout: Duration }, + #[error("failed to send handshake after successful spawning was signaled: {err}")] + Handshake { err: String }, } /// This is a representation of a potentially running worker. Drop it and the process will be @@ -263,22 +233,7 @@ impl WorkerHandle { extra_args: &[String], socket_path: impl AsRef, worker_dir_path: impl AsRef, - security_status: SecurityStatus, ) -> io::Result { - let security_args = { - let mut args = vec![]; - if security_status.can_enable_landlock { - args.push("--can-enable-landlock".to_string()); - } - if security_status.can_enable_seccomp { - args.push("--can-enable-seccomp".to_string()); - } - if security_status.can_unshare_user_namespace_and_change_root { - args.push("--can-unshare-user-namespace-and-change-root".to_string()); - } - args - }; - // Clear all env vars from the spawned process. 
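Note that in the spawn path above the security flags are no longer forwarded to the worker as command-line arguments; `spawn_with_program_path` instead sends a SCALE-encoded `WorkerHandshake` carrying the `SecurityStatus` over the Unix socket (see `send_worker_handshake`). A minimal round-trip sketch of that encoding, assuming the `parity-scale-codec` crate with its `derive` feature and using simplified stand-ins for the `polkadot-node-core-pvf-common` types:

```rust
use parity_scale_codec::{Decode, Encode};

// Simplified stand-ins; the real types live in `polkadot-node-core-pvf-common`.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
struct SecurityStatus {
    secure_validator_mode: bool,
    can_enable_landlock: bool,
    can_enable_seccomp: bool,
    can_unshare_user_namespace_and_change_root: bool,
}

#[derive(Encode, Decode, Debug, PartialEq)]
struct WorkerHandshake {
    security_status: SecurityStatus,
}

fn main() {
    let handshake = WorkerHandshake {
        security_status: SecurityStatus {
            secure_validator_mode: true,
            can_enable_landlock: true,
            can_enable_seccomp: true,
            can_unshare_user_namespace_and_change_root: false,
        },
    };

    // The host writes `handshake.encode()` into the socket (framed); the worker
    // reads the frame and decodes it back into a `WorkerHandshake`.
    let bytes = handshake.encode();
    let decoded = WorkerHandshake::decode(&mut &bytes[..]).expect("valid SCALE encoding");
    assert_eq!(decoded, handshake);
}
```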
let mut command = process::Command::new(program.as_ref()); command.env_clear(); @@ -293,7 +248,6 @@ impl WorkerHandle { .arg(socket_path.as_ref().as_os_str()) .arg("--worker-dir-path") .arg(worker_dir_path.as_ref().as_os_str()) - .args(&security_args) .stdout(std::process::Stdio::piped()) .kill_on_drop(true) .spawn()?; @@ -393,6 +347,14 @@ pub async fn framed_recv(r: &mut (impl AsyncRead + Unpin)) -> io::Result Ok(buf) } +/// Sends a handshake with information for the worker. +async fn send_worker_handshake( + stream: &mut UnixStream, + handshake: WorkerHandshake, +) -> io::Result<()> { + framed_send(stream, &handshake.encode()).await +} + /// A temporary worker dir that contains only files needed by the worker. The worker will change its /// root (the `/` directory) to this directory; it should have access to no other paths on its /// filesystem. @@ -419,33 +381,27 @@ pub async fn framed_recv(r: &mut (impl AsyncRead + Unpin)) -> io::Result /// ``` #[derive(Debug)] pub struct WorkerDir { - pub path: PathBuf, + tempdir: tempfile::TempDir, } impl WorkerDir { /// Creates a new, empty worker dir with a random name in the given cache dir. pub async fn new(debug_id: &'static str, cache_dir: &Path) -> Result { let prefix = format!("worker-dir-{}-", debug_id); - let path = tmppath_in(&prefix, cache_dir).await.map_err(|_| SpawnErr::TmpPath)?; - tokio::fs::create_dir(&path) - .await - .map_err(|err| SpawnErr::Fs(err.to_string()))?; - Ok(Self { path }) + let tempdir = tempfile::Builder::new() + .prefix(&prefix) + .tempdir_in(cache_dir) + .map_err(|_| SpawnErr::TmpPath)?; + Ok(Self { tempdir }) } -} -// Try to clean up the temporary worker dir at the end of the worker's lifetime. It should be wiped -// on startup, but we make a best effort not to leave it around. -impl Drop for WorkerDir { - fn drop(&mut self) { - let _ = std::fs::remove_dir_all(&self.path); + pub fn path(&self) -> &Path { + self.tempdir.path() } } // Not async since Rust has trouble with async recursion. There should be few files here anyway. // -// TODO: A lingering malicious job can still access future files in this dir. See -// for how to fully secure this. /// Clear the temporary worker dir without deleting it. Not deleting is important because the worker /// has mounted its own separate filesystem here. /// @@ -453,7 +409,7 @@ impl Drop for WorkerDir { /// artifacts from previous jobs. pub fn clear_worker_dir_path(worker_dir_path: &Path) -> io::Result<()> { fn remove_dir_contents(path: &Path) -> io::Result<()> { - for entry in std::fs::read_dir(&path)? { + for entry in std::fs::read_dir(path)? { let entry = entry?; let path = entry.path(); diff --git a/polkadot/node/core/pvf/tests/README.md b/polkadot/node/core/pvf/tests/README.md deleted file mode 100644 index 27385e190250df9baa0d14cea7afc9e00f973a98..0000000000000000000000000000000000000000 --- a/polkadot/node/core/pvf/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# PVF host integration tests - -## Testing - -Before running these tests, make sure the worker binaries are built first. 
This can be done with: - -```sh -cargo build --bin polkadot-execute-worker --bin polkadot-prepare-worker -``` diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index f4fd7f802f5e9dc4e19951fe75a266bb18bd4bc6..09f975b706d24d2eb3f75f733f73e37afe6ec0cc 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -18,26 +18,28 @@ use assert_matches::assert_matches; use parity_scale_codec::Encode as _; +#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] +use polkadot_node_core_pvf::SecurityStatus; use polkadot_node_core_pvf::{ - start, testing::get_and_check_worker_paths, Config, InvalidCandidate, Metrics, PrepareError, - PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost, - JOB_TIMEOUT_WALL_CLOCK_FACTOR, + start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError, + PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::{ExecutorParam, ExecutorParams}; -#[cfg(target_os = "linux")] -use rusty_fork::rusty_fork_test; use std::time::Duration; use tokio::sync::Mutex; mod adder; +#[cfg(target_os = "linux")] +mod process; mod worker_common; -const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); -const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(3); +const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(6); +const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(6); struct TestHost { + // Keep a reference to the tempdir as it gets deleted on drop. cache_dir: tempfile::TempDir, host: Mutex, } @@ -51,17 +53,18 @@ impl TestHost { where F: FnOnce(&mut Config), { - let (prepare_worker_path, execute_worker_path) = get_and_check_worker_paths(); + let (prepare_worker_path, execute_worker_path) = build_workers_and_get_paths(); let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( cache_dir.path().to_owned(), None, + false, prepare_worker_path, execute_worker_path, ); f(&mut config); - let (host, task) = start(config, Metrics::default()).await; + let (host, task) = start(config, Metrics::default()).await.unwrap(); let _ = tokio::task::spawn(task); Self { cache_dir, host: Mutex::new(host) } } @@ -70,7 +73,7 @@ impl TestHost { &self, code: &[u8], executor_params: ExecutorParams, - ) -> Result { + ) -> Result<(), PrepareError> { let (result_tx, result_rx) = futures::channel::oneshot::channel(); let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) @@ -123,10 +126,34 @@ impl TestHost { .unwrap(); result_rx.await.unwrap() } + + #[cfg(all(feature = "ci-only-tests", target_os = "linux"))] + async fn security_status(&self) -> SecurityStatus { + self.host.lock().await.security_status.clone() + } +} + +#[tokio::test] +async fn prepare_job_terminates_on_timeout() { + let host = TestHost::new().await; + + let start = std::time::Instant::now(); + let result = host + .precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()) + .await; + + match result { + Err(PrepareError::TimedOut) => {}, + r => panic!("{:?}", r), + } + + let duration = std::time::Instant::now().duration_since(start); + assert!(duration >= TEST_PREPARATION_TIMEOUT); + assert!(duration < TEST_PREPARATION_TIMEOUT * JOB_TIMEOUT_WALL_CLOCK_FACTOR); } #[tokio::test] -async fn terminates_on_timeout() { +async fn execute_job_terminates_on_timeout() { let host = 
TestHost::new().await; let start = std::time::Instant::now(); @@ -144,7 +171,7 @@ async fn terminates_on_timeout() { .await; match result { - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) => {}, + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {}, r => panic!("{:?}", r), } @@ -153,108 +180,6 @@ async fn terminates_on_timeout() { assert!(duration < TEST_EXECUTION_TIMEOUT * JOB_TIMEOUT_WALL_CLOCK_FACTOR); } -#[cfg(target_os = "linux")] -fn kill_by_sid_and_name(sid: i32, exe_name: &'static str) { - use procfs::process; - - let all_processes: Vec = process::all_processes() - .expect("Can't read /proc") - .filter_map(|p| match p { - Ok(p) => Some(p), // happy path - Err(e) => match e { - // process vanished during iteration, ignore it - procfs::ProcError::NotFound(_) => None, - x => { - panic!("some unknown error: {}", x); - }, - }, - }) - .collect(); - - for process in all_processes { - if process.stat().unwrap().session == sid && - process.exe().unwrap().to_str().unwrap().contains(exe_name) - { - assert_eq!(unsafe { libc::kill(process.pid(), 9) }, 0); - } - } -} - -// Run these tests in their own processes with rusty-fork. They work by each creating a new session, -// then killing the worker process that matches the session ID and expected worker name. -#[cfg(target_os = "linux")] -rusty_fork_test! { - // What happens when the prepare worker dies in the middle of a job? - #[test] - fn prepare_worker_killed_during_job() { - const PROCESS_NAME: &'static str = "polkadot-prepare-worker"; - - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let host = TestHost::new().await; - - // Create a new session and get the session ID. - let sid = unsafe { libc::setsid() }; - assert!(sid > 0); - - let (result, _) = futures::join!( - // Choose a job that would normally take the entire timeout. - host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), - // Run a future that kills the job in the middle of the timeout. - async { - tokio::time::sleep(TEST_PREPARATION_TIMEOUT / 2).await; - kill_by_sid_and_name(sid, PROCESS_NAME); - } - ); - - assert_matches!(result, Err(PrepareError::IoErr(_))); - }) - } - - // What happens when the execute worker dies in the middle of a job? - #[test] - fn execute_worker_killed_during_job() { - const PROCESS_NAME: &'static str = "polkadot-execute-worker"; - - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let host = TestHost::new().await; - - // Create a new session and get the session ID. - let sid = unsafe { libc::setsid() }; - assert!(sid > 0); - - // Prepare the artifact ahead of time. - let binary = halt::wasm_binary_unwrap(); - host.precheck_pvf(binary, Default::default()).await.unwrap(); - - let (result, _) = futures::join!( - // Choose an job that would normally take the entire timeout. - host.validate_candidate( - binary, - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, - Default::default(), - ), - // Run a future that kills the job in the middle of the timeout. 
- async { - tokio::time::sleep(TEST_EXECUTION_TIMEOUT / 2).await; - kill_by_sid_and_name(sid, PROCESS_NAME); - } - ); - - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) - ); - }) - } -} - #[cfg(feature = "ci-only-tests")] #[tokio::test] async fn ensure_parallel_execution() { @@ -286,8 +211,8 @@ async fn ensure_parallel_execution() { assert_matches!( (res1, res2), ( - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)), - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) ) ); @@ -428,7 +353,7 @@ async fn deleting_prepared_artifact_does_not_dispute() { .await; match result { - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) => {}, + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {}, r => panic!("{:?}", r), } } @@ -486,3 +411,55 @@ async fn prepare_can_run_serially() { // Prepare a different wasm blob to prevent skipping work. let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); } + +// CI machines should be able to enable all the security features. +#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] +#[tokio::test] +async fn all_security_features_work() { + // Landlock is only available starting Linux 5.13, and we may be testing on an old kernel. + let can_enable_landlock = { + let sysinfo = sc_sysinfo::gather_sysinfo(); + // The version will look something like "5.15.0-87-generic". + let version = sysinfo.linux_kernel.unwrap(); + let version_split: Vec<&str> = version.split(".").collect(); + let major: u32 = version_split[0].parse().unwrap(); + let minor: u32 = version_split[1].parse().unwrap(); + if major >= 6 { + true + } else if major == 5 { + minor >= 13 + } else { + false + } + }; + + let host = TestHost::new().await; + + assert_eq!( + host.security_status().await, + SecurityStatus { + secure_validator_mode: false, + can_enable_landlock, + can_enable_seccomp: true, + can_unshare_user_namespace_and_change_root: true, + } + ); +} + +// Regression test to make sure the unshare-pivot-root capability does not depend on the PVF +// artifacts cache existing. +#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] +#[tokio::test] +async fn nonexistant_cache_dir() { + let host = TestHost::new_with_config(|cfg| { + cfg.cache_path = cfg.cache_path.join("nonexistant_cache_dir"); + }) + .await; + + assert!(host.security_status().await.can_unshare_user_namespace_and_change_root); + + let _stats = host + .precheck_pvf(::adder::wasm_binary_unwrap(), Default::default()) + .await + .unwrap(); +} diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs new file mode 100644 index 0000000000000000000000000000000000000000..b742acb15d028caf64f2e4c4d147fa31307e6e8d --- /dev/null +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -0,0 +1,385 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test unexpected behaviors of the spawned processes. We test both worker processes (directly +//! spawned by the host) and job processes (spawned by the workers to securely perform PVF jobs). + +use super::TestHost; +use assert_matches::assert_matches; +use polkadot_node_core_pvf::{ + InvalidCandidate, PossiblyInvalidError, PrepareError, ValidationError, +}; +use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams}; +use procfs::process; +use rusty_fork::rusty_fork_test; +use std::time::Duration; + +const PREPARE_PROCESS_NAME: &'static str = "polkadot-prepare-worker"; +const EXECUTE_PROCESS_NAME: &'static str = "polkadot-execute-worker"; + +const SIGNAL_KILL: i32 = 9; +const SIGNAL_STOP: i32 = 19; + +fn send_signal_by_sid_and_name( + sid: i32, + exe_name: &'static str, + is_direct_child: bool, + signal: i32, +) { + let process = find_process_by_sid_and_name(sid, exe_name, is_direct_child); + assert_eq!(unsafe { libc::kill(process.pid(), signal) }, 0); +} +fn get_num_threads_by_sid_and_name(sid: i32, exe_name: &'static str, is_direct_child: bool) -> i64 { + let process = find_process_by_sid_and_name(sid, exe_name, is_direct_child); + process.stat().unwrap().num_threads +} + +fn find_process_by_sid_and_name( + sid: i32, + exe_name: &'static str, + is_direct_child: bool, +) -> process::Process { + let all_processes: Vec = process::all_processes() + .expect("Can't read /proc") + .filter_map(|p| match p { + Ok(p) => Some(p), // happy path + Err(e) => match e { + // process vanished during iteration, ignore it + procfs::ProcError::NotFound(_) => None, + x => { + panic!("some unknown error: {}", x); + }, + }, + }) + .collect(); + + let mut found = None; + for process in all_processes { + let stat = process.stat().unwrap(); + + if stat.session != sid || !process.exe().unwrap().to_str().unwrap().contains(exe_name) { + continue + } + // The workers are direct children of the current process, the worker job processes are not + // (they are children of the workers). + let process_is_direct_child = stat.ppid as u32 == std::process::id(); + if is_direct_child != process_is_direct_child { + continue + } + + if found.is_some() { + panic!("Found more than one process") + } + found = Some(process); + } + found.expect("Should have found the expected process") +} + +// Run these tests in their own processes with rusty-fork. They work by each creating a new session, +// then doing something with the child process that matches the session ID and expected process +// name. +rusty_fork_test! { + // What happens when the prepare worker (not the job) times out? + #[test] + fn prepare_worker_timeout() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Send a stop signal to pause the worker. 
+ async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_STOP); + } + ); + + assert_matches!(result, Err(PrepareError::TimedOut)); + }) + } + + // What happens when the execute worker (not the job) times out? + #[test] + fn execute_worker_timeout() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose an job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Send a stop signal to pause the worker. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_STOP); + } + ); + + assert_matches!( + result, + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) + ); + }) + } + + // What happens when the prepare worker dies in the middle of a job? + #[test] + fn prepare_worker_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + + assert_matches!(result, Err(PrepareError::IoErr(_))); + }) + } + + // What happens when the execute worker dies in the middle of a job? + #[test] + fn execute_worker_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose an job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + + assert_matches!( + result, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) + ); + }) + } + + // What happens when the forked prepare job dies in the middle of its job? 
+ #[test] + fn forked_prepare_job_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, false, SIGNAL_KILL); + } + ); + + // Note that we get a more specific error if the job died than if the whole worker died. + assert_matches!( + result, + Err(PrepareError::JobDied{ err, job_pid: _ }) if err == "received signal: SIGKILL" + ); + }) + } + + // What happens when the forked execute job dies in the middle of its job? + #[test] + fn forked_execute_job_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, false, SIGNAL_KILL); + } + ); + + // Note that we get a more specific error if the job died than if the whole worker died. + assert_matches!( + result, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) + if err == "received signal: SIGKILL" + ); + }) + } + + // Ensure that the spawned prepare worker is single-threaded. + // + // See `run_worker` for why we need this invariant. + #[test] + fn ensure_prepare_processes_have_correct_num_threads() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let _ = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + assert_eq!( + get_num_threads_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true), + 1 + ); + // Child job should have three threads: main thread, execute thread, CPU time + // monitor, and memory tracking. + assert_eq!( + get_num_threads_by_sid_and_name(sid, PREPARE_PROCESS_NAME, false), + 4 + ); + + // End the test. + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + }) + } + + // Ensure that the spawned execute worker is single-threaded. + // + // See `run_worker` for why we need this invariant. 
+ #[test] + fn ensure_execute_processes_have_correct_num_threads() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let _ = futures::join!( + // Choose a job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that tests the thread count while the worker is running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + assert_eq!( + get_num_threads_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true), + 1 + ); + // Child job should have three threads: main thread, execute thread, and CPU + // time monitor. + assert_eq!( + get_num_threads_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, false), + 3 + ); + + // End the test. + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + }) + } +} diff --git a/polkadot/node/core/pvf/tests/it/worker_common.rs b/polkadot/node/core/pvf/tests/it/worker_common.rs index df64980dc8064d80486db002b073992593bbce9c..2c24a15b682df651e1c033f686f0549488b32ffa 100644 --- a/polkadot/node/core/pvf/tests/it/worker_common.rs +++ b/polkadot/node/core/pvf/tests/it/worker_common.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use polkadot_node_core_pvf::{ - testing::{get_and_check_worker_paths, spawn_with_program_path, SpawnErr}, + testing::{build_workers_and_get_paths, spawn_with_program_path, SpawnErr}, SecurityStatus, }; use std::{env, time::Duration}; @@ -23,10 +23,11 @@ use std::{env, time::Duration}; // Test spawning a program that immediately exits with a failure code. 
#[tokio::test] async fn spawn_immediate_exit() { - let (prepare_worker_path, _) = get_and_check_worker_paths(); + let (prepare_worker_path, _) = build_workers_and_get_paths(); // There's no explicit `exit` subcommand in the worker; it will panic on an unknown // subcommand anyway + let spawn_timeout = Duration::from_secs(2); let result = spawn_with_program_path( "integration-test", prepare_worker_path, @@ -36,28 +37,33 @@ async fn spawn_immediate_exit() { SecurityStatus::default(), ) .await; - assert!(matches!(result, Err(SpawnErr::AcceptTimeout))); + assert!( + matches!(result, Err(SpawnErr::AcceptTimeout { spawn_timeout: s }) if s == spawn_timeout) + ); } #[tokio::test] async fn spawn_timeout() { - let (_, execute_worker_path) = get_and_check_worker_paths(); + let (_, execute_worker_path) = build_workers_and_get_paths(); + let spawn_timeout = Duration::from_secs(2); let result = spawn_with_program_path( "integration-test", execute_worker_path, &env::temp_dir(), &["test-sleep"], - Duration::from_secs(2), + spawn_timeout, SecurityStatus::default(), ) .await; - assert!(matches!(result, Err(SpawnErr::AcceptTimeout))); + assert!( + matches!(result, Err(SpawnErr::AcceptTimeout { spawn_timeout: s }) if s == spawn_timeout) + ); } #[tokio::test] async fn should_connect() { - let (prepare_worker_path, _) = get_and_check_worker_paths(); + let (prepare_worker_path, _) = build_workers_and_get_paths(); let _ = spawn_with_program_path( "integration-test", diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index f324f1e79c46b7cae80f3597dc6e7b5dcecbe637..07be4d128c25f6f71b8e39803f60857234f04198 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -1,11 +1,14 @@ [package] name = "polkadot-node-core-runtime-api" version = "1.0.0" -description="Wrapper around the parachain-related runtime APIs" +description = "Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } @@ -22,7 +25,7 @@ polkadot-node-subsystem-types = { path = "../../subsystem-types" } sp-api = { path = "../../../../substrate/primitives/api" } sp-core = { path = "../../../../substrate/primitives/core" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } -async-trait = "0.1.57" +async-trait = "0.1.74" futures = { version = "0.3.21", features = ["thread-pool"] } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-node-primitives = { path = "../../primitives" } diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 69eea22b23bda4d8c67543c535c5f80afd81703d..5eca551db0a69f0edcab3cbba87c2029274b50ef 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,12 +20,13 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, 
ValidatorSignature, + async_backing, slashing, + vstaging::{self, ApprovalVotingParams}, + AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that @@ -67,6 +68,8 @@ pub(crate) struct RequestResultCache { disabled_validators: LruMap>, para_backing_state: LruMap<(Hash, ParaId), Option>, async_backing_params: LruMap, + node_features: LruMap, + approval_voting_params: LruMap, } impl Default for RequestResultCache { @@ -97,9 +100,11 @@ impl Default for RequestResultCache { unapplied_slashes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), key_ownership_proof: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), minimum_backing_votes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + approval_voting_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), disabled_validators: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + node_features: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -446,6 +451,21 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } + pub(crate) fn node_features( + &mut self, + session_index: SessionIndex, + ) -> Option<&vstaging::NodeFeatures> { + self.node_features.get(&session_index).map(|f| &*f) + } + + pub(crate) fn cache_node_features( + &mut self, + session_index: SessionIndex, + features: vstaging::NodeFeatures, + ) { + self.node_features.insert(session_index, features); + } + pub(crate) fn disabled_validators( &mut self, relay_parent: &Hash, @@ -490,6 +510,21 @@ impl RequestResultCache { ) { self.async_backing_params.insert(key, value); } + + pub(crate) fn approval_voting_params( + &mut self, + key: (Hash, SessionIndex), + ) -> Option<&ApprovalVotingParams> { + self.approval_voting_params.get(&key.1).map(|v| &*v) + } + + pub(crate) fn cache_approval_voting_params( + &mut self, + session_index: SessionIndex, + value: ApprovalVotingParams, + ) { + self.approval_voting_params.insert(session_index, value); + } } pub(crate) enum RequestResult { @@ -537,7 +572,9 @@ pub(crate) enum RequestResult { slashing::OpaqueKeyOwnershipProof, Option<()>, ), + ApprovalVotingParams(Hash, SessionIndex, ApprovalVotingParams), DisabledValidators(Hash, Vec), ParaBackingState(Hash, ParaId, Option), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), + NodeFeatures(SessionIndex, vstaging::NodeFeatures), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index bdcca08b10dda6fed122e482e5ac015beb2a29bb..4bedfd827340bc60b0101f1c854f207705bc0b31 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -165,6 +165,8 @@ where KeyOwnershipProof(relay_parent, validator_id, key_ownership_proof) => self .requests_cache .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), + RequestResult::ApprovalVotingParams(_relay_parent, session_index, params) => + 
self.requests_cache.cache_approval_voting_params(session_index, params), SubmitReportDisputeLost(_, _, _, _) => {}, DisabledValidators(relay_parent, disabled_validators) => self.requests_cache.cache_disabled_validators(relay_parent, disabled_validators), @@ -173,6 +175,8 @@ where .cache_para_backing_state((relay_parent, para_id), constraints), AsyncBackingParams(relay_parent, params) => self.requests_cache.cache_async_backing_params(relay_parent, params), + NodeFeatures(session_index, params) => + self.requests_cache.cache_node_features(session_index, params), } } @@ -298,6 +302,9 @@ where Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) }, ), + Request::ApprovalVotingParams(session_index, sender) => + query!(approval_voting_params(session_index), sender) + .map(|sender| Request::ApprovalVotingParams(session_index, sender)), Request::DisabledValidators(sender) => query!(disabled_validators(), sender) .map(|sender| Request::DisabledValidators(sender)), Request::ParaBackingState(para, sender) => query!(para_backing_state(para), sender) @@ -313,6 +320,15 @@ where Some(Request::MinimumBackingVotes(index, sender)) } }, + Request::NodeFeatures(index, sender) => { + if let Some(value) = self.requests_cache.node_features(index) { + self.metrics.on_cached_request(); + let _ = sender.send(Ok(value.clone())); + None + } else { + Some(Request::NodeFeatures(index, sender)) + } + }, } } @@ -408,6 +424,9 @@ where macro_rules! query { ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr) => {{ + query!($req_variant, $api_name($($param),*), ver = $version, $sender, result = ( relay_parent $(, $param )* ) ) + }}; + ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr, result = ( $($results:expr),* ) ) => {{ let sender = $sender; let version: u32 = $version; // enforce type for the version expression let runtime_version = client.api_version_parachain_host(relay_parent).await @@ -441,7 +460,7 @@ where metrics.on_request(res.is_ok()); let _ = sender.send(res.clone()); - res.ok().map(|res| RequestResult::$req_variant(relay_parent, $( $param, )* res)) + res.ok().map(|res| RequestResult::$req_variant($( $results, )* res)) }} } @@ -557,6 +576,14 @@ where ver = Request::KEY_OWNERSHIP_PROOF_RUNTIME_REQUIREMENT, sender ), + Request::ApprovalVotingParams(session_index, sender) => { + query!( + ApprovalVotingParams, + approval_voting_params(session_index), + ver = Request::APPROVAL_VOTING_PARAMS_REQUIREMENT, + sender + ) + }, Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) => query!( SubmitReportDisputeLost, submit_report_dispute_lost(dispute_proof, key_ownership_proof), @@ -591,5 +618,12 @@ where sender ) }, + Request::NodeFeatures(index, sender) => query!( + NodeFeatures, + node_features(), + ver = Request::NODE_FEATURES_RUNTIME_REQUIREMENT, + sender, + result = (index) + ), } } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index 979b3587d2692149d0d5927e80fea79f2618b0a0..f91723b3d39e9a6548d94ffde1da534d2e7592ab 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,12 +20,13 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, - 
CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, ValidatorSignature, + async_backing, slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, + AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_api::ApiError; use sp_core::testing::TaskExecutor; @@ -242,6 +243,15 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { todo!("Not required for tests") } + /// Approval voting configuration parameters + async fn approval_voting_params( + &self, + _: Hash, + _: SessionIndex, + ) -> Result { + todo!("Not required for tests") + } + async fn current_epoch(&self, _: Hash) -> Result { Ok(self.babe_epoch.as_ref().unwrap().clone()) } @@ -269,6 +279,10 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { todo!("Not required for tests") } + async fn node_features(&self, _: Hash) -> Result { + todo!("Not required for tests") + } + async fn disabled_validators(&self, _: Hash) -> Result, ApiError> { todo!("Not required for tests") } diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index acee9efd0e098d2e9c31c3ce250ca06484ae5b38..ccb21f64e6375547409f32b6de5257d2ff39b88d 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Stick logs together with the TraceID as provided by tempo" +[lints] +workspace = true + [dependencies] coarsetime = "0.1.22" tracing = "0.1.35" diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index 1ffaf6160ba2be444ab3ac6e2094c2b690fcde12..3f1c2fd64756d51845b3957db1e04c3ebe5132fa 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." 
+[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -13,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.38", features = ["full", "extra-traits"] } +syn = { version = "2.0.41", features = ["extra-traits", "full"] } quote = "1.0.28" proc-macro2 = "1.0.56" -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.1" expander = "2.0.0" [dev-dependencies] diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index fcfbbaec611ef22ae593ad05bcb058a2316a0e9b..81947f4f6a4acfeb8623ccf1f5454c94e8067254 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Polkadot Jaeger primitives, but equally useful for Grafana/Tempo" +[lints] +workspace = true + [dependencies] mick-jaeger = "0.1.8" lazy_static = "1.4" diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index f52f0cc0282f8286a717f0aeae1f283c9e93db14..659c6eb8cd8a6318b81d462fc9d9a0521adf3adc 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -8,6 +8,9 @@ license.workspace = true readme = "README.md" publish = false +[lints] +workspace = true + [[bin]] name = "malus" path = "src/malus.rs" @@ -26,7 +29,7 @@ path = "../../src/bin/prepare-worker.rs" doc = false [dependencies] -polkadot-cli = { path = "../../cli", features = [ "malus", "rococo-native", "westend-native" ] } +polkadot-cli = { path = "../../cli", features = ["malus", "rococo-native", "westend-native"] } polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-node-subsystem-types = { path = "../subsystem-types" } @@ -37,10 +40,10 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-primitives = { path = "../../primitives" } color-eyre = { version = "0.6.1", default-features = false } assert_matches = "1.5" -async-trait = "0.1.57" +async-trait = "0.1.74" sp-keystore = { path = "../../../substrate/primitives/keystore" } sp-core = { path = "../../../substrate/primitives/core" } -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } @@ -62,4 +65,4 @@ substrate-build-script-utils = { path = "../../../substrate/utils/build-script-u [features] default = [] -fast-runtime = [ "polkadot-cli/fast-runtime" ] +fast-runtime = ["polkadot-cli/fast-runtime"] diff --git a/polkadot/node/malus/src/interceptor.rs b/polkadot/node/malus/src/interceptor.rs index 04ee0905deeb0758f222fce2cc6d3075b5a6000f..e994319beb9637cf4774468f68faa4e8a331d6b8 100644 --- a/polkadot/node/malus/src/interceptor.rs +++ b/polkadot/node/malus/src/interceptor.rs @@ -21,7 +21,7 @@ //! messages on the overseer level. use polkadot_node_subsystem::*; -pub use polkadot_node_subsystem::{messages, messages::*, overseer, FromOrchestra}; +pub use polkadot_node_subsystem::{messages::*, overseer, FromOrchestra}; use std::{future::Future, pin::Pin}; /// Filter incoming and outgoing messages. 
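For context on how the malus variants further down hook into the node: the `MessageInterceptor` machinery re-exported in the hunk above lets a variant wrap a subsystem and inspect, rewrite, or drop its messages before they reach it. Below is a minimal pass-through sketch modelled on the `AncestorDisputer` implementation added later in this patch; the exact trait bounds, any additional trait methods, and the import paths are assumptions for illustration, not part of the patch.

```rust
// Hypothetical no-op interceptor: forwards every approval-voting message unchanged.
// The trait shape mirrors how `AncestorDisputer` implements it elsewhere in this patch;
// treat the generics and bounds as assumptions.
use crate::interceptor::*; // re-exports `messages::*`, `overseer`, `FromOrchestra`

#[derive(Clone)]
struct PassThrough;

impl<Sender> MessageInterceptor<Sender> for PassThrough
where
    Sender: overseer::ApprovalVotingSenderTrait + Clone + Send + 'static,
{
    type Message = ApprovalVotingMessage;

    fn intercept_incoming(
        &self,
        _subsystem_sender: &mut Sender,
        msg: FromOrchestra<Self::Message>,
    ) -> Option<FromOrchestra<Self::Message>> {
        // Returning `Some(msg)` passes the message through; `None` would silently drop it.
        Some(msg)
    }
}
```

A variant then substitutes the wrapped subsystem when building the overseer, e.g. via `.replace_approval_voting(move |cb| InterceptedSubsystem::new(cb, PassThrough))`, which is exactly how the `DisputeFinalizedCandidates` generator wires in its `AncestorDisputer` later in this diff.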
diff --git a/polkadot/node/malus/src/malus.rs b/polkadot/node/malus/src/malus.rs index 69dd7c869fc0d4720026998db36e59d063cb9bd8..b8a83e54d4f5200df3d28822d6ecec11eecc9802 100644 --- a/polkadot/node/malus/src/malus.rs +++ b/polkadot/node/malus/src/malus.rs @@ -36,6 +36,8 @@ enum NemesisVariant { BackGarbageCandidate(BackGarbageCandidateOptions), /// Delayed disputing of ancestors that are perfectly fine. DisputeAncestor(DisputeAncestorOptions), + /// Delayed disputing of finalized candidates. + DisputeFinalizedCandidates(DisputeFinalizedCandidatesOptions), } #[derive(Debug, Parser)] @@ -80,6 +82,15 @@ impl MalusCli { finality_delay, )? }, + NemesisVariant::DisputeFinalizedCandidates(opts) => { + let DisputeFinalizedCandidatesOptions { dispute_offset, cli } = opts; + + polkadot_cli::run_node( + cli, + DisputeFinalizedCandidates { dispute_offset }, + finality_delay, + )? + }, } Ok(()) } @@ -184,4 +195,39 @@ mod tests { assert!(run.cli.run.base.bob); }); } + + #[test] + fn dispute_finalized_candidates_works() { + let cli = MalusCli::try_parse_from(IntoIterator::into_iter([ + "malus", + "dispute-finalized-candidates", + "--bob", + ])) + .unwrap(); + assert_matches::assert_matches!(cli, MalusCli { + variant: NemesisVariant::DisputeFinalizedCandidates(run), + .. + } => { + assert!(run.cli.run.base.bob); + }); + } + + #[test] + fn dispute_finalized_offset_value_works() { + let cli = MalusCli::try_parse_from(IntoIterator::into_iter([ + "malus", + "dispute-finalized-candidates", + "--dispute-offset", + "13", + "--bob", + ])) + .unwrap(); + assert_matches::assert_matches!(cli, MalusCli { + variant: NemesisVariant::DisputeFinalizedCandidates(opts), + .. + } => { + assert_eq!(opts.dispute_offset, 13); // This line checks that dispute_offset is correctly set to 13 + assert!(opts.cli.run.base.bob); + }); + } } diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index 474887ee8df764a7c1830fbd7b6fe08bfc9f9d4c..92264cd653d052a6da0e4389297496a40404c5e9 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Implements common code for nemesis. Currently, only `FakeValidationResult` +//! Implements common code for nemesis. Currently, only `ReplaceValidationResult` //! interceptor is implemented. 
use crate::{ interceptor::*, @@ -30,7 +30,7 @@ use polkadot_node_subsystem::{ use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData, - PvfExecTimeoutKind, + PvfExecKind, }; use futures::channel::oneshot; @@ -90,10 +90,10 @@ impl FakeCandidateValidation { } } - fn should_misbehave(&self, timeout: PvfExecTimeoutKind) -> bool { + fn should_misbehave(&self, timeout: PvfExecKind) -> bool { match timeout { - PvfExecTimeoutKind::Backing => self.includes_backing(), - PvfExecTimeoutKind::Approval => self.includes_approval(), + PvfExecKind::Backing => self.includes_backing(), + PvfExecKind::Approval => self.includes_approval(), } } } @@ -188,7 +188,7 @@ where let _candidate_descriptor = candidate_descriptor.clone(); let mut subsystem_sender = subsystem_sender.clone(); let (sender, receiver) = std::sync::mpsc::channel(); - self.spawner.spawn_blocking( + self.spawner.spawn( "malus-get-validation-data", Some("malus"), Box::pin(async move { @@ -279,13 +279,13 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, .. }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(exec_timeout_kind) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => { // Behave normally if the `PoV` is not known to be malicious. if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { @@ -295,7 +295,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) @@ -333,14 +333,14 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) }, } }, - x if x.misbehaves_invalid() && x.should_misbehave(exec_timeout_kind) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => { // Set the validation result to invalid with probability `p` and trigger a // dispute let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -373,7 +373,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) @@ -388,7 +388,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }), @@ -401,13 +401,13 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, .. }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(exec_timeout_kind) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => { // Behave normally if the `PoV` is not known to be malicious. 
if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { @@ -415,7 +415,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) @@ -445,13 +445,13 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }), } }, - x if x.misbehaves_invalid() && x.should_misbehave(exec_timeout_kind) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => { // Maliciously set the validation result to invalid for a valid candidate // with probability `p` let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -479,7 +479,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) @@ -491,7 +491,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }), diff --git a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs new file mode 100644 index 0000000000000000000000000000000000000000..113ab026879d479a425496c74fc920c5017a6614 --- /dev/null +++ b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs @@ -0,0 +1,265 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A malicious node variant that attempts to dispute finalized candidates. +//! +//! This malus variant behaves honestly in backing and approval voting. +//! The maliciousness comes from emitting an extra dispute statement on top of the other ones. +//! +//! Some extra quirks which generally should be insignificant: +//! - The malus node will not dispute at session boundaries +//! - The malus node will not dispute blocks it backed itself +//! - Be cautious about the size of the network to make sure disputes are not auto-confirmed +//! (7 validators is the smallest network size as it needs [(7-1)//3]+1 = 3 votes to get +//! confirmed but it only gets 1 from backing and 1 from malus so 2 in total) +//! +//! +//! Attention: For usage with `zombienet` only! + +#![allow(missing_docs)] + +use futures::channel::oneshot; +use polkadot_cli::{ + prepared_overseer_builder, + service::{ + AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, HeaderBackend, Overseer, + OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, ParachainHost, + ProvideRuntimeApi, + }, + Cli, +}; +use polkadot_node_subsystem::{messages::ApprovalVotingMessage, SpawnGlue}; +use polkadot_node_subsystem_types::{DefaultSubsystemClient, OverseerSignal}; +use polkadot_node_subsystem_util::request_candidate_events; +use polkadot_primitives::CandidateEvent; +use sp_core::traits::SpawnNamed; + +// Filter wrapping related types. +use crate::{interceptor::*, shared::MALUS}; + +use std::sync::Arc; + +/// Wraps around ApprovalVotingSubsystem and replaces it. 
+/// Listens to finalization messages and if possible triggers disputes for their ancestors. +#[derive(Clone)] +struct AncestorDisputer { + spawner: Spawner, //stores the actual ApprovalVotingSubsystem spawner + dispute_offset: u32, /* relative depth of the disputed block to the finalized block, + * 0=finalized, 1=parent of finalized etc */ +} + +impl MessageInterceptor for AncestorDisputer +where + Sender: overseer::ApprovalVotingSenderTrait + Clone + Send + 'static, + Spawner: overseer::gen::Spawner + Clone + 'static, +{ + type Message = ApprovalVotingMessage; + + /// Intercept incoming `OverseerSignal::BlockFinalized' and pass the rest as normal. + fn intercept_incoming( + &self, + subsystem_sender: &mut Sender, + msg: FromOrchestra, + ) -> Option> { + match msg { + FromOrchestra::Communication { msg } => Some(FromOrchestra::Communication { msg }), + FromOrchestra::Signal(OverseerSignal::BlockFinalized( + finalized_hash, + finalized_height, + )) => { + gum::debug!( + target: MALUS, + "😈 Block Finalization Interception! Block: {:?}", finalized_hash, + ); + + //Ensure that the chain is long enough for the target ancestor to exist + if finalized_height <= self.dispute_offset { + return Some(FromOrchestra::Signal(OverseerSignal::BlockFinalized( + finalized_hash, + finalized_height, + ))) + } + + let dispute_offset = self.dispute_offset; + let mut sender = subsystem_sender.clone(); + self.spawner.spawn( + "malus-dispute-finalized-block", + Some("malus"), + Box::pin(async move { + // Query chain for the block hash at the target depth + let (tx, rx) = oneshot::channel(); + sender + .send_message(ChainApiMessage::FinalizedBlockHash( + finalized_height - dispute_offset, + tx, + )) + .await; + let disputable_hash = match rx.await { + Ok(Ok(Some(hash))) => { + gum::debug!( + target: MALUS, + "😈 Time to search {:?}`th ancestor! Block: {:?}", dispute_offset, hash, + ); + hash + }, + _ => { + gum::debug!( + target: MALUS, + "😈 Seems the target is not yet finalized! Nothing to dispute." + ); + return // Early return from the async block + }, + }; + + // Fetch all candidate events for the target ancestor + let events = + request_candidate_events(disputable_hash, &mut sender).await.await; + let events = match events { + Ok(Ok(events)) => events, + Ok(Err(e)) => { + gum::error!( + target: MALUS, + "😈 Failed to fetch candidate events: {:?}", e + ); + return // Early return from the async block + }, + Err(e) => { + gum::error!( + target: MALUS, + "😈 Failed to fetch candidate events: {:?}", e + ); + return // Early return from the async block + }, + }; + + // Extract a token candidate from the events to use for disputing + let event = events.iter().find(|event| { + matches!(event, CandidateEvent::CandidateIncluded(_, _, _, _)) + }); + let candidate = match event { + Some(CandidateEvent::CandidateIncluded(candidate, _, _, _)) => + candidate, + _ => { + gum::error!( + target: MALUS, + "😈 No candidate included event found! Nothing to dispute." + ); + return // Early return from the async block + }, + }; + + // Extract the candidate hash from the candidate + let candidate_hash = candidate.hash(); + + // Fetch the session index for the candidate + let (tx, rx) = oneshot::channel(); + sender + .send_message(RuntimeApiMessage::Request( + disputable_hash, + RuntimeApiRequest::SessionIndexForChild(tx), + )) + .await; + let session_index = match rx.await { + Ok(Ok(session_index)) => session_index, + _ => { + gum::error!( + target: MALUS, + "😈 Failed to fetch session index for candidate." 
+ ); + return // Early return from the async block + }, + }; + gum::info!( + target: MALUS, + "😈 Disputing candidate with hash: {:?} in session {:?}", candidate_hash, session_index, + ); + + // Start dispute + sender.send_unbounded_message( + DisputeCoordinatorMessage::IssueLocalStatement( + session_index, + candidate_hash, + candidate.clone(), + false, // indicates candidate is invalid -> dispute starts + ), + ); + }), + ); + + // Passthrough the finalization signal as usual (using it as hook only) + Some(FromOrchestra::Signal(OverseerSignal::BlockFinalized( + finalized_hash, + finalized_height, + ))) + }, + FromOrchestra::Signal(signal) => Some(FromOrchestra::Signal(signal)), + } + } +} + +//---------------------------------------------------------------------------------- + +#[derive(Debug, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct DisputeFinalizedCandidatesOptions { + /// relative depth of the disputed block to the finalized block, 0=finalized, 1=parent of + /// finalized etc + #[clap(long, ignore_case = true, default_value_t = 2, value_parser = clap::value_parser!(u32).range(0..=50))] + pub dispute_offset: u32, + + #[clap(flatten)] + pub cli: Cli, +} + +/// DisputeFinalizedCandidates implementation wrapper which implements `OverseerGen` glue. +pub(crate) struct DisputeFinalizedCandidates { + /// relative depth of the disputed block to the finalized block, 0=finalized, 1=parent of + /// finalized etc + pub dispute_offset: u32, +} + +impl OverseerGen for DisputeFinalizedCandidates { + fn generate( + &self, + connector: OverseerConnector, + args: OverseerGenArgs<'_, Spawner, RuntimeClient>, + ) -> Result< + (Overseer, Arc>>, OverseerHandle), + Error, + > + where + RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, + RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + Spawner: 'static + SpawnNamed + Clone + Unpin, + { + gum::info!( + target: MALUS, + "😈 Started Malus node that disputes finalized blocks after they are {:?} finalizations deep.", + &self.dispute_offset, + ); + + let ancestor_disputer = AncestorDisputer { + spawner: SpawnGlue(args.spawner.clone()), + dispute_offset: self.dispute_offset, + }; + + prepared_overseer_builder(args)? 
+ .replace_approval_voting(move |cb| InterceptedSubsystem::new(cb, ancestor_disputer)) + .build_with_connector(connector) + .map_err(|e| e.into()) + } +} diff --git a/polkadot/node/malus/src/variants/mod.rs b/polkadot/node/malus/src/variants/mod.rs index 3789f33ac98be40d979e331dc67956a48711d4e4..bb4971c145cee8e78494096869877eb79e5b030e 100644 --- a/polkadot/node/malus/src/variants/mod.rs +++ b/polkadot/node/malus/src/variants/mod.rs @@ -18,11 +18,13 @@ mod back_garbage_candidate; mod common; +mod dispute_finalized_candidates; mod dispute_valid_candidates; mod suggest_garbage_candidate; pub(crate) use self::{ back_garbage_candidate::{BackGarbageCandidateOptions, BackGarbageCandidates}, + dispute_finalized_candidates::{DisputeFinalizedCandidates, DisputeFinalizedCandidatesOptions}, dispute_valid_candidates::{DisputeAncestorOptions, DisputeValidCandidates}, suggest_garbage_candidate::{SuggestGarbageCandidateOptions, SuggestGarbageCandidates}, }; diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs index cf0ff5f809d8c31df07a13fb3eac0661f8486917..817afb58437e16780a0db79173b4e9dc2f7f3e14 100644 --- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs @@ -113,7 +113,7 @@ where let (sender, receiver) = std::sync::mpsc::channel(); let mut new_sender = subsystem_sender.clone(); let _candidate = candidate.clone(); - self.spawner.spawn_blocking( + self.spawner.spawn( "malus-get-validation-data", Some("malus"), Box::pin(async move { diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index e13ae63199ff0960af100a34451c14090b23e764..e9a4d463f4d907f197ed1bf7ad83f2b5243c8fc3 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -6,12 +6,15 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } -metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features = ["futures_channel"] } # Both `sc-service` and `sc-cli` are required by runtime metrics `logger_hook()`. 
sc-service = { path = "../../../substrate/client/service" } sc-cli = { path = "../../../substrate/client/cli" } @@ -28,11 +31,11 @@ assert_cmd = "2.0.4" tempfile = "3.2.0" hyper = { version = "0.14.20", default-features = false, features = ["http1", "tcp"] } tokio = "1.24.2" -polkadot-test-service = { path = "../test/service", features=["runtime-metrics"]} +polkadot-test-service = { path = "../test/service", features = ["runtime-metrics"] } substrate-test-utils = { path = "../../../substrate/test-utils" } sc-service = { path = "../../../substrate/client/service" } sp-keyring = { path = "../../../substrate/primitives/keyring" } -prometheus-parse = {version = "0.2.2"} +prometheus-parse = { version = "0.2.2" } [features] default = [] diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index f8a7cc15f87ee8354cc6bc464d42a7867e0d5c74..6f261ae770011c0933f1775fb8b92f1f455baea7 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] polkadot-node-metrics = { path = "../../metrics" } polkadot-node-network-protocol = { path = "../protocol" } @@ -30,8 +33,9 @@ polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } assert_matches = "1.4.0" -schnorrkel = { version = "0.9.1", default-features = false } -rand_core = "0.5.1" # should match schnorrkel +schnorrkel = { version = "0.11.4", default-features = false } +# rand_core should match schnorrkel +rand_core = "0.6.2" rand_chacha = "0.3.1" env_logger = "0.9.0" log = "0.4.17" diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 47482eef764096427a1a227810b386491ba49f11..d520febaef51fa2e7a7da34d0e5be8336c673c57 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -32,14 +32,15 @@ use polkadot_node_network_protocol::{ self as net_protocol, filter_by_peer_version, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, peer_set::MAX_NOTIFICATION_SIZE, - v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, PeerId, + v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::approval::{ - v1::{ - AssignmentCertKind, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, + v1::{AssignmentCertKind, BlockApprovalMeta, IndirectAssignmentCert}, + v2::{ + AsBitIndex, AssignmentCertKindV2, CandidateBitfield, IndirectAssignmentCertV2, + IndirectSignedApprovalVoteV2, }, - v2::{AsBitIndex, AssignmentCertKindV2, CandidateBitfield, IndirectAssignmentCertV2}, }; use polkadot_node_subsystem::{ messages::{ @@ -113,6 +114,14 @@ struct ApprovalRouting { required_routing: RequiredRouting, local: bool, random_routing: RandomRouting, + peers_randomly_routed: Vec, +} + +impl ApprovalRouting { + fn mark_randomly_sent(&mut self, peer: PeerId) { + self.random_routing.inc_sent(); + self.peers_randomly_routed.push(peer); + } } // This struct is responsible for tracking the full state of an assignment and grid routing @@ -121,9 +130,9 @@ struct ApprovalEntry { // The assignment certificate. 
assignment: IndirectAssignmentCertV2, // The candidates claimed by the certificate. A mapping between bit index and candidate index. - candidates: CandidateBitfield, + assignment_claimed_candidates: CandidateBitfield, // The approval signatures for each `CandidateIndex` claimed by the assignment certificate. - approvals: HashMap, + approvals: HashMap, // The validator index of the assignment signer. validator_index: ValidatorIndex, // Information required for gossiping to other peers using the grid topology. @@ -136,6 +145,8 @@ enum ApprovalEntryError { CandidateIndexOutOfBounds, InvalidCandidateIndex, DuplicateApproval, + UnknownAssignment, + AssignmentsFollowedDifferentPaths(RequiredRouting, RequiredRouting), } impl ApprovalEntry { @@ -148,7 +159,7 @@ impl ApprovalEntry { validator_index: assignment.validator, assignment, approvals: HashMap::with_capacity(candidates.len()), - candidates, + assignment_claimed_candidates: candidates, routing_info, } } @@ -156,23 +167,15 @@ impl ApprovalEntry { // Create a `MessageSubject` to reference the assignment. pub fn create_assignment_knowledge(&self, block_hash: Hash) -> (MessageSubject, MessageKind) { ( - MessageSubject(block_hash, self.candidates.clone(), self.validator_index), + MessageSubject( + block_hash, + self.assignment_claimed_candidates.clone(), + self.validator_index, + ), MessageKind::Assignment, ) } - // Create a `MessageSubject` to reference the approval. - pub fn create_approval_knowledge( - &self, - block_hash: Hash, - candidate_index: CandidateIndex, - ) -> (MessageSubject, MessageKind) { - ( - MessageSubject(block_hash, candidate_index.into(), self.validator_index), - MessageKind::Approval, - ) - } - // Updates routing information and returns the previous information if any. pub fn routing_info_mut(&mut self) -> &mut ApprovalRouting { &mut self.routing_info @@ -188,11 +191,21 @@ impl ApprovalEntry { self.routing_info.required_routing = required_routing; } + // Tells if this entry assignment covers at least one candidate in the approval + pub fn includes_approval_candidates(&self, approval: &IndirectSignedApprovalVoteV2) -> bool { + for candidate_index in approval.candidate_indices.iter_ones() { + if self.assignment_claimed_candidates.bit_at((candidate_index).as_bit_index()) { + return true + } + } + return false + } + // Records a new approval. Returns error if the claimed candidate is not found or we already // have received the approval. pub fn note_approval( &mut self, - approval: IndirectSignedApprovalVote, + approval: IndirectSignedApprovalVoteV2, ) -> Result<(), ApprovalEntryError> { // First do some sanity checks: // - check validator index matches @@ -202,37 +215,29 @@ impl ApprovalEntry { return Err(ApprovalEntryError::InvalidValidatorIndex) } - if self.candidates.len() <= approval.candidate_index as usize { - return Err(ApprovalEntryError::CandidateIndexOutOfBounds) - } - - if !self.candidates.bit_at(approval.candidate_index.as_bit_index()) { + // We need at least one of the candidates in the approval to be in this assignment + if !self.includes_approval_candidates(&approval) { return Err(ApprovalEntryError::InvalidCandidateIndex) } - if self.approvals.contains_key(&approval.candidate_index) { + if self.approvals.contains_key(&approval.candidate_indices) { return Err(ApprovalEntryError::DuplicateApproval) } - self.approvals.insert(approval.candidate_index, approval); + self.approvals.insert(approval.candidate_indices.clone(), approval.clone()); Ok(()) } // Get the assignment certiticate and claimed candidates. 
pub fn assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) { - (self.assignment.clone(), self.candidates.clone()) + (self.assignment.clone(), self.assignment_claimed_candidates.clone()) } // Get all approvals for all candidates claimed by the assignment. - pub fn approvals(&self) -> Vec { + pub fn approvals(&self) -> Vec { self.approvals.values().cloned().collect::>() } - // Get the approval for a specific candidate index. - pub fn approval(&self, candidate_index: CandidateIndex) -> Option { - self.approvals.get(&candidate_index).cloned() - } - // Get validator index. pub fn validator_index(&self) -> ValidatorIndex { self.validator_index @@ -430,6 +435,41 @@ impl PeerKnowledge { fn contains(&self, message: &MessageSubject, kind: MessageKind) -> bool { self.sent.contains(message, kind) || self.received.contains(message, kind) } + + // Generate the knowledge keys for querying if all assignments of an approval are known + // by this peer. + fn generate_assignments_keys( + approval: &IndirectSignedApprovalVoteV2, + ) -> Vec<(MessageSubject, MessageKind)> { + approval + .candidate_indices + .iter_ones() + .map(|candidate_index| { + ( + MessageSubject( + approval.block_hash, + (candidate_index as CandidateIndex).into(), + approval.validator, + ), + MessageKind::Assignment, + ) + }) + .collect_vec() + } + + // Generate the knowledge keys for querying if an approval is known by peer. + fn generate_approval_key( + approval: &IndirectSignedApprovalVoteV2, + ) -> (MessageSubject, MessageKind) { + ( + MessageSubject( + approval.block_hash, + approval.candidate_indices.clone(), + approval.validator, + ), + MessageKind::Approval, + ) + } } /// Information about blocks in our current view as well as whether peers know of them. @@ -462,13 +502,13 @@ impl BlockEntry { // First map one entry per candidate to the same key we will use in `approval_entries`. // Key is (Validator_index, CandidateBitfield) that links the `ApprovalEntry` to the (K,V) // entry in `candidate_entry.messages`. - for claimed_candidate_index in entry.candidates.iter_ones() { + for claimed_candidate_index in entry.assignment_claimed_candidates.iter_ones() { match self.candidates.get_mut(claimed_candidate_index) { Some(candidate_entry) => { candidate_entry - .messages + .assignments .entry(entry.validator_index()) - .or_insert(entry.candidates.clone()); + .or_insert(entry.assignment_claimed_candidates.clone()); }, None => { // This should never happen, but if it happens, it means the subsystem is @@ -484,50 +524,107 @@ impl BlockEntry { } self.approval_entries - .entry((entry.validator_index, entry.candidates.clone())) + .entry((entry.validator_index, entry.assignment_claimed_candidates.clone())) .or_insert(entry) } - // Returns a mutable reference of `ApprovalEntry` for `candidate_index` from validator - // `validator_index`. - pub fn approval_entry( + // Tels if all candidate_indices are valid candidates + pub fn contains_candidates(&self, candidate_indices: &CandidateBitfield) -> bool { + candidate_indices + .iter_ones() + .all(|candidate_index| self.candidates.get(candidate_index as usize).is_some()) + } + + // Saves the given approval in all ApprovalEntries that contain an assignment for any of the + // candidates in the approval. + // + // Returns the required routing needed for this approval and the lit of random peers the + // covering assignments were sent. 
+ pub fn note_approval( &mut self, - candidate_index: CandidateIndex, - validator_index: ValidatorIndex, - ) -> Option<&mut ApprovalEntry> { - self.candidates - .get(candidate_index as usize) - .map_or(None, |candidate_entry| candidate_entry.messages.get(&validator_index)) - .map_or(None, |candidate_indices| { - self.approval_entries.get_mut(&(validator_index, candidate_indices.clone())) + approval: IndirectSignedApprovalVoteV2, + ) -> Result<(RequiredRouting, HashSet), ApprovalEntryError> { + let mut required_routing = None; + let mut peers_randomly_routed_to = HashSet::new(); + + if self.candidates.len() < approval.candidate_indices.len() as usize { + return Err(ApprovalEntryError::CandidateIndexOutOfBounds) + } + + // First determine all assignments bitfields that might be covered by this approval + let covered_assignments_bitfields: HashSet = approval + .candidate_indices + .iter_ones() + .filter_map(|candidate_index| { + self.candidates.get_mut(candidate_index).map_or(None, |candidate_entry| { + candidate_entry.assignments.get(&approval.validator).cloned() + }) }) - } + .collect(); - // Get all approval entries for a given candidate. - pub fn approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&ApprovalEntry> { - // Get the keys for fetching `ApprovalEntry` from `self.approval_entries`, - let approval_entry_keys = self - .candidates - .get(candidate_index as usize) - .map(|candidate_entry| &candidate_entry.messages); - - if let Some(approval_entry_keys) = approval_entry_keys { - // Ensure no duplicates. - let approval_entry_keys = approval_entry_keys.iter().unique().collect::>(); - - let mut entries = Vec::new(); - for (validator_index, candidate_indices) in approval_entry_keys { - if let Some(entry) = - self.approval_entries.get(&(*validator_index, candidate_indices.clone())) - { - entries.push(entry); + // Mark the vote in all approval entries + for assignment_bitfield in covered_assignments_bitfields { + if let Some(approval_entry) = + self.approval_entries.get_mut(&(approval.validator, assignment_bitfield)) + { + approval_entry.note_approval(approval.clone())?; + peers_randomly_routed_to + .extend(approval_entry.routing_info().peers_randomly_routed.iter()); + + if let Some(required_routing) = required_routing { + if required_routing != approval_entry.routing_info().required_routing { + // This shouldn't happen since the required routing is computed based on the + // validator_index, so two assignments from the same validators will have + // the same required routing. 
+ return Err(ApprovalEntryError::AssignmentsFollowedDifferentPaths( + required_routing, + approval_entry.routing_info().required_routing, + )) + } + } else { + required_routing = Some(approval_entry.routing_info().required_routing) } } - entries + } + + if let Some(required_routing) = required_routing { + Ok((required_routing, peers_randomly_routed_to)) } else { - vec![] + Err(ApprovalEntryError::UnknownAssignment) } } + + /// Returns the list of approval votes covering this candidate + pub fn approval_votes( + &self, + candidate_index: CandidateIndex, + ) -> Vec { + let result: Option< + HashMap<(ValidatorIndex, CandidateBitfield), IndirectSignedApprovalVoteV2>, + > = self.candidates.get(candidate_index as usize).map(|candidate_entry| { + candidate_entry + .assignments + .iter() + .filter_map(|(validator, assignment_bitfield)| { + self.approval_entries.get(&(*validator, assignment_bitfield.clone())) + }) + .flat_map(|approval_entry| { + approval_entry + .approvals + .clone() + .into_iter() + .filter(|(approved_candidates, _)| { + approved_candidates.bit_at(candidate_index.as_bit_index()) + }) + .map(|(approved_candidates, vote)| { + ((approval_entry.validator_index, approved_candidates), vote) + }) + }) + .collect() + }); + + result.map(|result| result.into_values().collect_vec()).unwrap_or_default() + } } // Information about candidates in the context of a particular block they are included in. @@ -537,7 +634,7 @@ impl BlockEntry { struct CandidateEntry { // The value represents part of the lookup key in `approval_entries` to fetch the assignment // and existing votes. - messages: HashMap, + assignments: HashMap, } #[derive(Debug, Clone, PartialEq)] @@ -557,7 +654,7 @@ impl MessageSource { enum PendingMessage { Assignment(IndirectAssignmentCertV2, CandidateBitfield), - Approval(IndirectSignedApprovalVote), + Approval(IndirectSignedApprovalVoteV2), } #[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)] @@ -830,6 +927,49 @@ impl State { } } + // Entry point for processing an approval coming from a peer. 
+ async fn process_incoming_approvals( + &mut self, + ctx: &mut Context, + metrics: &Metrics, + peer_id: PeerId, + approvals: Vec, + ) { + gum::trace!( + target: LOG_TARGET, + peer_id = %peer_id, + num = approvals.len(), + "Processing approvals from a peer", + ); + for approval_vote in approvals.into_iter() { + if let Some(pending) = self.pending_known.get_mut(&approval_vote.block_hash) { + let block_hash = approval_vote.block_hash; + let validator_index = approval_vote.validator; + + gum::trace!( + target: LOG_TARGET, + %peer_id, + ?block_hash, + ?validator_index, + "Pending assignment candidates {:?}", + approval_vote.candidate_indices, + ); + + pending.push((peer_id, PendingMessage::Approval(approval_vote))); + + continue + } + + self.import_and_circulate_approval( + ctx, + metrics, + MessageSource::Peer(peer_id), + approval_vote, + ) + .await; + } + } + async fn process_incoming_peer_message( &mut self, ctx: &mut Context, @@ -838,16 +978,14 @@ impl State { msg: Versioned< protocol_v1::ApprovalDistributionMessage, protocol_v2::ApprovalDistributionMessage, - protocol_vstaging::ApprovalDistributionMessage, + protocol_v3::ApprovalDistributionMessage, >, rng: &mut R, ) where R: CryptoRng + Rng, { match msg { - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments( - assignments, - )) => { + Versioned::V3(protocol_v3::ApprovalDistributionMessage::Assignments(assignments)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -887,45 +1025,18 @@ impl State { ) .await; }, - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals( - approvals, - )) | + Versioned::V3(protocol_v3::ApprovalDistributionMessage::Approvals(approvals)) => { + self.process_incoming_approvals(ctx, metrics, peer_id, approvals).await; + }, Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) | Versioned::V2(protocol_v2::ApprovalDistributionMessage::Approvals(approvals)) => { - gum::trace!( - target: LOG_TARGET, - peer_id = %peer_id, - num = approvals.len(), - "Processing approvals from a peer", - ); - for approval_vote in approvals.into_iter() { - if let Some(pending) = self.pending_known.get_mut(&approval_vote.block_hash) { - let block_hash = approval_vote.block_hash; - let candidate_index = approval_vote.candidate_index; - let validator_index = approval_vote.validator; - - gum::trace!( - target: LOG_TARGET, - %peer_id, - ?block_hash, - ?candidate_index, - ?validator_index, - "Pending assignment", - ); - - pending.push((peer_id, PendingMessage::Approval(approval_vote))); - - continue - } - - self.import_and_circulate_approval( - ctx, - metrics, - MessageSource::Peer(peer_id), - approval_vote, - ) - .await; - } + self.process_incoming_approvals( + ctx, + metrics, + peer_id, + approvals.into_iter().map(|approval| approval.into()).collect::>(), + ) + .await; }, } } @@ -1071,8 +1182,11 @@ impl State { COST_UNEXPECTED_MESSAGE, ) .await; + gum::debug!(target: LOG_TARGET, "Received assignment for invalid block"); + metrics.on_assignment_recent_outdated(); } } + metrics.on_assignment_invalid_block(); return }, }; @@ -1105,6 +1219,7 @@ impl State { COST_DUPLICATE_MESSAGE, ) .await; + metrics.on_assignment_duplicate(); } else { gum::trace!( target: LOG_TARGET, @@ -1132,6 +1247,7 @@ impl State { COST_UNEXPECTED_MESSAGE, ) .await; + metrics.on_assignment_out_of_view(); }, } @@ -1148,6 +1264,7 @@ impl State { gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known assignment"); peer_knowledge.received.insert(message_subject, message_kind); } + 
metrics.on_assignment_good_known(); return } @@ -1204,6 +1321,8 @@ impl State { ?peer_id, "Got an `AcceptedDuplicate` assignment", ); + metrics.on_assignment_duplicatevoting(); + return }, AssignmentCheckResult::TooFarInFuture => { @@ -1220,6 +1339,8 @@ impl State { COST_ASSIGNMENT_TOO_FAR_IN_THE_FUTURE, ) .await; + metrics.on_assignment_far(); + return }, AssignmentCheckResult::Bad(error) => { @@ -1237,6 +1358,7 @@ impl State { COST_INVALID_MESSAGE, ) .await; + metrics.on_assignment_bad(); return }, } @@ -1275,7 +1397,12 @@ impl State { let approval_entry = entry.insert_approval_entry(ApprovalEntry::new( assignment.clone(), claimed_candidate_indices.clone(), - ApprovalRouting { required_routing, local, random_routing: Default::default() }, + ApprovalRouting { + required_routing, + local, + random_routing: Default::default(), + peers_randomly_routed: Default::default(), + }, )); // Dispatch the message to all peers in the routing set which @@ -1305,6 +1432,10 @@ impl State { continue } + if !topology.map(|topology| topology.is_validator(&peer)).unwrap_or(false) { + continue + } + // Note: at this point, we haven't received the message from any peers // other than the source peer, and we just got it, so we haven't sent it // to any peers either. @@ -1312,7 +1443,7 @@ impl State { approval_entry.routing_info().random_routing.sample(n_peers_total, rng); if route_random { - approval_entry.routing_info_mut().random_routing.inc_sent(); + approval_entry.routing_info_mut().mark_randomly_sent(peer); peers.push(peer); } } @@ -1346,12 +1477,94 @@ impl State { } } + // Checks if an approval can be processed. + // Returns true if we can continue with processing the approval and false otherwise. + async fn check_approval_can_be_processed( + ctx: &mut Context, + assignments_knowledge_key: &Vec<(MessageSubject, MessageKind)>, + approval_knowledge_key: &(MessageSubject, MessageKind), + entry: &mut BlockEntry, + reputation: &mut ReputationAggregator, + peer_id: PeerId, + metrics: &Metrics, + ) -> bool { + for message_subject in assignments_knowledge_key { + if !entry.knowledge.contains(&message_subject.0, message_subject.1) { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + ?message_subject, + "Unknown approval assignment", + ); + modify_reputation(reputation, ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; + metrics.on_approval_unknown_assignment(); + return false + } + } + + // check if our knowledge of the peer already contains this approval + match entry.known_by.entry(peer_id) { + hash_map::Entry::Occupied(mut knowledge) => { + let peer_knowledge = knowledge.get_mut(); + if peer_knowledge.contains(&approval_knowledge_key.0, approval_knowledge_key.1) { + if !peer_knowledge + .received + .insert(approval_knowledge_key.0.clone(), approval_knowledge_key.1) + { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + ?approval_knowledge_key, + "Duplicate approval", + ); + + modify_reputation( + reputation, + ctx.sender(), + peer_id, + COST_DUPLICATE_MESSAGE, + ) + .await; + metrics.on_approval_duplicate(); + } + return false + } + }, + hash_map::Entry::Vacant(_) => { + gum::debug!( + target: LOG_TARGET, + ?peer_id, + ?approval_knowledge_key, + "Approval from a peer is out of view", + ); + modify_reputation(reputation, ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; + metrics.on_approval_out_of_view(); + }, + } + + if entry.knowledge.contains(&approval_knowledge_key.0, approval_knowledge_key.1) { + if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { + peer_knowledge + .received + 
.insert(approval_knowledge_key.0.clone(), approval_knowledge_key.1); + } + + // We already processed this approval no need to continue. + gum::trace!(target: LOG_TARGET, ?peer_id, ?approval_knowledge_key, "Known approval"); + metrics.on_approval_good_known(); + modify_reputation(reputation, ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await; + false + } else { + true + } + } + async fn import_and_circulate_approval( &mut self, ctx: &mut Context, metrics: &Metrics, source: MessageSource, - vote: IndirectSignedApprovalVote, + vote: IndirectSignedApprovalVoteV2, ) { let _span = self .spans @@ -1370,10 +1583,9 @@ impl State { let block_hash = vote.block_hash; let validator_index = vote.validator; - let candidate_index = vote.candidate_index; - + let candidate_indices = &vote.candidate_indices; let entry = match self.blocks.get_mut(&block_hash) { - Some(entry) if entry.candidates.get(candidate_index as usize).is_some() => entry, + Some(entry) if entry.contains_candidates(&vote.candidate_indices) => entry, _ => { if let Some(peer_id) = source.peer_id() { if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) { @@ -1382,7 +1594,7 @@ impl State { ?peer_id, ?block_hash, ?validator_index, - ?candidate_index, + ?candidate_indices, "Approval from a peer is out of view", ); modify_reputation( @@ -1392,6 +1604,9 @@ impl State { COST_UNEXPECTED_MESSAGE, ) .await; + metrics.on_approval_invalid_block(); + } else { + metrics.on_approval_recent_outdated(); } } return @@ -1399,81 +1614,21 @@ impl State { }; // compute metadata on the assignment. - let message_subject = MessageSubject(block_hash, candidate_index.into(), validator_index); - let message_kind = MessageKind::Approval; + let assignments_knowledge_keys = PeerKnowledge::generate_assignments_keys(&vote); + let approval_knwowledge_key = PeerKnowledge::generate_approval_key(&vote); if let Some(peer_id) = source.peer_id() { - if !entry.knowledge.contains(&message_subject, MessageKind::Assignment) { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Unknown approval assignment", - ); - modify_reputation( - &mut self.reputation, - ctx.sender(), - peer_id, - COST_UNEXPECTED_MESSAGE, - ) - .await; - return - } - - // check if our knowledge of the peer already contains this approval - match entry.known_by.entry(peer_id) { - hash_map::Entry::Occupied(mut knowledge) => { - let peer_knowledge = knowledge.get_mut(); - if peer_knowledge.contains(&message_subject, message_kind) { - if !peer_knowledge.received.insert(message_subject.clone(), message_kind) { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Duplicate approval", - ); - - modify_reputation( - &mut self.reputation, - ctx.sender(), - peer_id, - COST_DUPLICATE_MESSAGE, - ) - .await; - } - return - } - }, - hash_map::Entry::Vacant(_) => { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Approval from a peer is out of view", - ); - modify_reputation( - &mut self.reputation, - ctx.sender(), - peer_id, - COST_UNEXPECTED_MESSAGE, - ) - .await; - }, - } - - // if the approval is known to be valid, reward the peer - if entry.knowledge.contains(&message_subject, message_kind) { - gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known approval"); - modify_reputation( - &mut self.reputation, - ctx.sender(), - peer_id, - BENEFIT_VALID_MESSAGE, - ) - .await; - if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { - peer_knowledge.received.insert(message_subject.clone(), message_kind); - } + if 
!Self::check_approval_can_be_processed( + ctx, + &assignments_knowledge_keys, + &approval_knwowledge_key, + entry, + &mut self.reputation, + peer_id, + metrics, + ) + .await + { return } @@ -1495,8 +1650,8 @@ impl State { gum::trace!( target: LOG_TARGET, ?peer_id, - ?message_subject, ?result, + ?vote, "Checked approval", ); match result { @@ -1509,9 +1664,13 @@ impl State { ) .await; - entry.knowledge.insert(message_subject.clone(), message_kind); + entry + .knowledge + .insert(approval_knwowledge_key.0.clone(), approval_knwowledge_key.1); if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { - peer_knowledge.received.insert(message_subject.clone(), message_kind); + peer_knowledge + .received + .insert(approval_knwowledge_key.0.clone(), approval_knwowledge_key.1); } }, ApprovalCheckResult::Bad(error) => { @@ -1528,74 +1687,55 @@ impl State { %error, "Got a bad approval from peer", ); + metrics.on_approval_bad(); return }, } } else { - if !entry.knowledge.insert(message_subject.clone(), message_kind) { - // if we already imported an approval, there is no need to distribute it again + if !entry + .knowledge + .insert(approval_knwowledge_key.0.clone(), approval_knwowledge_key.1) + { + // if we already imported all approvals, there is no need to distribute it again gum::warn!( target: LOG_TARGET, - ?message_subject, "Importing locally an already known approval", ); return } else { gum::debug!( target: LOG_TARGET, - ?message_subject, "Importing locally a new approval", ); } } - let required_routing = match entry.approval_entry(candidate_index, validator_index) { - Some(approval_entry) => { - // Invariant: to our knowledge, none of the peers except for the `source` know about - // the approval. - metrics.on_approval_imported(); - - if let Err(err) = approval_entry.note_approval(vote.clone()) { - // this would indicate a bug in approval-voting: - // - validator index mismatch - // - candidate index mismatch - // - duplicate approval - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?candidate_index, - ?validator_index, - ?err, - "Possible bug: Vote import failed", - ); - - return - } - - approval_entry.routing_info().required_routing - }, - None => { - let peer_id = source.peer_id(); - // This indicates a bug in approval-distribution, since we check the knowledge at - // the begining of the function. + let (required_routing, peers_randomly_routed_to) = match entry.note_approval(vote.clone()) { + Ok(required_routing) => required_routing, + Err(err) => { gum::warn!( target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Unknown approval assignment", + hash = ?block_hash, + validator_index = ?vote.validator, + candidate_bitfield = ?vote.candidate_indices, + ?err, + "Possible bug: Vote import failed", ); - // No rep change as this is caused by an issue + metrics.on_approval_bug(); return }, }; + // Invariant: to our knowledge, none of the peers except for the `source` know about the + // approval. + metrics.on_approval_imported(); + // Dispatch a ApprovalDistributionV1Message::Approval(vote) // to all peers required by the topology, with the exception of the source peer. let topology = self.topologies.get_topology(entry.session); let source_peer = source.peer_id(); - let message_subject = &message_subject; - let peer_filter = move |peer, knowledge: &PeerKnowledge| { + let peer_filter = move |peer| { if Some(peer) == source_peer.as_ref() { return false } @@ -1611,13 +1751,13 @@ impl State { // 3. Any randomly selected peers have been sent the assignment already. 
let in_topology = topology .map_or(false, |t| t.local_grid_neighbors().route_to_peer(required_routing, peer)); - in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) + in_topology || peers_randomly_routed_to.contains(peer) }; let peers = entry .known_by .iter() - .filter(|(p, k)| peer_filter(p, k)) + .filter(|(p, _)| peer_filter(p)) .filter_map(|(p, _)| self.peer_views.get(p).map(|entry| (*p, entry.version))) .collect::>(); @@ -1625,7 +1765,7 @@ impl State { for peer in peers.iter() { // we already filtered peers above, so this should always be Some if let Some(entry) = entry.known_by.get_mut(&peer.0) { - entry.sent.insert(message_subject.clone(), message_kind); + entry.sent.insert(approval_knwowledge_key.0.clone(), approval_knwowledge_key.1); } } @@ -1634,7 +1774,6 @@ impl State { gum::trace!( target: LOG_TARGET, ?block_hash, - ?candidate_index, local = source.peer_id().is_none(), num_peers = peers.len(), "Sending an approval to peers", @@ -1647,7 +1786,7 @@ impl State { fn get_approval_signatures( &mut self, indices: HashSet<(Hash, CandidateIndex)>, - ) -> HashMap { + ) -> HashMap, ValidatorSignature)> { let mut all_sigs = HashMap::new(); for (hash, index) in indices { let _span = self @@ -1670,11 +1809,20 @@ impl State { Some(e) => e, }; - let sigs = block_entry - .approval_entries(index) - .into_iter() - .filter_map(|approval_entry| approval_entry.approval(index)) - .map(|approval| (approval.validator, approval.signature)); + let sigs = block_entry.approval_votes(index).into_iter().map(|approval| { + ( + approval.validator, + ( + hash, + approval + .candidate_indices + .iter_ones() + .map(|val| val as CandidateIndex) + .collect_vec(), + approval.signature, + ), + ) + }); all_sigs.extend(sigs); } all_sigs @@ -1718,23 +1866,31 @@ impl State { let peer_knowledge = entry.known_by.entry(peer_id).or_default(); let topology = topologies.get_topology(entry.session); - // We want to iterate the `approval_entries` of the block entry as these contain all - // assignments that also link all approval votes. + // We want to iterate the `approval_entries` of the block entry as these contain + // all assignments that also link all approval votes. for approval_entry in entry.approval_entries.values_mut() { // Propagate the message to all peers in the required routing set OR // randomly sample peers. { let required_routing = approval_entry.routing_info().required_routing; - let random_routing = &mut approval_entry.routing_info_mut().random_routing; + let routing_info = &mut approval_entry.routing_info_mut(); let rng = &mut *rng; let mut peer_filter = move |peer_id| { let in_topology = topology.as_ref().map_or(false, |t| { t.local_grid_neighbors().route_to_peer(required_routing, peer_id) }); in_topology || { - let route_random = random_routing.sample(total_peers, rng); + if !topology + .map(|topology| topology.is_validator(peer_id)) + .unwrap_or(false) + { + return false + } + + let route_random = + routing_info.random_routing.sample(total_peers, rng); if route_random { - random_routing.inc_sent(); + routing_info.mark_randomly_sent(*peer_id); } route_random @@ -1751,7 +1907,8 @@ impl State { let (assignment_knowledge, message_kind) = approval_entry.create_assignment_knowledge(block); - // Only send stuff a peer doesn't know in the context of a relay chain block. + // Only send stuff a peer doesn't know in the context of a relay chain + // block. 
if !peer_knowledge.contains(&assignment_knowledge, message_kind) { peer_knowledge.sent.insert(assignment_knowledge, message_kind); assignments_to_send.push(assignment_message); @@ -1759,12 +1916,12 @@ impl State { // Filter approval votes. for approval_message in approval_messages { - let (approval_knowledge, message_kind) = approval_entry - .create_approval_knowledge(block, approval_message.candidate_index); + let approval_knowledge = + PeerKnowledge::generate_approval_key(&approval_message); - if !peer_knowledge.contains(&approval_knowledge, message_kind) { - peer_knowledge.sent.insert(approval_knowledge, message_kind); + if !peer_knowledge.contains(&approval_knowledge.0, approval_knowledge.1) { approvals_to_send.push(approval_message); + peer_knowledge.sent.insert(approval_knowledge.0, approval_knowledge.1); } } } @@ -1937,6 +2094,7 @@ impl State { // Punish the peer for the invalid message. modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) .await; + gum::error!(target: LOG_TARGET, block_hash = ?cert.block_hash, ?candidate_index, validator_index = ?cert.validator, kind = ?cert.cert.kind, "Bad assignment v1"); } else { sanitized_assignments.push((cert.into(), candidate_index.into())) } @@ -1979,6 +2137,9 @@ impl State { // Punish the peer for the invalid message. modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) .await; + for candidate_index in candidate_bitfield.iter_ones() { + gum::error!(target: LOG_TARGET, block_hash = ?cert.block_hash, ?candidate_index, validator_index = ?cert.validator, "Bad assignment v2"); + } } else { sanitized_assignments.push((cert, candidate_bitfield)) } @@ -2066,11 +2227,10 @@ async fn adjust_required_routing_and_propagate { gum::debug!( target: LOG_TARGET, - "Distributing our approval vote on candidate (block={}, index={})", + "Distributing our approval vote on candidate (block={}, index={:?})", vote.block_hash, - vote.candidate_index, + vote.candidate_indices, ); state @@ -2296,7 +2456,7 @@ pub const MAX_ASSIGNMENT_BATCH_SIZE: usize = ensure_size_not_zero( /// The maximum amount of approvals per batch is 33% of maximum allowed by protocol. pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( - MAX_NOTIFICATION_SIZE as usize / std::mem::size_of::() / 3, + MAX_NOTIFICATION_SIZE as usize / std::mem::size_of::() / 3, ); // Low level helper for sending assignments. @@ -2306,12 +2466,12 @@ async fn send_assignments_batched_inner( peers: Vec, peer_version: ValidationVersion, ) { - if peer_version == ValidationVersion::VStaging { + if peer_version == ValidationVersion::V3 { sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments( + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments( batch.into_iter().collect(), ), )), @@ -2362,7 +2522,7 @@ pub(crate) async fn send_assignments_batched( ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); - let vstaging_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); + let v3_peers = filter_by_peer_version(peers, ValidationVersion::V3.into()); // V1 and V2 validation protocol do not have any changes with regard to // ApprovalDistributionMessage so they can be treated the same. 
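The batched senders in the hunks above and below first split peers by advertised protocol version: v1/v2 peers only understand single-candidate approval messages, v3 peers accept the coalesced multi-candidate form, and each send is chunked to respect the MAX_*_BATCH_SIZE limits. The following is a standalone sketch of that filter-then-batch idea; PeerVersion, ApprovalVote and the batch size used here are simplified stand-ins for illustration, not the subsystem's real types.

// Simplified stand-ins; only the filtering/batching behaviour mirrors the code above.
#[derive(Clone, Copy, Debug)]
enum PeerVersion {
	V1,
	V2,
	V3,
}

#[derive(Clone, Debug)]
struct ApprovalVote {
	// Bit i set => candidate i is covered by this single signature.
	candidate_bits: Vec<bool>,
}

// Illustrative batch size; the real constants are derived from MAX_NOTIFICATION_SIZE.
const MAX_APPROVAL_BATCH_SIZE: usize = 2;

// v1/v2 peers only get votes that cover exactly one candidate; v3 peers get everything.
fn batches_for_version(approvals: &[ApprovalVote], version: PeerVersion) -> Vec<Vec<ApprovalVote>> {
	let eligible: Vec<ApprovalVote> = approvals
		.iter()
		.filter(|vote| match version {
			PeerVersion::V1 | PeerVersion::V2 =>
				vote.candidate_bits.iter().filter(|bit| **bit).count() == 1,
			PeerVersion::V3 => true,
		})
		.cloned()
		.collect();

	// Chunk into protocol-sized batches so no single notification grows unbounded.
	eligible.chunks(MAX_APPROVAL_BATCH_SIZE).map(|chunk| chunk.to_vec()).collect()
}

fn main() {
	let votes = vec![
		ApprovalVote { candidate_bits: vec![true, false] },
		ApprovalVote { candidate_bits: vec![true, true] }, // multi-candidate vote
		ApprovalVote { candidate_bits: vec![false, true] },
	];

	// v1 peers: the multi-candidate vote is filtered out, two votes remain.
	assert_eq!(batches_for_version(&votes, PeerVersion::V1).concat().len(), 2);
	// v3 peers: all three votes are sent, split into two batches of at most two.
	assert_eq!(batches_for_version(&votes, PeerVersion::V3).len(), 2);
}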
@@ -2400,18 +2560,13 @@ pub(crate) async fn send_assignments_batched( } } - if !vstaging_peers.is_empty() { - let mut vstaging = v2_assignments.into_iter().peekable(); + if !v3_peers.is_empty() { + let mut v3 = v2_assignments.into_iter().peekable(); - while vstaging.peek().is_some() { - let batch = vstaging.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); - send_assignments_batched_inner( - sender, - batch, - vstaging_peers.clone(), - ValidationVersion::VStaging, - ) - .await; + while v3.peek().is_some() { + let batch = v3.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); + send_assignments_batched_inner(sender, batch, v3_peers.clone(), ValidationVersion::V3) + .await; } } } @@ -2419,15 +2574,20 @@ pub(crate) async fn send_assignments_batched( /// Send approvals while honoring the `max_notification_size` of the protocol and peer version. pub(crate) async fn send_approvals_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - approvals: impl IntoIterator + Clone, + approvals: impl IntoIterator + Clone, peers: &[(PeerId, ProtocolVersion)], ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); - let vstaging_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); + let v3_peers = filter_by_peer_version(peers, ValidationVersion::V3.into()); if !v1_peers.is_empty() || !v2_peers.is_empty() { - let mut batches = approvals.clone().into_iter().peekable(); + let mut batches = approvals + .clone() + .into_iter() + .filter(|approval| approval.candidate_indices.count_ones() == 1) + .filter_map(|val| val.try_into().ok()) + .peekable(); while batches.peek().is_some() { let batch: Vec<_> = batches.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect(); @@ -2456,7 +2616,7 @@ pub(crate) async fn send_approvals_batched( } } - if !vstaging_peers.is_empty() { + if !v3_peers.is_empty() { let mut batches = approvals.into_iter().peekable(); while batches.peek().is_some() { @@ -2464,12 +2624,10 @@ pub(crate) async fn send_approvals_batched( sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers.clone(), - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(batch), - ), - ), + v3_peers.clone(), + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(batch), + )), )) .await; } diff --git a/polkadot/node/network/approval-distribution/src/metrics.rs b/polkadot/node/network/approval-distribution/src/metrics.rs index 6864259e6fdb90ce3be3553d101afe1023f1b523..0642b1b2e0cdcea51d4a34263bad87ad6612f035 100644 --- a/polkadot/node/network/approval-distribution/src/metrics.rs +++ b/polkadot/node/network/approval-distribution/src/metrics.rs @@ -31,6 +31,8 @@ struct MetricsInner { time_unify_with_peer: prometheus::Histogram, time_import_pending_now_known: prometheus::Histogram, time_awaiting_approval_voting: prometheus::Histogram, + assignments_received_result: prometheus::CounterVec, + approvals_received_result: prometheus::CounterVec, } trait AsLabel { @@ -78,6 +80,132 @@ impl Metrics { .map(|metrics| metrics.time_import_pending_now_known.start_timer()) } + pub fn on_approval_already_known(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["known"]).inc() + } + } + + pub fn on_approval_entry_not_found(&self) { + if let Some(metrics) = &self.0 { + 
metrics.approvals_received_result.with_label_values(&["noapprovalentry"]).inc() + } + } + + pub fn on_approval_recent_outdated(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["outdated"]).inc() + } + } + + pub fn on_approval_invalid_block(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["invalidblock"]).inc() + } + } + + pub fn on_approval_unknown_assignment(&self) { + if let Some(metrics) = &self.0 { + metrics + .approvals_received_result + .with_label_values(&["unknownassignment"]) + .inc() + } + } + + pub fn on_approval_duplicate(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["duplicate"]).inc() + } + } + + pub fn on_approval_out_of_view(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["outofview"]).inc() + } + } + + pub fn on_approval_good_known(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["goodknown"]).inc() + } + } + + pub fn on_approval_bad(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["bad"]).inc() + } + } + + pub fn on_approval_unexpected(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["unexpected"]).inc() + } + } + + pub fn on_approval_bug(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["bug"]).inc() + } + } + + pub fn on_assignment_already_known(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["known"]).inc() + } + } + + pub fn on_assignment_recent_outdated(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["outdated"]).inc() + } + } + + pub fn on_assignment_invalid_block(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["invalidblock"]).inc() + } + } + + pub fn on_assignment_duplicate(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["duplicate"]).inc() + } + } + + pub fn on_assignment_out_of_view(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["outofview"]).inc() + } + } + + pub fn on_assignment_good_known(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["goodknown"]).inc() + } + } + + pub fn on_assignment_bad(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["bad"]).inc() + } + } + + pub fn on_assignment_duplicatevoting(&self) { + if let Some(metrics) = &self.0 { + metrics + .assignments_received_result + .with_label_values(&["duplicatevoting"]) + .inc() + } + } + + pub fn on_assignment_far(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["far"]).inc() + } + } + pub(crate) fn time_awaiting_approval_voting( &self, ) -> Option { @@ -167,6 +295,26 @@ impl MetricsTrait for Metrics { ).buckets(vec![0.0001, 0.0004, 0.0016, 0.0064, 0.0256, 0.1024, 0.4096, 1.6384, 3.2768, 4.9152, 6.5536,]))?, registry, )?, + assignments_received_result: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_assignments_received_result", + "Result of a processed assignement", + ), + &["status"] + )?, + registry, + )?, + 
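The registration above (and the matching `approvals_received_result` registration that follows) uses the standard Prometheus "one counter family, one label value per outcome" pattern that the `on_assignment_*` / `on_approval_*` helpers increment. Below is a minimal, self-contained sketch of that pattern against the plain `prometheus` crate, bypassing the subsystem's `Metrics` wrapper and registry plumbing (an assumption made for brevity).

use prometheus::{CounterVec, Opts, Registry};

fn main() -> prometheus::Result<()> {
	let registry = Registry::new();

	// One metric family; each processing outcome becomes its own "status" label.
	let approvals_received_result = CounterVec::new(
		Opts::new(
			"polkadot_parachain_approvals_received_result",
			"Result of a processed approval",
		),
		&["status"],
	)?;
	registry.register(Box::new(approvals_received_result.clone()))?;

	// Handlers bump the series matching their outcome, e.g. a duplicate or an
	// already-known-good approval.
	approvals_received_result.with_label_values(&["duplicate"]).inc();
	approvals_received_result.with_label_values(&["goodknown"]).inc();
	approvals_received_result.with_label_values(&["goodknown"]).inc();

	assert_eq!(approvals_received_result.with_label_values(&["duplicate"]).get(), 1.0);
	assert_eq!(approvals_received_result.with_label_values(&["goodknown"]).get(), 2.0);

	// A single family is exposed; Prometheus sees one time series per status value.
	assert_eq!(registry.gather().len(), 1);
	Ok(())
}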
approvals_received_result: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_approvals_received_result", + "Result of a processed approval", + ), + &["status"] + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 33c38c7c5dfa309ae7dc52ec38dd541bba52a3d4..7d933e2047f26033a3c23cbee2bc82595a62f839 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -25,8 +25,8 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::approval::{ v1::{ - AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, VrfOutput, VrfProof, - VrfSignature, + AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, IndirectSignedApprovalVote, + VrfPreOutput, VrfProof, VrfSignature, }, v2::{ AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, IndirectAssignmentCertV2, @@ -133,14 +133,13 @@ fn make_gossip_topology( all_peers: &[(PeerId, AuthorityDiscoveryId)], neighbors_x: &[usize], neighbors_y: &[usize], + local_index: usize, ) -> network_bridge_event::NewGossipTopology { // This builds a grid topology which is a square matrix. // The local validator occupies the top left-hand corner. // The X peers occupy the same row and the Y peers occupy // the same column. - let local_index = 1; - assert_eq!( neighbors_x.len(), neighbors_y.len(), @@ -277,16 +276,16 @@ async fn send_message_from_peer_v2( .await; } -async fn send_message_from_peer_vstaging( +async fn send_message_from_peer_v3( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, - msg: protocol_vstaging::ApprovalDistributionMessage, + msg: protocol_v3::ApprovalDistributionMessage, ) { overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( *peer_id, - Versioned::VStaging(msg), + Versioned::V3(msg), )), ) .await; @@ -298,14 +297,14 @@ fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> Indirect let mut prng = rand_core::OsRng; let keypair = schnorrkel::Keypair::generate_with(&mut prng); let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); - let out = inout.to_output(); + let preout = inout.to_preout(); IndirectAssignmentCert { block_hash, validator, cert: AssignmentCert { kind: AssignmentCertKind::RelayVRFModulo { sample: 1 }, - vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) }, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, }, } } @@ -320,14 +319,14 @@ fn fake_assignment_cert_v2( let mut prng = rand_core::OsRng; let keypair = schnorrkel::Keypair::generate_with(&mut prng); let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); - let out = inout.to_output(); + let preout = inout.to_preout(); IndirectAssignmentCertV2 { block_hash, validator, cert: AssignmentCertV2 { kind: AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield }, - vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) }, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, }, } } @@ -380,10 +379,11 @@ fn state_with_reputation_delay() -> State { /// the new peer sends us the same assignment #[test] fn try_import_the_same_assignment() { - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); - let peer_d = PeerId::random(); + let peers = make_peers_and_authority_ids(15); + let peer_a = 
peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let peer_d = peers.get(4).unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); @@ -394,6 +394,10 @@ fn try_import_the_same_assignment() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; + // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // testing. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { hash, @@ -446,7 +450,7 @@ fn try_import_the_same_assignment() { ); // setup new peer with V2 - setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::V3).await; // send the same assignment from peer_d let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); @@ -464,19 +468,24 @@ fn try_import_the_same_assignment() { /// cores. #[test] fn try_import_the_same_assignment_v2() { - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); - let peer_d = PeerId::random(); + let peers = make_peers_and_authority_ids(15); + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let peer_d = peers.get(4).unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::VStaging).await; - setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::VStaging).await; - setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; + + // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // testing. 
+ setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -503,8 +512,8 @@ fn try_import_the_same_assignment_v2() { let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfield.clone()); let assignments = vec![(cert.clone(), cores.clone().try_into().unwrap())]; - let msg = protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer_vstaging(overseer, &peer_a, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(assignments.clone()); + send_message_from_peer_v3(overseer, &peer_a, msg).await; expect_reputation_change(overseer, &peer_a, COST_UNEXPECTED_MESSAGE).await; @@ -528,8 +537,8 @@ fn try_import_the_same_assignment_v2() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) )) )) => { assert_eq!(peers.len(), 2); @@ -538,11 +547,11 @@ fn try_import_the_same_assignment_v2() { ); // setup new peer - setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::V3).await; // send the same assignment from peer_d - let msg = protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments); - send_message_from_peer_vstaging(overseer, &peer_d, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(assignments); + send_message_from_peer_v3(overseer, &peer_d, msg).await; expect_reputation_change(overseer, &peer_d, COST_UNEXPECTED_MESSAGE).await; expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE).await; @@ -705,14 +714,19 @@ fn spam_attack_results_in_negative_reputation_change() { #[test] fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let parent_hash = Hash::repeat_byte(0xFF); - let peer_a = PeerId::random(); let hash = Hash::repeat_byte(0xAA); + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; let peer = &peer_a; setup_peer_with_view(overseer, peer, view![], ValidationVersion::V1).await; + // Setup a topology where peer_a is neigboor to current node. 
+ setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await; + // new block `hash` with 1 candidates let meta = BlockApprovalMeta { hash, @@ -780,10 +794,12 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { } #[test] -fn import_approval_happy_path() { - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); +fn import_approval_happy_path_v1_v2_peers() { + let peers = make_peers_and_authority_ids(15); + + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); @@ -791,7 +807,7 @@ fn import_approval_happy_path() { let overseer = &mut virtual_overseer; // setup peers with V1 and V2 protocol versions setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; - setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates @@ -806,6 +822,9 @@ fn import_approval_happy_path() { let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); overseer_send(overseer, msg).await; + // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + // import an assignment related to `hash` locally let validator_index = ValidatorIndex(0); let candidate_index = 0u32; @@ -838,8 +857,8 @@ fn import_approval_happy_path() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) )) )) => { assert_eq!(peers.len(), 1); @@ -848,14 +867,15 @@ fn import_approval_happy_path() { ); // send the an approval from peer_b - let approval = IndirectSignedApprovalVote { + let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, - candidate_index, + candidate_indices: candidate_index.into(), validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + let msg: protocol_v3::ApprovalDistributionMessage = + protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_b, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -886,6 +906,474 @@ fn import_approval_happy_path() { }); } +// Test a v2 approval that signs multiple candidate is correctly processed. 
+#[test] +fn import_approval_happy_path_v2() { + let peers = make_peers_and_authority_ids(15); + + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + // setup peers with V2 protocol versions + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 2], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(0); + let candidate_indices: CandidateBitfield = + vec![0 as CandidateIndex, 1 as CandidateIndex].try_into().unwrap(); + let candidate_bitfields = vec![CoreIndex(0), CoreIndex(1)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_indices.clone(), + ), + ) + .await; + + // 1 peer is v2 + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 2); + assert_eq!(assignments.len(), 1); + } + ); + + // send the an approval from peer_b + let approval = IndirectSignedApprovalVoteV2 { + block_hash: hash, + candidate_indices, + validator: validator_index, + signature: dummy_signature(), + }; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_b, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportApproval( + vote, + tx, + )) => { + assert_eq!(vote, approval); + tx.send(ApprovalCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_b, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert_eq!(peers.len(), 1); + assert_eq!(approvals.len(), 1); + } + ); + virtual_overseer + }); +} + +// Tests that votes that cover multiple assignments candidates are correctly processed on importing +#[test] +fn multiple_assignments_covered_with_one_approval_vote() { + let peers = make_peers_and_authority_ids(15); + + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let peer_d = 
peers.get(4).unwrap().0; + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + // setup peers with V2 protocol versions + setup_peer_with_view(overseer, &peer_a, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_d, view![hash], ValidationVersion::V3).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 2], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(2); // peer_c is the originator + let candidate_indices: CandidateBitfield = + vec![0 as CandidateIndex, 1 as CandidateIndex].try_into().unwrap(); + + let core_bitfields = vec![CoreIndex(0)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfields); + + // send the candidate 0 assignment from peer_b + let assignment = IndirectAssignmentCertV2 { + block_hash: hash, + validator: validator_index, + cert: cert.cert, + }; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![( + assignment, + (0 as CandidateIndex).into(), + )]); + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + _, _, + tx, + )) => { + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert!(peers.len() >= 2); + assert!(peers.contains(&peer_a)); + assert!(peers.contains(&peer_b)); + assert_eq!(assignments.len(), 1); + } + ); + + let candidate_bitfields = vec![CoreIndex(1)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); + + // send the candidate 1 assignment from peer_c + let assignment = IndirectAssignmentCertV2 { + block_hash: hash, + validator: validator_index, + cert: cert.cert, + }; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![( + assignment, + (1 as CandidateIndex).into(), + )]); + + send_message_from_peer_v3(overseer, &peer_c, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + _, _, + tx, + )) => { + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + expect_reputation_change(overseer, &peer_c, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + 
Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert!(peers.len() >= 2); + assert!(peers.contains(&peer_b)); + assert!(peers.contains(&peer_a)); + assert_eq!(assignments.len(), 1); + } + ); + + // send an approval from peer_b + let approval = IndirectSignedApprovalVoteV2 { + block_hash: hash, + candidate_indices, + validator: validator_index, + signature: dummy_signature(), + }; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportApproval( + vote, + tx, + )) => { + assert_eq!(vote, approval); + tx.send(ApprovalCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert!(peers.len() >= 2); + assert!(peers.contains(&peer_b)); + assert!(peers.contains(&peer_a)); + assert_eq!(approvals.len(), 1); + } + ); + for candidate_index in 0..1 { + let (tx_distribution, rx_distribution) = oneshot::channel(); + let mut candidates_requesting_signatures = HashSet::new(); + candidates_requesting_signatures.insert((hash, candidate_index)); + overseer_send( + overseer, + ApprovalDistributionMessage::GetApprovalSignatures( + candidates_requesting_signatures, + tx_distribution, + ), + ) + .await; + let signatures = rx_distribution.await.unwrap(); + + assert_eq!(signatures.len(), 1); + for (signing_validator, signature) in signatures { + assert_eq!(validator_index, signing_validator); + assert_eq!(signature.0, hash); + assert_eq!(signature.2, approval.signature); + assert_eq!(signature.1, vec![0, 1]); + } + } + virtual_overseer + }); +} + +// Tests that votes that cover multiple assignments candidates are correctly processed when unify +// with peer view +#[test] +fn unify_with_peer_multiple_assignments_covered_with_one_approval_vote() { + let peers = make_peers_and_authority_ids(15); + + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_d = peers.get(4).unwrap().0; + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + setup_peer_with_view(overseer, &peer_d, view![hash], ValidationVersion::V3).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 2], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. 
+ setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(2); // peer_c is the originator + let candidate_indices: CandidateBitfield = + vec![0 as CandidateIndex, 1 as CandidateIndex].try_into().unwrap(); + + let core_bitfields = vec![CoreIndex(0)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfields); + + // send the candidate 0 assignment from peer_b + let assignment = IndirectAssignmentCertV2 { + block_hash: hash, + validator: validator_index, + cert: cert.cert, + }; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![( + assignment, + (0 as CandidateIndex).into(), + )]); + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + _, _, + tx, + )) => { + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + let candidate_bitfields = vec![CoreIndex(1)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); + + // send the candidate 1 assignment from peer_c + let assignment = IndirectAssignmentCertV2 { + block_hash: hash, + validator: validator_index, + cert: cert.cert, + }; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![( + assignment, + (1 as CandidateIndex).into(), + )]); + + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + _, _, + tx, + )) => { + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + // send an approval from peer_b + let approval = IndirectSignedApprovalVoteV2 { + block_hash: hash, + candidate_indices, + validator: validator_index, + signature: dummy_signature(), + }; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportApproval( + vote, + tx, + )) => { + assert_eq!(vote, approval); + tx.send(ApprovalCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + // setup peers with V2 protocol versions + setup_peer_with_view(overseer, &peer_a, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; + let mut expected_peers_assignments = vec![peer_a, peer_b]; + let mut expected_peers_approvals = vec![peer_a, peer_b]; + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert!(peers.len() == 1); + assert!(expected_peers_assignments.contains(peers.first().unwrap())); + expected_peers_assignments.retain(|peer| peer != peers.first().unwrap()); + assert_eq!(assignments.len(), 2); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + 
AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert!(peers.len() == 1); + assert!(expected_peers_approvals.contains(peers.first().unwrap())); + expected_peers_approvals.retain(|peer| peer != peers.first().unwrap()); + assert_eq!(approvals.len(), 1); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert!(peers.len() == 1); + assert!(expected_peers_assignments.contains(peers.first().unwrap())); + expected_peers_assignments.retain(|peer| peer != peers.first().unwrap()); + assert_eq!(assignments.len(), 2); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert!(peers.len() == 1); + assert!(expected_peers_approvals.contains(peers.first().unwrap())); + expected_peers_approvals.retain(|peer| peer != peers.first().unwrap()); + assert_eq!(approvals.len(), 1); + } + ); + + virtual_overseer + }); +} + #[test] fn import_approval_bad() { let peer_a = PeerId::random(); @@ -916,14 +1404,14 @@ fn import_approval_bad() { let cert = fake_assignment_cert(hash, validator_index); // send the an approval from peer_b, we don't have an assignment yet - let approval = IndirectSignedApprovalVote { + let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, - candidate_index, + candidate_indices: candidate_index.into(), validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_b, msg).await; expect_reputation_change(overseer, &peer_b, COST_UNEXPECTED_MESSAGE).await; @@ -948,8 +1436,8 @@ fn import_approval_bad() { expect_reputation_change(overseer, &peer_b, BENEFIT_VALID_MESSAGE_FIRST).await; // and try again - let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_b, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -1048,7 +1536,8 @@ fn update_peer_view() { let hash_b = Hash::repeat_byte(0xBB); let hash_c = Hash::repeat_byte(0xCC); let hash_d = Hash::repeat_byte(0xDD); - let peer_a = PeerId::random(); + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; let peer = &peer_a; let state = test_harness(State::default(), |mut virtual_overseer| async move { @@ -1082,6 +1571,9 @@ fn update_peer_view() { let msg = ApprovalDistributionMessage::NewBlocks(vec![meta_a, meta_b, meta_c]); overseer_send(overseer, msg).await; + // Setup a topology where peer_a is neigboor to current node. 
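A prologue that recurs in the fixtures above and below: test peers are no longer bare `PeerId::random()` values but come from `make_peers_and_authority_ids`, and a gossip topology is registered before anything is expected to be distributed. Condensed, and assuming the existing test helpers (the trailing argument of `make_gossip_topology` is read here as the local validator's slot in the topology, an inference from how these calls are written rather than something this diff states):

    // Peers have to be known authorities for topology-based routing to consider them.
    let peers = make_peers_and_authority_ids(8);
    let peer_a = peers.first().unwrap().0;

    // The two index slices pick this node's neighbours in the two gossip dimensions.
    setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await;

Without this setup the subsystem has no grid to route along, which appears to be why `update_peer_view` and the other reworked tests gained it.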
+ setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await; + let cert_a = fake_assignment_cert(hash_a, ValidatorIndex(0)); let cert_b = fake_assignment_cert(hash_b, ValidatorIndex(0)); @@ -1264,14 +1756,14 @@ fn import_remotely_then_locally() { assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); // send the approval remotely - let approval = IndirectSignedApprovalVote { + let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, - candidate_index, + candidate_indices: candidate_index.into(), validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, peer, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, peer, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -1295,7 +1787,8 @@ fn import_remotely_then_locally() { #[test] fn sends_assignments_even_when_state_is_approved() { - let peer_a = PeerId::random(); + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); let peer = &peer_a; @@ -1315,6 +1808,9 @@ fn sends_assignments_even_when_state_is_approved() { let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); overseer_send(overseer, msg).await; + // Setup a topology where peer_a is neigboor to current node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await; + let validator_index = ValidatorIndex(0); let candidate_index = 0u32; @@ -1336,8 +1832,11 @@ fn sends_assignments_even_when_state_is_approved() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; // connect the peer. setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; @@ -1380,7 +1879,8 @@ fn sends_assignments_even_when_state_is_approved() { /// assignemnts. #[test] fn sends_assignments_even_when_state_is_approved_v2() { - let peer_a = PeerId::random(); + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); let peer = &peer_a; @@ -1400,6 +1900,9 @@ fn sends_assignments_even_when_state_is_approved_v2() { let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); overseer_send(overseer, msg).await; + // Setup a topology where peer_a is neigboor to current node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await; + let validator_index = ValidatorIndex(0); let cores = vec![0, 1, 2, 3]; let candidate_bitfield: CandidateBitfield = cores.clone().try_into().unwrap(); @@ -1416,9 +1919,9 @@ fn sends_assignments_even_when_state_is_approved_v2() { // Assumes candidate index == core index. let approvals = cores .iter() - .map(|core| IndirectSignedApprovalVote { + .map(|core| IndirectSignedApprovalVoteV2 { block_hash: hash, - candidate_index: *core, + candidate_indices: (*core).into(), validator: validator_index, signature: dummy_signature(), }) @@ -1442,7 +1945,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { } // connect the peer. 
- setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V3).await; let assignments = vec![(cert.clone(), candidate_bitfield.clone())]; @@ -1450,8 +1953,8 @@ fn sends_assignments_even_when_state_is_approved_v2() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(sent_assignments) + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(sent_assignments) )) )) => { assert_eq!(peers, vec![*peer]); @@ -1463,14 +1966,14 @@ fn sends_assignments_even_when_state_is_approved_v2() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(sent_approvals) + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(sent_approvals) )) )) => { // Construct a hashmaps of approvals for comparison. Approval distribution reorders messages because they are kept in a // hashmap as well. - let sent_approvals = sent_approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); - let approvals = approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); + let sent_approvals = sent_approvals.into_iter().map(|approval| (approval.candidate_indices.clone(), approval)).collect::>(); + let approvals = approvals.into_iter().map(|approval| (approval.candidate_indices.clone(), approval)).collect::>(); assert_eq!(peers, vec![*peer]); assert_eq!(sent_approvals, approvals); @@ -1580,13 +2083,19 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology( + 1, + &peers, + &[0, 10, 20, 30, 40, 60, 70, 80], + &[50, 51, 52, 53, 54, 55, 56, 57], + 1, + ), ) .await; let expected_indices = [ // Both dimensions in the gossip topology - 0, 10, 20, 30, 50, 51, 52, 53, + 0, 10, 20, 30, 40, 60, 70, 80, 50, 51, 52, 53, 54, 55, 56, 57, ]; // new block `hash_a` with 1 candidates @@ -1623,8 +2132,11 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -1688,7 +2200,7 @@ fn propagates_assignments_along_unshared_dimension() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -1831,13 +2343,19 @@ fn propagates_to_required_after_connect() { // Set up a gossip topology. 
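In the `sends_assignments_even_when_state_is_approved_v2` hunk above, the comparison key switches from the scalar `candidate_index` to the whole `candidate_indices` bitfield. The idea behind it, sketched with a plain `std::collections::HashMap` since the collect target type is elided in this hunk:

    use std::collections::HashMap;

    // Approval-distribution keeps its state in hash maps and may reorder messages,
    // so both sides are normalised into maps keyed by the candidate bitfield
    // before being compared.
    let sent: HashMap<_, _> = sent_approvals
        .into_iter()
        .map(|approval| (approval.candidate_indices.clone(), approval))
        .collect();
    let expected: HashMap<_, _> = approvals
        .into_iter()
        .map(|approval| (approval.candidate_indices.clone(), approval))
        .collect();
    assert_eq!(sent, expected);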
setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology( + 1, + &peers, + &[0, 10, 20, 30, 40, 60, 70, 80], + &[50, 51, 52, 53, 54, 55, 56, 57], + 1, + ), ) .await; let expected_indices = [ // Both dimensions in the gossip topology, minus omitted. - 20, 30, 52, 53, + 20, 30, 40, 60, 70, 80, 52, 53, 54, 55, 56, 57, ]; // new block `hash_a` with 1 candidates @@ -1874,8 +2392,11 @@ fn propagates_to_required_after_connect() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -2002,53 +2523,21 @@ fn sends_to_more_peers_after_getting_topology() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; - let mut expected_indices = vec![0, 10, 20, 30, 50, 51, 52, 53]; - let assignment_sent_peers = assert_matches!( - overseer_recv(overseer).await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( - sent_peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Assignments(sent_assignments) - )) - )) => { - // Only sends to random peers. - assert_eq!(sent_peers.len(), 4); - for peer in &sent_peers { - let i = peers.iter().position(|p| peer == &p.0).unwrap(); - // Random gossip before topology can send to topology-targeted peers. - // Remove them from the expected indices so we don't expect - // them to get the messages again after the assignment. - expected_indices.retain(|&i2| i2 != i); - } - assert_eq!(sent_assignments, assignments); - sent_peers - } - ); - - assert_matches!( - overseer_recv(overseer).await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( - sent_peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(sent_approvals) - )) - )) => { - // Random sampling is reused from the assignment. - assert_eq!(sent_peers, assignment_sent_peers); - assert_eq!(sent_approvals, approvals); - } - ); + let expected_indices = vec![0, 10, 20, 30, 50, 51, 52, 53]; // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2151,7 +2640,7 @@ fn originator_aggression_l1() { // Set up a gossip topology. 
setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2164,8 +2653,11 @@ fn originator_aggression_l1() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -2307,7 +2799,7 @@ fn non_originator_aggression_l1() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2412,7 +2904,7 @@ fn non_originator_aggression_l2() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2558,7 +3050,7 @@ fn resends_messages_periodically() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2681,12 +3173,13 @@ fn resends_messages_periodically() { /// Tests that peers correctly receive versioned messages. #[test] fn import_versioned_approval() { - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); + let peers = make_peers_and_authority_ids(15); + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); - let state = state_without_reputation_delay(); let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2695,6 +3188,10 @@ fn import_versioned_approval() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V2).await; + // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // testing. 
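Another change repeated through these hunks: `ApprovalDistributionMessage::DistributeApproval` now carries the v2 vote type, so tests that still build a v1 `IndirectSignedApprovalVote` convert it at the call site. A one-call sketch of the pattern, assuming the `Into` conversion these call sites rely on:

    // The locally produced v1 vote is widened into the v2 form the subsystem now expects.
    overseer_send(
        overseer,
        ApprovalDistributionMessage::DistributeApproval(approval.clone().into()),
    )
    .await;

Keeping the vote construction in the v1 shape and converting only at the boundary leaves the older scenarios otherwise untouched while still exercising the new message type.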
+ setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { hash, @@ -2762,7 +3259,7 @@ fn import_versioned_approval() { vote, tx, )) => { - assert_eq!(vote, approval); + assert_eq!(vote, approval.into()); tx.send(ApprovalCheckResult::Accepted).unwrap(); } ); @@ -2782,6 +3279,7 @@ fn import_versioned_approval() { assert_eq!(approvals.len(), 1); } ); + assert_matches!( overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( @@ -2821,9 +3319,9 @@ fn batch_test_round(message_count: usize) { .collect(); let approvals: Vec<_> = validators - .map(|index| IndirectSignedApprovalVote { + .map(|index| IndirectSignedApprovalVoteV2 { block_hash: Hash::zero(), - candidate_index: 0, + candidate_indices: 0u32.into(), validator: ValidatorIndex(index as u32), signature: dummy_signature(), }) @@ -2890,7 +3388,7 @@ fn batch_test_round(message_count: usize) { assert_eq!(peers.len(), 1); for (message_index, approval) in sent_approvals.iter().enumerate() { - assert_eq!(approval, &approvals[approval_index + message_index]); + assert_eq!(approval, &approvals[approval_index + message_index].clone().try_into().unwrap()); } } ); diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 91ed1026e41e8e2abed16489edbe1cd15258adce..0d52c013a33c3f22e6a8e82b75ec9e6331f9e28d 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs index 12a97a1fb5a18330be6d94c43ef6a59b14772b40..6f9ef9f6a9f83ee9b6d1e50f5d1c61145b6ce0a1 100644 --- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs @@ -146,7 +146,9 @@ mod tests { AllMessages, AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers as test_helpers; - use polkadot_primitives::{CandidateHash, ExecutorParams, Hash, ValidatorIndex}; + use polkadot_primitives::{ + vstaging::NodeFeatures, CandidateHash, ExecutorParams, Hash, ValidatorIndex, + }; use test_helpers::mock::make_ferdie_keystore; use super::*; @@ -214,6 +216,12 @@ mod tests { )) => { tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(_, si_tx), + )) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests( mut reqs, _, diff --git a/polkadot/node/network/availability-distribution/src/requester/tests.rs b/polkadot/node/network/availability-distribution/src/requester/tests.rs index c4252b4e439e8a58a5ed54e2039999fb71dfde60..2f5d900b037e322e92f0e203fbb8f2a1fcf43492 100644 --- a/polkadot/node/network/availability-distribution/src/requester/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/tests.rs @@ -25,8 +25,8 @@ use polkadot_node_primitives::{BlockData, ErasureChunk, PoV}; use 
polkadot_node_subsystem_test_helpers::mock::new_leaf; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, ScheduledCore, - SessionIndex, SessionInfo, + vstaging::NodeFeatures, BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, + ScheduledCore, SessionIndex, SessionInfo, }; use sp_core::traits::SpawnNamed; @@ -125,6 +125,10 @@ fn spawn_virtual_overseer( tx.send(Ok(Some(ExecutorParams::default()))) .expect("Receiver should be alive."); }, + RuntimeApiRequest::NodeFeatures(_, tx) => { + tx.send(Ok(NodeFeatures::EMPTY)) + .expect("Receiver should be alive."); + }, RuntimeApiRequest::AvailabilityCores(tx) => { let para_id = ParaId::from(1_u32); let maybe_block_position = diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 101d917c0db5bbd572b86647a03c4b7b2bedf969..e95c1c3a27c2fb01e2a90b075caf6dd67f531e6d 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -46,8 +46,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, ScheduledCore, - SessionInfo, ValidatorIndex, + vstaging::NodeFeatures, CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, + Id as ParaId, ScheduledCore, SessionInfo, ValidatorIndex, }; use test_helpers::mock::{make_ferdie_keystore, new_leaf}; @@ -264,6 +264,9 @@ impl TestState { tx.send(Ok(Some(ExecutorParams::default()))) .expect("Receiver should be alive."); }, + RuntimeApiRequest::NodeFeatures(_, si_tx) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).expect("Receiver should be alive."); + }, RuntimeApiRequest::AvailabilityCores(tx) => { gum::trace!(target: LOG_TARGET, cores= ?self.cores[&hash], hash = ?hash, "Sending out cores for hash"); tx.send(Ok(self.cores[&hash].clone())) diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 6048e6323cb45e4ef82256a94414e04bfda3c263..ec1cf475302b353a85021d637ff319dba61bb733 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -6,13 +6,17 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" +tokio = "1.24.2" schnellru = "0.2.1" rand = "0.8.5" fatality = "0.0.6" thiserror = "1.0.48" -async-trait = "0.1.73" +async-trait = "0.1.74" gum = { package = "tracing-gum", path = "../../gum" } polkadot-erasure-coding = { path = "../../../erasure-coding" } @@ -37,3 +41,6 @@ sc-network = { path = "../../../../substrate/client/network" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } + +[features] +subsystem-benchmarks = [] diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index 9acc48ea92e04347402edcadf5bb2479e1c2ab3f..fb8064878f4f6c02236afbede9f8e91b8f2dd594 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -65,7 +65,7 @@ mod error; mod futures_undead; mod metrics; mod task; 
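The availability-distribution test harnesses patched above all gain the same new arm: session setup now queries the runtime for `NodeFeatures`, and the mocks answer with an empty feature set. The shape of that arm, lifted from the hunks above and shown as a fragment of a mock's request match:

    match request {
        // ...the arms for the other runtime-API requests stay as they were...
        RuntimeApiRequest::NodeFeatures(_session_index, tx) => {
            tx.send(Ok(NodeFeatures::EMPTY)).expect("Receiver should be alive.");
        },
        // ...
    }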
-use metrics::Metrics; +pub use metrics::Metrics; #[cfg(test)] mod tests; @@ -105,6 +105,17 @@ pub struct AvailabilityRecoverySubsystem { req_receiver: IncomingRequestReceiver, /// Metrics for this subsystem. metrics: Metrics, + /// The type of check to perform after available data was recovered. + post_recovery_check: PostRecoveryCheck, +} + +#[derive(Clone, PartialEq, Debug)] +/// The type of check to perform after available data was recovered. +pub enum PostRecoveryCheck { + /// Reencode the data and check erasure root. For validators. + Reencode, + /// Only check the pov hash. For collators only. + PovHash, } /// Expensive erasure coding computations that we want to run on a blocking thread. @@ -344,6 +355,7 @@ async fn launch_recovery_task( metrics: &Metrics, recovery_strategies: VecDeque::Sender>>>, bypass_availability_store: bool, + post_recovery_check: PostRecoveryCheck, ) -> error::Result<()> { let candidate_hash = receipt.hash(); let params = RecoveryParams { @@ -354,6 +366,8 @@ async fn launch_recovery_task( erasure_root: receipt.descriptor.erasure_root, metrics: metrics.clone(), bypass_availability_store, + post_recovery_check, + pov_hash: receipt.descriptor.pov_hash, }; let recovery_task = RecoveryTask::new(ctx.sender().clone(), params, recovery_strategies); @@ -390,6 +404,7 @@ async fn handle_recover( erasure_task_tx: futures::channel::mpsc::Sender, recovery_strategy_kind: RecoveryStrategyKind, bypass_availability_store: bool, + post_recovery_check: PostRecoveryCheck, ) -> error::Result<()> { let candidate_hash = receipt.hash(); @@ -486,6 +501,7 @@ async fn handle_recover( metrics, recovery_strategies, bypass_availability_store, + post_recovery_check, ) .await }, @@ -527,15 +543,17 @@ async fn query_chunk_size( #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] impl AvailabilityRecoverySubsystem { - /// Create a new instance of `AvailabilityRecoverySubsystem` which never requests the - /// `AvailabilityStoreSubsystem` subsystem. - pub fn with_availability_store_skip( + /// Create a new instance of `AvailabilityRecoverySubsystem` suitable for collator nodes, + /// which never requests the `AvailabilityStoreSubsystem` subsystem and only checks the POV hash + /// instead of reencoding the available data. + pub fn for_collator( req_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> Self { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), bypass_availability_store: true, + post_recovery_check: PostRecoveryCheck::PovHash, req_receiver, metrics, } @@ -550,6 +568,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstAlways, bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -563,6 +582,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::ChunksAlways, bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -577,15 +597,22 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } } - async fn run(self, mut ctx: Context) -> SubsystemResult<()> { + /// Starts the inner subsystem loop. 
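Taken together, the additions above give availability-recovery two post-recovery modes: validators re-encode the data and check the erasure root, while collators only check the PoV hash and bypass the availability store entirely. A hypothetical wiring sketch for the collator side, where `req_receiver` stands in for the incoming chunk-request receiver and `Metrics::default()` is assumed to be available:

    // Collator-side construction: selects PostRecoveryCheck::PovHash and never
    // touches the availability store.
    let recovery_subsystem =
        AvailabilityRecoverySubsystem::for_collator(req_receiver, Metrics::default());

    // `run` is public now, so a benchmark or an embedder could drive the loop directly:
    // recovery_subsystem.run(ctx).await?;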
+ pub async fn run(self, mut ctx: Context) -> SubsystemResult<()> { let mut state = State::default(); - let Self { mut req_receiver, metrics, recovery_strategy_kind, bypass_availability_store } = - self; + let Self { + mut req_receiver, + metrics, + recovery_strategy_kind, + bypass_availability_store, + post_recovery_check, + } = self; let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16); let mut erasure_task_rx = erasure_task_rx.fuse(); @@ -655,6 +682,7 @@ impl AvailabilityRecoverySubsystem { &mut state, signal, ).await? { + gum::debug!(target: LOG_TARGET, "subsystem concluded"); return Ok(()); } FromOrchestra::Communication { msg } => { @@ -675,7 +703,8 @@ impl AvailabilityRecoverySubsystem { &metrics, erasure_task_tx.clone(), recovery_strategy_kind.clone(), - bypass_availability_store + bypass_availability_store, + post_recovery_check.clone() ).await { gum::warn!( target: LOG_TARGET, @@ -818,12 +847,17 @@ async fn erasure_task_thread( let _ = sender.send(maybe_data); }, None => { - gum::debug!( + gum::trace!( target: LOG_TARGET, "Erasure task channel closed. Node shutting down ?", ); break }, } + + // In benchmarks this is a very hot loop not yielding at all. + // To update CPU metrics for the task we need to yield. + #[cfg(feature = "subsystem-benchmarks")] + tokio::task::yield_now().await; } } diff --git a/polkadot/node/network/availability-recovery/src/metrics.rs b/polkadot/node/network/availability-recovery/src/metrics.rs index aa7216739507668c758f3c335e00da2772311789..d82a8f9ae5faf662e05c7a8dcaf731e17756a636 100644 --- a/polkadot/node/network/availability-recovery/src/metrics.rs +++ b/polkadot/node/network/availability-recovery/src/metrics.rs @@ -29,7 +29,10 @@ struct MetricsInner { /// /// Gets incremented on each sent chunk requests. chunk_requests_issued: Counter, - + /// Total number of bytes recovered + /// + /// Gets incremented on each succesful recovery + recovered_bytes_total: Counter, /// A counter for finished chunk requests. /// /// Split by result: @@ -133,9 +136,10 @@ impl Metrics { } /// A full recovery succeeded. 
- pub fn on_recovery_succeeded(&self) { + pub fn on_recovery_succeeded(&self, bytes: usize) { if let Some(metrics) = &self.0 { - metrics.full_recoveries_finished.with_label_values(&["success"]).inc() + metrics.full_recoveries_finished.with_label_values(&["success"]).inc(); + metrics.recovered_bytes_total.inc_by(bytes as u64) } } @@ -171,6 +175,13 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + recovered_bytes_total: prometheus::register( + Counter::new( + "polkadot_parachain_availability_recovery_bytes_total", + "Total number of bytes recovered", + )?, + registry, + )?, chunk_requests_finished: prometheus::register( CounterVec::new( Opts::new( diff --git a/polkadot/node/network/availability-recovery/src/task.rs b/polkadot/node/network/availability-recovery/src/task.rs index d5bc2da84944a3403f9d4a3bf9d5a11b0e772f40..c300c221da5c6da8f40e8a6db3dede59ba207a58 100644 --- a/polkadot/node/network/availability-recovery/src/task.rs +++ b/polkadot/node/network/availability-recovery/src/task.rs @@ -20,9 +20,10 @@ use crate::{ futures_undead::FuturesUndead, is_chunk_valid, is_unavailable, metrics::Metrics, ErasureTask, - LOG_TARGET, + PostRecoveryCheck, LOG_TARGET, }; use futures::{channel::oneshot, SinkExt}; +use parity_scale_codec::Encode; #[cfg(not(test))] use polkadot_node_network_protocol::request_response::CHUNK_REQUEST_TIMEOUT; use polkadot_node_network_protocol::request_response::{ @@ -95,6 +96,12 @@ pub struct RecoveryParams { /// Do not request data from availability-store. Useful for collators. pub bypass_availability_store: bool, + + /// The type of check to perform after available data was recovered. + pub post_recovery_check: PostRecoveryCheck, + + /// The blake2-256 hash of the PoV. + pub pov_hash: Hash, } /// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the @@ -426,7 +433,7 @@ where return Err(err) }, Ok(data) => { - self.params.metrics.on_recovery_succeeded(); + self.params.metrics.on_recovery_succeeded(data.encoded_size()); return Ok(data) }, } @@ -501,39 +508,48 @@ impl RecoveryStrategy match response.await { Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.params - .erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - "Received full data", - ); + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + let (reencode_tx, reencode_rx) = oneshot::channel(); + self.params + .erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)? 
+ }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data), + }; - return Ok(data) - } else { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - ?validator_index, - "Invalid data response", - ); + match maybe_data { + Some(data) => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + "Received full data", + ); - // it doesn't help to report the peer with req/res. - } + return Ok(data) + }, + None => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + "Invalid data response", + ); + + // it doesn't help to report the peer with req/res. + // we'll try the next backer. + }, + }; }, Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}, Err(e) => gum::debug!( @@ -647,22 +663,43 @@ impl FetchChunks { match available_data_response { Ok(data) => { - // Send request to re-encode the chunks and check merkle root. - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + // Send request to re-encode the chunks and check merkle root. + let (reencode_tx, reencode_rx) = oneshot::channel(); + self.erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?.or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + "Data recovery error - root mismatch", + ); + None + }) + }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data).or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + pov_hash = ?common_params.pov_hash, + "Data recovery error - PoV hash mismatch", + ); + None + }), + }; + + if let Some(data) = maybe_data { gum::trace!( target: LOG_TARGET, candidate_hash = ?common_params.candidate_hash, @@ -673,12 +710,6 @@ impl FetchChunks { Ok(data) } else { recovery_duration.map(|rd| rd.stop_and_discard()); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - "Data recovery error - root mismatch", - ); Err(RecoveryError::Invalid) } diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs index 63ccf0e94f91ebad2a8cba3158d6db24159683c0..1cb52757bac92da93bebfcc72e682a9e2b2027df 100644 --- a/polkadot/node/network/availability-recovery/src/tests.rs +++ b/polkadot/node/network/availability-recovery/src/tests.rs @@ -24,12 +24,12 @@ use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ self as req_res, IncomingRequest, Recipient, ReqProtocolNames, Requests, }; +use polkadot_node_subsystem_test_helpers::derive_erasure_chunks_with_proofs_and_root; use super::*; use sc_network::{config::RequestResponseConfig, IfDisconnected, OutboundFailure, RequestFailure}; -use 
polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; use polkadot_node_primitives::{BlockData, PoV, Proof}; use polkadot_node_subsystem::messages::{ AllMessages, NetworkBridgeTxMessage, RuntimeApiMessage, RuntimeApiRequest, @@ -456,33 +456,6 @@ fn validator_authority_id(val_ids: &[Sr25519Keyring]) -> Vec), -) -> (Vec, Hash) { - let mut chunks: Vec> = obtain_chunks(n_validators, available_data).unwrap(); - - for (i, chunk) in chunks.iter_mut().enumerate() { - alter_chunk(i, chunk) - } - - // create proofs for each erasure chunk - let branches = branches(chunks.as_ref()); - - let root = branches.root(); - let erasure_chunks = branches - .enumerate() - .map(|(index, (proof, chunk))| ErasureChunk { - chunk: chunk.to_vec(), - index: ValidatorIndex(index as _), - proof: Proof::try_from(proof).unwrap(), - }) - .collect::>(); - - (erasure_chunks, root) -} - impl Default for TestState { fn default() -> Self { let validators = vec![ diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 0e61e9cf6209a4e99d2e51d05a0315b4128aff25..5c5bd875a96f82e1aee3d08ad5df73d87913c650 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] always-assert = "0.1" futures = "0.3.21" diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs index 9cc79aee8490705893a5065cb1fbde6124e4f61c..76baf499cad7a63263a7bbd42968e44328c29387 100644 --- a/polkadot/node/network/bitfield-distribution/src/lib.rs +++ b/polkadot/node/network/bitfield-distribution/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_node_network_protocol::{ GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, peer_set::{ProtocolVersion, ValidationVersion}, - v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, OurView, PeerId, + v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_subsystem::{ @@ -102,8 +102,8 @@ impl BitfieldGossipMessage { self.relay_parent, self.signed_availability.into(), )), - Some(ValidationVersion::VStaging) => - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Some(ValidationVersion::V3) => + Versioned::V3(protocol_v3::BitfieldDistributionMessage::Bitfield( self.relay_parent, self.signed_availability.into(), )), @@ -503,8 +503,8 @@ async fn relay_message( let v2_interested_peers = filter_by_peer_version(&interested_peers, ValidationVersion::V2.into()); - let vstaging_interested_peers = - filter_by_peer_version(&interested_peers, ValidationVersion::VStaging.into()); + let v3_interested_peers = + filter_by_peer_version(&interested_peers, ValidationVersion::V3.into()); if !v1_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( @@ -522,10 +522,10 @@ async fn relay_message( .await } - if !vstaging_interested_peers.is_empty() { + if !v3_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_interested_peers, - message.into_validation_protocol(ValidationVersion::VStaging.into()), + v3_interested_peers, + message.into_validation_protocol(ValidationVersion::V3.into()), )) .await } @@ -551,7 +551,7 @@ async fn 
process_incoming_peer_message( relay_parent, bitfield, )) | - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Versioned::V3(protocol_v3::BitfieldDistributionMessage::Bitfield( relay_parent, bitfield, )) => (relay_parent, bitfield), diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index 6ae765c252f2d53b80da52c7509e725b2a9d19a8..a2a4735d7a19f6726c0b5795a593412a043b4849 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -6,9 +6,12 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] always-assert = "0.1" -async-trait = "0.1.57" +async-trait = "0.1.74" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs index 46d4a00faace6344cbfcfd16ea702f1dcf3ee907..ddce99d5c2a8a721164e73ea61ed989b78cdfbdc 100644 --- a/polkadot/node/network/bridge/src/lib.rs +++ b/polkadot/node/network/bridge/src/lib.rs @@ -83,6 +83,7 @@ pub(crate) enum WireMessage { ViewUpdate(View), } +#[derive(Debug)] pub(crate) struct PeerData { /// The Latest view sent by the peer. view: View, diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs index c264c94cc19bf6f0d6973b418671b4b13c0bf2ac..2fcf5cec489da07e034baa90f145d63f1c3af310 100644 --- a/polkadot/node/network/bridge/src/network.rs +++ b/polkadot/node/network/bridge/src/network.rs @@ -14,25 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::{collections::HashSet, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use async_trait::async_trait; -use futures::{prelude::*, stream::BoxStream}; +use parking_lot::Mutex; use parity_scale_codec::Encode; use sc_network::{ - config::parse_addr, multiaddr::Multiaddr, types::ProtocolName, Event as NetworkEvent, - IfDisconnected, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkRequest, - NetworkService, OutboundFailure, ReputationChange, RequestFailure, + config::parse_addr, multiaddr::Multiaddr, types::ProtocolName, IfDisconnected, MessageSink, + NetworkPeers, NetworkRequest, NetworkService, OutboundFailure, ReputationChange, + RequestFailure, }; use polkadot_node_network_protocol::{ - peer_set::{ - CollationVersion, PeerSet, PeerSetProtocolNames, ProtocolVersion, ValidationVersion, - }, + peer_set::{CollationVersion, PeerSet, ProtocolVersion, ValidationVersion}, request_response::{OutgoingRequest, Recipient, ReqProtocolNames, Requests}, - v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, PeerId, + v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, PeerId, }; use polkadot_primitives::{AuthorityDiscoveryId, Block, Hash}; @@ -44,104 +45,94 @@ const LOG_TARGET: &'static str = "parachain::network-bridge-net"; // Helper function to send a validation v1 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. 
pub(crate) fn send_validation_message_v1( - net: &mut impl Network, peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",); send_message( - net, peers, PeerSet::Validation, ValidationVersion::V1.into(), - peerset_protocol_names, message, metrics, + notification_sinks, ); } -// Helper function to send a validation vstaging message to a list of peers. +// Helper function to send a validation v3 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. -pub(crate) fn send_validation_message_vstaging( - net: &mut impl Network, +pub(crate) fn send_validation_message_v3( peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { - gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation vstaging message to peers",); + gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v3 message to peers",); send_message( - net, peers, PeerSet::Validation, - ValidationVersion::VStaging.into(), - peerset_protocol_names, + ValidationVersion::V3.into(), message, metrics, + notification_sinks, ); } // Helper function to send a validation v2 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. pub(crate) fn send_validation_message_v2( - net: &mut impl Network, peers: Vec, - protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { send_message( - net, peers, PeerSet::Validation, ValidationVersion::V2.into(), - protocol_names, message, metrics, + notification_sinks, ); } // Helper function to send a collation v1 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. pub(crate) fn send_collation_message_v1( - net: &mut impl Network, peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { send_message( - net, peers, PeerSet::Collation, CollationVersion::V1.into(), - peerset_protocol_names, message, metrics, + notification_sinks, ); } // Helper function to send a collation v2 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. pub(crate) fn send_collation_message_v2( - net: &mut impl Network, peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { send_message( - net, peers, PeerSet::Collation, CollationVersion::V2.into(), - peerset_protocol_names, message, metrics, + notification_sinks, ); } @@ -151,19 +142,19 @@ pub(crate) fn send_collation_message_v2( /// messages that are compatible with the passed peer set, as that is currently not enforced by /// this function. These are messages of type `WireMessage` parameterized on the matching type. 
fn send_message( - net: &mut impl Network, mut peers: Vec, peer_set: PeerSet, version: ProtocolVersion, - protocol_names: &PeerSetProtocolNames, message: M, metrics: &super::Metrics, + network_notification_sinks: &Arc>>>, ) where M: Encode + Clone, { if peers.is_empty() { return } + let message = { let encoded = message.encode(); metrics.on_notification_sent(peer_set, version, encoded.len(), peers.len()); @@ -171,13 +162,13 @@ fn send_message( encoded }; - // optimization: generate the protocol name once. - let protocol_name = protocol_names.get_name(peer_set, version); + let notification_sinks = network_notification_sinks.lock(); + gum::trace!( target: LOG_TARGET, ?peers, + ?peer_set, ?version, - ?protocol_name, ?message, "Sending message to peers", ); @@ -185,29 +176,26 @@ fn send_message( // optimization: avoid cloning the message for the last peer in the // list. The message payload can be quite large. If the underlying // network used `Bytes` this would not be necessary. + // + // peer may have gotten disconnect by the time `send_message()` is called + // at which point the the sink is not available. let last_peer = peers.pop(); - - // We always send messages on the "main" name even when a negotiated - // fallback is used. The libp2p implementation handles the fallback - // under the hood. - let protocol_name = protocol_names.get_main_name(peer_set); peers.into_iter().for_each(|peer| { - net.write_notification(peer, protocol_name.clone(), message.clone()); + if let Some(sink) = notification_sinks.get(&(peer_set, peer)) { + sink.send_sync_notification(message.clone()); + } }); + if let Some(peer) = last_peer { - net.write_notification(peer, protocol_name, message); + if let Some(sink) = notification_sinks.get(&(peer_set, peer)) { + sink.send_sync_notification(message.clone()); + } } } /// An abstraction over networking for the purposes of this subsystem. #[async_trait] pub trait Network: Clone + Send + 'static { - /// Get a stream of all events occurring on the network. This may include events unrelated - /// to the Polkadot protocol - the user of this function should filter only for events related - /// to the [`VALIDATION_PROTOCOL_NAME`](VALIDATION_PROTOCOL_NAME) - /// or [`COLLATION_PROTOCOL_NAME`](COLLATION_PROTOCOL_NAME) - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent>; - /// Ask the network to keep a substream open with these nodes and not disconnect from them /// until removed from the protocol's peer set. /// Note that `out_peers` setting has no effect on this. @@ -239,16 +227,12 @@ pub trait Network: Clone + Send + 'static { /// Disconnect a given peer from the protocol specified without harming reputation. fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); - /// Write a notification to a peer on the given protocol. - fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec); + /// Get peer role. 
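The rewritten `send_message` above no longer goes through the network service at all; it encodes the payload once and pushes it synchronously into whichever notification sinks are still registered for the target peers. Reduced to its core, assuming the `(PeerSet, PeerId)`-keyed map guarded by a `parking_lot::Mutex` that the surrounding code introduces:

    // Encode once, then fan out through the registered sinks. A peer that has
    // disconnected in the meantime simply has no sink and is skipped.
    let payload = message.encode();
    let sinks = notification_sinks.lock();
    for peer in peers {
        if let Some(sink) = sinks.get(&(peer_set, peer)) {
            sink.send_sync_notification(payload.clone());
        }
    }

The actual implementation additionally pops the last peer off the list to avoid one clone of the payload, as the comment in the hunk notes.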
+ fn peer_role(&self, who: PeerId, handshake: Vec) -> Option; } #[async_trait] impl Network for Arc> { - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { - NetworkService::event_stream(self, "polkadot-network-bridge").boxed() - } - async fn set_reserved_peers( &mut self, protocol: ProtocolName, @@ -273,10 +257,6 @@ impl Network for Arc> { NetworkService::disconnect_peer(&**self, who, protocol); } - fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - NetworkService::write_notification(&**self, who, protocol, message); - } - async fn start_request( &self, authority_discovery: &mut AD, @@ -348,6 +328,10 @@ impl Network for Arc> { if_disconnected, ); } + + fn peer_role(&self, who: PeerId, handshake: Vec) -> Option { + NetworkService::peer_role(self, who, handshake) + } } /// We assume one `peer_id` per `authority_id`. diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 06be57ead0060749227c01f18eb142c9db5375b2..11ac73259e3a178f418f4a6aa316e337acb31322 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -20,11 +20,14 @@ use super::*; use always_assert::never; use bytes::Bytes; -use futures::stream::{BoxStream, StreamExt}; use net_protocol::filter_by_peer_version; use parity_scale_codec::{Decode, DecodeAll}; +use parking_lot::Mutex; -use sc_network::Event as NetworkEvent; +use sc_network::{ + service::traits::{NotificationEvent, ValidationResult}, + MessageSink, NotificationService, +}; use sp_consensus::SyncOracle; use polkadot_node_network_protocol::{ @@ -34,8 +37,8 @@ use polkadot_node_network_protocol::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, }, - v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, ObservedRole, OurView, - PeerId, UnifiedReputationChange as Rep, View, + v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, ObservedRole, OurView, PeerId, + UnifiedReputationChange as Rep, View, }; use polkadot_node_subsystem::{ @@ -50,11 +53,6 @@ use polkadot_node_subsystem::{ use polkadot_primitives::{AuthorityDiscoveryId, BlockNumber, Hash, ValidatorIndex}; -/// Peer set info for network initialization. -/// -/// To be passed to [`FullNetworkConfiguration::add_notification_protocol`](). -pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority}; - use std::{ collections::{hash_map, HashMap}, iter::ExactSizeIterator, @@ -67,7 +65,7 @@ use super::validator_discovery; /// Defines the `Network` trait with an implementation for an `Arc`. 
use crate::network::{ send_collation_message_v1, send_collation_message_v2, send_validation_message_v1, - send_validation_message_v2, send_validation_message_vstaging, Network, + send_validation_message_v2, send_validation_message_v3, Network, }; use crate::{network::get_peer_id_by_authority_id, WireMessage}; @@ -88,6 +86,9 @@ pub struct NetworkBridgeRx { shared: Shared, metrics: Metrics, peerset_protocol_names: PeerSetProtocolNames, + validation_service: Box, + collation_service: Box, + notification_sinks: Arc>>>, } impl NetworkBridgeRx { @@ -102,8 +103,18 @@ impl NetworkBridgeRx { sync_oracle: Box, metrics: Metrics, peerset_protocol_names: PeerSetProtocolNames, + mut notification_services: HashMap>, + notification_sinks: Arc>>>, ) -> Self { let shared = Shared::default(); + + let validation_service = notification_services + .remove(&PeerSet::Validation) + .expect("validation protocol was enabled so `NotificationService` must exist; qed"); + let collation_service = notification_services + .remove(&PeerSet::Collation) + .expect("collation protocol was enabled so `NotificationService` must exist; qed"); + Self { network_service, authority_discovery_service, @@ -111,6 +122,9 @@ impl NetworkBridgeRx { shared, metrics, peerset_protocol_names, + validation_service, + collation_service, + notification_sinks, } } } @@ -121,444 +135,562 @@ where Net: Network + Sync, AD: validator_discovery::AuthorityDiscovery + Clone + Sync, { - fn start(mut self, ctx: Context) -> SpawnedSubsystem { - // The stream of networking events has to be created at initialization, otherwise the - // networking might open connections before the stream of events has been grabbed. - let network_stream = self.network_service.event_stream(); - + fn start(self, ctx: Context) -> SpawnedSubsystem { // Swallow error because failure is fatal to the node and we log with more precision // within `run_network`. - let future = run_network_in(self, ctx, network_stream) + let future = run_network_in(self, ctx) .map_err(|e| SubsystemError::with_origin("network-bridge", e)) .boxed(); SpawnedSubsystem { name: "network-bridge-rx-subsystem", future } } } -async fn handle_network_messages( - mut sender: impl overseer::NetworkBridgeRxSenderTrait, - mut network_service: impl Network, - network_stream: BoxStream<'static, NetworkEvent>, - mut authority_discovery_service: AD, - metrics: Metrics, - shared: Shared, - peerset_protocol_names: PeerSetProtocolNames, -) -> Result<(), Error> -where +/// Handle notification event received over the validation protocol. 
+async fn handle_validation_message( + event: NotificationEvent, + network_service: &mut impl Network, + sender: &mut impl overseer::NetworkBridgeRxSenderTrait, + authority_discovery_service: &mut AD, + metrics: &Metrics, + shared: &Shared, + peerset_protocol_names: &PeerSetProtocolNames, + notification_service: &mut Box, + notification_sinks: &mut Arc>>>, +) where AD: validator_discovery::AuthorityDiscovery + Send, { - let mut network_stream = network_stream.fuse(); - loop { - match network_stream.next().await { - None => return Err(Error::EventStreamConcluded), - Some(NetworkEvent::Dht(_)) => {}, - Some(NetworkEvent::NotificationStreamOpened { - remote: peer, - protocol, - role, - negotiated_fallback, - received_handshake: _, - }) => { - let role = ObservedRole::from(role); - let (peer_set, version) = { - let (peer_set, version) = - match peerset_protocol_names.try_get_protocol(&protocol) { - None => continue, - Some(p) => p, - }; - - if let Some(fallback) = negotiated_fallback { - match peerset_protocol_names.try_get_protocol(&fallback) { - None => { + match event { + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => { + // only accept peers whose role can be determined + let result = network_service + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); + }, + NotificationEvent::NotificationStreamOpened { + peer, + handshake, + negotiated_fallback, + .. + } => { + let role = match network_service.peer_role(peer, handshake) { + Some(role) => ObservedRole::from(role), + None => { + gum::debug!( + target: LOG_TARGET, + ?peer, + "Failed to determine peer role for validation protocol", + ); + return + }, + }; + + let (peer_set, version) = { + let (peer_set, version) = + (PeerSet::Validation, PeerSet::Validation.get_main_version()); + + if let Some(fallback) = negotiated_fallback { + match peerset_protocol_names.try_get_protocol(&fallback) { + None => { + gum::debug!( + target: LOG_TARGET, + fallback = &*fallback, + ?peer, + peerset = ?peer_set, + "Unknown fallback", + ); + + return + }, + Some((p2, v2)) => { + if p2 != peer_set { gum::debug!( target: LOG_TARGET, fallback = &*fallback, - ?peer, - ?peer_set, - "Unknown fallback", + fallback_peerset = ?p2, + peerset = ?peer_set, + "Fallback mismatched peer-set", ); - continue - }, - Some((p2, v2)) => { - if p2 != peer_set { - gum::debug!( - target: LOG_TARGET, - fallback = &*fallback, - fallback_peerset = ?p2, - protocol = &*protocol, - peerset = ?peer_set, - "Fallback mismatched peer-set", - ); - - continue - } - - (p2, v2) - }, - } - } else { - (peer_set, version) - } - }; - - gum::debug!( - target: LOG_TARGET, - action = "PeerConnected", - peer_set = ?peer_set, - version = %version, - peer = ?peer, - role = ?role - ); - - let local_view = { - let mut shared = shared.0.lock(); - let peer_map = match peer_set { - PeerSet::Validation => &mut shared.validation_peers, - PeerSet::Collation => &mut shared.collation_peers, - }; + return + } - match peer_map.entry(peer) { - hash_map::Entry::Occupied(_) => continue, - hash_map::Entry::Vacant(vacant) => { - vacant.insert(PeerData { view: View::default(), version }); + (p2, v2) }, } + } else { + (peer_set, version) + } + }; + // store the notification sink to `notification_sinks` so both `NetworkBridgeRx` + // and `NetworkBridgeTx` can send messages to the peer. 
+ match notification_service.message_sink(&peer) { + Some(sink) => { + notification_sinks.lock().insert((peer_set, peer), sink); + }, + None => { + gum::warn!( + target: LOG_TARGET, + peerset = ?peer_set, + version = %version, + ?peer, + ?role, + "Message sink not available for peer", + ); + return + }, + } + + gum::debug!( + target: LOG_TARGET, + action = "PeerConnected", + peer_set = ?peer_set, + version = %version, + peer = ?peer, + role = ?role + ); + + let local_view = { + let mut shared = shared.0.lock(); + let peer_map = &mut shared.validation_peers; + + match peer_map.entry(peer) { + hash_map::Entry::Occupied(_) => return, + hash_map::Entry::Vacant(vacant) => { + vacant.insert(PeerData { view: View::default(), version }); + }, + } - metrics.on_peer_connected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); + metrics.on_peer_connected(peer_set, version); + metrics.note_peer_count(peer_set, version, peer_map.len()); - shared.local_view.clone().unwrap_or(View::default()) - }; + shared.local_view.clone().unwrap_or(View::default()) + }; - let maybe_authority = - authority_discovery_service.get_authority_ids_by_peer_id(peer).await; - - match peer_set { - PeerSet::Validation => { - dispatch_validation_events_to_all( - vec![ - NetworkBridgeEvent::PeerConnected( - peer, - role, - version, - maybe_authority, - ), - NetworkBridgeEvent::PeerViewChange(peer, View::default()), - ], - &mut sender, - &metrics, - ) - .await; + let maybe_authority = + authority_discovery_service.get_authority_ids_by_peer_id(peer).await; - match ValidationVersion::try_from(version) - .expect("try_get_protocol has already checked version is known; qed") - { - ValidationVersion::V1 => send_validation_message_v1( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - ValidationVersion::VStaging => send_validation_message_vstaging( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - ValidationVersion::V2 => send_validation_message_v2( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - } - }, - PeerSet::Collation => { - dispatch_collation_events_to_all( - vec![ - NetworkBridgeEvent::PeerConnected( - peer, - role, - version, - maybe_authority, - ), - NetworkBridgeEvent::PeerViewChange(peer, View::default()), - ], - &mut sender, - ) - .await; + dispatch_validation_events_to_all( + vec![ + NetworkBridgeEvent::PeerConnected(peer, role, version, maybe_authority), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), + ], + sender, + &metrics, + ) + .await; - match CollationVersion::try_from(version) - .expect("try_get_protocol has already checked version is known; qed") - { - CollationVersion::V1 => send_collation_message_v1( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - CollationVersion::V2 => send_collation_message_v2( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - } - }, + match ValidationVersion::try_from(version) + .expect("try_get_protocol has already checked version is known; qed") + { + ValidationVersion::V1 => send_validation_message_v1( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + ValidationVersion::V3 => 
send_validation_message_v3( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + ValidationVersion::V2 => send_validation_message_v2( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + } + }, + NotificationEvent::NotificationStreamClosed { peer } => { + let (peer_set, version) = (PeerSet::Validation, PeerSet::Validation.get_main_version()); + + gum::debug!( + target: LOG_TARGET, + action = "PeerDisconnected", + ?peer_set, + ?peer + ); + + let was_connected = { + let mut shared = shared.0.lock(); + let peer_map = &mut shared.validation_peers; + + let w = peer_map.remove(&peer).is_some(); + + metrics.on_peer_disconnected(peer_set, version); + metrics.note_peer_count(peer_set, version, peer_map.len()); + + w + }; + + notification_sinks.lock().remove(&(peer_set, peer)); + + if was_connected && version == peer_set.get_main_version() { + dispatch_validation_event_to_all( + NetworkBridgeEvent::PeerDisconnected(peer), + sender, + &metrics, + ) + .await; + } + }, + NotificationEvent::NotificationReceived { peer, notification } => { + let expected_versions = { + let mut versions = PerPeerSet::>::default(); + let shared = shared.0.lock(); + + if let Some(peer_data) = shared.validation_peers.get(&peer) { + versions[PeerSet::Validation] = Some(peer_data.version); } - }, - Some(NetworkEvent::NotificationStreamClosed { remote: peer, protocol }) => { - let (peer_set, version) = match peerset_protocol_names.try_get_protocol(&protocol) { - None => continue, - Some(peer_set) => peer_set, - }; - gum::debug!( + versions + }; + + gum::trace!( + target: LOG_TARGET, + action = "PeerMessage", + peerset = ?PeerSet::Validation, + ?peer, + ); + + let (events, reports) = if expected_versions[PeerSet::Validation] == + Some(ValidationVersion::V1.into()) + { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else if expected_versions[PeerSet::Validation] == Some(ValidationVersion::V2.into()) { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else if expected_versions[PeerSet::Validation] == Some(ValidationVersion::V3.into()) { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else { + gum::warn!( target: LOG_TARGET, - action = "PeerDisconnected", - peer_set = ?peer_set, - peer = ?peer + version = ?expected_versions[PeerSet::Validation], + "Major logic bug. Peer somehow has unsupported validation protocol version." ); - let was_connected = { - let mut shared = shared.0.lock(); - let peer_map = match peer_set { - PeerSet::Validation => &mut shared.validation_peers, - PeerSet::Collation => &mut shared.collation_peers, - }; + never!( + "Only versions 1 and 2 are supported; peer set connection checked above; qed" + ); - let w = peer_map.remove(&peer).is_some(); + // If a peer somehow triggers this, we'll disconnect them + // eventually. 
+ (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) + }; - metrics.on_peer_disconnected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); + for report in reports { + network_service.report_peer(peer, report.into()); + } - w - }; + dispatch_validation_events_to_all(events, sender, &metrics).await; + }, + } +} - if was_connected && version == peer_set.get_main_version() { - match peer_set { - PeerSet::Validation => - dispatch_validation_event_to_all( - NetworkBridgeEvent::PeerDisconnected(peer), - &mut sender, - &metrics, - ) - .await, - PeerSet::Collation => - dispatch_collation_event_to_all( - NetworkBridgeEvent::PeerDisconnected(peer), - &mut sender, - ) - .await, - } - } - }, - Some(NetworkEvent::NotificationsReceived { remote, messages }) => { - let expected_versions = { - let mut versions = PerPeerSet::>::default(); - let shared = shared.0.lock(); - if let Some(peer_data) = shared.validation_peers.get(&remote) { - versions[PeerSet::Validation] = Some(peer_data.version); - } +/// Handle notification event received over the collation protocol. +async fn handle_collation_message( + event: NotificationEvent, + network_service: &mut impl Network, + sender: &mut impl overseer::NetworkBridgeRxSenderTrait, + authority_discovery_service: &mut AD, + metrics: &Metrics, + shared: &Shared, + peerset_protocol_names: &PeerSetProtocolNames, + notification_service: &mut Box, + notification_sinks: &mut Arc>>>, +) where + AD: validator_discovery::AuthorityDiscovery + Send, +{ + match event { + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => { + // only accept peers whose role can be determined + let result = network_service + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); + }, + NotificationEvent::NotificationStreamOpened { + peer, + handshake, + negotiated_fallback, + .. + } => { + let role = match network_service.peer_role(peer, handshake) { + Some(role) => ObservedRole::from(role), + None => { + gum::debug!( + target: LOG_TARGET, + ?peer, + "Failed to determine peer role for validation protocol", + ); + return + }, + }; + + let (peer_set, version) = { + let (peer_set, version) = + (PeerSet::Collation, PeerSet::Collation.get_main_version()); + + if let Some(fallback) = negotiated_fallback { + match peerset_protocol_names.try_get_protocol(&fallback) { + None => { + gum::debug!( + target: LOG_TARGET, + fallback = &*fallback, + ?peer, + ?peer_set, + "Unknown fallback", + ); + + return + }, + Some((p2, v2)) => { + if p2 != peer_set { + gum::debug!( + target: LOG_TARGET, + fallback = &*fallback, + fallback_peerset = ?p2, + peerset = ?peer_set, + "Fallback mismatched peer-set", + ); - if let Some(peer_data) = shared.collation_peers.get(&remote) { - versions[PeerSet::Collation] = Some(peer_data.version); - } + return + } - versions - }; + (p2, v2) + }, + } + } else { + (peer_set, version) + } + }; + + // store the notification sink to `notification_sinks` so both `NetworkBridgeRx` + // and `NetworkBridgeTx` can send messages to the peer. + match notification_service.message_sink(&peer) { + Some(sink) => { + notification_sinks.lock().insert((peer_set, peer), sink); + }, + None => { + gum::warn!( + target: LOG_TARGET, + peer_set = ?peer_set, + version = %version, + peer = ?peer, + role = ?role, + "Message sink not available for peer", + ); + return + }, + } - // non-decoded, but version-checked validation messages. 
- let v_messages: Result, _> = messages - .iter() - .filter_map(|(protocol, msg_bytes)| { - // version doesn't matter because we always receive on the 'correct' - // protocol name, not the negotiated fallback. - let (peer_set, version) = - peerset_protocol_names.try_get_protocol(protocol)?; - gum::trace!( - target: LOG_TARGET, - ?peer_set, - ?protocol, - ?version, - "Received notification" - ); + gum::debug!( + target: LOG_TARGET, + action = "PeerConnected", + peer_set = ?peer_set, + version = %version, + peer = ?peer, + role = ?role + ); + + let local_view = { + let mut shared = shared.0.lock(); + let peer_map = &mut shared.collation_peers; + + match peer_map.entry(peer) { + hash_map::Entry::Occupied(_) => return, + hash_map::Entry::Vacant(vacant) => { + vacant.insert(PeerData { view: View::default(), version }); + }, + } - if peer_set == PeerSet::Validation { - if expected_versions[PeerSet::Validation].is_none() { - return Some(Err(UNCONNECTED_PEERSET_COST)) - } + metrics.on_peer_connected(peer_set, version); + metrics.note_peer_count(peer_set, version, peer_map.len()); - Some(Ok(msg_bytes.clone())) - } else { - None - } - }) - .collect(); + shared.local_view.clone().unwrap_or(View::default()) + }; - let v_messages = match v_messages { - Err(rep) => { - gum::debug!(target: LOG_TARGET, action = "ReportPeer"); - network_service.report_peer(remote, rep.into()); + let maybe_authority = + authority_discovery_service.get_authority_ids_by_peer_id(peer).await; - continue - }, - Ok(v) => v, - }; + dispatch_collation_events_to_all( + vec![ + NetworkBridgeEvent::PeerConnected(peer, role, version, maybe_authority), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), + ], + sender, + ) + .await; - // non-decoded, but version-checked collation messages. - let c_messages: Result, _> = messages - .iter() - .filter_map(|(protocol, msg_bytes)| { - // version doesn't matter because we always receive on the 'correct' - // protocol name, not the negotiated fallback. 
- let (peer_set, _version) = - peerset_protocol_names.try_get_protocol(protocol)?; - - if peer_set == PeerSet::Collation { - if expected_versions[PeerSet::Collation].is_none() { - return Some(Err(UNCONNECTED_PEERSET_COST)) - } + match CollationVersion::try_from(version) + .expect("try_get_protocol has already checked version is known; qed") + { + CollationVersion::V1 => send_collation_message_v1( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + CollationVersion::V2 => send_collation_message_v2( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + } + }, + NotificationEvent::NotificationStreamClosed { peer } => { + let (peer_set, version) = (PeerSet::Collation, PeerSet::Collation.get_main_version()); - Some(Ok(msg_bytes.clone())) - } else { - None - } - }) - .collect(); + gum::debug!( + target: LOG_TARGET, + action = "PeerDisconnected", + ?peer_set, + ?peer + ); - let c_messages = match c_messages { - Err(rep) => { - gum::debug!(target: LOG_TARGET, action = "ReportPeer"); - network_service.report_peer(remote, rep.into()); + let was_connected = { + let mut shared = shared.0.lock(); + let peer_map = &mut shared.collation_peers; - continue - }, - Ok(v) => v, - }; + let w = peer_map.remove(&peer).is_some(); - if v_messages.is_empty() && c_messages.is_empty() { - continue - } + metrics.on_peer_disconnected(peer_set, version); + metrics.note_peer_count(peer_set, version, peer_map.len()); - gum::trace!( - target: LOG_TARGET, - action = "PeerMessages", - peer = ?remote, - num_validation_messages = %v_messages.len(), - num_collation_messages = %c_messages.len() - ); + w + }; - if !v_messages.is_empty() { - let (events, reports) = if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::V1.into()) - { - handle_peer_messages::( - remote, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - v_messages, - &metrics, - ) - } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::V2.into()) - { - handle_peer_messages::( - remote, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - v_messages, - &metrics, - ) - } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::VStaging.into()) - { - handle_peer_messages::( - remote, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - v_messages, - &metrics, - ) - } else { - gum::warn!( - target: LOG_TARGET, - version = ?expected_versions[PeerSet::Validation], - "Major logic bug. Peer somehow has unsupported validation protocol version." - ); + notification_sinks.lock().remove(&(peer_set, peer)); - never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); + if was_connected && version == peer_set.get_main_version() { + dispatch_collation_event_to_all(NetworkBridgeEvent::PeerDisconnected(peer), sender) + .await; + } + }, + NotificationEvent::NotificationReceived { peer, notification } => { + let expected_versions = { + let mut versions = PerPeerSet::>::default(); + let shared = shared.0.lock(); - // If a peer somehow triggers this, we'll disconnect them - // eventually. 
- (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) - }; + if let Some(peer_data) = shared.collation_peers.get(&peer) { + versions[PeerSet::Collation] = Some(peer_data.version); + } - for report in reports { - network_service.report_peer(remote, report.into()); - } + versions + }; - dispatch_validation_events_to_all(events, &mut sender, &metrics).await; - } + gum::trace!( + target: LOG_TARGET, + action = "PeerMessage", + peerset = ?PeerSet::Collation, + ?peer, + ); + + let (events, reports) = + if expected_versions[PeerSet::Collation] == Some(CollationVersion::V1.into()) { + handle_peer_messages::( + peer, + PeerSet::Collation, + &mut shared.0.lock().collation_peers, + vec![notification.into()], + metrics, + ) + } else if expected_versions[PeerSet::Collation] == Some(CollationVersion::V2.into()) + { + handle_peer_messages::( + peer, + PeerSet::Collation, + &mut shared.0.lock().collation_peers, + vec![notification.into()], + metrics, + ) + } else { + gum::warn!( + target: LOG_TARGET, + version = ?expected_versions[PeerSet::Collation], + "Major logic bug. Peer somehow has unsupported collation protocol version." + ); - if !c_messages.is_empty() { - let (events, reports) = if expected_versions[PeerSet::Collation] == - Some(CollationVersion::V1.into()) - { - handle_peer_messages::( - remote, - PeerSet::Collation, - &mut shared.0.lock().collation_peers, - c_messages, - &metrics, - ) - } else if expected_versions[PeerSet::Collation] == - Some(CollationVersion::V2.into()) - { - handle_peer_messages::( - remote, - PeerSet::Collation, - &mut shared.0.lock().collation_peers, - c_messages, - &metrics, - ) - } else { - gum::warn!( - target: LOG_TARGET, - version = ?expected_versions[PeerSet::Collation], - "Major logic bug. Peer somehow has unsupported collation protocol version." - ); + never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); - never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); + // If a peer somehow triggers this, we'll disconnect them + // eventually. + (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) + }; - // If a peer somehow triggers this, we'll disconnect them - // eventually. - (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) - }; + for report in reports { + network_service.report_peer(peer, report.into()); + } - for report in reports { - network_service.report_peer(remote, report.into()); - } + dispatch_collation_events_to_all(events, sender).await; + }, + } +} - dispatch_collation_events_to_all(events, &mut sender).await; - } +async fn handle_network_messages( + mut sender: impl overseer::NetworkBridgeRxSenderTrait, + mut network_service: impl Network, + mut authority_discovery_service: AD, + metrics: Metrics, + shared: Shared, + peerset_protocol_names: PeerSetProtocolNames, + mut validation_service: Box, + mut collation_service: Box, + mut notification_sinks: Arc>>>, +) -> Result<(), Error> +where + AD: validator_discovery::AuthorityDiscovery + Send, +{ + loop { + futures::select! 
{ + event = validation_service.next_event().fuse() => match event { + Some(event) => handle_validation_message( + event, + &mut network_service, + &mut sender, + &mut authority_discovery_service, + &metrics, + &shared, + &peerset_protocol_names, + &mut validation_service, + &mut notification_sinks, + ).await, + None => return Err(Error::EventStreamConcluded), }, + event = collation_service.next_event().fuse() => match event { + Some(event) => handle_collation_message( + event, + &mut network_service, + &mut sender, + &mut authority_discovery_service, + &metrics, + &shared, + &peerset_protocol_names, + &mut collation_service, + &mut notification_sinks, + ).await, + None => return Err(Error::EventStreamConcluded), + } } } } @@ -593,17 +725,15 @@ where } #[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)] -async fn run_incoming_orchestra_signals( +async fn run_incoming_orchestra_signals( mut ctx: Context, - mut network_service: N, mut authority_discovery_service: AD, shared: Shared, sync_oracle: Box, metrics: Metrics, - peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, ) -> Result<(), Error> where - N: Network, AD: validator_discovery::AuthorityDiscovery + Clone, { // This is kept sorted, descending, by block number. @@ -695,13 +825,12 @@ where mode = Mode::Active; update_our_view( - &mut network_service, &mut ctx, &live_heads, &shared, finalized_number, &metrics, - &peerset_protocol_names, + ¬ification_sinks, ); } } @@ -735,7 +864,6 @@ where async fn run_network_in( bridge: NetworkBridgeRx, mut ctx: Context, - network_stream: BoxStream<'static, NetworkEvent>, ) -> Result<(), Error> where N: Network, @@ -748,16 +876,21 @@ where sync_oracle, shared, peerset_protocol_names, + validation_service, + collation_service, + notification_sinks, } = bridge; let (task, network_event_handler) = handle_network_messages( ctx.sender().clone(), network_service.clone(), - network_stream, authority_discovery_service.clone(), metrics.clone(), shared.clone(), peerset_protocol_names.clone(), + validation_service, + collation_service, + notification_sinks.clone(), ) .remote_handle(); @@ -766,12 +899,11 @@ where let orchestra_signal_handler = run_incoming_orchestra_signals( ctx, - network_service, authority_discovery_service, shared, sync_oracle, metrics, - peerset_protocol_names, + notification_sinks, ); futures::pin_mut!(orchestra_signal_handler); @@ -791,17 +923,14 @@ fn construct_view( } #[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)] -fn update_our_view( - net: &mut Net, +fn update_our_view( ctx: &mut Context, live_heads: &[ActivatedLeaf], shared: &Shared, finalized_number: BlockNumber, metrics: &Metrics, - peerset_protocol_names: &PeerSetProtocolNames, -) where - Net: Network, -{ + notification_sinks: &Arc>>>, +) { let new_view = construct_view(live_heads.iter().map(|v| v.hash), finalized_number); let (validation_peers, collation_peers) = { @@ -845,47 +974,42 @@ fn update_our_view( filter_by_peer_version(&validation_peers, ValidationVersion::V2.into()); let v2_collation_peers = filter_by_peer_version(&collation_peers, CollationVersion::V2.into()); - let vstaging_validation_peers = - filter_by_peer_version(&validation_peers, ValidationVersion::VStaging.into()); + let v3_validation_peers = + filter_by_peer_version(&validation_peers, ValidationVersion::V3.into()); send_validation_message_v1( - net, v1_validation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); send_collation_message_v1( - 
net, v1_collation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); send_validation_message_v2( - net, v2_validation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); send_collation_message_v2( - net, v2_collation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); - send_validation_message_vstaging( - net, - vstaging_validation_peers, - peerset_protocol_names, + send_validation_message_v3( + v3_validation_peers, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); let our_view = OurView::new( diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index f784e78a7f2154c92b9ee85f5b6a1d05ea51c82f..6847b8a7e24db5b13df2873d1b54f37506e76831 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use super::*; -use futures::{channel::oneshot, executor, stream::BoxStream}; +use futures::{channel::oneshot, executor}; use overseer::jaeger; use polkadot_node_network_protocol::{self as net_protocol, OurView}; use polkadot_node_subsystem::messages::NetworkBridgeEvent; @@ -26,10 +26,13 @@ use parking_lot::Mutex; use std::{ collections::HashSet, sync::atomic::{AtomicBool, Ordering}, - task::Poll, }; -use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange}; +use sc_network::{ + service::traits::{Direction, MessageSink, NotificationService}, + IfDisconnected, Multiaddr, ObservedRole as SubstrateObservedRole, ProtocolName, + ReputationChange, Roles, +}; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, @@ -47,9 +50,8 @@ use polkadot_node_subsystem_test_helpers::{ mock::new_leaf, SingleItemSink, SingleItemStream, TestSubsystemContextHandle, }; use polkadot_node_subsystem_util::metered; -use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash}; +use polkadot_primitives::{AuthorityDiscoveryId, Hash}; -use sc_network::Multiaddr; use sp_keyring::Sr25519Keyring; use crate::{network::Network, validator_discovery::AuthorityDiscovery}; @@ -64,10 +66,9 @@ pub enum NetworkAction { WriteNotification(PeerId, PeerSet, Vec), } -// The subsystem's view of the network - only supports a single call to `event_stream`. +// The subsystem's view of the network. #[derive(Clone)] struct TestNetwork { - net_events: Arc>>>, action_tx: Arc>>, protocol_names: Arc, } @@ -79,37 +80,42 @@ struct TestAuthorityDiscovery; // of `NetworkAction`s. 
struct TestNetworkHandle { action_rx: metered::UnboundedMeteredReceiver, - net_tx: SingleItemSink, - protocol_names: PeerSetProtocolNames, + validation_tx: SingleItemSink, + collation_tx: SingleItemSink, } fn new_test_network( protocol_names: PeerSetProtocolNames, -) -> (TestNetwork, TestNetworkHandle, TestAuthorityDiscovery) { - let (net_tx, net_rx) = polkadot_node_subsystem_test_helpers::single_item_sink(); +) -> ( + TestNetwork, + TestNetworkHandle, + TestAuthorityDiscovery, + Box, + Box, +) { let (action_tx, action_rx) = metered::unbounded(); + let (validation_tx, validation_rx) = polkadot_node_subsystem_test_helpers::single_item_sink(); + let (collation_tx, collation_rx) = polkadot_node_subsystem_test_helpers::single_item_sink(); + let action_tx = Arc::new(Mutex::new(action_tx)); ( TestNetwork { - net_events: Arc::new(Mutex::new(Some(net_rx))), - action_tx: Arc::new(Mutex::new(action_tx)), + action_tx: action_tx.clone(), protocol_names: Arc::new(protocol_names.clone()), }, - TestNetworkHandle { action_rx, net_tx, protocol_names }, + TestNetworkHandle { action_rx, validation_tx, collation_tx }, TestAuthorityDiscovery, + Box::new(TestNotificationService::new( + PeerSet::Validation, + action_tx.clone(), + validation_rx, + )), + Box::new(TestNotificationService::new(PeerSet::Collation, action_tx, collation_rx)), ) } #[async_trait] impl Network for TestNetwork { - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { - self.net_events - .lock() - .take() - .expect("Subsystem made more than one call to `event_stream`") - .boxed() - } - async fn set_reserved_peers( &mut self, _protocol: ProtocolName, @@ -143,7 +149,8 @@ impl Network for TestNetwork { } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap(); + let (peer_set, version) = self.protocol_names.try_get_protocol(&protocol).unwrap(); + assert_eq!(version, peer_set.get_main_version()); self.action_tx .lock() @@ -151,13 +158,10 @@ impl Network for TestNetwork { .unwrap(); } - fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap(); - - self.action_tx - .lock() - .unbounded_send(NetworkAction::WriteNotification(who, peer_set, message)) - .unwrap(); + fn peer_role(&self, _peer_id: PeerId, handshake: Vec) -> Option { + Roles::decode_all(&mut &handshake[..]) + .ok() + .and_then(|role| Some(SubstrateObservedRole::from(role))) } } @@ -201,35 +205,85 @@ impl TestNetworkHandle { peer_set: PeerSet, role: ObservedRole, ) { - let protocol_version = ProtocolVersion::from(protocol_version); - self.send_network_event(NetworkEvent::NotificationStreamOpened { - remote: peer, - protocol: self.protocol_names.get_name(peer_set, protocol_version), - negotiated_fallback: None, - role: role.into(), - received_handshake: vec![], - }) - .await; + fn observed_role_to_handshake(role: &ObservedRole) -> Vec { + match role { + &ObservedRole::Light => Roles::LIGHT.encode(), + &ObservedRole::Authority => Roles::AUTHORITY.encode(), + &ObservedRole::Full => Roles::FULL.encode(), + } + } + + // because of how protocol negotiation works, if two peers support at least one common + // protocol, the protocol is negotiated over the main protocol (`ValidationVersion::V2`) but + // if either one of the peers used a fallback protocol for the negotiation (meaning they + // don't support the main protocol but some older version of it ), `negotiated_fallback` is + // set to that 
protocol. + let negotiated_fallback = match protocol_version { + ValidationVersion::V2 => None, + ValidationVersion::V1 => match peer_set { + PeerSet::Validation => Some(ProtocolName::from("/polkadot/validation/1")), + PeerSet::Collation => Some(ProtocolName::from("/polkadot/collation/1")), + }, + ValidationVersion::V3 => match peer_set { + PeerSet::Validation => Some(ProtocolName::from("/polkadot/validation/3")), + PeerSet::Collation => unreachable!(), + }, + }; + + match peer_set { + PeerSet::Validation => { + self.validation_tx + .send(NotificationEvent::NotificationStreamOpened { + peer, + direction: Direction::Inbound, + handshake: observed_role_to_handshake(&role), + negotiated_fallback, + }) + .await + .expect("subsystem concluded early"); + }, + PeerSet::Collation => { + self.collation_tx + .send(NotificationEvent::NotificationStreamOpened { + peer, + direction: Direction::Inbound, + handshake: observed_role_to_handshake(&role), + negotiated_fallback, + }) + .await + .expect("subsystem concluded early"); + }, + } } async fn disconnect_peer(&mut self, peer: PeerId, peer_set: PeerSet) { - self.send_network_event(NetworkEvent::NotificationStreamClosed { - remote: peer, - protocol: self.protocol_names.get_main_name(peer_set), - }) - .await; + match peer_set { + PeerSet::Validation => self + .validation_tx + .send(NotificationEvent::NotificationStreamClosed { peer }) + .await + .expect("subsystem concluded early"), + PeerSet::Collation => self + .collation_tx + .send(NotificationEvent::NotificationStreamClosed { peer }) + .await + .expect("subsystem concluded early"), + } } async fn peer_message(&mut self, peer: PeerId, peer_set: PeerSet, message: Vec) { - self.send_network_event(NetworkEvent::NotificationsReceived { - remote: peer, - messages: vec![(self.protocol_names.get_main_name(peer_set), message.into())], - }) - .await; - } - - async fn send_network_event(&mut self, event: NetworkEvent) { - self.net_tx.send(event).await.expect("subsystem concluded early"); + match peer_set { + PeerSet::Validation => self + .validation_tx + .send(NotificationEvent::NotificationReceived { peer, notification: message }) + .await + .expect("subsystem concluded early"), + PeerSet::Collation => self + .collation_tx + .send(NotificationEvent::NotificationReceived { peer, notification: message }) + .await + .expect("subsystem concluded early"), + } } } @@ -240,6 +294,121 @@ fn assert_network_actions_contains(actions: &[NetworkAction], action: &NetworkAc } } +struct TestNotificationService { + peer_set: PeerSet, + action_tx: Arc>>, + rx: SingleItemStream, +} + +impl std::fmt::Debug for TestNotificationService { + fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Ok(()) + } +} + +impl TestNotificationService { + pub fn new( + peer_set: PeerSet, + action_tx: Arc>>, + rx: SingleItemStream, + ) -> Self { + Self { peer_set, action_tx, rx } + } +} + +struct TestMessageSink { + peer: PeerId, + peer_set: PeerSet, + action_tx: Arc>>, +} + +impl TestMessageSink { + fn new( + peer: PeerId, + peer_set: PeerSet, + action_tx: Arc>>, + ) -> TestMessageSink { + Self { peer, peer_set, action_tx } + } +} + +#[async_trait::async_trait] +impl MessageSink for TestMessageSink { + fn send_sync_notification(&self, notification: Vec) { + self.action_tx + .lock() + .unbounded_send(NetworkAction::WriteNotification( + self.peer, + self.peer_set, + notification, + )) + .unwrap(); + } + + async fn send_async_notification( + &self, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + 
unimplemented!(); + } +} + +#[async_trait::async_trait] +impl NotificationService for TestNotificationService { + /// Instruct `Notifications` to open a new substream for `peer`. + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Instruct `Notifications` to close substream for `peer`. + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec) { + unimplemented!(); + } + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + async fn send_async_notification( + &self, + _peer: &PeerId, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + fn try_set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + /// Get next event from the `Notifications` event stream. + async fn next_event(&mut self) -> Option { + self.rx.next().await + } + + // Clone [`NotificationService`] + fn clone(&mut self) -> Result, ()> { + unimplemented!(); + } + + /// Get protocol name. + fn protocol(&self) -> &ProtocolName { + unimplemented!(); + } + + /// Get notification sink of the peer. + fn message_sink(&self, peer: &PeerId) -> Option> { + Some(Box::new(TestMessageSink::new(*peer, self.peer_set, self.action_tx.clone()))) + } +} + #[derive(Clone)] struct TestSyncOracle { is_major_syncing: Arc, @@ -335,10 +504,11 @@ fn test_harness>( let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, fork_id); let pool = sp_core::testing::TaskExecutor::new(); - let (mut network, network_handle, discovery) = new_test_network(peerset_protocol_names.clone()); + let (network, network_handle, discovery, validation_service, collation_service) = + new_test_network(peerset_protocol_names.clone()); let (context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); - let network_stream = network.event_stream(); + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); let shared = Shared::default(); let bridge = NetworkBridgeRx { @@ -348,9 +518,12 @@ fn test_harness>( sync_oracle, shared: shared.clone(), peerset_protocol_names, + validation_service, + collation_service, + notification_sinks, }; - let network_bridge = run_network_in(bridge, context, network_stream) + let network_bridge = run_network_in(bridge, context) .map_err(|_| panic!("subsystem execution failed")) .map(|_| ()); @@ -942,8 +1115,6 @@ fn relays_collation_protocol_messages() { .await; } - // peer A gets reported for sending a collation message. 
- let collator_protocol_message = protocol_v1::CollatorProtocolMessage::Declare( Sr25519Keyring::Alice.public().into(), Default::default(), @@ -953,19 +1124,23 @@ fn relays_collation_protocol_messages() { let message_v1 = protocol_v1::CollationProtocol::CollatorProtocol(collator_protocol_message.clone()); - network_handle - .peer_message( - peer_a, - PeerSet::Collation, - WireMessage::ProtocolMessage(message_v1.clone()).encode(), - ) - .await; - - let actions = network_handle.next_network_actions(3).await; - assert_network_actions_contains( - &actions, - &NetworkAction::ReputationChange(peer_a, UNCONNECTED_PEERSET_COST.into()), - ); + // peer A gets reported for sending a collation message. + // NOTE: this is not possible since peer A cannot send + // a collation message if it has not opened a collation protocol + + // network_handle + // .peer_message( + // peer_a, + // PeerSet::Collation, + // WireMessage::ProtocolMessage(message_v1.clone()).encode(), + // ) + // .await; + + // let actions = network_handle.next_network_actions(3).await; + // assert_network_actions_contains( + // &actions, + // &NetworkAction::ReputationChange(peer_a, UNCONNECTED_PEERSET_COST.into()), + // ); // peer B has the message relayed. @@ -1212,7 +1387,7 @@ fn our_view_updates_decreasing_order_and_limited_to_max() { fn network_protocol_versioning_view_update() { let (oracle, handle) = make_sync_oracle(false); test_harness(Box::new(oracle), |test_harness| async move { - let TestHarness { mut network_handle, mut virtual_overseer, .. } = test_harness; + let TestHarness { mut network_handle, mut virtual_overseer, shared } = test_harness; let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); let peers = [ @@ -1231,12 +1406,22 @@ fn network_protocol_versioning_view_update() { handle.await_mode_switch().await; + let mut total_validation_peers = 0; + let mut total_collation_peers = 0; + for &(peer_id, peer_set, version) in &peers { network_handle .connect_peer(peer_id, version, peer_set, ObservedRole::Full) .await; + + match peer_set { + PeerSet::Validation => total_validation_peers += 1, + PeerSet::Collation => total_collation_peers += 1, + } } + await_peer_connections(&shared, total_validation_peers, total_collation_peers).await; + let view = view![head]; let actions = network_handle.next_network_actions(4).await; @@ -1248,8 +1433,8 @@ fn network_protocol_versioning_view_update() { ValidationVersion::V2 => WireMessage::::ViewUpdate(view.clone()) .encode(), - ValidationVersion::VStaging => - WireMessage::::ViewUpdate(view.clone()) + ValidationVersion::V3 => + WireMessage::::ViewUpdate(view.clone()) .encode(), }; assert_network_actions_contains( @@ -1264,15 +1449,19 @@ fn network_protocol_versioning_view_update() { #[test] fn network_protocol_versioning_subsystem_msg() { + use polkadot_primitives::CandidateHash; + use std::task::Poll; + let (oracle, _handle) = make_sync_oracle(false); test_harness(Box::new(oracle), |test_harness| async move { - let TestHarness { mut network_handle, mut virtual_overseer, .. } = test_harness; + let TestHarness { mut network_handle, mut virtual_overseer, shared } = test_harness; let peer = PeerId::random(); network_handle .connect_peer(peer, ValidationVersion::V2, PeerSet::Validation, ObservedRole::Full) .await; + await_peer_connections(&shared, 1, 0).await; // bridge will inform about all connected peers. 
{ @@ -1280,7 +1469,7 @@ fn network_protocol_versioning_subsystem_msg() { NetworkBridgeEvent::PeerConnected( peer, ObservedRole::Full, - ValidationVersion::V2.into(), + ValidationVersion::V3.into(), None, ), &mut virtual_overseer, @@ -1295,9 +1484,9 @@ fn network_protocol_versioning_subsystem_msg() { } let approval_distribution_message = - protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); + protocol_v3::ApprovalDistributionMessage::Approvals(Vec::new()); - let msg = protocol_v2::ValidationProtocol::ApprovalDistribution( + let msg = protocol_v3::ValidationProtocol::ApprovalDistribution( approval_distribution_message.clone(), ); @@ -1313,7 +1502,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::ApprovalDistribution( ApprovalDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V3(m)) ) ) => { assert_eq!(p, peer); @@ -1347,7 +1536,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::StatementDistribution( StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V3(m)) ) ) => { assert_eq!(p, peer); diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index 5f222ad59c75e229f8d8df0c0fb0a080aa2e1505..d5be6f01c33737a2b09bd39f40640d29c99ca94c 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -27,12 +27,8 @@ use polkadot_node_subsystem::{ overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, }; -/// Peer set info for network initialization. -/// -/// To be passed to [`FullNetworkConfiguration::add_notification_protocol`](). -pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority}; use polkadot_node_network_protocol::request_response::Requests; -use sc_network::ReputationChange; +use sc_network::{MessageSink, ReputationChange}; use crate::validator_discovery; @@ -41,7 +37,7 @@ use crate::validator_discovery; /// Defines the `Network` trait with an implementation for an `Arc`. 
use crate::network::{ send_collation_message_v1, send_collation_message_v2, send_validation_message_v1, - send_validation_message_v2, send_validation_message_vstaging, Network, + send_validation_message_v2, send_validation_message_v3, Network, }; use crate::metrics::Metrics; @@ -60,6 +56,7 @@ pub struct NetworkBridgeTx { metrics: Metrics, req_protocol_names: ReqProtocolNames, peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, } impl NetworkBridgeTx { @@ -74,6 +71,7 @@ impl NetworkBridgeTx { metrics: Metrics, req_protocol_names: ReqProtocolNames, peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, ) -> Self { Self { network_service, @@ -81,6 +79,7 @@ impl NetworkBridgeTx { metrics, req_protocol_names, peerset_protocol_names, + notification_sinks, } } } @@ -107,6 +106,7 @@ async fn handle_subsystem_messages( metrics: Metrics, req_protocol_names: ReqProtocolNames, peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, ) -> Result<(), Error> where N: Network, @@ -130,6 +130,7 @@ where &metrics, &req_protocol_names, &peerset_protocol_names, + ¬ification_sinks, ) .await; }, @@ -140,13 +141,14 @@ where #[overseer::contextbounds(NetworkBridgeTx, prefix = self::overseer)] async fn handle_incoming_subsystem_communication( _ctx: &mut Context, - mut network_service: N, + network_service: N, validator_discovery: &mut validator_discovery::Service, mut authority_discovery_service: AD, msg: NetworkBridgeTxMessage, metrics: &Metrics, req_protocol_names: &ReqProtocolNames, peerset_protocol_names: &PeerSetProtocolNames, + notification_sinks: &Arc>>>, ) -> (N, AD) where N: Network, @@ -194,25 +196,22 @@ where match msg { Versioned::V1(msg) => send_validation_message_v1( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( - &mut network_service, + Versioned::V3(msg) => send_validation_message_v3( peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), Versioned::V2(msg) => send_validation_message_v2( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), } }, @@ -227,25 +226,22 @@ where for (peers, msg) in msgs { match msg { Versioned::V1(msg) => send_validation_message_v1( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( - &mut network_service, + Versioned::V3(msg) => send_validation_message_v3( peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), Versioned::V2(msg) => send_validation_message_v2( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), } } @@ -259,18 +255,16 @@ where match msg { Versioned::V1(msg) => send_collation_message_v1( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), - Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2( - &mut network_service, + Versioned::V2(msg) | Versioned::V3(msg) => send_collation_message_v2( peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), } }, @@ -284,18 +278,16 @@ where for (peers, msg) in msgs { match msg { Versioned::V1(msg) 
=> send_collation_message_v1( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), - Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2( - &mut network_service, + Versioned::V2(msg) | Versioned::V3(msg) => send_collation_message_v2( peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), } } @@ -389,6 +381,7 @@ where metrics, req_protocol_names, peerset_protocol_names, + notification_sinks, } = bridge; handle_subsystem_messages( @@ -398,6 +391,7 @@ where metrics, req_protocol_names, peerset_protocol_names, + notification_sinks, ) .await?; diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs index 1a2d9a7a4240cb64d545f3657804ada5155463bf..c3cf0f322f681b5cf2bca5c7a8bbadd53553a90d 100644 --- a/polkadot/node/network/bridge/src/tx/tests.rs +++ b/polkadot/node/network/bridge/src/tx/tests.rs @@ -15,15 +15,18 @@ // along with Polkadot. If not, see . use super::*; -use futures::{executor, stream::BoxStream}; +use futures::executor; use polkadot_node_subsystem_util::TimeoutExt; use async_trait::async_trait; use parking_lot::Mutex; use std::collections::HashSet; -use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange}; +use sc_network::{ + IfDisconnected, ObservedRole as SubstrateObservedRole, ProtocolName, ReputationChange, Roles, +}; +use parity_scale_codec::DecodeAll; use polkadot_node_network_protocol::{ peer_set::{PeerSetProtocolNames, ValidationVersion}, request_response::{outgoing::Requests, ReqProtocolNames}, @@ -51,10 +54,9 @@ pub enum NetworkAction { WriteNotification(PeerId, PeerSet, Vec), } -// The subsystem's view of the network - only supports a single call to `event_stream`. +// The subsystem's view of the network. #[derive(Clone)] struct TestNetwork { - net_events: Arc>>>, action_tx: Arc>>, peerset_protocol_names: Arc, } @@ -66,37 +68,78 @@ struct TestAuthorityDiscovery; // of `NetworkAction`s. 
struct TestNetworkHandle { action_rx: metered::UnboundedMeteredReceiver, - net_tx: metered::MeteredSender, - peerset_protocol_names: PeerSetProtocolNames, + _peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, + action_tx: Arc>>, +} + +struct TestMessageSink { + peer: PeerId, + peer_set: PeerSet, + action_tx: Arc>>, +} + +impl TestMessageSink { + fn new( + peer: PeerId, + peer_set: PeerSet, + action_tx: Arc>>, + ) -> TestMessageSink { + Self { peer, peer_set, action_tx } + } +} + +#[async_trait::async_trait] +impl MessageSink for TestMessageSink { + fn send_sync_notification(&self, notification: Vec) { + self.action_tx + .lock() + .unbounded_send(NetworkAction::WriteNotification( + self.peer, + self.peer_set, + notification, + )) + .unwrap(); + } + + async fn send_async_notification( + &self, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } } fn new_test_network( peerset_protocol_names: PeerSetProtocolNames, -) -> (TestNetwork, TestNetworkHandle, TestAuthorityDiscovery) { - let (net_tx, net_rx) = metered::channel(10); +) -> ( + TestNetwork, + TestNetworkHandle, + TestAuthorityDiscovery, + Arc>>>, +) { let (action_tx, action_rx) = metered::unbounded(); + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); + let action_tx = Arc::new(Mutex::new(action_tx)); ( TestNetwork { - net_events: Arc::new(Mutex::new(Some(net_rx))), - action_tx: Arc::new(Mutex::new(action_tx)), + action_tx: action_tx.clone(), peerset_protocol_names: Arc::new(peerset_protocol_names.clone()), }, - TestNetworkHandle { action_rx, net_tx, peerset_protocol_names }, + TestNetworkHandle { + action_rx, + _peerset_protocol_names: peerset_protocol_names, + action_tx, + notification_sinks: notification_sinks.clone(), + }, TestAuthorityDiscovery, + notification_sinks, ) } #[async_trait] impl Network for TestNetwork { - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { - self.net_events - .lock() - .take() - .expect("Subsystem made more than one call to `event_stream`") - .boxed() - } - async fn set_reserved_peers( &mut self, _protocol: ProtocolName, @@ -130,7 +173,8 @@ impl Network for TestNetwork { } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); + let (peer_set, version) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); + assert_eq!(version, peer_set.get_main_version()); self.action_tx .lock() @@ -138,13 +182,10 @@ impl Network for TestNetwork { .unwrap(); } - fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); - - self.action_tx - .lock() - .unbounded_send(NetworkAction::WriteNotification(who, peer_set, message)) - .unwrap(); + fn peer_role(&self, _peer_id: PeerId, handshake: Vec) -> Option { + Roles::decode_all(&mut &handshake[..]) + .ok() + .and_then(|role| Some(SubstrateObservedRole::from(role))) } } @@ -174,23 +215,14 @@ impl TestNetworkHandle { async fn connect_peer( &mut self, peer: PeerId, - protocol_version: ValidationVersion, + _protocol_version: ValidationVersion, peer_set: PeerSet, - role: ObservedRole, + _role: ObservedRole, ) { - let protocol_version = ProtocolVersion::from(protocol_version); - self.send_network_event(NetworkEvent::NotificationStreamOpened { - remote: peer, - protocol: self.peerset_protocol_names.get_name(peer_set, protocol_version), - negotiated_fallback: None, - role: 
role.into(), - received_handshake: vec![], - }) - .await; - } - - async fn send_network_event(&mut self, event: NetworkEvent) { - self.net_tx.send(event).await.expect("subsystem concluded early"); + self.notification_sinks.lock().insert( + (peer_set, peer), + Box::new(TestMessageSink::new(peer, peer_set, self.action_tx.clone())), + ); } } @@ -208,7 +240,8 @@ fn test_harness>(test: impl FnOnce(TestHarne let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, fork_id); let pool = sp_core::testing::TaskExecutor::new(); - let (network, network_handle, discovery) = new_test_network(peerset_protocol_names.clone()); + let (network, network_handle, discovery, network_notification_sinks) = + new_test_network(peerset_protocol_names.clone()); let (context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); @@ -219,6 +252,7 @@ fn test_harness>(test: impl FnOnce(TestHarne Metrics(None), req_protocol_names, peerset_protocol_names, + network_notification_sinks, ); let network_bridge_out_fut = run_network_out(bridge_out, context) @@ -364,9 +398,9 @@ fn network_protocol_versioning_send() { approval_distribution_message.clone(), ); - // Note that bridge doesn't ensure neither peer's protocol version - // or peer set match the message. - let receivers = vec![peer_ids[0], peer_ids[3]]; + // only `peer_ids[0]` opened the validation protocol v2 + // so only they will be sent a notification + let receivers = vec![peer_ids[0]]; virtual_overseer .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendValidationMessage( @@ -406,7 +440,9 @@ fn network_protocol_versioning_send() { let msg = protocol_v2::CollationProtocol::CollatorProtocol(collator_protocol_message.clone()); - let receivers = vec![peer_ids[1], peer_ids[2]]; + // only `peer_ids[0]` opened the collation protocol v2 + // so only they will be sent a notification + let receivers = vec![peer_ids[1]]; virtual_overseer .send(FromOrchestra::Communication { diff --git a/polkadot/node/network/bridge/src/validator_discovery.rs b/polkadot/node/network/bridge/src/validator_discovery.rs index 86e861fbc5b5c1cc4159a86c47fcf88cc6a6953d..b11af8a8a089c4aa64b26312562636b039068a78 100644 --- a/polkadot/node/network/bridge/src/validator_discovery.rs +++ b/polkadot/node/network/bridge/src/validator_discovery.rs @@ -169,13 +169,12 @@ mod tests { use crate::network::Network; use async_trait::async_trait; - use futures::stream::BoxStream; use polkadot_node_network_protocol::{ request_response::{outgoing::Requests, ReqProtocolNames}, PeerId, }; use polkadot_primitives::Hash; - use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange}; + use sc_network::{IfDisconnected, ProtocolName, ReputationChange}; use sp_keyring::Sr25519Keyring; use std::collections::{HashMap, HashSet}; @@ -224,10 +223,6 @@ mod tests { #[async_trait] impl Network for TestNetwork { - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { - panic!() - } - async fn set_reserved_peers( &mut self, _protocol: ProtocolName, @@ -263,7 +258,11 @@ mod tests { panic!() } - fn write_notification(&self, _: PeerId, _: ProtocolName, _: Vec) { + fn peer_role( + &self, + _peer_id: PeerId, + _handshake: Vec, + ) -> Option { panic!() } } diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index 367a97f35d994c566564247cf093e73e15ec4396..bcf4f74132fc0dd0ac85c84e8655abc37d5218ac 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ 
b/polkadot/node/network/collator-protocol/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } futures = "0.3.21" diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index b3a396e1be3488e34424681132e1ffde11097717..8fb0bb2154445f99000f329ff9aeb30e29c02092 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -882,7 +882,7 @@ async fn handle_incoming_peer_message( match msg { Versioned::V1(V1::Declare(..)) | Versioned::V2(V2::Declare(..)) | - Versioned::VStaging(V2::Declare(..)) => { + Versioned::V3(V2::Declare(..)) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -895,7 +895,7 @@ async fn handle_incoming_peer_message( }, Versioned::V1(V1::AdvertiseCollation(_)) | Versioned::V2(V2::AdvertiseCollation { .. }) | - Versioned::VStaging(V2::AdvertiseCollation { .. }) => { + Versioned::V3(V2::AdvertiseCollation { .. }) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -911,7 +911,7 @@ async fn handle_incoming_peer_message( }, Versioned::V1(V1::CollationSeconded(relay_parent, statement)) | Versioned::V2(V2::CollationSeconded(relay_parent, statement)) | - Versioned::VStaging(V2::CollationSeconded(relay_parent, statement)) => { + Versioned::V3(V2::CollationSeconded(relay_parent, statement)) => { if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) { gum::warn!( target: LOG_TARGET, diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index 7dd2287dab684debb39f68f94098c1b225024285..1b1194c72706703283d0002a149a59d950d33975 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -45,8 +45,9 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - AuthorityDiscoveryId, CollatorPair, ExecutorParams, GroupIndex, GroupRotationInfo, IndexedVec, - ScheduledCore, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + vstaging::NodeFeatures, AuthorityDiscoveryId, CollatorPair, ExecutorParams, GroupIndex, + GroupRotationInfo, IndexedVec, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, + ValidatorIndex, }; use polkadot_primitives_test_helpers::TestCandidateBuilder; use test_helpers::mock::new_leaf; @@ -406,7 +407,12 @@ async fn distribute_collation_with_receipt( tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, - + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(_, si_tx), + )) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _relay_parent, RuntimeApiRequest::ValidatorGroups(tx), diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 20b3b9ea1d265ba242e6dd6dabf5999ae46b286c..48ad3c711a6db9c5e9e09c26d1a6290ffbed60f4 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -777,7 +777,7 
@@ async fn process_incoming_peer_message( match msg { Versioned::V1(V1::Declare(collator_id, para_id, signature)) | Versioned::V2(V2::Declare(collator_id, para_id, signature)) | - Versioned::VStaging(V2::Declare(collator_id, para_id, signature)) => { + Versioned::V3(V2::Declare(collator_id, para_id, signature)) => { if collator_peer_id(&state.peer_data, &collator_id).is_some() { modify_reputation( &mut state.reputation, @@ -894,7 +894,7 @@ async fn process_incoming_peer_message( candidate_hash, parent_head_data_hash, }) | - Versioned::VStaging(V2::AdvertiseCollation { + Versioned::V3(V2::AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash, @@ -923,7 +923,7 @@ async fn process_incoming_peer_message( }, Versioned::V1(V1::CollationSeconded(..)) | Versioned::V2(V2::CollationSeconded(..)) | - Versioned::VStaging(V2::CollationSeconded(..)) => { + Versioned::V3(V2::CollationSeconded(..)) => { gum::warn!( target: LOG_TARGET, peer_id = ?origin, diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index f4ea358c41b513c29c24e477bd9ba009f6c55b9a..6b494c65336de7806019cd2ea4465e93c818f212 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" futures-timer = "3.0.2" @@ -28,7 +31,7 @@ indexmap = "1.9.1" [dev-dependencies] async-channel = "1.8.0" -async-trait = "0.1.57" +async-trait = "0.1.74" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs index 96f045cbf769219e737b4366fb0c96201c9c3b6e..a3520bf35f8023e8931a33f29f0bb9e83e72a62c 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs @@ -57,8 +57,8 @@ use polkadot_node_subsystem_test_helpers::{ subsystem_test_harness, TestSubsystemContextHandle, }; use polkadot_primitives::{ - AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, Hash, SessionIndex, - SessionInfo, + vstaging::NodeFeatures, AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, + Hash, SessionIndex, SessionInfo, }; use self::mock::{ @@ -646,6 +646,16 @@ async fn nested_network_dispute_request<'a, F, O>( }, unexpected => panic!("Unexpected message {:?}", unexpected), } + + match handle.recv().await { + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(_, si_tx), + )) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, + unexpected => panic!("Unexpected message {:?}", unexpected), + } } // Import should get initiated: @@ -773,6 +783,14 @@ async fn activate_leaf( tx.send(Ok(Some(ExecutorParams::default()))).expect("Receiver should stay alive."); } ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); } assert_matches!( diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index 
a9f68261addf4b0fd349bc212287009e1936ab5c..9ad7292b0fdcf979897663bff03e36eaa36acb4b 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } @@ -33,5 +36,6 @@ sp-authority-discovery = { path = "../../../../substrate/primitives/authority-di polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" -async-trait = "0.1.57" +async-trait = "0.1.74" lazy_static = "1.4.0" +quickcheck = "1.0.3" diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index 674c86e5ce27a122253a3363364375af5bd0f8dc..22417795d5ea55ff65d35ffb7d0f4b960582c570 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -32,7 +32,7 @@ use std::{ use futures::{channel::oneshot, select, FutureExt as _}; use futures_timer::Delay; -use rand::{seq::SliceRandom as _, SeedableRng}; +use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; use sc_network::{config::parse_addr, Multiaddr}; @@ -477,7 +477,7 @@ where match message { Versioned::V1(m) => match m {}, Versioned::V2(m) => match m {}, - Versioned::VStaging(m) => match m {}, + Versioned::V3(m) => match m {}, } }, } @@ -607,7 +607,7 @@ async fn update_gossip_topology( .map(|(i, a)| (a.clone(), ValidatorIndex(i as _))) .collect(); - canonical_shuffling.shuffle(&mut rng); + fisher_yates_shuffle(&mut rng, &mut canonical_shuffling[..]); for (i, (_, validator_index)) in canonical_shuffling.iter().enumerate() { shuffled_indices[validator_index.0 as usize] = i; } @@ -627,6 +627,16 @@ async fn update_gossip_topology( Ok(()) } +// Durstenfeld algorithm for the Fisher-Yates shuffle +// https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm +fn fisher_yates_shuffle(rng: &mut R, items: &mut [T]) { + for i in (1..items.len()).rev() { + // invariant: elements with index > i have been locked in place. + let index = rng.gen_range(0u32..(i as u32 + 1)); + items.swap(i, index as usize); + } +} + #[overseer::subsystem(GossipSupport, error = SubsystemError, prefix = self::overseer)] impl GossipSupport where diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs index 2e909bb0a67433e667ae87d679005091fe505810..e5ee101c31d857b2dbd540596649ddaf9b826bd5 100644 --- a/polkadot/node/network/gossip-support/src/tests.rs +++ b/polkadot/node/network/gossip-support/src/tests.rs @@ -22,6 +22,8 @@ use assert_matches::assert_matches; use async_trait::async_trait; use futures::{executor, future, Future}; use lazy_static::lazy_static; +use quickcheck::quickcheck; +use rand::seq::SliceRandom as _; use sc_network::multiaddr::Protocol; use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; @@ -710,3 +712,23 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() { assert_eq!(state.last_session_index, Some(1)); assert!(state.last_failure.is_none()); } + +// note: this test was added at a time where the default `rand::SliceRandom::shuffle` +// function was used to shuffle authorities for the topology and ensures backwards compatibility. 
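[Editor's note] The `fisher_yates_shuffle` added above is the Durstenfeld variant: walk the slice from the back and, at index `i`, swap with a uniformly chosen index in `0..=i`. The standalone sketch below illustrates the same loop structure; the function name `durstenfeld_shuffle`, the `next_below` closure and the toy xorshift generator are assumptions made for the example so that it runs without the `rand` crate, and are not the `ChaCha20Rng`-based setup used by gossip-support.

```rust
// Durstenfeld variant of the Fisher-Yates shuffle, written against a plain
// closure instead of `rand::Rng`. `next_below(n)` must return a value in 0..n.
fn durstenfeld_shuffle<T>(items: &mut [T], mut next_below: impl FnMut(usize) -> usize) {
    for i in (1..items.len()).rev() {
        // Invariant: positions after `i` are already fixed in place.
        let j = next_below(i + 1);
        items.swap(i, j);
    }
}

fn main() {
    // Toy xorshift64 generator -- purely illustrative (and modulo-biased),
    // NOT the ChaCha20Rng the subsystem seeds from session randomness.
    let mut state: u64 = 0x9E37_79B9_7F4A_7C15;
    let mut next_below = |n: usize| {
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;
        (state % n as u64) as usize
    };

    let mut validators: Vec<u32> = (0..8).collect();
    durstenfeld_shuffle(&mut validators, &mut next_below);
    println!("{validators:?}");
}
```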
+// +// in the same commit, an explicit fisher-yates implementation was added in place of the unspecified +// behavior of that function. If this test begins to fail at some point in the future, it can simply +// be removed as the desired behavior has been preserved. +quickcheck! { + fn rng_shuffle_equals_fisher_yates(x: Vec, seed_base: u8) -> bool { + let mut rng1: ChaCha20Rng = SeedableRng::from_seed([seed_base; 32]); + let mut rng2: ChaCha20Rng = SeedableRng::from_seed([seed_base; 32]); + + let mut data1 = x.clone(); + let mut data2 = x; + + data1.shuffle(&mut rng1); + crate::fisher_yates_shuffle(&mut rng2, &mut data2[..]); + data1 == data2 + } +} diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index c33b9eae3252606e8d2fcbd954a0e180f4a47acb..e683c662fbe78085c192786a2c177e93f8418fae 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -6,9 +6,12 @@ edition.workspace = true license.workspace = true description = "Primitives types for the Node-side" +[lints] +workspace = true + [dependencies] async-channel = "1.8.0" -async-trait = "0.1.57" +async-trait = "0.1.74" hex = "0.4.3" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } @@ -27,6 +30,3 @@ bitvec = "1" [dev-dependencies] rand_chacha = "0.3.1" - -[features] -network-protocol-staging = [] diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index 99dd513c4d7909012162a075f6bbc7ce041377fb..8bd9adbc17c1089bdaab9d16b271b9ae4bc8e708 100644 --- a/polkadot/node/network/protocol/src/grid_topology.rs +++ b/polkadot/node/network/protocol/src/grid_topology.rs @@ -73,12 +73,20 @@ pub struct SessionGridTopology { shuffled_indices: Vec, /// The canonical shuffling of validators for the session. canonical_shuffling: Vec, + /// The list of peer-ids in an efficient way to search. + peer_ids: HashSet, } impl SessionGridTopology { /// Create a new session grid topology. pub fn new(shuffled_indices: Vec, canonical_shuffling: Vec) -> Self { - SessionGridTopology { shuffled_indices, canonical_shuffling } + let mut peer_ids = HashSet::new(); + for peer_info in canonical_shuffling.iter() { + for peer_id in peer_info.peer_ids.iter() { + peer_ids.insert(*peer_id); + } + } + SessionGridTopology { shuffled_indices, canonical_shuffling, peer_ids } } /// Produces the outgoing routing logic for a particular peer. 
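The quickcheck property above ties the explicit Durstenfeld implementation to the historical behaviour of `rand`'s `SliceRandom::shuffle`. The property the topology code itself relies on is simpler: given the same seed, the shuffle is fully deterministic, so every node deriving the same per-session seed arrives at the same canonical ordering. A minimal standalone sketch of that, with an arbitrary seed value (the real code derives its seed elsewhere):

use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;

// Durstenfeld variant of the Fisher-Yates shuffle, mirroring the function added above.
fn fisher_yates_shuffle<R: Rng, T>(rng: &mut R, items: &mut [T]) {
    for i in (1..items.len()).rev() {
        // invariant: elements with index > i have been locked in place.
        let index = rng.gen_range(0u32..(i as u32 + 1));
        items.swap(i, index as usize);
    }
}

fn main() {
    let seed = [7u8; 32]; // arbitrary fixed seed, for illustration only
    let mut a: Vec<u32> = (0..10).collect();
    let mut b: Vec<u32> = (0..10).collect();
    fisher_yates_shuffle(&mut ChaCha20Rng::from_seed(seed), &mut a[..]);
    fisher_yates_shuffle(&mut ChaCha20Rng::from_seed(seed), &mut b[..]);
    assert_eq!(a, b); // same seed, same permutation on every node
}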
@@ -111,6 +119,11 @@ impl SessionGridTopology { Some(grid_subset) } + + /// Tells if a given peer id is validator in a session + pub fn is_validator(&self, peer: &PeerId) -> bool { + self.peer_ids.contains(peer) + } } struct MatrixNeighbors { @@ -273,6 +286,11 @@ impl SessionGridTopologyEntry { pub fn get(&self) -> &SessionGridTopology { &self.topology } + + /// Tells if a given peer id is validator in a session + pub fn is_validator(&self, peer: &PeerId) -> bool { + self.topology.is_validator(peer) + } } /// A set of topologies indexed by session @@ -347,6 +365,7 @@ impl Default for SessionBoundGridTopologyStorage { topology: SessionGridTopology { shuffled_indices: Vec::new(), canonical_shuffling: Vec::new(), + peer_ids: Default::default(), }, local_neighbors: GridNeighbors::empty(), }, diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 9aeeb98ea9d6f6217668aa5dc3376cadc3acfb53..ae72230ee43d506549482e9a063aef5412b1283c 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -253,29 +253,29 @@ impl View { /// A protocol-versioned type. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Versioned { +pub enum Versioned { /// V1 type. V1(V1), /// V2 type. V2(V2), - /// VStaging type - VStaging(VStaging), + /// V3 type + V3(V3), } -impl Versioned<&'_ V1, &'_ V2, &'_ VStaging> { +impl Versioned<&'_ V1, &'_ V2, &'_ V3> { /// Convert to a fully-owned version of the message. - pub fn clone_inner(&self) -> Versioned { + pub fn clone_inner(&self) -> Versioned { match *self { Versioned::V1(inner) => Versioned::V1(inner.clone()), Versioned::V2(inner) => Versioned::V2(inner.clone()), - Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()), + Versioned::V3(inner) => Versioned::V3(inner.clone()), } } } /// All supported versions of the validation protocol message. pub type VersionedValidationProtocol = - Versioned; + Versioned; impl From for VersionedValidationProtocol { fn from(v1: v1::ValidationProtocol) -> Self { @@ -289,9 +289,9 @@ impl From for VersionedValidationProtocol { } } -impl From for VersionedValidationProtocol { - fn from(vstaging: vstaging::ValidationProtocol) -> Self { - VersionedValidationProtocol::VStaging(vstaging) +impl From for VersionedValidationProtocol { + fn from(v3: v3::ValidationProtocol) -> Self { + VersionedValidationProtocol::V3(v3) } } @@ -317,7 +317,7 @@ macro_rules! impl_versioned_full_protocol_from { match versioned_from { Versioned::V1(x) => Versioned::V1(x.into()), Versioned::V2(x) => Versioned::V2(x.into()), - Versioned::VStaging(x) => Versioned::VStaging(x.into()), + Versioned::V3(x) => Versioned::V3(x.into()), } } } @@ -331,7 +331,7 @@ macro_rules! impl_versioned_try_from { $out:ty, $v1_pat:pat => $v1_out:expr, $v2_pat:pat => $v2_out:expr, - $vstaging_pat:pat => $vstaging_out:expr + $v3_pat:pat => $v3_out:expr ) => { impl TryFrom<$from> for $out { type Error = crate::WrongVariant; @@ -341,7 +341,7 @@ macro_rules! impl_versioned_try_from { match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)), Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out)), - Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)), + Versioned::V3($v3_pat) => Ok(Versioned::V3($v3_out)), _ => Err(crate::WrongVariant), } } @@ -355,8 +355,7 @@ macro_rules! 
impl_versioned_try_from { match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())), Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out.clone())), - Versioned::VStaging($vstaging_pat) => - Ok(Versioned::VStaging($vstaging_out.clone())), + Versioned::V3($v3_pat) => Ok(Versioned::V3($v3_out.clone())), _ => Err(crate::WrongVariant), } } @@ -368,7 +367,7 @@ macro_rules! impl_versioned_try_from { pub type BitfieldDistributionMessage = Versioned< v1::BitfieldDistributionMessage, v2::BitfieldDistributionMessage, - vstaging::BitfieldDistributionMessage, + v3::BitfieldDistributionMessage, >; impl_versioned_full_protocol_from!( BitfieldDistributionMessage, @@ -380,14 +379,14 @@ impl_versioned_try_from!( BitfieldDistributionMessage, v1::ValidationProtocol::BitfieldDistribution(x) => x, v2::ValidationProtocol::BitfieldDistribution(x) => x, - vstaging::ValidationProtocol::BitfieldDistribution(x) => x + v3::ValidationProtocol::BitfieldDistribution(x) => x ); /// Version-annotated messages used by the statement distribution subsystem. pub type StatementDistributionMessage = Versioned< v1::StatementDistributionMessage, v2::StatementDistributionMessage, - vstaging::StatementDistributionMessage, + v3::StatementDistributionMessage, >; impl_versioned_full_protocol_from!( StatementDistributionMessage, @@ -399,14 +398,14 @@ impl_versioned_try_from!( StatementDistributionMessage, v1::ValidationProtocol::StatementDistribution(x) => x, v2::ValidationProtocol::StatementDistribution(x) => x, - vstaging::ValidationProtocol::StatementDistribution(x) => x + v3::ValidationProtocol::StatementDistribution(x) => x ); /// Version-annotated messages used by the approval distribution subsystem. pub type ApprovalDistributionMessage = Versioned< v1::ApprovalDistributionMessage, v2::ApprovalDistributionMessage, - vstaging::ApprovalDistributionMessage, + v3::ApprovalDistributionMessage, >; impl_versioned_full_protocol_from!( ApprovalDistributionMessage, @@ -418,7 +417,7 @@ impl_versioned_try_from!( ApprovalDistributionMessage, v1::ValidationProtocol::ApprovalDistribution(x) => x, v2::ValidationProtocol::ApprovalDistribution(x) => x, - vstaging::ValidationProtocol::ApprovalDistribution(x) => x + v3::ValidationProtocol::ApprovalDistribution(x) => x ); @@ -426,7 +425,7 @@ impl_versioned_try_from!( pub type GossipSupportNetworkMessage = Versioned< v1::GossipSupportNetworkMessage, v2::GossipSupportNetworkMessage, - vstaging::GossipSupportNetworkMessage, + v3::GossipSupportNetworkMessage, >; // This is a void enum placeholder, so never gets sent over the wire. @@ -871,19 +870,17 @@ pub mod v2 { } } -/// vstaging network protocol types, intended to become v3. -/// Initial purpose is for chaning ApprovalDistributionMessage to -/// include more than one assignment in the message. -pub mod vstaging { +/// v3 network protocol types. +/// Purpose is for chaning ApprovalDistributionMessage to +/// include more than one assignment and approval in a message. +pub mod v3 { use parity_scale_codec::{Decode, Encode}; - use polkadot_node_primitives::approval::{ - v1::IndirectSignedApprovalVote, - v2::{CandidateBitfield, IndirectAssignmentCertV2}, + use polkadot_node_primitives::approval::v2::{ + CandidateBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2, }; - /// This parts of the protocol did not change from v2, so just alias them in vstaging, - /// no reason why they can't be change untill vstaging becomes v3 and is released. + /// This parts of the protocol did not change from v2, so just alias them in v3. 
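To make the `VStaging` to `V3` rename concrete: the `Versioned` wrapper lets a subsystem build one payload and ship it under whichever protocol version each peer negotiated, and v3's defining change is that approvals (and assignments) travel as a batch instead of one per message. A reduced sketch with stand-in payload types rather than the real `ValidationProtocol` messages:

// Stand-ins for the per-version payloads; the real types are the v1/v2/v3
// `ValidationProtocol` messages.
#[allow(dead_code)] // v1/v2 variants are shown for shape only
enum Versioned<V1, V2, V3> {
    V1(V1),
    V2(V2),
    V3(V3),
}

type SingleApproval = u32; // placeholder for one indirect signed approval vote
type BatchedApprovals = Vec<u32>; // v3 coalesces several approvals into one message

type ApprovalMsg = Versioned<SingleApproval, SingleApproval, BatchedApprovals>;

// One wire message carrying the whole batch, in the spirit of
// `protocol_v3::ApprovalDistributionMessage::Approvals(Vec<..>)`.
fn batch_for_v3(approvals: Vec<u32>) -> ApprovalMsg {
    Versioned::V3(approvals)
}

fn main() {
    match batch_for_v3(vec![1, 2, 3]) {
        Versioned::V3(batch) => assert_eq!(batch.len(), 3),
        Versioned::V1(_) | Versioned::V2(_) => unreachable!("only v3 is built here"),
    }
}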
pub use super::v2::{ declare_signature_payload, BackedCandidateAcknowledgement, BackedCandidateManifest, BitfieldDistributionMessage, GossipSupportNetworkMessage, StatementDistributionMessage, @@ -903,7 +900,7 @@ pub mod vstaging { Assignments(Vec<(IndirectAssignmentCertV2, CandidateBitfield)>), /// Approvals for candidates in some recent, unfinalized block. #[codec(index = 1)] - Approvals(Vec), + Approvals(Vec), } /// All network messages on the validation peer-set. diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index eb483dec9709a249b31e150f846085759b21319e..cb329607ad6127024af9e6d6bc3c75c7e813c0e3 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -21,6 +21,7 @@ use polkadot_primitives::Hash; use sc_network::{ config::{NonDefaultSetConfig, SetConfig}, types::ProtocolName, + NotificationService, }; use std::{ collections::{hash_map::Entry, HashMap}, @@ -68,47 +69,59 @@ impl PeerSet { self, is_authority: IsAuthority, peerset_protocol_names: &PeerSetProtocolNames, - ) -> NonDefaultSetConfig { + ) -> (NonDefaultSetConfig, (PeerSet, Box)) { // Networking layer relies on `get_main_name()` being the main name of the protocol // for peersets and connection management. let protocol = peerset_protocol_names.get_main_name(self); - let fallback_names = PeerSetProtocolNames::get_fallback_names(self); + let fallback_names = PeerSetProtocolNames::get_fallback_names( + self, + &peerset_protocol_names.genesis_hash, + peerset_protocol_names.fork_id.as_deref(), + ); let max_notification_size = self.get_max_notification_size(is_authority); match self { - PeerSet::Validation => NonDefaultSetConfig { - notifications_protocol: protocol, - fallback_names, - max_notification_size, - handshake: None, - set_config: SetConfig { - // we allow full nodes to connect to validators for gossip - // to ensure any `MIN_GOSSIP_PEERS` always include reserved peers - // we limit the amount of non-reserved slots to be less - // than `MIN_GOSSIP_PEERS` in total - in_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1, - out_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1, - reserved_nodes: Vec::new(), - non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept, - }, + PeerSet::Validation => { + let (config, notification_service) = NonDefaultSetConfig::new( + protocol, + fallback_names, + max_notification_size, + None, + SetConfig { + // we allow full nodes to connect to validators for gossip + // to ensure any `MIN_GOSSIP_PEERS` always include reserved peers + // we limit the amount of non-reserved slots to be less + // than `MIN_GOSSIP_PEERS` in total + in_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1, + out_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1, + reserved_nodes: Vec::new(), + non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept, + }, + ); + + (config, (PeerSet::Validation, notification_service)) }, - PeerSet::Collation => NonDefaultSetConfig { - notifications_protocol: protocol, - fallback_names, - max_notification_size, - handshake: None, - set_config: SetConfig { - // Non-authority nodes don't need to accept incoming connections on this peer - // set: - in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 }, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: if is_authority == IsAuthority::Yes { - sc_network::config::NonReservedPeerMode::Accept - } else { - sc_network::config::NonReservedPeerMode::Deny + PeerSet::Collation => { + let (config, 
notification_service) = NonDefaultSetConfig::new( + protocol, + fallback_names, + max_notification_size, + None, + SetConfig { + // Non-authority nodes don't need to accept incoming connections on this + // peer set: + in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 }, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: if is_authority == IsAuthority::Yes { + sc_network::config::NonReservedPeerMode::Accept + } else { + sc_network::config::NonReservedPeerMode::Deny + }, }, - }, + ); + + (config, (PeerSet::Collation, notification_service)) }, } } @@ -118,15 +131,8 @@ impl PeerSet { /// Networking layer relies on `get_main_version()` being the version /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { - #[cfg(not(feature = "network-protocol-staging"))] match self { - PeerSet::Validation => ValidationVersion::V2.into(), - PeerSet::Collation => CollationVersion::V2.into(), - } - - #[cfg(feature = "network-protocol-staging")] - match self { - PeerSet::Validation => ValidationVersion::VStaging.into(), + PeerSet::Validation => ValidationVersion::V3.into(), PeerSet::Collation => CollationVersion::V2.into(), } } @@ -154,7 +160,7 @@ impl PeerSet { Some("validation/1") } else if version == ValidationVersion::V2.into() { Some("validation/2") - } else if version == ValidationVersion::VStaging.into() { + } else if version == ValidationVersion::V3.into() { Some("validation/3") } else { None @@ -204,7 +210,7 @@ impl IndexMut for PerPeerSet { pub fn peer_sets_info( is_authority: IsAuthority, peerset_protocol_names: &PeerSetProtocolNames, -) -> Vec { +) -> Vec<(NonDefaultSetConfig, (PeerSet, Box))> { PeerSet::iter() .map(|s| s.get_info(is_authority, &peerset_protocol_names)) .collect() @@ -227,9 +233,10 @@ pub enum ValidationVersion { V1 = 1, /// The second version. V2 = 2, - /// The staging version to gather changes - /// that before the release become v3. - VStaging = 3, + /// The third version where changes to ApprovalDistributionMessage had been made. + /// The changes are translatable to V2 format untill assignments v2 and approvals + /// coalescing is enabled through a runtime upgrade. + V3 = 3, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. @@ -286,10 +293,12 @@ impl From for ProtocolVersion { } /// On the wire protocol name to [`PeerSet`] mapping. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct PeerSetProtocolNames { protocols: HashMap, names: HashMap<(PeerSet, ProtocolVersion), ProtocolName>, + genesis_hash: Hash, + fork_id: Option, } impl PeerSetProtocolNames { @@ -324,7 +333,7 @@ impl PeerSetProtocolNames { } Self::register_legacy_protocol(&mut protocols, protocol); } - Self { protocols, names } + Self { protocols, names, genesis_hash, fork_id: fork_id.map(|fork_id| fork_id.into()) } } /// Helper function to register main protocol. @@ -428,9 +437,30 @@ impl PeerSetProtocolNames { } /// Get the protocol fallback names. Currently only holds the legacy name - /// for `LEGACY_PROTOCOL_VERSION` = 1. - fn get_fallback_names(protocol: PeerSet) -> Vec { - std::iter::once(Self::get_legacy_name(protocol)).collect() + /// for `LEGACY_PROTOCOL_VERSION` = 1 and v2 for validation. 
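A note on what the fallback list is for: notification protocols are negotiated by name, the main (v3) name is tried first, and the fallbacks are then tried in the order given, which is why the v2 name has to sit ahead of the legacy v1 name in the function below. An illustrative-only sketch of that ordering; the name strings are simplified placeholders, not the exact on-the-wire format:

fn validation_protocol_names(genesis_hash: &str) -> (String, Vec<String>) {
    // Main name advertised by the node: the newest supported version.
    let main = format!("/{genesis_hash}/validation/3");
    // Fallbacks, tried in order when the peer does not speak the main protocol.
    let fallbacks = vec![
        format!("/{genesis_hash}/validation/2"), // preferred fallback: v2
        "/polkadot/validation/1".to_string(),    // legacy name, tried last
    ];
    (main, fallbacks)
}

fn main() {
    let (main, fallbacks) = validation_protocol_names("genesis");
    assert_eq!(main, "/genesis/validation/3");
    assert_eq!(fallbacks[0], "/genesis/validation/2"); // v2 sits ahead of the legacy name
}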
+ fn get_fallback_names( + protocol: PeerSet, + genesis_hash: &Hash, + fork_id: Option<&str>, + ) -> Vec { + let mut fallbacks = vec![Self::get_legacy_name(protocol)]; + match protocol { + PeerSet::Validation => { + // Fallbacks are tried one by one, till one matches so push v2 at the top, so + // that it is used ahead of the legacy one(v1). + fallbacks.insert( + 0, + Self::generate_name( + genesis_hash, + fork_id, + protocol, + ValidationVersion::V2.into(), + ), + ) + }, + PeerSet::Collation => {}, + }; + fallbacks } } diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index 96f7adeb29ba02987857fbac1eeed7b6770d9a35..2df3021343df008c36ff00ce539d86adb86448d2 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -248,8 +248,8 @@ impl Protocol { name, fallback_names, max_request_size: 1_000, - /// Responses are just confirmation, in essence not even a bit. So 100 seems - /// plenty. + // Responses are just confirmation, in essence not even a bit. So 100 seems + // plenty. max_response_size: 100, request_timeout: DISPUTE_REQUEST_TIMEOUT, inbound_queue: tx, diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index bf516e7b7ba9b8b89dbea715bef9bf6af2ff5d36..85d2c75aa797826959a0b5ae447bd2aa82164106 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" futures-timer = "3.0.2" @@ -16,7 +19,6 @@ sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-subsystem-types = { path = "../../subsystem-types" } polkadot-node-network-protocol = { path = "../protocol" } arrayvec = "0.7.4" indexmap = "1.9.1" diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs index d9866af1ee233627911fc2a6b2ff2e5a63a3d93e..93f97fe1dd6ede274c4109f4ae7a74765d9ee649 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -22,8 +22,8 @@ use polkadot_node_network_protocol::{ grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, - v2 as protocol_v2, vstaging as protocol_vstaging, IfDisconnected, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v2 as protocol_v2, v3 as protocol_v3, IfDisconnected, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, @@ -1075,7 +1075,7 @@ async fn circulate_statement<'a, Context>( }) .partition::, _>(|(_, _, version)| match version { ValidationVersion::V1 => true, - ValidationVersion::V2 | ValidationVersion::VStaging => false, + ValidationVersion::V2 | ValidationVersion::V3 => false, }); // partition is handy here but not if we add more protocol versions let payload = 
v1_statement_message(relay_parent, stored.statement.clone(), metrics); @@ -1108,8 +1108,7 @@ async fn circulate_statement<'a, Context>( .collect(); let v2_peers_to_send = filter_by_peer_version(&peers_to_send, ValidationVersion::V2.into()); - let vstaging_to_send = - filter_by_peer_version(&peers_to_send, ValidationVersion::VStaging.into()); + let v3_to_send = filter_by_peer_version(&peers_to_send, ValidationVersion::V3.into()); if !v2_peers_to_send.is_empty() { gum::trace!( @@ -1126,17 +1125,17 @@ async fn circulate_statement<'a, Context>( .await; } - if !vstaging_to_send.is_empty() { + if !v3_to_send.is_empty() { gum::trace!( target: LOG_TARGET, - ?vstaging_to_send, + ?v3_to_send, ?relay_parent, statement = ?stored.statement, - "Sending statement to vstaging peers", + "Sending statement to v3 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_to_send, - compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(), + v3_to_send, + compatible_v1_message(ValidationVersion::V3, payload.clone()).into(), )) .await; } @@ -1472,10 +1471,8 @@ async fn handle_incoming_message<'a, Context>( let message = match message { Versioned::V1(m) => m, Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(m)) | - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility( - m, - )) => m, - Versioned::V2(_) | Versioned::VStaging(_) => { + Versioned::V3(protocol_v3::StatementDistributionMessage::V1Compatibility(m)) => m, + Versioned::V2(_) | Versioned::V3(_) => { // The higher-level subsystem code is supposed to filter out // all non v1 messages. gum::debug!( @@ -2201,8 +2198,7 @@ fn compatible_v1_message( ValidationVersion::V1 => Versioned::V1(message), ValidationVersion::V2 => Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(message)), - ValidationVersion::VStaging => Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(message), - ), + ValidationVersion::V3 => + Versioned::V3(protocol_v3::StatementDistributionMessage::V1Compatibility(message)), } } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index ca3038f9b3f3a4250b5337e70f9e009699afbc3b..8ac9895ec5ad27ea271d07059612e85806a334b9 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -43,8 +43,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers::mock::{make_ferdie_keystore, new_leaf}; use polkadot_primitives::{ - ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, SessionInfo, - ValidationCode, + vstaging::NodeFeatures, ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, + SessionInfo, ValidationCode, }; use polkadot_primitives_test_helpers::{ dummy_committed_candidate_receipt, dummy_hash, AlwaysZeroRng, @@ -834,6 +834,15 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { } ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + // notify of peers and view handle .send(FromOrchestra::Communication { @@ -1074,6 +1083,15 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( } ); + assert_matches!( + handle.recv().await, + 
AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + // notify of peers and view handle .send(FromOrchestra::Communication { @@ -1604,6 +1622,15 @@ fn delay_reputation_changes() { } ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + // notify of peers and view handle .send(FromOrchestra::Communication { @@ -2084,6 +2111,15 @@ fn share_prioritizes_backing_group() { } ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + // notify of dummy peers and view for (peer, pair) in dummy_peers.clone().into_iter().zip(dummy_pairs) { handle @@ -2406,6 +2442,15 @@ fn peer_cant_flood_with_large_statements() { } ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + // notify of peers and view handle .send(FromOrchestra::Communication { @@ -2631,6 +2676,14 @@ fn handle_multiple_seconded_statements() { } ); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); // notify of peers and view for peer in all_peers.iter() { handle diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index 0a80c1491a901068a0838bfb69463b699e0c2c97..a1ba1137b5acf119edbd9d228db3fa41e57afbc3 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -19,7 +19,7 @@ //! This is responsible for distributing signed statements about candidate //! validity among validators. 
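The test additions in these hunks all share one shape: after session info and executor parameters, the mocked runtime API now also receives a `NodeFeatures` request, and the harness answers it through the oneshot sender carried inside the request. A condensed, self-contained sketch of that request-and-answer pattern using stand-in types (the real tests match on the full `AllMessages` tree with `assert_matches!`):

use futures::channel::oneshot;

#[derive(Debug, PartialEq)]
struct NodeFeatures(Vec<u8>); // stand-in for the real node-features bitfield

enum RuntimeApiRequest {
    // (session index, answer channel)
    NodeFeatures(u32, oneshot::Sender<Result<NodeFeatures, ()>>),
}

fn main() {
    let (tx, rx) = oneshot::channel();
    // "Subsystem" side: issue the request for the current session.
    let request = RuntimeApiRequest::NodeFeatures(1, tx);

    // "Test harness" side: pattern-match the request and answer over the embedded sender,
    // just like `si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap()` in the hunks above.
    match request {
        RuntimeApiRequest::NodeFeatures(_session, si_tx) =>
            si_tx.send(Ok(NodeFeatures(Vec::new()))).unwrap(),
    }

    futures::executor::block_on(async {
        assert_eq!(rx.await.unwrap(), Ok(NodeFeatures(Vec::new())));
    });
}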
-// #![deny(unused_crate_dependencies)] +#![deny(unused_crate_dependencies)] #![warn(missing_docs)] use error::{log_error, FatalResult}; @@ -27,7 +27,7 @@ use std::time::Duration; use polkadot_node_network_protocol::{ request_response::{v1 as request_v1, v2::AttestedCandidateRequest, IncomingRequestReceiver}, - v2 as protocol_v2, vstaging as protocol_vstaging, Versioned, + v2 as protocol_v2, v3 as protocol_v3, Versioned, }; use polkadot_node_primitives::StatementWithPVD; use polkadot_node_subsystem::{ @@ -400,11 +400,11 @@ impl StatementDistributionSubsystem { Versioned::V2( protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) | - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + Versioned::V3( + protocol_v3::StatementDistributionMessage::V1Compatibility(_), ) => VersionTarget::Legacy, Versioned::V1(_) => VersionTarget::Legacy, - Versioned::V2(_) | Versioned::VStaging(_) => VersionTarget::Current, + Versioned::V2(_) | Versioned::V3(_) => VersionTarget::Current, }, _ => VersionTarget::Both, }; diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 6f39a5c504d078608038cfb29345b85f07f0835c..2f06d3685b8149416960da4c94ed4b52f06d2c5b 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -29,8 +29,7 @@ use polkadot_node_network_protocol::{ MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, v2::{self as protocol_v2, StatementFilter}, - vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, - Versioned, View, + v3 as protocol_v3, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, @@ -142,8 +141,27 @@ struct PerRelayParentState { session: SessionIndex, } +impl PerRelayParentState { + fn active_validator_state(&self) -> Option<&ActiveValidatorState> { + self.local_validator.as_ref().and_then(|local| local.active.as_ref()) + } + + fn active_validator_state_mut(&mut self) -> Option<&mut ActiveValidatorState> { + self.local_validator.as_mut().and_then(|local| local.active.as_mut()) + } +} + // per-relay-parent local validator state. struct LocalValidatorState { + // the grid-level communication at this relay-parent. + grid_tracker: GridTracker, + // additional fields in case local node is an active validator. + active: Option, + // local index actually exists in case node is inactive validator, however, + // it's not needed outside of `build_session_topology`, where it's known. +} + +struct ActiveValidatorState { // The index of the validator. index: ValidatorIndex, // our validator group @@ -152,8 +170,14 @@ struct LocalValidatorState { assignment: Option, // the 'direct-in-group' communication at this relay-parent. cluster_tracker: ClusterTracker, - // the grid-level communication at this relay-parent. - grid_tracker: GridTracker, +} + +#[derive(Debug, Copy, Clone)] +enum LocalValidatorIndex { + // Local node is an active validator. + Active(ValidatorIndex), + // Local node is not in active validator set. 
+ Inactive, } #[derive(Debug)] @@ -164,7 +188,7 @@ struct PerSessionState { // is only `None` in the time between seeing a session and // getting the topology from the gossip-support subsystem grid_view: Option, - local_validator: Option, + local_validator: Option, } impl PerSessionState { @@ -178,15 +202,10 @@ impl PerSessionState { let local_validator = polkadot_node_subsystem_util::signing_key_and_index( session_info.validators.iter(), keystore, - ); + ) + .map(|(_, index)| LocalValidatorIndex::Active(index)); - PerSessionState { - session_info, - groups, - authority_lookup, - grid_view: None, - local_validator: local_validator.map(|(_key, index)| index), - } + PerSessionState { session_info, groups, authority_lookup, grid_view: None, local_validator } } fn supply_topology( @@ -204,6 +223,16 @@ impl PerSessionState { ); self.grid_view = Some(grid_view); + if local_index.is_some() { + self.local_validator.get_or_insert(LocalValidatorIndex::Inactive); + } + } + + /// Returns `true` if local is neither active or inactive validator node. + /// + /// `false` is also returned if session topology is not known yet. + fn is_not_validator(&self) -> bool { + self.grid_view.is_some() && self.local_validator.is_none() } } @@ -336,7 +365,7 @@ pub(crate) async fn handle_network_update( gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected"); let versioned_protocol = if protocol_version != ValidationVersion::V2.into() && - protocol_version != ValidationVersion::VStaging.into() + protocol_version != ValidationVersion::V3.into() { return } else { @@ -402,28 +431,28 @@ pub(crate) async fn handle_network_update( net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) | - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + net_protocol::StatementDistributionMessage::V3( + protocol_v3::StatementDistributionMessage::V1Compatibility(_), ) => return, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) | - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + net_protocol::StatementDistributionMessage::V3( + protocol_v3::StatementDistributionMessage::Statement(relay_parent, statement), ) => handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation) .await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner), ) | - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), + net_protocol::StatementDistributionMessage::V3( + protocol_v3::StatementDistributionMessage::BackedCandidateManifest(inner), ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner), ) | - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), + net_protocol::StatementDistributionMessage::V3( + protocol_v3::StatementDistributionMessage::BackedCandidateKnown(inner), ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => @@ -554,13 +583,17 @@ pub(crate) 
async fn handle_active_leaves_update( .expect("either existed or just inserted; qed"); let local_validator = per_session.local_validator.and_then(|v| { - find_local_validator_state( - v, - &per_session.groups, - &availability_cores, - &group_rotation_info, - seconding_limit, - ) + if let LocalValidatorIndex::Active(idx) = v { + find_active_validator_state( + idx, + &per_session.groups, + &availability_cores, + &group_rotation_info, + seconding_limit, + ) + } else { + Some(LocalValidatorState { grid_tracker: GridTracker::default(), active: None }) + } }); state.per_relay_parent.insert( @@ -607,7 +640,7 @@ pub(crate) async fn handle_active_leaves_update( Ok(()) } -fn find_local_validator_state( +fn find_active_validator_state( validator_index: ValidatorIndex, groups: &Groups, availability_cores: &[CoreState], @@ -628,11 +661,13 @@ fn find_local_validator_state( let group_validators = groups.get(our_group)?.to_owned(); Some(LocalValidatorState { - index: validator_index, - group: our_group, - assignment: para, - cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) - .expect("group is non-empty because we are in it; qed"), + active: Some(ActiveValidatorState { + index: validator_index, + group: our_group, + assignment: para, + cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) + .expect("group is non-empty because we are in it; qed"), + }), grid_tracker: GridTracker::default(), }) } @@ -725,13 +760,17 @@ async fn send_peer_messages_for_relay_parent( for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session_state.authority_lookup.get(a) }) { - if let Some(local_validator_state) = relay_parent_state.local_validator.as_mut() { + if let Some(active) = relay_parent_state + .local_validator + .as_mut() + .and_then(|local| local.active.as_mut()) + { send_pending_cluster_statements( ctx, relay_parent, &(peer, peer_data.protocol_version), validator_id, - &mut local_validator_state.cluster_tracker, + &mut active.cluster_tracker, &state.candidates, &relay_parent_state.statement_store, ) @@ -766,13 +805,13 @@ fn pending_statement_network_message( protocol_v2::StatementDistributionMessage::Statement(relay_parent, signed) }) .map(|msg| (vec![peer.0], Versioned::V2(msg).into())), - ValidationVersion::VStaging => statement_store + ValidationVersion::V3 => statement_store .validator_statement(originator, compact) .map(|s| s.as_unchecked().clone()) .map(|signed| { - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) + protocol_v3::StatementDistributionMessage::Statement(relay_parent, signed) }) - .map(|msg| (vec![peer.0], Versioned::VStaging(msg).into())), + .map(|msg| (vec![peer.0], Versioned::V3(msg).into())), ValidationVersion::V1 => { gum::error!( target: LOG_TARGET, @@ -905,10 +944,10 @@ async fn send_pending_grid_messages( ) .into(), )), - ValidationVersion::VStaging => messages.push(( + ValidationVersion::V3 => messages.push(( vec![peer_id.0], - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + Versioned::V3( + protocol_v3::StatementDistributionMessage::BackedCandidateManifest( manifest, ), ) @@ -920,7 +959,7 @@ async fn send_pending_grid_messages( "Bug ValidationVersion::V1 should not be used in statement-distribution v2, legacy should have handled this" ); - } + }, }; }, grid::ManifestKind::Acknowledgement => { @@ -1009,7 +1048,7 @@ pub(crate) async fn share_local_statement( }; let (local_index, local_assignment, local_group) = - match 
per_relay_parent.local_validator.as_ref() { + match per_relay_parent.active_validator_state() { None => return Err(JfyiError::InvalidShare), Some(l) => (l.index, l.assignment, l.group), }; @@ -1086,7 +1125,7 @@ pub(crate) async fn share_local_statement( } { - let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); + let l = per_relay_parent.active_validator_state_mut().expect("checked above; qed"); l.cluster_tracker.note_issued(local_index, compact_statement.payload().clone()); } @@ -1173,31 +1212,41 @@ async fn circulate_statement( // We're not meant to circulate statements in the cluster until we have the confirmed // candidate. - let cluster_relevant = Some(local_validator.group) == statement_group; - let cluster_targets = if is_confirmed && cluster_relevant { - Some( - local_validator - .cluster_tracker - .targets() - .iter() - .filter(|&&v| { - local_validator + // + // Cluster is only relevant if local node is an active validator. + let (cluster_relevant, cluster_targets, all_cluster_targets) = local_validator + .active + .as_mut() + .map(|active| { + let cluster_relevant = Some(active.group) == statement_group; + let cluster_targets = if is_confirmed && cluster_relevant { + Some( + active .cluster_tracker - .can_send(v, originator, compact_statement.clone()) - .is_ok() - }) - .filter(|&v| v != &local_validator.index) - .map(|v| (*v, DirectTargetKind::Cluster)), - ) - } else { - None - }; + .targets() + .iter() + .filter(|&&v| { + active + .cluster_tracker + .can_send(v, originator, compact_statement.clone()) + .is_ok() + }) + .filter(|&v| v != &active.index) + .map(|v| (*v, DirectTargetKind::Cluster)), + ) + } else { + None + }; + let all_cluster_targets = active.cluster_tracker.targets(); + (cluster_relevant, cluster_targets, all_cluster_targets) + }) + .unwrap_or((false, None, &[])); let grid_targets = local_validator .grid_tracker .direct_statement_targets(&per_session.groups, originator, &compact_statement) .into_iter() - .filter(|v| !cluster_relevant || !local_validator.cluster_tracker.targets().contains(v)) + .filter(|v| !cluster_relevant || !all_cluster_targets.contains(v)) .map(|v| (v, DirectTargetKind::Grid)); let targets = cluster_targets @@ -1229,18 +1278,17 @@ async fn circulate_statement( match kind { DirectTargetKind::Cluster => { + let active = local_validator + .active + .as_mut() + .expect("cluster target means local is active validator; qed"); + // At this point, all peers in the cluster should 'know' // the candidate, so we don't expect for this to fail. - if let Ok(()) = local_validator.cluster_tracker.can_send( - target, - originator, - compact_statement.clone(), - ) { - local_validator.cluster_tracker.note_sent( - target, - originator, - compact_statement.clone(), - ); + if let Ok(()) = + active.cluster_tracker.can_send(target, originator, compact_statement.clone()) + { + active.cluster_tracker.note_sent(target, originator, compact_statement.clone()); statement_to_peers.push(peer_id); } }, @@ -1259,8 +1307,8 @@ async fn circulate_statement( let statement_to_v2_peers = filter_by_peer_version(&statement_to_peers, ValidationVersion::V2.into()); - let statement_to_vstaging_peers = - filter_by_peer_version(&statement_to_peers, ValidationVersion::VStaging.into()); + let statement_to_v3_peers = + filter_by_peer_version(&statement_to_peers, ValidationVersion::V3.into()); // ship off the network messages to the network bridge. 
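Most of the churn in this file follows from one structural change: `LocalValidatorState` now always owns the grid tracker, while the validator index, group, assignment and cluster tracker move behind `active: Option<ActiveValidatorState>`, so an inactive validator keeps grid-level state but has no cluster. A reduced sketch of that shape and of the accessor the surrounding code leans on, with stand-in types:

#![allow(dead_code)] // stand-in fields exist only to mirror the shape of the real state

struct GridTracker;
struct ClusterTracker;

// Cluster-level bookkeeping only exists for active validators this session.
struct ActiveValidatorState {
    index: u32,
    cluster_tracker: ClusterTracker,
}

// Grid-level bookkeeping exists for any validator appearing in the topology.
struct LocalValidatorState {
    grid_tracker: GridTracker,
    active: Option<ActiveValidatorState>,
}

struct PerRelayParentState {
    local_validator: Option<LocalValidatorState>,
}

impl PerRelayParentState {
    // Mirrors `active_validator_state()` from the patch: yields something only when the
    // node is a validator and active at this relay parent.
    fn active_validator_state(&self) -> Option<&ActiveValidatorState> {
        self.local_validator.as_ref().and_then(|local| local.active.as_ref())
    }
}

fn main() {
    let inactive = PerRelayParentState {
        local_validator: Some(LocalValidatorState { grid_tracker: GridTracker, active: None }),
    };
    // An inactive validator still takes part in grid distribution but has no cluster targets.
    assert!(inactive.active_validator_state().is_none());
}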
if !statement_to_v2_peers.is_empty() { @@ -1282,17 +1330,17 @@ async fn circulate_statement( .await; } - if !statement_to_vstaging_peers.is_empty() { + if !statement_to_v3_peers.is_empty() { gum::debug!( target: LOG_TARGET, ?compact_statement, n_peers = ?statement_to_peers.len(), - "Sending statement to vstaging peers", + "Sending statement to v3 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - statement_to_vstaging_peers, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + statement_to_v3_peers, + Versioned::V3(protocol_v3::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), )) @@ -1387,7 +1435,9 @@ async fn handle_incoming_statement( None => { // we shouldn't be receiving statements unless we're a validator // this session. - modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + if per_session.is_not_validator() { + modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + } return }, Some(l) => l, @@ -1402,73 +1452,81 @@ async fn handle_incoming_statement( }, }; - let cluster_sender_index = { + let (active, cluster_sender_index) = { // This block of code only returns `Some` when both the originator and // the sending peer are in the cluster. + let active = local_validator.active.as_mut(); - let allowed_senders = local_validator - .cluster_tracker - .senders_for_originator(statement.unchecked_validator_index()); + let allowed_senders = active + .as_ref() + .map(|active| { + active + .cluster_tracker + .senders_for_originator(statement.unchecked_validator_index()) + }) + .unwrap_or_default(); - allowed_senders + let idx = allowed_senders .iter() .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (*i, ad))) .filter(|(_, ad)| peer_state.is_authority(ad)) .map(|(i, _)| i) - .next() - }; - - let checked_statement = if let Some(cluster_sender_index) = cluster_sender_index { - match handle_cluster_statement( - relay_parent, - &mut local_validator.cluster_tracker, - per_relay_parent.session, - &per_session.session_info, - statement, - cluster_sender_index, - ) { - Ok(Some(s)) => s, - Ok(None) => return, - Err(rep) => { - modify_reputation(reputation, ctx.sender(), peer, rep).await; - return - }, - } - } else { - let grid_sender_index = local_validator - .grid_tracker - .direct_statement_providers( - &per_session.groups, - statement.unchecked_validator_index(), - statement.unchecked_payload(), - ) - .into_iter() - .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) - .filter(|(_, ad)| peer_state.is_authority(ad)) - .map(|(i, _)| i) .next(); + (active, idx) + }; - if let Some(grid_sender_index) = grid_sender_index { - match handle_grid_statement( + let checked_statement = + if let Some((active, cluster_sender_index)) = active.zip(cluster_sender_index) { + match handle_cluster_statement( relay_parent, - &mut local_validator.grid_tracker, + &mut active.cluster_tracker, per_relay_parent.session, - &per_session, + &per_session.session_info, statement, - grid_sender_index, + cluster_sender_index, ) { - Ok(s) => s, + Ok(Some(s)) => s, + Ok(None) => return, Err(rep) => { modify_reputation(reputation, ctx.sender(), peer, rep).await; return }, } } else { - // Not a cluster or grid peer. 
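The reputation changes here hinge on the new `is_not_validator` helper: an unexpected message only costs the sending peer reputation once the local node is known for certain not to be a validator in the session, which avoids penalising peers while the topology is still unknown or when the local node is merely inactive. A small sketch of that predicate with stand-in fields:

struct PerSessionState {
    grid_view: Option<()>,        // stand-in: the session topology, once received
    local_validator: Option<u32>, // stand-in: Some(..) if the local node has a validator entry
}

impl PerSessionState {
    fn is_not_validator(&self) -> bool {
        // Before the topology arrives we cannot tell, so this stays false and peers
        // are not penalised prematurely.
        self.grid_view.is_some() && self.local_validator.is_none()
    }
}

fn main() {
    let topology_unknown = PerSessionState { grid_view: None, local_validator: None };
    let known_non_validator = PerSessionState { grid_view: Some(()), local_validator: None };
    assert!(!topology_unknown.is_not_validator()); // stay lenient while uncertain
    assert!(known_non_validator.is_not_validator()); // safe to apply the reputation cost
}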
- modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - return - } - }; + let grid_sender_index = local_validator + .grid_tracker + .direct_statement_providers( + &per_session.groups, + statement.unchecked_validator_index(), + statement.unchecked_payload(), + ) + .into_iter() + .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next(); + + if let Some(grid_sender_index) = grid_sender_index { + match handle_grid_statement( + relay_parent, + &mut local_validator.grid_tracker, + per_relay_parent.session, + &per_session, + statement, + grid_sender_index, + ) { + Ok(s) => s, + Err(rep) => { + modify_reputation(reputation, ctx.sender(), peer, rep).await; + return + }, + } + } else { + // Not a cluster or grid peer. + modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } + }; let statement = checked_statement.payload().clone(); let originator_index = checked_statement.validator_index(); @@ -1536,7 +1594,7 @@ async fn handle_incoming_statement( local_validator.grid_tracker.learned_fresh_statement( &per_session.groups, session_topology, - local_validator.index, + originator_index, &statement, ); } @@ -1828,13 +1886,12 @@ async fn provide_candidate_to_grid( } let manifest_peers_v2 = filter_by_peer_version(&manifest_peers, ValidationVersion::V2.into()); - let manifest_peers_vstaging = - filter_by_peer_version(&manifest_peers, ValidationVersion::VStaging.into()); + let manifest_peers_v3 = filter_by_peer_version(&manifest_peers, ValidationVersion::V3.into()); if !manifest_peers_v2.is_empty() { gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = manifest_peers_v2.len(), "Sending manifest to v2 peers" ); @@ -1849,32 +1906,32 @@ async fn provide_candidate_to_grid( .await; } - if !manifest_peers_vstaging.is_empty() { + if !manifest_peers_v3.is_empty() { gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, - n_peers = manifest_peers_vstaging.len(), - "Sending manifest to vstaging peers" + local_validator = ?per_session.local_validator, + n_peers = manifest_peers_v3.len(), + "Sending manifest to v3 peers" ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - manifest_peers_vstaging, - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), - ) + manifest_peers_v3, + Versioned::V3(protocol_v3::StatementDistributionMessage::BackedCandidateManifest( + manifest, + )) .into(), )) .await; } let ack_peers_v2 = filter_by_peer_version(&ack_peers, ValidationVersion::V2.into()); - let ack_peers_vstaging = filter_by_peer_version(&ack_peers, ValidationVersion::VStaging.into()); + let ack_peers_v3 = filter_by_peer_version(&ack_peers, ValidationVersion::V3.into()); if !ack_peers_v2.is_empty() { gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = ack_peers_v2.len(), "Sending acknowledgement to v2 peers" ); @@ -1889,22 +1946,20 @@ async fn provide_candidate_to_grid( .await; } - if !ack_peers_vstaging.is_empty() { + if !ack_peers_v3.is_empty() { gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, - n_peers = ack_peers_vstaging.len(), - "Sending acknowledgement to vstaging peers" + local_validator = 
?per_session.local_validator, + n_peers = ack_peers_v3.len(), + "Sending acknowledgement to v3 peers" ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - ack_peers_vstaging, - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown( - acknowledgement, - ), - ) + ack_peers_v3, + Versioned::V3(protocol_v3::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement, + )) .into(), )) .await; @@ -2086,13 +2141,15 @@ async fn handle_incoming_manifest_common<'a, Context>( let local_validator = match relay_parent_state.local_validator.as_mut() { None => { - modify_reputation( - reputation, - ctx.sender(), - peer, - COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, - ) - .await; + if per_session.is_not_validator() { + modify_reputation( + reputation, + ctx.sender(), + peer, + COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, + ) + .await; + } return None }, Some(x) => x, @@ -2188,7 +2245,7 @@ async fn handle_incoming_manifest_common<'a, Context>( target: LOG_TARGET, ?candidate_hash, from = ?sender_index, - local_index = ?local_validator.index, + local_index = ?per_session.local_validator, ?manifest_kind, "immediate ack, known candidate" ); @@ -2232,8 +2289,8 @@ fn post_acknowledgement_statement_messages( ) .into(), )), - ValidationVersion::VStaging => messages.push(Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement( + ValidationVersion::V3 => messages.push(Versioned::V3( + protocol_v3::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), ) @@ -2380,9 +2437,9 @@ fn acknowledgement_and_statement_messages( let mut messages = match peer.1 { ValidationVersion::V2 => vec![(vec![peer.0], msg_v2.into())], - ValidationVersion::VStaging => vec![( + ValidationVersion::V3 => vec![( vec![peer.0], - Versioned::VStaging(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( + Versioned::V3(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( acknowledgement, )) .into(), @@ -2593,7 +2650,7 @@ async fn send_cluster_candidate_statements( Some(s) => s, }; - let local_group = match relay_parent_state.local_validator.as_mut() { + let local_group = match relay_parent_state.active_validator_state_mut() { None => return, Some(v) => v.group, }; @@ -2680,11 +2737,10 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St }) { // For cluster members, they haven't advertised any statements in particular, // but have surely sent us some. - if local_validator - .cluster_tracker - .knows_candidate(validator_id, identifier.candidate_hash) - { - return Some(StatementFilter::blank(local_validator.cluster_tracker.targets().len())) + if let Some(active) = local_validator.active.as_ref() { + if active.cluster_tracker.knows_candidate(validator_id, identifier.candidate_hash) { + return Some(StatementFilter::blank(active.cluster_tracker.targets().len())) + } } let filter = local_validator @@ -2715,7 +2771,11 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St } // don't require a backing threshold for cluster candidates. 
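The `require_backing` change condenses the policy into one expression: requests for candidates from the local node's own group (its cluster) need no backing threshold, candidates from other groups do, and a node without an active group treats every candidate as foreign. A reduced, stand-in version of that rule:

#[derive(Clone, Copy, PartialEq)]
struct GroupIndex(u32);

struct ActiveValidatorState { group: GroupIndex }
struct LocalValidatorState { active: Option<ActiveValidatorState> }

fn require_backing(local: &LocalValidatorState, candidate_group: GroupIndex) -> bool {
    // An inactive validator has no group of its own, so every candidate needs backing.
    local.active.as_ref().map_or(true, |active| active.group != candidate_group)
}

fn main() {
    let local = LocalValidatorState { active: Some(ActiveValidatorState { group: GroupIndex(0) }) };
    assert!(!require_backing(&local, GroupIndex(0))); // own cluster: no threshold required
    assert!(require_backing(&local, GroupIndex(1)));  // foreign group: threshold required

    let inactive = LocalValidatorState { active: None };
    assert!(require_backing(&inactive, GroupIndex(0)));
}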
- let require_backing = relay_parent_state.local_validator.as_ref()?.group != group_index; + let local_validator = relay_parent_state.local_validator.as_ref()?; + let require_backing = local_validator + .active + .as_ref() + .map_or(true, |active| active.group != group_index); Some(RequestProperties { unwanted_mask, @@ -2973,7 +3033,11 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { for v in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session.authority_lookup.get(a) }) { - if local_validator.cluster_tracker.can_request(v, *candidate_hash) { + if local_validator + .active + .as_ref() + .map_or(false, |active| active.cluster_tracker.can_request(v, *candidate_hash)) + { validator_id = Some(v); is_cluster = true; break @@ -3015,11 +3079,16 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { // Update bookkeeping about which statements peers have received. for statement in &statements { if is_cluster { - local_validator.cluster_tracker.note_sent( - validator_id, - statement.unchecked_validator_index(), - statement.unchecked_payload().clone(), - ); + local_validator + .active + .as_mut() + .expect("cluster peer means local is active validator; qed") + .cluster_tracker + .note_sent( + validator_id, + statement.unchecked_validator_index(), + statement.unchecked_payload().clone(), + ); } else { local_validator.grid_tracker.sent_or_received_direct_statement( &per_session.groups, diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 80dec1d75ab98be43b820bf7382b4436ef87c3b2..a9f5b537b3238ab8b13ff37a0e952b35ba92a066 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -23,7 +23,7 @@ fn share_seconded_circulated_to_cluster() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -34,7 +34,8 @@ fn share_seconded_circulated_to_cluster() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -52,7 +53,7 @@ fn share_seconded_circulated_to_cluster() { // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -130,7 +131,7 @@ fn cluster_valid_statement_before_seconded_ignored() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -139,12 +140,13 @@ fn cluster_valid_statement_before_seconded_ignored() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. 
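The test configurations above swap the old `local_validator: true` flag for a `LocalRole` value and make `group_index` optional, since an inactive validator has no backing group. The enum itself lives in the tests' shared mock module, which is not part of this hunk; the sketch below only illustrates the shape such a role type needs, and every variant name other than `Validator` is an assumption:

// Hypothetical sketch only; not the actual test-helper definition.
#[derive(Clone, Copy)]
enum LocalRole {
    Validator,         // active validator with a backing group
    InactiveValidator, // holds validator keys, but outside the active set: no group
    None,              // plain full node
}

fn group_index_for(role: LocalRole) -> Option<u32> {
    match role {
        LocalRole::Validator => Some(0), // placeholder group index
        LocalRole::InactiveValidator | LocalRole::None => None,
    }
}

fn main() {
    assert!(group_index_for(LocalRole::Validator).is_some());
    assert!(group_index_for(LocalRole::InactiveValidator).is_none());
    assert!(group_index_for(LocalRole::None).is_none());
}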
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer( &mut overseer, @@ -197,7 +199,7 @@ fn cluster_statement_bad_signature() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -206,12 +208,13 @@ fn cluster_statement_bad_signature() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -277,7 +280,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -286,13 +289,13 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. - let not_our_group = - if local_validator.group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; + let not_our_group = if local_group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; let that_group_validators = state.group_validators(not_our_group, false); let v_non = that_group_validators[0]; @@ -346,7 +349,7 @@ fn statement_from_non_cluster_originator_unexpected() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -355,12 +358,13 @@ fn statement_from_non_cluster_originator_unexpected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer(&mut overseer, peer_a.clone(), None).await; @@ -408,7 +412,7 @@ fn seconded_statement_leads_to_request() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -417,7 +421,8 @@ fn seconded_statement_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -432,7 +437,7 @@ fn seconded_statement_leads_to_request() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer( @@ -503,7 +508,7 @@ fn cluster_statements_shared_seconded_first() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -512,7 +517,8 @@ fn cluster_statements_shared_seconded_first() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -528,7 +534,7 @@ fn cluster_statements_shared_seconded_first() { // peer A is in group, no relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -624,7 +630,7 @@ fn cluster_accounts_for_implicit_view() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -634,7 +640,8 @@ fn cluster_accounts_for_implicit_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -651,7 +658,7 @@ fn cluster_accounts_for_implicit_view() { // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. 
{ - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -775,7 +782,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -784,7 +791,8 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -799,7 +807,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; { connect_peer( @@ -907,7 +915,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -916,7 +924,8 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -931,7 +940,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; { connect_peer( @@ -1048,7 +1057,7 @@ fn ensure_seconding_limit_is_respected() { let config = TestConfig { validator_count: 20, group_size: 4, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: Some(AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3, @@ -1060,7 +1069,8 @@ fn ensure_seconding_limit_is_respected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1092,7 +1102,7 @@ fn ensure_seconding_limit_is_respected() { let candidate_hash_2 = candidate_2.hash(); let candidate_hash_3 = candidate_3.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; // peers A,B,C are in group, have relay parent in view. 
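// For orientation when reading the grid tests below: `LocalRole::Validator` corresponds to the
// old `local_validator: true` configuration, `LocalRole::None` to `false`, and
// `LocalRole::InactiveValidator` is the new case of a node that holds an authority key but is
// not in the active validator set (see the enum in tests/mod.rs further down).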
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 5b1dabfc8a0e3a9690be131d5abb27cc78750866..aa1a473b833f4c4dd6a3c01cf80eff205e0b5e8e 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -29,7 +29,7 @@ fn backed_candidate_leads_to_advertisement() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -41,7 +41,10 @@ fn backed_candidate_leads_to_advertisement() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); + + let other_group = next_group_index(local_group_index, validator_count, group_size); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -55,13 +58,12 @@ fn backed_candidate_leads_to_advertisement() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); - let v_a = other_group_validators[0]; - let v_b = other_group_validators[1]; - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; + let local_group_validators = state.group_validators(local_group_index, true); + let other_group_validators = state.group_validators(other_group, true); + let v_a = local_group_validators[0]; + let v_b = local_group_validators[1]; + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. 
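// `next_group_index` is an existing helper in these tests that is not shown in this diff; from
// its usage here it is assumed to pick the following validator group, wrapping around, so that
// `other_group` always names a valid group different from the local one.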
@@ -219,7 +221,7 @@ fn backed_candidate_leads_to_advertisement() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -244,7 +246,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -256,9 +258,9 @@ fn received_advertisement_before_confirmation_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -273,12 +275,12 @@ fn received_advertisement_before_confirmation_leads_to_request() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - let target_group_validators = state.group_validators(other_group, true); - let v_a = other_group_validators[0]; - let v_b = other_group_validators[1]; - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; + let local_group_validators = state.group_validators(local_group_index, true); + let other_group_validators = state.group_validators(other_group, true); + let v_a = local_group_validators[0]; + let v_b = local_group_validators[1]; + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. @@ -424,23 +426,37 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; - let relay_parent = Hash::repeat_byte(1); - let peer_c = PeerId::random(); - let peer_d = PeerId::random(); - let peer_e = PeerId::random(); - test_harness(config, |state, mut overseer| async move { - let local_validator = state.local.clone().unwrap(); - - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); - let other_para = ParaId::from(other_group.0); - - let test_leaf = state.make_dummy_leaf(relay_parent); + let peers_to_connect = [ + TestPeerToConnect { local: true, relay_parent_in_view: false }, + TestPeerToConnect { local: true, relay_parent_in_view: false }, + TestPeerToConnect { local: false, relay_parent_in_view: true }, + TestPeerToConnect { local: false, relay_parent_in_view: true }, + TestPeerToConnect { local: false, relay_parent_in_view: true }, + ]; + + let TestSetupInfo { + other_group, + other_para, + relay_parent, + test_leaf, + peers, + validators, + .. + } = setup_test_and_connect_peers( + &state, + &mut overseer, + validator_count, + group_size, + &peers_to_connect, + ) + .await; + let [_, _, peer_c, peer_d, _] = peers[..] else { panic!() }; + let [_, _, v_c, v_d, v_e] = validators[..] 
else { panic!() }; let (candidate, pvd) = make_candidate( relay_parent, @@ -452,52 +468,6 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(other_group, true); - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; - let v_e = target_group_validators[2]; - - // Connect C, D, E - { - connect_peer( - &mut overseer, - peer_c.clone(), - Some(vec![state.discovery_id(v_c)].into_iter().collect()), - ) - .await; - - connect_peer( - &mut overseer, - peer_d.clone(), - Some(vec![state.discovery_id(v_d)].into_iter().collect()), - ) - .await; - - connect_peer( - &mut overseer, - peer_e.clone(), - Some(vec![state.discovery_id(v_e)].into_iter().collect()), - ) - .await; - - send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; - send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; - send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; - } - - activate_leaf(&mut overseer, &test_leaf, &state, true).await; - - answer_expected_hypothetical_depth_request( - &mut overseer, - vec![], - Some(relay_parent), - false, - ) - .await; - - // Send gossip topology. - send_new_topology(&mut overseer, state.make_dummy_topology()).await; - let manifest = BackedCandidateManifest { relay_parent, candidate_hash, @@ -529,14 +499,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { // Receive an advertisement from C. { - send_peer_message( - &mut overseer, - peer_c.clone(), - protocol_v2::StatementDistributionMessage::BackedCandidateManifest( - manifest.clone(), - ), - ) - .await; + send_manifest_from_peer(&mut overseer, peer_c, manifest.clone()).await; // Should send a request to C. let statements = vec![ @@ -562,37 +525,16 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { ) .await; - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_c && r == BENEFIT_VALID_STATEMENT.into() - ); - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_c && r == BENEFIT_VALID_STATEMENT.into() - ); - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_c && r == BENEFIT_VALID_STATEMENT.into() - ); - - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() - ); + assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); + assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); + assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); + assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_RESPONSE); answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; } // Receive Backed message. - overseer - .send(FromOrchestra::Communication { - msg: StatementDistributionMessage::Backed(candidate_hash), - }) - .await; + send_backed_message(&mut overseer, candidate_hash).await; // Should send an acknowledgement back to C. { @@ -624,14 +566,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { // Receive a manifest about the same candidate from peer D. 
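// `send_manifest_from_peer` (added in tests/mod.rs below) simply wraps `send_peer_message`
// with a `BackedCandidateManifest`, so the scenario is unchanged: D advertises a candidate that
// has already been confirmed and backed locally.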
{ - send_peer_message( - &mut overseer, - peer_d.clone(), - protocol_v2::StatementDistributionMessage::BackedCandidateManifest( - manifest.clone(), - ), - ) - .await; + send_manifest_from_peer(&mut overseer, peer_d, manifest.clone()).await; let expected_ack = BackedCandidateAcknowledgement { candidate_hash, @@ -664,6 +599,360 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { }); } +#[test] +fn receive_ack_for_unconfirmed_candidate() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: LocalRole::Validator, + async_backing_params: None, + }; + + test_harness(config, |state, mut overseer| async move { + let peers_to_connect = [ + TestPeerToConnect { local: true, relay_parent_in_view: true }, + TestPeerToConnect { local: true, relay_parent_in_view: false }, + TestPeerToConnect { local: false, relay_parent_in_view: true }, + TestPeerToConnect { local: false, relay_parent_in_view: false }, + ]; + let TestSetupInfo { local_para, relay_parent, test_leaf, peers, .. } = + setup_test_and_connect_peers( + &state, + &mut overseer, + validator_count, + group_size, + &peers_to_connect, + ) + .await; + let [_, _, peer_c, _] = peers[..] else { panic!() }; + + let (candidate, _pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let ack = BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Receive an acknowledgement from a peer before the candidate is confirmed. + send_ack_from_peer(&mut overseer, peer_c, ack.clone()).await; + assert_peer_reported!( + &mut overseer, + peer_c, + COST_UNEXPECTED_ACKNOWLEDGEMENT_UNKNOWN_CANDIDATE, + ); + + overseer + }); +} + +// Test receiving unexpected and expected acknowledgements for a locally confirmed candidate. +#[test] +fn received_acknowledgements_for_locally_confirmed() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: LocalRole::Validator, + async_backing_params: None, + }; + + test_harness(config, |state, mut overseer| async move { + let peers_to_connect = [ + TestPeerToConnect { local: true, relay_parent_in_view: true }, + TestPeerToConnect { local: true, relay_parent_in_view: false }, + TestPeerToConnect { local: false, relay_parent_in_view: true }, + TestPeerToConnect { local: false, relay_parent_in_view: false }, + ]; + let TestSetupInfo { + local_validator, + local_group, + local_para, + relay_parent, + test_leaf, + peers, + validators, + .. + } = setup_test_and_connect_peers( + &state, + &mut overseer, + validator_count, + group_size, + &peers_to_connect, + ) + .await; + let [peer_a, peer_b, peer_c, peer_d] = peers[..] else { panic!() }; + let [_, v_b, _, _] = validators[..] 
else { panic!() }; + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let ack = BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Confirm the candidate locally so that we don't send out requests. + { + let statement = state + .sign_full_statement( + local_validator.validator_index, + Statement::Seconded(candidate.clone()), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + pvd.clone(), + ) + .clone(); + + send_share_message(&mut overseer, relay_parent, statement).await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Receive an unexpected acknowledgement from peer D. + send_ack_from_peer(&mut overseer, peer_d, ack.clone()).await; + assert_peer_reported!(&mut overseer, peer_d, COST_UNEXPECTED_MANIFEST_DISALLOWED); + + // Send statement from peer B. + { + let statement = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_b.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_peer_reported!(&mut overseer, peer_b, BENEFIT_VALID_STATEMENT_FIRST); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + } + + // Send Backed notification. + { + send_backed_message(&mut overseer, candidate_hash).await; + + // We should send out a manifest. + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), + ), + ), + ) + ) => { + assert_eq!(peers, vec![peer_c]); + assert_eq!(manifest, BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: local_group, + para_id: local_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }); + } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Receive an unexpected acknowledgement from peer D. + // + // It still shouldn't know this manifest. + send_ack_from_peer(&mut overseer, peer_d, ack.clone()).await; + assert_peer_reported!(&mut overseer, peer_d, COST_UNEXPECTED_MANIFEST_DISALLOWED); + + // Receive an acknowledgement from peer C. + // + // It's OK, we know they know it because we sent them a manifest. + send_ack_from_peer(&mut overseer, peer_c, ack.clone()).await; + + // What happens if we get another valid ack? 
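// No reputation report is asserted after this repeated acknowledgement, so the expectation is
// that a duplicate ack from a peer already known to have the candidate is ignored rather than
// punished.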
+ send_ack_from_peer(&mut overseer, peer_c, ack.clone()).await; + + overseer + }); +} + +// Test receiving unexpected acknowledgements for a candidate confirmed in a different group. +#[test] +fn received_acknowledgements_for_externally_confirmed() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: LocalRole::Validator, + async_backing_params: None, + }; + + test_harness(config, |state, mut overseer| async move { + let peers_to_connect = [ + TestPeerToConnect { local: true, relay_parent_in_view: true }, + TestPeerToConnect { local: true, relay_parent_in_view: false }, + TestPeerToConnect { local: false, relay_parent_in_view: true }, + TestPeerToConnect { local: false, relay_parent_in_view: true }, + TestPeerToConnect { local: false, relay_parent_in_view: true }, + ]; + let TestSetupInfo { + other_group, + other_para, + relay_parent, + test_leaf, + peers, + validators, + .. + } = setup_test_and_connect_peers( + &state, + &mut overseer, + validator_count, + group_size, + &peers_to_connect, + ) + .await; + let [peer_a, _, peer_c, peer_d, _] = peers[..] else { panic!() }; + let [_, _, v_c, v_d, v_e] = validators[..] else { panic!() }; + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + let statement_c = state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statement_d = state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + // Receive an advertisement from C, confirming the candidate. + { + send_manifest_from_peer(&mut overseer, peer_c, manifest.clone()).await; + + // Should send a request to C. + let statements = vec![ + statement_c.clone(), + statement_d.clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); + assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); + assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); + assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_RESPONSE); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + let ack = BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Receive an unexpected acknowledgement from peer D. 
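// Neither D nor A has exchanged a manifest with us for this (externally confirmed) candidate,
// so both acknowledgements below are unexpected; the cost constants asserted distinguish the
// two peers' situations.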
+ send_ack_from_peer(&mut overseer, peer_d, ack.clone()).await; + assert_peer_reported!(&mut overseer, peer_d, COST_UNEXPECTED_MANIFEST_PEER_UNKNOWN); + + // Receive an unexpected acknowledgement from peer A. + send_ack_from_peer(&mut overseer, peer_a, ack.clone()).await; + assert_peer_reported!(&mut overseer, peer_a, COST_UNEXPECTED_MANIFEST_DISALLOWED); + + overseer + }); +} + // Received advertisement after confirmation but before backing leads to nothing. #[test] fn received_advertisement_after_confirmation_before_backing() { @@ -672,7 +961,7 @@ fn received_advertisement_after_confirmation_before_backing() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -683,9 +972,9 @@ fn received_advertisement_after_confirmation_before_backing() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -700,10 +989,10 @@ fn received_advertisement_after_confirmation_before_backing() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(other_group, true); - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; - let v_e = target_group_validators[2]; + let other_group_validators = state.group_validators(other_group, true); + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; + let v_e = other_group_validators[2]; // Connect C, D, E { @@ -858,7 +1147,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -869,9 +1158,9 @@ fn additional_statements_are_shared_after_manifest_exchange() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -886,10 +1175,10 @@ fn additional_statements_are_shared_after_manifest_exchange() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(other_group, true); - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; - let v_e = target_group_validators[2]; + let other_group_validators = state.group_validators(other_group, true); + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; + let v_e = other_group_validators[2]; // Connect C, D, E { @@ -1155,7 +1444,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1167,7 +1456,8 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { test_harness(config, |state, mut overseer| async move { let local_validator = 
state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1181,13 +1471,12 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); - let v_a = other_group_validators[0]; - let v_b = other_group_validators[1]; - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; + let local_group_validators = state.group_validators(local_group_index, true); + let other_group_validators = state.group_validators((local_group_index.0 + 1).into(), true); + let v_a = local_group_validators[0]; + let v_b = local_group_validators[1]; + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. @@ -1336,7 +1625,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { let expected_manifest = BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1377,7 +1666,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1389,7 +1678,8 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1403,13 +1693,12 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); - let v_a = other_group_validators[0]; - let v_b = other_group_validators[1]; - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; + let local_group_validators = state.group_validators(local_group_index, true); + let other_group_validators = state.group_validators((local_group_index.0 + 1).into(), true); + let v_a = local_group_validators[0]; + let v_b = local_group_validators[1]; + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. 
@@ -1567,7 +1856,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1599,7 +1888,7 @@ fn grid_statements_imported_to_backing() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1610,9 +1899,9 @@ fn grid_statements_imported_to_backing() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1627,10 +1916,10 @@ fn grid_statements_imported_to_backing() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(other_group, true); - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; - let v_e = target_group_validators[2]; + let other_group_validators = state.group_validators(other_group, true); + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; + let v_e = other_group_validators[2]; // Connect C, D, E { @@ -1803,7 +2092,7 @@ fn advertisements_rejected_from_incorrect_peers() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1815,9 +2104,9 @@ fn advertisements_rejected_from_incorrect_peers() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1832,10 +2121,10 @@ fn advertisements_rejected_from_incorrect_peers() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(local_validator.group_index, true); + let local_group_validators = state.group_validators(local_group_index, true); let other_group_validators = state.group_validators(other_group, true); - let v_a = target_group_validators[0]; - let v_b = target_group_validators[1]; + let v_a = local_group_validators[0]; + let v_b = local_group_validators[1]; let v_c = other_group_validators[0]; let v_d = other_group_validators[1]; @@ -1948,7 +2237,7 @@ fn manifest_rejected_with_unknown_relay_parent() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1959,9 +2248,9 @@ fn manifest_rejected_with_unknown_relay_parent() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = 
next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1976,9 +2265,9 @@ fn manifest_rejected_with_unknown_relay_parent() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(other_group, true); - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; + let other_group_validators = state.group_validators(other_group, true); + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; // peer C is not in group, has relay parent in view. // peer D is not in group, has no relay parent in view. @@ -2054,7 +2343,7 @@ fn manifest_rejected_when_not_a_validator() { let config = TestConfig { validator_count, group_size, - local_validator: false, + local_validator: LocalRole::None, async_backing_params: None, }; @@ -2078,9 +2367,9 @@ fn manifest_rejected_when_not_a_validator() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(other_group, true); - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; + let other_group_validators = state.group_validators(other_group, true); + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; // peer C is not in group, has relay parent in view. // peer D is not in group, has no relay parent in view. @@ -2156,7 +2445,7 @@ fn manifest_rejected_when_group_does_not_match_para() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -2166,9 +2455,9 @@ fn manifest_rejected_when_group_does_not_match_para() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); // Create a mismatch between group and para. let other_para = next_group_index(other_group, validator_count, group_size); let other_para = ParaId::from(other_para.0); @@ -2185,9 +2474,9 @@ fn manifest_rejected_when_group_does_not_match_para() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(other_group, true); - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; + let other_group_validators = state.group_validators(other_group, true); + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; // peer C is not in group, has relay parent in view. // peer D is not in group, has no relay parent in view. 
@@ -2263,7 +2552,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -2274,9 +2563,9 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -2291,10 +2580,10 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(other_group, true); - let v_c = target_group_validators[0]; - let v_d = target_group_validators[1]; - let v_e = target_group_validators[2]; + let other_group_validators = state.group_validators(other_group, true); + let v_c = other_group_validators[0]; + let v_d = other_group_validators[1]; + let v_e = other_group_validators[2]; // Connect C, D, E { @@ -2454,3 +2743,141 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { overseer }); } + +#[test] +fn inactive_local_participates_in_grid() { + let validator_count = 11; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: LocalRole::InactiveValidator, + async_backing_params: None, + }; + + let dummy_relay_parent = Hash::repeat_byte(2); + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + assert_eq!(local_validator.validator_index.0, validator_count as u32); + + let group_idx = GroupIndex::from(0); + let para = ParaId::from(0); + + // Dummy leaf is needed to update topology. + let dummy_leaf = state.make_dummy_leaf(Hash::repeat_byte(2)); + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + para, + test_leaf.para_data(para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let first_group = state.group_validators(group_idx, true); + let v_a = first_group.last().unwrap().clone(); + let v_b = first_group.first().unwrap().clone(); + + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &dummy_leaf, &state, true).await; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(dummy_relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + activate_leaf(&mut overseer, &test_leaf, &state, false).await; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Receive an advertisement from A. 
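// Although the local node is only an inactive authority (not in the active validator set), it
// still occupies a slot in the grid topology, so A's manifest is expected to trigger a request
// for the candidate and the usual reputation benefits asserted below.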
+ let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: group_idx, + para_id: para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }; + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_v3::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .await; + + let statements = vec![ + state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + // Inactive node requests this candidate. + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + for _ in 0..2 { + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT.into() => { } + ); + } + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } + ); + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + }); +} diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 4150377a0c6c219832104024f6d6c046dccec709..c34cf20d716caa0ef2b7f66a6c9b322d23a8d9a5 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -21,6 +21,7 @@ use crate::*; use polkadot_node_network_protocol::{ grid_topology::TopologyPeerInfo, request_response::{outgoing::Recipient, ReqProtocolNames}, + v2::{BackedCandidateAcknowledgement, BackedCandidateManifest}, view, ObservedRole, }; use polkadot_node_primitives::Statement; @@ -61,19 +62,30 @@ const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParams = // Some deterministic genesis hash for req/res protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); +#[derive(Debug, Copy, Clone)] +enum LocalRole { + /// Active validator. + Validator, + /// Authority, not in active validator set. + InactiveValidator, + /// Not a validator. + None, +} + struct TestConfig { + // number of active validators. validator_count: usize, // how many validators to place in each group. group_size: usize, // whether the local node should be a validator - local_validator: bool, + local_validator: LocalRole, async_backing_params: Option<AsyncBackingParams>, } #[derive(Debug, Clone)] struct TestLocalValidator { validator_index: ValidatorIndex, - group_index: GroupIndex, + group_index: Option<GroupIndex>, } struct TestState { @@ -99,7 +111,7 @@ impl TestState { let mut assignment_keys = Vec::new(); let mut validator_groups = Vec::new(); - let local_validator_pos = if config.local_validator { + let local_validator_pos = if let LocalRole::Validator = config.local_validator { // ensure local validator is always in a full group.
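// (The `saturating_sub(group_size - 1)` below pulls the randomly chosen position back towards
// the start of the range, so it can never fall into a trailing, partially filled group.)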
Some(rng.gen_range(0..config.validator_count).saturating_sub(config.group_size - 1)) } else { @@ -128,13 +140,19 @@ impl TestState { } } - let local = if let Some(local_pos) = local_validator_pos { - Some(TestLocalValidator { + let local = match (config.local_validator, local_validator_pos) { + (LocalRole::Validator, Some(local_pos)) => Some(TestLocalValidator { validator_index: ValidatorIndex(local_pos as _), - group_index: GroupIndex((local_pos / config.group_size) as _), - }) - } else { - None + group_index: Some(GroupIndex((local_pos / config.group_size) as _)), + }), + (LocalRole::InactiveValidator, None) => { + discovery_keys.push(AuthorityDiscoveryPair::generate().0.public()); + Some(TestLocalValidator { + validator_index: ValidatorIndex(config.validator_count as u32), + group_index: None, + }) + }, + _ => None, }; let validator_public = validator_pubkeys(&validators); @@ -181,15 +199,23 @@ impl TestState { fn make_dummy_topology(&self) -> NewGossipTopology { let validator_count = self.config.validator_count; + let is_local_inactive = matches!(self.config.local_validator, LocalRole::InactiveValidator); + + let mut indices: Vec<usize> = (0..validator_count).collect(); + if is_local_inactive { + indices.push(validator_count); + } + NewGossipTopology { session: 1, topology: SessionGridTopology::new( - (0..validator_count).collect(), - (0..validator_count) + indices.clone(), + indices + .into_iter() .map(|i| TopologyPeerInfo { peer_ids: Vec::new(), validator_index: ValidatorIndex(i as u32), - discovery_id: AuthorityDiscoveryPair::generate().0.public(), + discovery_id: self.session_info.discovery_keys[i].clone(), }) .collect(), ), @@ -276,7 +302,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>( test: impl FnOnce(TestState, VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); - let keystore = if config.local_validator { + let keystore = if let LocalRole::Validator = config.local_validator { test_helpers::mock::make_ferdie_keystore() } else { Arc::new(LocalKeystore::in_memory()) as KeystorePtr }; @@ -352,6 +378,95 @@ impl TestLeaf { } } +struct TestSetupInfo { + local_validator: TestLocalValidator, + local_group: GroupIndex, + local_para: ParaId, + other_group: GroupIndex, + other_para: ParaId, + relay_parent: Hash, + test_leaf: TestLeaf, + peers: Vec<PeerId>, + validators: Vec<ValidatorIndex>, +} + +struct TestPeerToConnect { + local: bool, + relay_parent_in_view: bool, +} + +// TODO: Generalize, use in more places. +/// Sets up some test info that is common to most tests, and connects the requested peers. +async fn setup_test_and_connect_peers( + state: &TestState, + overseer: &mut VirtualOverseer, + validator_count: usize, + group_size: usize, + peers_to_connect: &[TestPeerToConnect], +) -> TestSetupInfo { + let local_validator = state.local.clone().unwrap(); + let local_group = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group.0); + + let other_group = next_group_index(local_group, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let relay_parent = Hash::repeat_byte(1); + let test_leaf = state.make_dummy_leaf(relay_parent); + + // Because we are testing grid mod, the "target" group (the one we communicate with) is usually + // other_group, a non-local group. + // + // TODO: change based on `LocalRole`?
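// Each entry in `peers_to_connect` with `local: true` is paired with a validator from the local
// group and the rest with validators from `other_group`; every peer is connected with its
// authority-discovery id and, if requested, gets the relay parent into its view.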
+ let local_group_validators = state.group_validators(local_group, true); + let other_group_validators = state.group_validators(other_group, true); + + let mut peers = vec![]; + let mut validators = vec![]; + let mut local_group_idx = 0; + let mut other_group_idx = 0; + for peer_to_connect in peers_to_connect { + let peer = PeerId::random(); + peers.push(peer); + + let v = if peer_to_connect.local { + let v = local_group_validators[local_group_idx]; + local_group_idx += 1; + v + } else { + let v = other_group_validators[other_group_idx]; + other_group_idx += 1; + v + }; + validators.push(v); + + connect_peer(overseer, peer, Some(vec![state.discovery_id(v)].into_iter().collect())).await; + + if peer_to_connect.relay_parent_in_view { + send_peer_view_change(overseer, peer.clone(), view![relay_parent]).await; + } + } + + activate_leaf(overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request(overseer, vec![], Some(relay_parent), false).await; + + // Send gossip topology. + send_new_topology(overseer, state.make_dummy_topology()).await; + + TestSetupInfo { + local_validator, + local_group, + local_para, + other_group, + other_para, + test_leaf, + relay_parent, + peers, + validators, + } +} + async fn activate_leaf( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, @@ -522,6 +637,66 @@ async fn answer_expected_hypothetical_depth_request( ) } +#[macro_export] +macro_rules! assert_peer_reported { + ($virtual_overseer:expr, $peer_id:expr, $rep_change:expr $(,)*) => { + assert_matches!( + $virtual_overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == $peer_id && r == $rep_change.into() + ); + } +} + +async fn send_share_message( + virtual_overseer: &mut VirtualOverseer, + relay_parent: Hash, + statement: SignedFullStatementWithPVD, +) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, statement), + }) + .await; +} + +async fn send_backed_message( + virtual_overseer: &mut VirtualOverseer, + candidate_hash: CandidateHash, +) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Backed(candidate_hash), + }) + .await; +} + +async fn send_manifest_from_peer( + virtual_overseer: &mut VirtualOverseer, + peer_id: PeerId, + manifest: BackedCandidateManifest, +) { + send_peer_message( + virtual_overseer, + peer_id, + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .await; +} + +async fn send_ack_from_peer( + virtual_overseer: &mut VirtualOverseer, + peer_id: PeerId, + ack: BackedCandidateAcknowledgement, +) { + send_peer_message( + virtual_overseer, + peer_id, + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack), + ) + .await; +} + fn validator_pubkeys(val_ids: &[ValidatorPair]) -> IndexedVec<ValidatorIndex, ValidatorId> { val_ids.iter().map(|v| v.public().into()).collect() } diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 4734d7a0f960bf43cd09a50ceb96c29fa0deadf3..1eec8290fabaeec37c1dea2b53de3d8c32385336 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -32,7 +32,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator:
LocalRole::Validator, async_backing_params: None, }; @@ -43,7 +43,8 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -57,7 +58,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -188,7 +189,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: Some(AsyncBackingParams { // Makes `seconding_limit: 2` (easier to hit the limit). max_candidate_depth: 1, @@ -203,9 +204,9 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -475,7 +476,7 @@ fn peer_reported_for_not_enough_statements() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -486,9 +487,9 @@ fn peer_reported_for_not_enough_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -670,7 +671,7 @@ fn peer_reported_for_duplicate_statements() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -681,7 +682,8 @@ fn peer_reported_for_duplicate_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -695,7 +697,7 @@ fn peer_reported_for_duplicate_statements() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -830,7 +832,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { let config = TestConfig 
{ validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -841,7 +843,8 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -855,8 +858,8 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - state.group_validators((local_validator.group_index.0 + 1).into(), true); + let other_group_validators = state.group_validators(local_group_index, true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -968,7 +971,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -979,7 +982,8 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -993,9 +997,8 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - let next_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + let other_group_validators = state.group_validators(local_group_index, true); + let next_group_validators = state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_c = next_group_validators[0]; @@ -1105,7 +1108,7 @@ fn local_node_sanity_checks_incoming_requests() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1117,7 +1120,8 @@ fn local_node_sanity_checks_incoming_requests() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1135,7 +1139,7 @@ fn local_node_sanity_checks_incoming_requests() { // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. 
{ - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -1311,7 +1315,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1321,7 +1325,8 @@ fn local_node_checks_that_peer_can_request_before_responding() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1336,7 +1341,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { let candidate_hash = candidate.hash(); // Peers A and B are in group and have relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -1515,7 +1520,7 @@ fn local_node_respects_statement_mask() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1527,7 +1532,8 @@ fn local_node_respects_statement_mask() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1541,9 +1547,9 @@ fn local_node_respects_statement_mask() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1707,7 +1713,7 @@ fn local_node_respects_statement_mask() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1761,7 +1767,7 @@ fn should_delay_before_retrying_dropped_requests() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1772,9 +1778,9 @@ fn should_delay_before_retrying_dropped_requests() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); diff --git 
a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index ac1e4443f0c8d92eea96d01496df71d7ff2b856a..40df8d3514a40280c1cdb709bf6f91ea4ca1017d 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "System overseer of the Polkadot node" +[lints] +workspace = true + [dependencies] client = { package = "sc-client-api", path = "../../../substrate/client/api" } sp-api = { path = "../../../substrate/primitives/api" } @@ -17,14 +20,14 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem-types = { path = "../subsystem-types" } polkadot-node-metrics = { path = "../metrics" } polkadot-primitives = { path = "../../primitives" } -orchestra = { version = "0.3.3", default-features = false, features=["futures_channel"] } +orchestra = { version = "0.3.3", default-features = false, features = ["futures_channel"] } gum = { package = "tracing-gum", path = "../gum" } sp-core = { path = "../../../substrate/primitives/core" } -async-trait = "0.1.57" +async-trait = "0.1.74" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } [dev-dependencies] -metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features = ["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } futures = { version = "0.3.21", features = ["thread-pool"] } femme = "2.2.1" @@ -36,8 +39,8 @@ node-test-helpers = { package = "polkadot-node-subsystem-test-helpers", path = " tikv-jemalloc-ctl = "0.5.0" [features] -default = [ "futures_channel" ] -dotgraph = [ "orchestra/dotgraph" ] -expand = [ "orchestra/expand" ] -futures_channel = [ "metered/futures_channel", "orchestra/futures_channel" ] -jemalloc-allocator = [ "dep:tikv-jemalloc-ctl" ] +default = ["futures_channel"] +dotgraph = ["orchestra/dotgraph"] +expand = ["orchestra/expand"] +futures_channel = ["metered/futures_channel", "orchestra/futures_channel"] +jemalloc-allocator = ["dep:tikv-jemalloc-ctl"] diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs index b2c0ea2f75a816c0ccf6f96187d3f48a13102ccc..857cdba673db267d2824d8a995257316fc67225f 100644 --- a/polkadot/node/overseer/examples/minimal-example.rs +++ b/polkadot/node/overseer/examples/minimal-example.rs @@ -32,7 +32,7 @@ use polkadot_overseer::{ gen::{FromOrchestra, SpawnedSubsystem}, HeadSupportsParachains, SubsystemError, }; -use polkadot_primitives::{CandidateReceipt, Hash, PvfExecTimeoutKind}; +use polkadot_primitives::{CandidateReceipt, Hash, PvfExecKind}; struct AlwaysSupportsParachains; @@ -77,7 +77,7 @@ impl Subsystem1 { candidate_receipt, pov: PoV { block_data: BlockData(Vec::new()) }.into(), executor_params: Default::default(), - exec_timeout_kind: PvfExecTimeoutKind::Backing, + exec_kind: PvfExecKind::Backing, response_sender: tx, }; ctx.send_message(msg).await; diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 5207bb830d8c1506eb92ce702900a39c44b74834..f4eddf1f41ceb90d61391ac5140941bdca8b0bf1 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -87,8 +87,8 @@ use polkadot_node_subsystem_types::messages::{ pub use polkadot_node_subsystem_types::{ errors::{SubsystemError, SubsystemResult}, - jaeger, ActivatedLeaf, ActiveLeavesUpdate, 
OverseerSignal, RuntimeApiSubsystemClient, - UnpinHandle, + jaeger, ActivatedLeaf, ActiveLeavesUpdate, ChainApiBackend, OverseerSignal, + RuntimeApiSubsystemClient, UnpinHandle, }; pub mod metrics; @@ -276,6 +276,7 @@ impl From> for BlockInfo { /// An event from outside the overseer scope, such /// as the substrate framework or user interaction. +#[derive(Debug)] pub enum Event { /// A new block was imported. /// @@ -300,6 +301,7 @@ pub enum Event { } /// Some request from outer world. +#[derive(Debug)] pub enum ExternalRequest { /// Wait for the activation of a particular hash /// and be notified by means of the return channel. diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 254f5fe4512019acb9cdb3be211e5f88774ae9c4..0494274367d953146ad06e93c3ee00cfc9c3b983 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem_types::messages::{ }; use polkadot_primitives::{ CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, - PvfExecTimeoutKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, + PvfExecKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, }; use crate::{ @@ -106,7 +106,7 @@ where candidate_receipt, pov: PoV { block_data: BlockData(Vec::new()) }.into(), executor_params: Default::default(), - exec_timeout_kind: PvfExecTimeoutKind::Backing, + exec_kind: PvfExecKind::Backing, response_sender: tx, }) .await; @@ -804,7 +804,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage { candidate_receipt, pov, executor_params: Default::default(), - exec_timeout_kind: PvfExecTimeoutKind::Backing, + exec_kind: PvfExecKind::Backing, response_sender, } } diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index c39fd5947b0b7b80b2bf78bce9f1bb76e364302b..09817ed1cf3ea7d348a4cbe2378c85f19b8dc97d 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] bounded-vec = "0.7" futures = "0.3.21" @@ -18,10 +21,10 @@ sp-keystore = { path = "../../../substrate/primitives/keystore" } sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } sp-runtime = { path = "../../../substrate/primitives/runtime" } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -schnorrkel = "0.9.1" +schnorrkel = "0.11.4" thiserror = "1.0.48" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.12.4", default-features = false } diff --git a/polkadot/node/primitives/src/approval.rs b/polkadot/node/primitives/src/approval.rs index e5ae24f7a51e2a3f000b25a11e6c62d4592ae904..f2a79e025affe3de108f0ce8abd985e53c862a5c 100644 --- a/polkadot/node/primitives/src/approval.rs +++ b/polkadot/node/primitives/src/approval.rs @@ -20,7 +20,7 @@ pub mod v1 { use sp_consensus_babe as babe_primitives; pub use sp_consensus_babe::{ - Randomness, Slot, VrfOutput, VrfProof, VrfSignature, VrfTranscript, + Randomness, Slot, VrfPreOutput, VrfProof, VrfSignature, VrfTranscript, }; use parity_scale_codec::{Decode, Encode}; @@ -145,14 +145,14 @@ pub mod v1 { AuthorityOutOfBounds(usize), } - /// An 
unsafe VRF output. Provide BABE Epoch info to create a `RelayVRFStory`. - pub struct UnsafeVRFOutput { - vrf_output: VrfOutput, + /// An unsafe VRF pre-output. Provide BABE Epoch info to create a `RelayVRFStory`. + pub struct UnsafeVRFPreOutput { + vrf_pre_output: VrfPreOutput, slot: Slot, authority_index: u32, } - impl UnsafeVRFOutput { + impl UnsafeVRFPreOutput { /// Get the slot. pub fn slot(&self) -> Slot { self.slot @@ -177,7 +177,7 @@ pub mod v1 { sp_consensus_babe::make_vrf_transcript(randomness, self.slot, epoch_index); let inout = self - .vrf_output + .vrf_pre_output .0 .attach_input_hash(&pubkey, transcript.0) .map_err(ApprovalError::SchnorrkelSignature)?; @@ -190,7 +190,7 @@ pub mod v1 { /// This fails if either there is no BABE `PreRuntime` digest or /// the digest has type `SecondaryPlain`, which Substrate nodes do /// not produce or accept anymore. - pub fn babe_unsafe_vrf_info(header: &Header) -> Option { + pub fn babe_unsafe_vrf_info(header: &Header) -> Option { use babe_primitives::digests::CompatibleDigestItem; for digest in &header.digest.logs { @@ -198,8 +198,8 @@ pub mod v1 { let slot = pre.slot(); let authority_index = pre.authority_index(); - return pre.vrf_signature().map(|sig| UnsafeVRFOutput { - vrf_output: sig.output.clone(), + return pre.vrf_signature().map(|sig| UnsafeVRFPreOutput { + vrf_pre_output: sig.pre_output.clone(), slot, authority_index, }) @@ -214,12 +214,14 @@ pub mod v1 { pub mod v2 { use parity_scale_codec::{Decode, Encode}; pub use sp_consensus_babe::{ - Randomness, Slot, VrfOutput, VrfProof, VrfSignature, VrfTranscript, + Randomness, Slot, VrfPreOutput, VrfProof, VrfSignature, VrfTranscript, }; use std::ops::BitOr; use bitvec::{prelude::Lsb0, vec::BitVec}; - use polkadot_primitives::{CandidateIndex, CoreIndex, Hash, ValidatorIndex}; + use polkadot_primitives::{ + CandidateIndex, CoreIndex, Hash, ValidatorIndex, ValidatorSignature, + }; /// A static context associated with producing randomness for a core. pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE v2"; @@ -473,6 +475,59 @@ pub mod v2 { }) } } + + impl From for IndirectSignedApprovalVoteV2 { + fn from(value: super::v1::IndirectSignedApprovalVote) -> Self { + Self { + block_hash: value.block_hash, + validator: value.validator, + candidate_indices: value.candidate_index.into(), + signature: value.signature, + } + } + } + + /// Errors that can occur when trying to convert to/from approvals v1/v2 + #[derive(Debug)] + pub enum ApprovalConversionError { + /// More than one candidate was signed. + MoreThanOneCandidate(usize), + } + + impl TryFrom for super::v1::IndirectSignedApprovalVote { + type Error = ApprovalConversionError; + + fn try_from(value: IndirectSignedApprovalVoteV2) -> Result { + if value.candidate_indices.count_ones() != 1 { + return Err(ApprovalConversionError::MoreThanOneCandidate( + value.candidate_indices.count_ones(), + )) + } + Ok(Self { + block_hash: value.block_hash, + validator: value.validator, + candidate_index: value.candidate_indices.first_one().expect("Qed we checked above") + as u32, + signature: value.signature, + }) + } + } + + /// A signed approval vote which references the candidate indirectly via the block. + /// + /// In practice, we have a look-up from block hash and candidate index to candidate hash, + /// so this can be transformed into a `SignedApprovalVote`. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct IndirectSignedApprovalVoteV2 { + /// A block hash where the candidate appears. 
+ pub block_hash: Hash, + /// The index of the candidate in the list of candidates fully included as-of the block. + pub candidate_indices: CandidateBitfield, + /// The validator index. + pub validator: ValidatorIndex, + /// The signature by the validator. + pub signature: ValidatorSignature, + } } #[cfg(test)] diff --git a/polkadot/node/primitives/src/disputes/message.rs b/polkadot/node/primitives/src/disputes/message.rs index 89d3ea6c0af9023ad74fba97f4f2abd73cf84ad8..31fe73a7ba1c4dec821762714195f3c1792beac8 100644 --- a/polkadot/node/primitives/src/disputes/message.rs +++ b/polkadot/node/primitives/src/disputes/message.rs @@ -170,7 +170,7 @@ impl DisputeMessage { let valid_vote = ValidDisputeVote { validator_index: valid_index, signature: valid_statement.validator_signature().clone(), - kind: *valid_kind, + kind: valid_kind.clone(), }; let invalid_vote = InvalidDisputeVote { diff --git a/polkadot/node/primitives/src/disputes/mod.rs b/polkadot/node/primitives/src/disputes/mod.rs index 500b705be9574868c4b173333b6f84dd427d470c..768b95f65537b7ebfe5e4a8baadfd9eec685af4f 100644 --- a/polkadot/node/primitives/src/disputes/mod.rs +++ b/polkadot/node/primitives/src/disputes/mod.rs @@ -46,6 +46,15 @@ pub struct SignedDisputeStatement { session_index: SessionIndex, } +/// Errors encountered while signing a dispute statement +#[derive(Debug)] +pub enum SignedDisputeStatementError { + /// Encountered a keystore error while signing + KeyStoreError(KeystoreError), + /// Could not generate signing payload + PayloadError, +} + /// Tracked votes on candidates, for the purposes of dispute resolution. #[derive(Debug, Clone)] pub struct CandidateVotes { @@ -107,8 +116,9 @@ impl ValidCandidateVotes { ValidDisputeStatementKind::BackingValid(_) | ValidDisputeStatementKind::BackingSeconded(_) => false, ValidDisputeStatementKind::Explicit | - ValidDisputeStatementKind::ApprovalChecking => { - occupied.insert((kind, sig)); + ValidDisputeStatementKind::ApprovalChecking | + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(_) => { + occupied.insert((kind.clone(), sig)); kind != occupied.get().0 }, }, @@ -213,16 +223,19 @@ impl SignedDisputeStatement { candidate_hash: CandidateHash, session_index: SessionIndex, validator_public: ValidatorId, - ) -> Result, KeystoreError> { + ) -> Result, SignedDisputeStatementError> { let dispute_statement = if valid { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) } else { DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit) }; - let data = dispute_statement.payload_data(candidate_hash, session_index); + let data = dispute_statement + .payload_data(candidate_hash, session_index) + .map_err(|_| SignedDisputeStatementError::PayloadError)?; let signature = keystore - .sr25519_sign(ValidatorId::ID, validator_public.as_ref(), &data)? + .sr25519_sign(ValidatorId::ID, validator_public.as_ref(), &data) + .map_err(SignedDisputeStatementError::KeyStoreError)? .map(|sig| Self { dispute_statement, candidate_hash, diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index dab72bb2a5ed80b1842d4ca341c609109dee8c75..6ac6b82c223dff54d0fb82d96b3d5200e4f9a58b 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.1.0"; +pub const NODE_VERSION: &'static str = "1.5.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: @@ -442,7 +442,7 @@ pub struct CollationSecondedSignal { pub relay_parent: Hash, /// The statement about seconding the collation. /// - /// Anything else than [`Statement::Seconded`](Statement::Seconded) is forbidden here. + /// Anything else than [`Statement::Seconded`] is forbidden here. pub statement: SignedFullStatement, } diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index e7a4f4a825c0bc37713a95447b8c6336358c3c62..0671b912120fd3340d629d30810dafdfdcd4ef5c 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -7,6 +7,9 @@ edition.workspace = true license.workspace = true description = "Utils to tie different Polkadot components together and allow instantiation of a node." +[lints] +workspace = true + [dependencies] # Substrate Client sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } @@ -75,20 +78,21 @@ frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-c frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } # External Crates -async-trait = "0.1.57" +async-trait = "0.1.74" futures = "0.3.21" hex-literal = "0.4.1" is_executable = "1.0.1" gum = { package = "tracing-gum", path = "../gum" } log = "0.4.17" schnellru = "0.2.1" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0.48" kvdb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } -parity-db = { version = "0.4.8", optional = true } +parity-db = { version = "0.4.12", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } +parking_lot = "0.12.1" # Polkadot polkadot-core-primitives = { path = "../../core-primitives" } @@ -147,9 +151,9 @@ serial_test = "2.0.0" tempfile = "3.2" [features] -default = [ "db", "full-node" ] +default = ["db", "full-node"] -db = [ "service/rocksdb" ] +db = ["service/rocksdb"] full-node = [ "kvdb-rocksdb", @@ -180,8 +184,8 @@ full-node = [ ] # Configure the native runtimes to use. 
-westend-native = [ "westend-runtime", "westend-runtime-constants" ] -rococo-native = [ "rococo-runtime", "rococo-runtime-constants" ] +westend-native = ["westend-runtime", "westend-runtime-constants"] +rococo-native = ["rococo-runtime", "rococo-runtime-constants"] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", @@ -218,13 +222,9 @@ fast-runtime = [ "westend-runtime?/fast-runtime", ] -malus = [ "full-node" ] +malus = ["full-node"] runtime-metrics = [ "polkadot-runtime-parachains/runtime-metrics", "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] - -network-protocol-staging = [ - "polkadot-node-network-protocol/network-protocol-staging", -] diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 6676bbe154b0591f4271aa727b64a9bdaf9e82ce..979550c7570643380c5a2e0f0f5de7c049c01f85 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -33,7 +33,9 @@ "/dns/ksm-bootnode.stakeworld.io/tcp/30301/ws/p2p/12D3KooWFRin7WWVS6RgUsSpkfUHSv4tfGKnr2zJPmf1pbMv118H", "/dns/ksm-bootnode.stakeworld.io/tcp/30302/wss/p2p/12D3KooWFRin7WWVS6RgUsSpkfUHSv4tfGKnr2zJPmf1pbMv118H", "/dns/ksm14.rotko.net/tcp/35224/wss/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", - "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK" + "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", + "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30333/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT", + "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 533492088161b3fb7e3862b706ef22716b30ee71..71dbb9004038d1394cebe568627e1e6163ee9049 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -34,7 +34,9 @@ "/dns/dot-bootnode.stakeworld.io/tcp/30311/ws/p2p/12D3KooWAb5MyC1UJiEQJk4Hg4B2Vi3AJdqSUhTGYUqSnEqCFMFg", "/dns/dot-bootnode.stakeworld.io/tcp/30312/wss/p2p/12D3KooWAb5MyC1UJiEQJk4Hg4B2Vi3AJdqSUhTGYUqSnEqCFMFg", "/dns/dot14.rotko.net/tcp/35214/wss/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", - "/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff" + "/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", + "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30333/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", + "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index b2ffba9304bcc840a2b869a57bc549d2cdf95bda..697675871fcd7b4b11cac5a25e71c704d471cbde 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -31,7 +31,9 @@ "/dns/wnd-bootnode.stakeworld.io/tcp/30321/ws/p2p/12D3KooWBYdKipcNbrV5rCbgT5hco8HMLME7cE9hHC3ckqCKDuzP", "/dns/wnd-bootnode.stakeworld.io/tcp/30322/wss/p2p/12D3KooWBYdKipcNbrV5rCbgT5hco8HMLME7cE9hHC3ckqCKDuzP", "/dns/wnd14.rotko.net/tcp/35234/wss/p2p/12D3KooWLK8Zj1uZ46phU3vQwiDVda8tB76S8J26rXZQLHpwWkDJ", - 
"/dns/wnd14.rotko.net/tcp/33234/p2p/12D3KooWLK8Zj1uZ46phU3vQwiDVda8tB76S8J26rXZQLHpwWkDJ" + "/dns/wnd14.rotko.net/tcp/33234/p2p/12D3KooWLK8Zj1uZ46phU3vQwiDVda8tB76S8J26rXZQLHpwWkDJ", + "/dns/ibp-boot-westend.luckyfriday.io/tcp/30333/p2p/12D3KooWDg1YEytdwFFNWroFj6gio4YFsMB3miSbHKgdpJteUMB9", + "/dns/ibp-boot-westend.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWDg1YEytdwFFNWroFj6gio4YFsMB3miSbHKgdpJteUMB9" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 871d7e82911ab2494dd5aac22a625bd2938222c3..fd35a4aaf6ab1780f9f6a1510c6d83a3939925c8 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -18,7 +18,6 @@ use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; #[cfg(feature = "westend-native")] use pallet_staking::Forcing; use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, ValidatorId}; @@ -162,7 +161,6 @@ fn default_parachains_host_configuration_is_consistent() { fn westend_session_keys( babe: BabeId, grandpa: GrandpaId, - im_online: ImOnlineId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, @@ -171,7 +169,6 @@ fn westend_session_keys( westend::SessionKeys { babe, grandpa, - im_online, para_validator, para_assignment, authority_discovery, @@ -183,7 +180,6 @@ fn westend_session_keys( fn rococo_session_keys( babe: BabeId, grandpa: GrandpaId, - im_online: ImOnlineId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, @@ -192,7 +188,6 @@ fn rococo_session_keys( rococo_runtime::SessionKeys { babe, grandpa, - im_online, para_validator, para_assignment, authority_discovery, @@ -220,7 +215,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, @@ -237,9 +231,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { //5Eb7wM65PNgtY6e33FEAzYtU5cRTXt6WQvZTnzaKQwkVcABk hex!["6faae44b21c6f2681a7f60df708e9f79d340f7d441d28bd987fab8d05c6487e8"] .unchecked_into(), - //5CdS2wGo4qdTQceVfEnbZH8vULeBrnGYCxSCxDna4tQSMV6y - hex!["18f5d55f138bfa8e0ea26ed6fa56817b247de3c2e2030a908c63fb37c146473f"] - .unchecked_into(), //5FqMLAgygdX9UqzukDp15Uid9PAKdFAR621U7xtp5ut2NfrW hex!["a6c1a5b501985a83cb1c37630c5b41e6b0a15b3675b2fd94694758e6cfa6794d"] .unchecked_into(), @@ -264,9 +255,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { //5FXFsPReTUEYPRNKhbTdUathcWBsxTNsLbk2mTpYdKCJewjA hex!["98f4d81cb383898c2c3d54dab28698c0f717c81b509cb32dc6905af3cc697b18"] .unchecked_into(), - //5CDYSCJK91r8y2r1V4Ddrit4PFMEkwZXJe8mNBqGXJ4xWCWq - hex!["06bd7dd4ab4c808c7d09d9cb6bd27fbcd99ad8400e99212b335056c475c24031"] - .unchecked_into(), //5CZjurB78XbSHf6SLkLhCdkqw52Zm7aBYUDdfkLqEDWJ9Zhj hex!["162508accd470e379b04cb0c7c60b35a7d5357e84407a89ed2dd48db4b726960"] .unchecked_into(), @@ -291,9 +279,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { //5G4kCbgqUhEyrRHCyFwFEkgBZXoYA8sbgsRxT9rY8Tp5Jj5F hex!["b0f8d2b9e4e1eafd4dab6358e0b9d5380d78af27c094e69ae9d6d30ca300fd86"] .unchecked_into(), - //5HVhFBLFTKSZK9fX6RktckWDTgYNoSd33fgonsEC8zfr4ddm - hex!["f03c3e184b2883eec9beaeb97f54321587e7476b228831ea0b5fc6da847ea975"] - .unchecked_into(), //5CS7thd2n54WfqeKU3cjvZzK4z5p7zku1Zw97mSzXgPioAAs 
hex!["1055100a283968271a0781450b389b9093231be809be1e48a305ebad2a90497e"] .unchecked_into(), @@ -318,9 +303,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { //5ChfdrAqmLjCeDJvynbMjcxYLHYzPe8UWXd3HnX9JDThUMbn hex!["1c309a70b4e274314b84c9a0a1f973c9c4fc084df5479ef686c54b1ae4950424"] .unchecked_into(), - //5DnsMm24575xK2b2aGfmafiDxwCet6Mr4iiZQeDdWvi8CzuF - hex!["4c64868ba6d8ace235d3efb4c10d745a67cf3bdfeae23b264d7ea2f3439dec42"] - .unchecked_into(), //5D8C3HHEp5E8fJsXRD56494F413CdRSR9QKGXe7v5ZEfymdj hex!["2ee4d78f328db178c54f205ac809da12e291a33bcbd4f29f081ce7e74bdc5044"] .unchecked_into(), @@ -361,7 +343,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { x.5.clone(), x.6.clone(), x.7.clone(), - x.8.clone(), ), ) }) @@ -408,7 +389,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, @@ -425,9 +405,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5CPd3zoV9Aaah4xWucuDivMHJ2nEEmpdi864nPTiyRZp4t87 hex!["0e6d7d1afbcc6547b92995a394ba0daed07a2420be08220a5a1336c6731f0bfa"] .unchecked_into(), - //5F7BEa1LGFksUihyatf3dCDYneB8pWzVyavnByCsm5nBgezi - hex!["86975a37211f8704e947a365b720f7a3e2757988eaa7d0f197e83dba355ef743"] - .unchecked_into(), //5CP6oGfwqbEfML8efqm1tCZsUgRsJztp9L8ZkEUxA16W8PPz hex!["0e07a51d3213842f8e9363ce8e444255990a225f87e80a3d651db7841e1a0205"] .unchecked_into(), @@ -452,9 +429,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5HnDVBN9mD6mXyx8oryhDbJtezwNSj1VRXgLoYCBA6uEkiao hex!["fcd5f87a6fd5707a25122a01b4dac0a8482259df7d42a9a096606df1320df08d"] .unchecked_into(), - //5DhyXZiuB1LvqYKFgT5tRpgGsN3is2cM9QxgW7FikvakbAZP - hex!["48a910c0af90898f11bd57d37ceaea53c78994f8e1833a7ade483c9a84bde055"] - .unchecked_into(), //5EPEWRecy2ApL5n18n3aHyU1956zXTRqaJpzDa9DoqiggNwF hex!["669a10892119453e9feb4e3f1ee8e028916cc3240022920ad643846fbdbee816"] .unchecked_into(), @@ -479,9 +453,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5HAes2RQYPbYKbLBfKb88f4zoXv6pPA6Ke8CjN7dob3GpmSP hex!["e1b68fbd84333e31486c08e6153d9a1415b2e7e71b413702b7d64e9b631184a1"] .unchecked_into(), - //5HTXBf36LXmkFWJLokNUK6fPxVpkr2ToUnB1pvaagdGu4c1T - hex!["ee93e26259decb89afcf17ef2aa0fa2db2e1042fb8f56ecfb24d19eae8629878"] - .unchecked_into(), //5FtAGDZYJKXkhVhAxCQrXmaP7EE2mGbBMfmKDHjfYDgq2BiU hex!["a8e61ffacafaf546283dc92d14d7cc70ea0151a5dd81fdf73ff5a2951f2b6037"] .unchecked_into(), @@ -506,9 +477,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5DJV3zCBTJBLGNDCcdWrYxWDacSz84goGTa4pFeKVvehEBte hex!["36be9069cdb4a8a07ecd51f257875150f0a8a1be44a10d9d98dabf10a030aef4"] .unchecked_into(), - //5FHf8kpK4fPjEJeYcYon2gAPwEBubRvtwpzkUbhMWSweKPUY - hex!["8e95b9b5b4dc69790b67b566567ca8bf8cdef3a3a8bb65393c0d1d1c87cd2d2c"] - .unchecked_into(), //5F9FsRjpecP9GonktmtFL3kjqNAMKjHVFjyjRdTPa4hbQRZA hex!["882d72965e642677583b333b2d173ac94b5fd6c405c76184bb14293be748a13b"] .unchecked_into(), @@ -533,9 +501,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5EX1JBghGbQqWohTPU6msR9qZ2nYPhK9r3RTQ2oD1K8TCxaG hex!["6c878e33b83c20324238d22240f735457b6fba544b383e70bb62a27b57380c81"] .unchecked_into(), - //5GqL8RbVAuNXpDhjQi1KrS1MyNuKhvus2AbmQwRGjpuGZmFu - hex!["d2f9d537ffa59919a4028afdb627c14c14c97a1547e13e8e82203d2049b15b1a"] - .unchecked_into(), //5EUNaBpX9mJgcmLQHyG5Pkms6tbDiKuLbeTEJS924Js9cA1N hex!["6a8570b9c6408e54bacf123cc2bb1b0f087f9c149147d0005badba63a5a4ac01"] .unchecked_into(), @@ 
-560,9 +525,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5GzDPGbUM9uH52ZEwydasTj8edokGUJ7vEpoFWp9FE1YNuFB hex!["d9c056c98ca0e6b4eb7f5c58c007c1db7be0fe1f3776108f797dd4990d1ccc33"] .unchecked_into(), - //5GWZbVkJEfWZ7fRca39YAQeqri2Z7pkeHyd7rUctUHyQifLp - hex!["c4a980da30939d5bb9e4a734d12bf81259ae286aa21fa4b65405347fa40eff35"] - .unchecked_into(), //5CmLCFeSurRXXtwMmLcVo7sdJ9EqDguvJbuCYDcHkr3cpqyE hex!["1efc23c0b51ad609ab670ecf45807e31acbd8e7e5cb7c07cf49ee42992d2867c"] .unchecked_into(), @@ -587,9 +549,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5DnEySxbnppWEyN8cCLqvGjAorGdLRg2VmkY96dbJ1LHFK8N hex!["4bea0b37e0cce9bddd80835fa2bfd5606f5dcfb8388bbb10b10c483f0856cf14"] .unchecked_into(), - //5E1Y1FJ7dVP7qtE3wm241pTm72rTMcDT5Jd8Czv7Pwp7N3AH - hex!["560d90ca51e9c9481b8a9810060e04d0708d246714960439f804e5c6f40ca651"] - .unchecked_into(), //5CAC278tFCHAeHYqE51FTWYxHmeLcENSS1RG77EFRTvPZMJT hex!["042f07fc5268f13c026bbe199d63e6ac77a0c2a780f71cda05cee5a6f1b3f11f"] .unchecked_into(), @@ -614,9 +573,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5DrA2fZdzmNqT5j6DXNwVxPBjDV9jhkAqvjt6Us3bQHKy3cF hex!["4ee66173993dd0db5d628c4c9cb61a27b76611ad3c3925947f0d0011ee2c5dcc"] .unchecked_into(), - //5FNFDUGNLUtqg5LgrwYLNmBiGoP8KRxsvQpBkc7GQP6qaBUG - hex!["92156f54a114ee191415898f2da013d9db6a5362d6b36330d5fc23e27360ab66"] - .unchecked_into(), //5Gx6YeNhynqn8qkda9QKpc9S7oDr4sBrfAu516d3sPpEt26F hex!["d822d4088b20dca29a580a577a97d6f024bb24c9550bebdfd7d2d18e946a1c7d"] .unchecked_into(), @@ -657,7 +613,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { x.5.clone(), x.6.clone(), x.7.clone(), - x.8.clone(), ), ) }) @@ -768,35 +723,24 @@ pub fn get_authority_keys_from_seed( AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, BeefyId, ) { let keys = get_authority_keys_from_seed_no_beefy(seed); - (keys.0, keys.1, keys.2, keys.3, keys.4, keys.5, keys.6, keys.7, get_from_seed::(seed)) + (keys.0, keys.1, keys.2, keys.3, keys.4, keys.5, keys.6, get_from_seed::(seed)) } /// Helper function to generate stash, controller and session key from seed pub fn get_authority_keys_from_seed_no_beefy( seed: &str, -) -> ( - AccountId, - AccountId, - BabeId, - GrandpaId, - ImOnlineId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, -) { +) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { ( get_account_id_from_seed::(&format!("{}//stash", seed)), get_account_id_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), - get_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), @@ -829,7 +773,6 @@ pub fn westend_testnet_genesis( AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, @@ -861,7 +804,6 @@ pub fn westend_testnet_genesis( x.5.clone(), x.6.clone(), x.7.clone(), - x.8.clone(), ), ) }) @@ -899,7 +841,6 @@ pub fn rococo_testnet_genesis( AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, @@ -930,7 +871,6 @@ pub fn rococo_testnet_genesis( x.5.clone(), x.6.clone(), x.7.clone(), - x.8.clone(), ), ) }) diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index d9553afa024b49fe68153500f7d1a2e102052c6c..ccc3da22400dfc38f5e94aa4f6e89969499dbee8 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -116,7 +116,7 @@ 
sp_api::impl_runtime_apis! { } } - impl runtime_api::ParachainHost for Runtime { + impl runtime_api::ParachainHost for Runtime { fn validators() -> Vec { unimplemented!() } diff --git a/polkadot/node/service/src/grandpa_support.rs b/polkadot/node/service/src/grandpa_support.rs index 3a767d9783f0dfcff13166a39220ab161533ff72..729dbfde5c76b850dd1b76bd9195de5066d551bc 100644 --- a/polkadot/node/service/src/grandpa_support.rs +++ b/polkadot/node/service/src/grandpa_support.rs @@ -16,8 +16,6 @@ //! Polkadot-specific GRANDPA integration utilities. -use std::sync::Arc; - use sp_runtime::traits::{Block as BlockT, Header as _, NumberFor}; use crate::HeaderProvider; @@ -59,55 +57,6 @@ where } } -/// A custom GRANDPA voting rule that "pauses" voting (i.e. keeps voting for the -/// same last finalized block) after a given block at height `N` has been -/// finalized and for a delay of `M` blocks, i.e. until the best block reaches -/// `N` + `M`, the voter will keep voting for block `N`. -#[derive(Clone)] -pub(crate) struct PauseAfterBlockFor(pub(crate) N, pub(crate) N); - -impl grandpa::VotingRule for PauseAfterBlockFor> -where - Block: BlockT, - B: sp_blockchain::HeaderBackend + 'static, -{ - fn restrict_vote( - &self, - backend: Arc, - base: &Block::Header, - best_target: &Block::Header, - current_target: &Block::Header, - ) -> grandpa::VotingRuleResult { - let aux = || { - // only restrict votes targeting a block higher than the block - // we've set for the pause - if *current_target.number() > self.0 { - // if we're past the pause period (i.e. `self.0 + self.1`) - // then we no longer need to restrict any votes - if *best_target.number() > self.0 + self.1 { - return None - } - - // if we've finalized the pause block, just keep returning it - // until best number increases enough to pass the condition above - if *base.number() >= self.0 { - return Some((base.hash(), *base.number())) - } - - // otherwise find the target header at the pause block - // to vote on - return walk_backwards_to_target_block(&*backend, self.0, current_target).ok() - } - - None - }; - - let target = aux(); - - Box::pin(async move { target }) - } -} - /// GRANDPA hard forks due to borked migration of session keys after a runtime /// upgrade (at #1491596), the signaled authority set changes were invalid /// (blank keys) and were impossible to finalize. The authorities for these @@ -214,130 +163,3 @@ pub(crate) fn kusama_hard_forks() -> Vec> { }) .collect() } - -#[cfg(test)] -mod tests { - use consensus_common::BlockOrigin; - use grandpa::VotingRule; - use polkadot_test_client::{ - ClientBlockImportExt, DefaultTestClientBuilderExt, InitPolkadotBlockBuilder, - TestClientBuilder, TestClientBuilderExt, - }; - use sp_blockchain::HeaderBackend; - use sp_runtime::traits::Header; - use std::sync::Arc; - - #[test] - fn grandpa_pause_voting_rule_works() { - let _ = env_logger::try_init(); - - let client = Arc::new(TestClientBuilder::new().build()); - let mut hashes = vec![]; - hashes.push(client.info().genesis_hash); - - let mut push_blocks = { - let mut client = client.clone(); - - move |hashes: &mut Vec<_>, n| { - for _ in 0..n { - let block = client.init_polkadot_block_builder().build().unwrap().block; - hashes.push(block.header.hash()); - futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); - } - } - }; - - let get_header = { - let client = client.clone(); - move |n| client.expect_header(n).unwrap() - }; - - // the rule should filter all votes after block #20 - // is finalized until block #50 is imported. 
- let voting_rule = super::PauseAfterBlockFor(20, 30); - - // add 10 blocks - push_blocks(&mut hashes, 10); - assert_eq!(client.info().best_number, 10); - - // we have not reached the pause block - // therefore nothing should be restricted - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &get_header(hashes[0]), - &get_header(hashes[10]), - &get_header(hashes[10]) - )), - None, - ); - - // add 15 more blocks - // best block: #25 - push_blocks(&mut hashes, 15); - - // we are targeting the pause block, - // the vote should not be restricted - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &get_header(hashes[10]), - &get_header(hashes[20]), - &get_header(hashes[20]) - )), - None, - ); - - // we are past the pause block, votes should - // be limited to the pause block. - let pause_block = get_header(hashes[20]); - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &get_header(hashes[10]), - &get_header(hashes[21]), - &get_header(hashes[21]) - )), - Some((pause_block.hash(), *pause_block.number())), - ); - - // we've finalized the pause block, so we'll keep - // restricting our votes to it. - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &pause_block, // #20 - &get_header(hashes[21]), - &get_header(hashes[21]), - )), - Some((pause_block.hash(), *pause_block.number())), - ); - - // add 30 more blocks - // best block: #55 - push_blocks(&mut hashes, 30); - - // we're at the last block of the pause, this block - // should still be considered in the pause period - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &pause_block, // #20 - &get_header(hashes[50]), - &get_header(hashes[50]), - )), - Some((pause_block.hash(), *pause_block.number())), - ); - - // we're past the pause period, no votes should be filtered - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &pause_block, // #20 - &get_header(hashes[51]), - &get_header(hashes[51]), - )), - None, - ); - } -} diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 0ed7940b3e80abcb32e11618ba21a8c5fb909d17..e92e15fc0e0058c4318df3ad88b5f7259dd94774 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -51,7 +51,8 @@ use { }, polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig, polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, request_response::ReqProtocolNames, + peer_set::{PeerSet, PeerSetProtocolNames}, + request_response::ReqProtocolNames, }, sc_client_api::BlockBackend, sc_transaction_pool_api::OffchainTransactionPoolFactory, @@ -74,7 +75,7 @@ pub use { #[cfg(feature = "full-node")] use polkadot_node_subsystem::jaeger; -use std::{path::PathBuf, sync::Arc, time::Duration}; +use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration}; use prometheus_endpoint::Registry; #[cfg(feature = "full-node")] @@ -98,7 +99,7 @@ pub use service::{ ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, RuntimeGenesis, TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions, }; -pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi, StateBackend}; +pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi}; pub use sp_runtime::{ generic, traits::{self as runtime_traits, BlakeTwo256, Block as BlockT, Header as 
HeaderT, NumberFor}, @@ -623,13 +624,17 @@ where #[cfg(feature = "full-node")] pub struct NewFullParams { pub is_parachain_node: IsParachainNode, - pub grandpa_pause: Option<(u32, u32)>, pub enable_beefy: bool, + /// Whether to enable the block authoring backoff on production networks + /// where it isn't enabled by default. + pub force_authoring_backoff: bool, pub jaeger_agent: Option, pub telemetry_worker_handle: Option, /// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version /// check, both on startup and in the workers. pub node_version: Option, + /// Whether the node is attempting to run as a secure validator. + pub secure_validator_mode: bool, /// An optional path to a directory containing the workers. pub workers_path: Option, /// Optional custom names for the prepare and execute workers. @@ -714,11 +719,12 @@ pub fn new_full( mut config: Configuration, NewFullParams { is_parachain_node, - grandpa_pause, enable_beefy, + force_authoring_backoff, jaeger_agent, telemetry_worker_handle, node_version, + secure_validator_mode, workers_path, workers_names, overseer_gen, @@ -733,15 +739,21 @@ pub fn new_full( let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled; let role = config.role.clone(); let force_authoring = config.force_authoring; - let backoff_authoring_blocks = { + let backoff_authoring_blocks = if !force_authoring_backoff && + (config.chain_spec.is_polkadot() || config.chain_spec.is_kusama()) + { + // the block authoring backoff is disabled by default on production networks + None + } else { let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default(); if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() || - config.chain_spec.is_versi() + config.chain_spec.is_versi() || + config.chain_spec.is_dev() { - // it's a testnet that's in flux, finality has stalled sometimes due - // to operational issues and it's annoying to slow down block + // on testnets that are in flux (like rococo or versi), finality has stalled + // sometimes due to operational issues and it's annoying to slow down block // production to 1 block per hour. backoff.max_interval = 10; } @@ -801,9 +813,9 @@ pub fn new_full( // anything in terms of behaviour, but makes the logs more consistent with the other // Substrate nodes. 
let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec); - net_config.add_notification_protocol(grandpa::grandpa_peers_set_config( - grandpa_protocol_name.clone(), - )); + let (grandpa_protocol_config, grandpa_notification_service) = + grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()); + net_config.add_notification_protocol(grandpa_protocol_config); let beefy_gossip_proto_name = beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id()); @@ -816,12 +828,17 @@ pub fn new_full( client.clone(), prometheus_registry.clone(), ); - if enable_beefy { - net_config.add_notification_protocol(beefy::communication::beefy_peers_set_config( - beefy_gossip_proto_name.clone(), - )); - net_config.add_request_response_protocol(beefy_req_resp_cfg); - } + let beefy_notification_service = match enable_beefy { + false => None, + true => { + let (beefy_notification_config, beefy_notification_service) = + beefy::communication::beefy_peers_set_config(beefy_gossip_proto_name.clone()); + + net_config.add_notification_protocol(beefy_notification_config); + net_config.add_request_response_protocol(beefy_req_resp_cfg); + Some(beefy_notification_service) + }, + }; // validation/collation protocols are enabled only if `Overseer` is enabled let peerset_protocol_names = @@ -832,13 +849,21 @@ pub fn new_full( // // Collators and parachain full nodes require the collator and validator networking to send // collations and to be able to recover PoVs. - if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { - use polkadot_network_bridge::{peer_sets_info, IsAuthority}; - let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - for config in peer_sets_info(is_authority, &peerset_protocol_names) { - net_config.add_notification_protocol(config); - } - } + let notification_services = + if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { + use polkadot_network_bridge::{peer_sets_info, IsAuthority}; + let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; + + peer_sets_info(is_authority, &peerset_protocol_names) + .into_iter() + .map(|(config, (peerset, service))| { + net_config.add_notification_protocol(config); + (peerset, service) + }) + .collect::>>() + } else { + std::collections::HashMap::new() + }; let req_protocol_names = ReqProtocolNames::new(&genesis_hash, config.chain_spec.fork_id()); @@ -931,6 +956,7 @@ pub fn new_full( .ok_or(Error::DatabasePathRequired)? .join("pvf-artifacts"), node_version, + secure_validator_mode, prep_worker_path, exec_worker_path, }) @@ -1070,6 +1096,7 @@ pub fn new_full( offchain_transaction_pool_factory: OffchainTransactionPoolFactory::new( transaction_pool.clone(), ), + notification_services, }, ) .map_err(|e| { @@ -1171,13 +1198,15 @@ pub fn new_full( // need a keystore, regardless of which protocol we use below. 
let keystore_opt = if role.is_authority() { Some(keystore_container.keystore()) } else { None }; - if enable_beefy { + // beefy is enabled if its notification service exists + if let Some(notification_service) = beefy_notification_service { let justifications_protocol_name = beefy_on_demand_justifications_handler.protocol_name(); let network_params = beefy::BeefyNetworkParams { network: network.clone(), sync: sync_service.clone(), gossip_protocol_name: beefy_gossip_proto_name, justifications_protocol_name, + notification_service, _phantom: core::marker::PhantomData::, }; let payload_provider = beefy_primitives::mmr::MmrRootProvider::new(client.clone()); @@ -1238,32 +1267,14 @@ pub fn new_full( // provide better guarantees of block and vote data availability than // the observer. - // add a custom voting rule to temporarily stop voting for new blocks - // after the given pause block is finalized and restarting after the - // given delay. - let mut builder = grandpa::VotingRulesBuilder::default(); + let mut voting_rules_builder = grandpa::VotingRulesBuilder::default(); #[cfg(not(feature = "malus"))] let _malus_finality_delay = None; if let Some(delay) = _malus_finality_delay { info!(?delay, "Enabling malus finality delay",); - builder = builder.add(grandpa::BeforeBestBlockBy(delay)); - }; - - let voting_rule = match grandpa_pause { - Some((block, delay)) => { - info!( - block_number = %block, - delay = %delay, - "GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", - block, - delay, - ); - - builder.add(grandpa_support::PauseAfterBlockFor(block, delay)).build() - }, - None => builder.build(), + voting_rules_builder = voting_rules_builder.add(grandpa::BeforeBestBlockBy(delay)); }; let grandpa_config = grandpa::GrandpaParams { @@ -1271,10 +1282,11 @@ pub fn new_full( link: link_half, network: network.clone(), sync: sync_service.clone(), - voting_rule, + voting_rule: voting_rules_builder.build(), prometheus_registry: prometheus_registry.clone(), shared_voter_state, telemetry: telemetry.as_ref().map(|x| x.handle()), + notification_service: grandpa_notification_service, offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), }; diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index fd618863eeaa737ec4be26b24a0db82e0bdbf307..599563d64549246247d29c6f4f1d297f3d08f042 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -26,7 +26,7 @@ use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig use polkadot_node_core_chain_selection::Config as ChainSelectionConfig; use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; use polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, + peer_set::{PeerSet, PeerSetProtocolNames}, request_response::{ v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, ReqProtocolNames, }, @@ -41,15 +41,16 @@ use polkadot_overseer::{ OverseerConnector, OverseerHandle, SpawnGlue, }; +use parking_lot::Mutex; use polkadot_primitives::runtime_api::ParachainHost; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_client_api::AuxStore; use sc_keystore::LocalKeystore; -use sc_network::NetworkStateInfo; +use sc_network::{NetworkStateInfo, NotificationService}; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_consensus_babe::BabeApi; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; pub use 
polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem; pub use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem; @@ -140,6 +141,8 @@ where pub peerset_protocol_names: PeerSetProtocolNames, /// The offchain transaction pool factory. pub offchain_transaction_pool_factory: OffchainTransactionPoolFactory, + /// Notification services for validation/collation protocols. + pub notification_services: HashMap>, } /// Obtain a prepared `OverseerBuilder`, that is initialized @@ -173,6 +176,7 @@ pub fn prepared_overseer_builder( req_protocol_names, peerset_protocol_names, offchain_transaction_pool_factory, + notification_services, }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< @@ -218,6 +222,7 @@ where use polkadot_node_subsystem_util::metrics::Metrics; let metrics = ::register(registry)?; + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); let spawner = SpawnGlue(spawner); @@ -235,6 +240,7 @@ where network_bridge_metrics.clone(), req_protocol_names, peerset_protocol_names.clone(), + notification_sinks.clone(), )) .network_bridge_rx(NetworkBridgeRxSubsystem::new( network_service.clone(), @@ -242,6 +248,8 @@ where Box::new(sync_service.clone()), network_bridge_metrics, peerset_protocol_names, + notification_services, + notification_sinks, )) .availability_distribution(AvailabilityDistributionSubsystem::new( keystore.clone(), diff --git a/polkadot/node/service/src/parachains_db/mod.rs b/polkadot/node/service/src/parachains_db/mod.rs index 92f3f167f22fb6468b53681b4a7c33283cdb29c6..59af30dceeb90f47ca6a4263e2c1428fc74ac436 100644 --- a/polkadot/node/service/src/parachains_db/mod.rs +++ b/polkadot/node/service/src/parachains_db/mod.rs @@ -43,10 +43,7 @@ pub(crate) mod columns { // Version 4 only changed structures in approval voting, so we can re-export the v4 definitions. pub mod v3 { - pub use super::v4::{ - COL_APPROVAL_DATA, COL_AVAILABILITY_DATA, COL_AVAILABILITY_META, - COL_CHAIN_SELECTION_DATA, COL_DISPUTE_COORDINATOR_DATA, NUM_COLUMNS, ORDERED_COL, - }; + pub use super::v4::{NUM_COLUMNS, ORDERED_COL}; } pub mod v4 { diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index 1d76c79d3e32320e29dd781d99eedbd9669320d9..d22eebb5c8d4edebdd2174f3cb1ba144fea0b130 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -20,10 +20,16 @@ use std::{ fs, io, path::{Path, PathBuf}, str::FromStr, + sync::Arc, }; -use polkadot_node_core_approval_voting::approval_db::v2::{ - migration_helpers::v1_to_v2, Config as ApprovalDbConfig, +use polkadot_node_core_approval_voting::approval_db::{ + common::{Config as ApprovalDbConfig, Result as ApprovalDbResult}, + v2::migration_helpers::v1_to_latest, + v3::migration_helpers::v2_to_latest, +}; +use polkadot_node_subsystem_util::database::{ + kvdb_impl::DbAdapter as RocksDbAdapter, paritydb_impl::DbAdapter as ParityDbAdapter, Database, }; type Version = u32; @@ -32,7 +38,9 @@ const VERSION_FILE_NAME: &'static str = "parachain_db_version"; /// Current db version. /// Version 4 changes approval db format for `OurAssignment`. -pub(crate) const CURRENT_VERSION: Version = 4; +/// Version 5 changes approval db format to hold some additional +/// information about delayed approvals. 
+pub(crate) const CURRENT_VERSION: Version = 5; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -101,7 +109,8 @@ pub(crate) fn try_upgrade_db_to_next_version( // 2 -> 3 migration Some(2) => migrate_from_version_2_to_3(db_path, db_kind)?, // 3 -> 4 migration - Some(3) => migrate_from_version_3_to_4(db_path, db_kind)?, + Some(3) => migrate_from_version_3_or_4_to_5(db_path, db_kind, v1_to_latest)?, + Some(4) => migrate_from_version_3_or_4_to_5(db_path, db_kind, v2_to_latest)?, // Already at current version, do nothing. Some(CURRENT_VERSION) => CURRENT_VERSION, // This is an arbitrary future version, we don't handle it. @@ -174,14 +183,19 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result Result { +fn migrate_from_version_3_or_4_to_5( + path: &Path, + db_kind: DatabaseKind, + migration_function: F, +) -> Result +where + F: Fn(Arc, ApprovalDbConfig) -> ApprovalDbResult<()>, +{ gum::info!(target: LOG_TARGET, "Migrating parachains db from version 3 to version 4 ..."); - use polkadot_node_subsystem_util::database::{ - kvdb_impl::DbAdapter as RocksDbAdapter, paritydb_impl::DbAdapter as ParityDbAdapter, - }; - use std::sync::Arc; let approval_db_config = ApprovalDbConfig { col_approval_data: super::REAL_COLUMNS.col_approval_data }; @@ -194,7 +208,8 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result { let db_path = path @@ -207,7 +222,8 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result + +Commands: + data-availability-read Benchmark availability recovery strategies + +``` + +Note: `test-sequence` is a special test objective that wraps up an arbitrary number of test objectives. It is typically +used to run a suite of tests defined in a `yaml` file like in this [example](examples/availability_read.yaml). + +### Standard test options + +``` +Options: + --network The type of network to be emulated [default: ideal] [possible values: + ideal, healthy, degraded] + --n-cores Number of cores to fetch availability for [default: 100] + --n-validators Number of validators to fetch chunks from [default: 500] + --min-pov-size The minimum pov size in KiB [default: 5120] + --max-pov-size The maximum pov size bytes [default: 5120] + -n, --num-blocks The number of blocks the test is going to run [default: 1] + -p, --peer-bandwidth The bandwidth of simulated remote peers in KiB + -b, --bandwidth The bandwidth of our simulated node in KiB + --peer-error Simulated conection error ratio [0-100] + --peer-min-latency Minimum remote peer latency in milliseconds [0-5000] + --peer-max-latency Maximum remote peer latency in milliseconds [0-5000] + --profile Enable CPU Profiling with Pyroscope + --pyroscope-url Pyroscope Server URL [default: http://localhost:4040] + --pyroscope-sample-rate Pyroscope Sample Rate [default: 113] + -h, --help Print help + -V, --version Print version +``` + +These apply to all test objectives, except `test-sequence` which relies on the values being specified in a file. + +### Test objectives + +Each test objective can have its own specific configuration options, in contrast with the standard test options. + +For `data-availability-read` the recovery strategy to be used is configurable. + +``` +target/testnet/subsystem-bench data-availability-read --help +Benchmark availability recovery strategies + +Usage: subsystem-bench data-availability-read [OPTIONS] + +Options: + -f, --fetch-from-backers Turbo boost AD Read by fetching the full availability datafrom backers first. 
Saves CPU + as we don't need to re-construct from chunks. Tipically this is only faster if nodes + have enough bandwidth + -h, --help Print help + +``` + +### Understanding the test configuration + +A single test configuration `TestConfiguration` struct applies to a single run of a certain test objective. + +The configuration describes the following important parameters that influence the test duration and resource +usage: + +- how many validators are on the emulated network (`n_validators`) +- how many cores per block the subsystem will have to do work on (`n_cores`) +- for how many blocks the test should run (`num_blocks`) + +From the perspective of the subsystem under test, this means that it will receive an `ActiveLeavesUpdate` signal +followed by an arbitrary amount of messages. This process repeats itself for `num_blocks`. The messages are generally +test payloads pre-generated before the test run, or constructed on pre-generated payloads. For example the +`AvailabilityRecoveryMessage::RecoverAvailableData` message includes a `CandidateReceipt` which is generated before +the test is started. + +### Example run + +Let's run an availability read test which will recover availability for 10 cores with max PoV size on a 500 +node validator network. + +``` + target/testnet/subsystem-bench --n-cores 10 data-availability-read +[2023-11-28T09:01:59Z INFO subsystem_bench::core::display] n_validators = 500, n_cores = 10, pov_size = 5120 - 5120, + error = 0, latency = None +[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880 +[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Created test environment. +[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Pre-generating 10 candidates. +[2023-11-28T09:02:01Z INFO subsystem-bench::core] Initializing network emulation for 500 peers. +[2023-11-28T09:02:01Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999 +[2023-11-28T09:02:01Z INFO subsystem-bench::availability] Current block 1/1 +[2023-11-28T09:02:01Z INFO subsystem_bench::availability] 10 recoveries pending +[2023-11-28T09:02:04Z INFO subsystem_bench::availability] Block time 3231ms +[2023-11-28T09:02:04Z INFO subsystem-bench::availability] Sleeping till end of block (2768ms) +[2023-11-28T09:02:07Z INFO subsystem_bench::availability] All blocks processed in 6001ms +[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Throughput: 51200 KiB/block +[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Block time: 6001 ms +[2023-11-28T09:02:07Z INFO subsystem_bench::availability] + + Total received from network: 66 MiB + Total sent to network: 58 KiB + Total subsystem CPU usage 4.16s + CPU usage per block 4.16s + Total test environment CPU usage 0.00s + CPU usage per block 0.00s +``` + +`Block time` in the context of `data-availability-read` has a different meaning. It measures the amount of time it +took the subsystem to finish processing all of the messages sent in the context of the current test block. + +### Test logs + +You can select log target, subtarget and verbosity just like with the Polkadot node CLI: simply setting +`RUST_LOG="parachain=debug"` turns on debug logs for all parachain consensus subsystems in the test. + +### View test metrics + +Assuming the Grafana/Prometheus stack installation steps completed successfully, you should be able to +view the test progress in real time by accessing [this link](http://localhost:3000/goto/SM5B8pNSR?orgId=1). 
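+
+If the Grafana dashboard is not reachable in your setup, a quick sanity check is to query the Prometheus endpoint the
+benchmark exposes directly. This is only a sketch: it assumes the defaults shown in the example run above (the exporter
+listening on `127.0.0.1:9999` and scraped at the standard `/metrics` path, as in the bundled `prometheus.yml`) and the
+metric names used by the bundled Grafana dashboards.
+
+```
+# Test configuration gauges published by the benchmark (e.g. subsystem_benchmark_n_validators, subsystem_benchmark_n_cores).
+curl -s http://127.0.0.1:9999/metrics | grep subsystem_benchmark_
+
+# Accumulated CPU time per task group, the series the dashboard's CPU usage panels are built on.
+curl -s http://127.0.0.1:9999/metrics | grep substrate_tasks_polling_duration_sum
+```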
+ +Now run +`target/testnet/subsystem-bench test-sequence --path polkadot/node/subsystem-bench/examples/availability_read.yaml` +and view the metrics in real time and spot differences between different `n_validators` values. + +## Create new test objectives + +This tool is intended to make it easy to write new test objectives that focus on individual subsystems, +or even multiple subsystems (for example `approval-distribution` and `approval-voting`). + +A special kind of test objective is the performance regression test for the CI pipeline. These should be sequences +of tests that check the performance characteristics (such as CPU usage, speed) of the subsystem under test in both +happy and negative scenarios (low bandwidth, network errors and low connectivity). + +### Reusable test components + +To write a new test objective faster, you need to use some higher-level wrappers and logic: `TestEnvironment`, +`TestConfiguration`, `TestAuthorities`, `NetworkEmulator`. To create the `TestEnvironment` you will +need to also build an `Overseer`, but that should be easy using the mockups for subsystems in `core::mock`. + +### Mocking + +Ideally we want to have a single mock implementation for subsystems that can be minimally configured to +be used in different tests. A good example is `runtime-api` which currently only responds to session information +requests based on static data. It can be easily extended to service other requests. diff --git a/polkadot/node/subsystem-bench/docker/docker-compose.yml b/polkadot/node/subsystem-bench/docker/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..fc5eb1f634e64a2c0a527ec7b3fd1cd25083c925 --- /dev/null +++ b/polkadot/node/subsystem-bench/docker/docker-compose.yml @@ -0,0 +1,35 @@ +services: + grafana: + image: grafana/grafana-enterprise:latest + container_name: grafana + restart: always + networks: + - subsystem-bench + ports: + - "3000:3000" + + prometheus: + image: prom/prometheus:latest + container_name: prometheus + restart: always + networks: + - subsystem-bench + volumes: + - ./prometheus:/etc/prometheus + extra_hosts: + - "host.docker.internal:host-gateway" + ports: + - "9090:9090" + - "9999:9999" + + pyroscope: + container_name: pyroscope + image: grafana/pyroscope:latest + restart: always + networks: + - subsystem-bench + ports: + - "4040:4040" + +networks: + subsystem-bench: diff --git a/polkadot/node/subsystem-bench/docker/prometheus/prometheus.yml b/polkadot/node/subsystem-bench/docker/prometheus/prometheus.yml new file mode 100644 index 0000000000000000000000000000000000000000..0bb25cfcb36c667b7609fcfc650ee81092baf28a --- /dev/null +++ b/polkadot/node/subsystem-bench/docker/prometheus/prometheus.yml @@ -0,0 +1,11 @@ +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["localhost:9090"] + - job_name: "subsystem-bench" + scrape_interval: 0s500ms + static_configs: + - targets: ['host.docker.internal:9999'] diff --git a/polkadot/node/subsystem-bench/examples/availability_read.yaml b/polkadot/node/subsystem-bench/examples/availability_read.yaml new file mode 100644 index 0000000000000000000000000000000000000000..311ea972141fc339367d30234cdf0c60911dd824 --- /dev/null +++ b/polkadot/node/subsystem-bench/examples/availability_read.yaml @@ -0,0 +1,57 @@ +TestConfiguration: +# Test 1 +- objective: !DataAvailabilityRead + fetch_from_backers: false + n_validators: 300 + n_cores: 20 + min_pov_size: 5120 + max_pov_size: 5120 + peer_bandwidth: 52428800 + bandwidth: 52428800 
+ latency: + min_latency: + secs: 0 + nanos: 1000000 + max_latency: + secs: 0 + nanos: 100000000 + error: 3 + num_blocks: 3 + +# Test 2 +- objective: !DataAvailabilityRead + fetch_from_backers: false + n_validators: 500 + n_cores: 20 + min_pov_size: 5120 + max_pov_size: 5120 + peer_bandwidth: 52428800 + bandwidth: 52428800 + latency: + min_latency: + secs: 0 + nanos: 1000000 + max_latency: + secs: 0 + nanos: 100000000 + error: 3 + num_blocks: 3 + +# Test 3 +- objective: !DataAvailabilityRead + fetch_from_backers: false + n_validators: 1000 + n_cores: 20 + min_pov_size: 5120 + max_pov_size: 5120 + peer_bandwidth: 52428800 + bandwidth: 52428800 + latency: + min_latency: + secs: 0 + nanos: 1000000 + max_latency: + secs: 0 + nanos: 100000000 + error: 3 + num_blocks: 3 diff --git a/polkadot/node/subsystem-bench/grafana/availability-read.json b/polkadot/node/subsystem-bench/grafana/availability-read.json new file mode 100644 index 0000000000000000000000000000000000000000..31c4ad3c795230402ec54d5558c24a3ab9664db4 --- /dev/null +++ b/polkadot/node/subsystem-bench/grafana/availability-read.json @@ -0,0 +1,1874 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Subsystem and test environment metrics", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 2, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 60000, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 90, + "interval": "1s", + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "editorMode": "code", + "expr": "subsystem_benchmark_n_validators{}", + "instant": false, + "legendFormat": "n_vaidators", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "editorMode": "code", + "expr": "subsystem_benchmark_n_cores{}", + "hide": false, + "instant": false, + "legendFormat": "n_cores", + "range": true, + "refId": "B" + } + ], + "title": "Test configuration", + "type": "timeseries" + }, + { + "collapsed": 
false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 31, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$data_source" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 57, + "interval": "1s", + "options": { + "legend": { + "calcs": [ + "mean", + "min", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.0.2", + "repeat": "nodename", + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "sum(rate(substrate_tasks_polling_duration_sum{}[2s])) by ($cpu_group_by)", + "interval": "", + "legendFormat": "{{task_group}}", + "range": true, + "refId": "A" + } + ], + "title": "All tasks CPU usage breakdown", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$data_source" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "area" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 6 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 93, + "interval": "1s", + "options": { + "legend": { + "calcs": [ + "mean", + "min", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "increase(substrate_tasks_polling_duration_sum{task_group=\"availability-recovery\"}[6s])", + "interval": "", + 
"legendFormat": "{{task_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Availability subsystem CPU usage per block", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$data_source" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 94, + "interval": "1s", + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Last", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "sum(substrate_tasks_polling_duration_sum{}) by ($cpu_group_by)", + "interval": "", + "legendFormat": "{{task_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Total CPU burn", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$data_source" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "area" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 6000 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 95, + "interval": "1s", + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "sortBy": "Last", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "subsystem_benchmark_block_time", + "interval": "", + "legendFormat": "Instant block time", + "range": true, + "refId": "A" + } + ], + "title": "All candidates in block recovery time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "description": "", + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 100, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 2, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 89, + "interval": "1s", + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "editorMode": "code", + "expr": "sum(rate(subsystem_benchmark_network_peer_total_bytes_received{}[5s]))", + "instant": false, + "legendFormat": "Received", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "editorMode": "code", + "expr": "sum(rate(subsystem_benchmark_network_peer_total_bytes_sent{}[5s]))", + "hide": false, + "instant": false, + "legendFormat": "Sent", + "range": true, + "refId": "B" + } + ], + "title": "Emulated network throughput ", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 0, + "y": 52 + }, + "id": 88, + "interval": "1s", + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "editorMode": "code", + "expr": "rate(subsystem_benchmark_network_peer_total_bytes_received{}[10s])", + "instant": false, + "legendFormat": "Received by {{peer}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "editorMode": "code", + "expr": 
"rate(subsystem_benchmark_network_peer_total_bytes_sent{}[10s])", + "hide": false, + "instant": false, + "legendFormat": "Sent by {{peer}}", + "range": true, + "refId": "B" + } + ], + "title": "Emulated peer throughput", + "type": "timeseries" + }, + { + "cards": {}, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 12, + "y": 52 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 92, + "interval": "1s", + "legend": { + "show": true + }, + "maxDataPoints": 1340, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": { + "decimals": 0 + }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "bytes" + } + }, + "pluginVersion": "10.1.1", + "reverseYBuckets": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(increase(subsystem_benchmark_pov_size_bucket{}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{le}}", + "queryType": "randomWalk", + "refId": "B" + } + ], + "title": "Recovered PoV sizes", + "tooltip": { + "show": true, + "showHistogram": true + }, + "tooltipDecimals": 0, + "transformations": [], + "type": "heatmap", + "xAxis": { + "show": true + }, + "yAxis": { + "decimals": 0, + "format": "s", + "logBase": 1, + "show": true + }, + "yBucketBound": "auto" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "Number of erasure-encoded chunks of data belonging to candidate blocks. 
", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic", + "seriesBy": "max" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "chunks/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 67 + }, + "id": 43, + "interval": "1s", + "maxDataPoints": 1340, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polkadot_parachain_availability_recovery_chunk_requests_issued{}[10s]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Chunks requested", + "queryType": "randomWalk", + "refId": "B" + } + ], + "title": "Availability", + "transformations": [], + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 77 + }, + "id": 35, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Availability subystem metrics", + "type": "row" + }, + { + "cards": {}, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 78 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 68, + "interval": "1s", + "legend": { + "show": true + }, + "maxDataPoints": 1340, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": { + "decimals": 0 + }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "10.1.1", + "reverseYBuckets": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + 
"editorMode": "code", + "exemplar": false, + "expr": "sum(increase(polkadot_parachain_availability_recovery_time_total_bucket{}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{le}}", + "queryType": "randomWalk", + "refId": "B" + } + ], + "title": "Time to recover a PoV", + "tooltip": { + "show": true, + "showHistogram": true + }, + "tooltipDecimals": 0, + "transformations": [], + "type": "heatmap", + "xAxis": { + "show": true + }, + "yAxis": { + "decimals": 0, + "format": "s", + "logBase": 1, + "show": true + }, + "yBucketBound": "auto" + }, + { + "cards": {}, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 78 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 67, + "interval": "1s", + "legend": { + "show": true + }, + "maxDataPoints": 1340, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": { + "decimals": 0 + }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "10.1.1", + "reverseYBuckets": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(increase(polkadot_parachain_availability_recovery_time_chunk_request_bucket{}[$__rate_interval])) by (le)", + "format": "heatmap", + "instant": false, + "interval": "", + "legendFormat": "{{le}}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Chunk request duration", + "tooltip": { + "show": true, + "showHistogram": true + }, + "tooltipDecimals": 0, + "transformations": [], + "type": "heatmap", + "xAxis": { + "show": true + }, + "yAxis": { + "decimals": 0, + "format": "bitfields", + "logBase": 1, + "show": true + }, + "yBucketBound": "auto" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic", + "seriesBy": "max" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 88 + }, + "id": 85, + "interval": "1s", + "maxDataPoints": 1340, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(polkadot_parachain_availability_recovery_bytes_total{}[30s])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Bytes recovered", + "queryType": "randomWalk", + "refId": "B" + } + ], + "title": "Recovery throughtput", + "transformations": [], + "type": "timeseries" + }, + { + "cards": {}, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 88 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 84, + "interval": "1s", + "legend": { + "show": true + }, + "maxDataPoints": 1340, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": { + "decimals": 0 + }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "10.1.1", + "reverseYBuckets": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(increase(polkadot_parachain_availability_reencode_chunks_bucket{}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{le}}", + "queryType": "randomWalk", + "refId": "B" + } + ], + "title": "Re-encoding chunks timing", + "tooltip": { + "show": true, + "showHistogram": true + }, + "tooltipDecimals": 0, + "transformations": [], + "type": "heatmap", + "xAxis": { + "show": true + }, + "yAxis": { + "decimals": 0, + "format": "s", + "logBase": 1, + "show": true + }, + "yBucketBound": "auto" + }, + { + "cards": {}, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + 
"type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 98 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 83, + "interval": "1s", + "legend": { + "show": true + }, + "maxDataPoints": 1340, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": { + "decimals": 0 + }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "10.1.1", + "reverseYBuckets": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(increase(polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{le}}", + "queryType": "randomWalk", + "refId": "B" + } + ], + "title": "Erasure recovery (no I/O)", + "tooltip": { + "show": true, + "showHistogram": true + }, + "tooltipDecimals": 0, + "transformations": [], + "type": "heatmap", + "xAxis": { + "show": true + }, + "yAxis": { + "decimals": 0, + "format": "s", + "logBase": 1, + "show": true + }, + "yBucketBound": "auto" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "Number of erasure-encoded chunks of data belonging to candidate blocks. 
", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic", + "seriesBy": "max" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepAfter", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 108 + }, + "id": 86, + "interval": "1s", + "maxDataPoints": 1340, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polkadot_parachain_availability_recovery_recoveries_finished{}[1s]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Finished", + "queryType": "randomWalk", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polkadot_parachain_availability_recovery_recovieries_started{}[1s]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Started", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Recoveries", + "transformations": [], + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 118 + }, + "id": 2, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Approval voting", + "type": "row" + } + ], + "refresh": false, + "schemaVersion": 38, + "style": "dark", + "tags": [ + "subsystem", + "benchmark" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Prometheus", + "value": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "hide": 0, + "includeAll": false, + "label": "Source of data", + "multi": false, + "name": "data_source", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": true, + "text": "task_name", + "value": "task_name" + }, + "description": "Sum CPU usage by task name or task group.", + "hide": 0, + "includeAll": false, + "label": "Group CPU usage", + "multi": false, + "name": "cpu_group_by", + "options": [ + { + "selected": true, + "text": "task_name", + "value": "task_name" + }, + { + "selected": false, + "text": "task_group", + "value": "task_group" + } + ], + "query": "task_name, task_group", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "2023-11-28T13:05:32.794Z", + "to": "2023-11-28T13:06:56.173Z" + }, + 
"timepicker": { + "refresh_intervals": [ + "5s", + "10s" + ] + }, + "timezone": "utc", + "title": "Data Availability Read", + "uid": "asdadasd1", + "version": 58, + "weekStart": "" +} \ No newline at end of file diff --git a/polkadot/node/subsystem-bench/grafana/cpu-profiling.json b/polkadot/node/subsystem-bench/grafana/cpu-profiling.json new file mode 100644 index 0000000000000000000000000000000000000000..0d53a1b9365762f60154a28188d03aab1857ddb4 --- /dev/null +++ b/polkadot/node/subsystem-bench/grafana/cpu-profiling.json @@ -0,0 +1,70 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 1, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "grafana-pyroscope-datasource", + "uid": "bc3bc04f-85f9-464b-8ae3-fbe0949063f6" + }, + "gridPos": { + "h": 18, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "targets": [ + { + "datasource": { + "type": "grafana-pyroscope-datasource", + "uid": "bc3bc04f-85f9-464b-8ae3-fbe0949063f6" + }, + "groupBy": [], + "labelSelector": "{service_name=\"subsystem-bench\"}", + "profileTypeId": "process_cpu:cpu:nanoseconds:cpu:nanoseconds", + "queryType": "profile", + "refId": "A" + } + ], + "title": "CPU Profiling", + "type": "flamegraph" + } + ], + "refresh": "", + "schemaVersion": 38, + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "CPU Profiling", + "uid": "c31191d5-fe2b-49e2-8b1c-1451f31d1628", + "version": 1, + "weekStart": "" + } diff --git a/polkadot/node/subsystem-bench/grafana/task-cpu-usage.json b/polkadot/node/subsystem-bench/grafana/task-cpu-usage.json new file mode 100644 index 0000000000000000000000000000000000000000..90763444abf195dd62379ac518e4473d04c12a04 --- /dev/null +++ b/polkadot/node/subsystem-bench/grafana/task-cpu-usage.json @@ -0,0 +1,755 @@ +{ + "annotations": { + "list": [ + { + "$$hashKey": "object:326", + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + }, + { + "$$hashKey": "object:327", + "datasource": { + "uid": "$data_source" + }, + "enable": true, + "expr": "increase(${metric_namespace}_tasks_ended_total{reason=\"panic\", node=~\"${nodename}\"}[10m])", + "hide": true, + "iconColor": "rgba(255, 96, 96, 1)", + "limit": 100, + "name": "Task panics", + "rawQuery": "SELECT\n extract(epoch from time_column) AS time,\n text_column as text,\n tags_column as tags\nFROM\n metric_table\nWHERE\n $__timeFilter(time_column)\n", + "showIn": 0, + "step": "10m", + "tags": [], + "textFormat": "{{node}} - {{task_name}}", + "titleFormat": "Panic!", + "type": "tags" + }, + { + "$$hashKey": "object:621", + "datasource": { + "uid": "$data_source" + }, + "enable": true, + "expr": "changes(${metric_namespace}_process_start_time_seconds{node=~\"${nodename}\"}[10m])", + "hide": false, + "iconColor": "#8AB8FF", + "name": "Node reboots", + "showIn": 0, + "step": "10m", + "textFormat": "{{node}}", + "titleFormat": "Reboots" + } + ] + }, + "editable": true, + 
"fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 1, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 29, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Tasks", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 3, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 11, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": "nodename", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "sum(rate(substrate_tasks_polling_duration_sum{}[$__rate_interval])) by (task_name)", + "interval": "", + "legendFormat": "{{task_name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "CPU time spent on each task", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:2721", + "format": "percentunit", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:2722", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 3, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 10 + }, + "hiddenSeries": false, + "id": 30, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": "nodename", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "rate(substrate_tasks_polling_duration_count{}[$__rate_interval])", + "interval": "", + "legendFormat": "{{task_name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Task polling rate per second", + "tooltip": { + "shared": true, 
+ "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:2571", + "format": "cps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:2572", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 43, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": "nodename", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "increase(substrate_tasks_polling_duration_sum{}[$__rate_interval]) / increase(substrate_tasks_polling_duration_count{}[$__rate_interval])", + "interval": "", + "legendFormat": "{{task_name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Average time it takes to call Future::poll()", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:2571", + "format": "s", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:2572", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 22 + }, + "hiddenSeries": false, + "id": 15, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": true, + "values": true + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": "nodename", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": true, + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "increase(substrate_tasks_spawned_total{}[$__rate_interval])", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{task_name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Number of tasks started", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + 
"type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:771", + "format": "short", + "logBase": 10, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:772", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 28 + }, + "hiddenSeries": false, + "id": 2, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": "nodename", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "substrate_tasks_spawned_total{} - sum(substrate_tasks_ended_total{}) without(reason)\n\n# Fallback if tasks_ended_total is null for that task\nor on(task_name) substrate_tasks_spawned_total{}", + "interval": "", + "legendFormat": "{{task_name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Number of tasks running", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:919", + "format": "short", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:920", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "e56e7dd2-a992-4eec-aa96-e47b21c9020b" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 34 + }, + "hiddenSeries": false, + "id": 7, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": "nodename", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": true, + "targets": [ + { + "datasource": { + "uid": "$data_source" + }, + "editorMode": "code", + "expr": "irate(substrate_tasks_polling_duration_bucket{le=\"+Inf\"}[$__rate_interval])\n - ignoring(le)\n irate(substrate_tasks_polling_duration_bucket{le=\"1.024\"}[$__rate_interval]) > 0", + "interval": "", + "legendFormat": "{{task_name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Number of calls to 
`Future::poll` that took more than one second", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:3040", + "format": "cps", + "label": "Calls to `Future::poll`/second", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:3041", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 27, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Unbounded Channels", + "type": "row" + } + ], + "refresh": "5s", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "utc", + "title": "Substrate Service Tasks with substrate prefix", + "uid": "S7sc-M_Gk", + "version": 17, + "weekStart": "" + } \ No newline at end of file diff --git a/polkadot/node/subsystem-bench/src/availability/cli.rs b/polkadot/node/subsystem-bench/src/availability/cli.rs new file mode 100644 index 0000000000000000000000000000000000000000..65df8c1552aa8266497eb2738cc562f656050b68 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/availability/cli.rs @@ -0,0 +1,37 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use serde::{Deserialize, Serialize}; + +#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq)] +#[value(rename_all = "kebab-case")] +#[non_exhaustive] +pub enum NetworkEmulation { + Ideal, + Healthy, + Degraded, +} + +#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct DataAvailabilityReadOptions { + #[clap(short, long, default_value_t = false)] + /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as + /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have + /// enough bandwidth. + pub fetch_from_backers: bool, +} diff --git a/polkadot/node/subsystem-bench/src/availability/mod.rs b/polkadot/node/subsystem-bench/src/availability/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..7c81b9313659771889f52ceb063089a26fc079c7 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/availability/mod.rs @@ -0,0 +1,339 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +use itertools::Itertools; +use std::{collections::HashMap, iter::Cycle, ops::Sub, sync::Arc, time::Instant}; + +use crate::TestEnvironment; +use polkadot_node_subsystem::{Overseer, OverseerConnector, SpawnGlue}; +use polkadot_node_subsystem_test_helpers::derive_erasure_chunks_with_proofs_and_root; +use polkadot_overseer::Handle as OverseerHandle; +use sc_network::request_responses::ProtocolConfig; + +use colored::Colorize; + +use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; +use polkadot_node_metrics::metrics::Metrics; + +use polkadot_availability_recovery::AvailabilityRecoverySubsystem; + +use crate::GENESIS_HASH; +use parity_scale_codec::Encode; +use polkadot_node_network_protocol::request_response::{IncomingRequest, ReqProtocolNames}; +use polkadot_node_primitives::{BlockData, PoV}; +use polkadot_node_subsystem::messages::{AllMessages, AvailabilityRecoveryMessage}; + +use crate::core::{ + environment::TestEnvironmentDependencies, + mock::{ + av_store, + network_bridge::{self, MockNetworkBridgeTx, NetworkAvailabilityState}, + runtime_api, MockAvailabilityStore, MockRuntimeApi, + }, +}; + +use super::core::{configuration::TestConfiguration, mock::dummy_builder, network::*}; + +const LOG_TARGET: &str = "subsystem-bench::availability"; + +use polkadot_node_primitives::{AvailableData, ErasureChunk}; + +use super::{cli::TestObjective, core::mock::AlwaysSupportsParachains}; +use polkadot_node_subsystem_test_helpers::mock::new_block_import_info; +use polkadot_primitives::{ + CandidateHash, CandidateReceipt, GroupIndex, Hash, HeadData, PersistedValidationData, +}; +use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; +use sc_service::SpawnTaskHandle; + +mod cli; +pub use cli::{DataAvailabilityReadOptions, NetworkEmulation}; + +fn build_overseer( + spawn_task_handle: SpawnTaskHandle, + runtime_api: MockRuntimeApi, + av_store: MockAvailabilityStore, + network_bridge: MockNetworkBridgeTx, + availability_recovery: AvailabilityRecoverySubsystem, +) -> (Overseer<SpawnGlue<SpawnTaskHandle>, AlwaysSupportsParachains>, OverseerHandle) { + let overseer_connector = OverseerConnector::with_event_capacity(64000); + let dummy = dummy_builder!(spawn_task_handle); + let builder = dummy + .replace_runtime_api(|_| runtime_api) + .replace_availability_store(|_| av_store) + .replace_network_bridge_tx(|_| network_bridge) + .replace_availability_recovery(|_| availability_recovery); + + let (overseer, raw_handle) = + builder.build_with_connector(overseer_connector).expect("Should not fail"); + + (overseer, OverseerHandle::new(raw_handle)) +} + +/// Takes a test configuration and uses it to create the `TestEnvironment`.
+pub fn prepare_test( + config: TestConfiguration, + state: &mut TestState, +) -> (TestEnvironment, ProtocolConfig) { + prepare_test_inner(config, state, TestEnvironmentDependencies::default()) +} + +fn prepare_test_inner( + config: TestConfiguration, + state: &mut TestState, + dependencies: TestEnvironmentDependencies, +) -> (TestEnvironment, ProtocolConfig) { + // Generate test authorities. + let test_authorities = config.generate_authorities(); + + let runtime_api = runtime_api::MockRuntimeApi::new(config.clone(), test_authorities.clone()); + + let av_store = + av_store::MockAvailabilityStore::new(state.chunks.clone(), state.candidate_hashes.clone()); + + let availability_state = NetworkAvailabilityState { + candidate_hashes: state.candidate_hashes.clone(), + available_data: state.available_data.clone(), + chunks: state.chunks.clone(), + }; + + let network = NetworkEmulator::new(&config, &dependencies, &test_authorities); + + let network_bridge_tx = network_bridge::MockNetworkBridgeTx::new( + config.clone(), + availability_state, + network.clone(), + ); + + let use_fast_path = match &state.config().objective { + TestObjective::DataAvailabilityRead(options) => options.fetch_from_backers, + _ => panic!("Unexpected objective"), + }; + + let (collation_req_receiver, req_cfg) = + IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); + + let subsystem = if use_fast_path { + AvailabilityRecoverySubsystem::with_fast_path( + collation_req_receiver, + Metrics::try_register(&dependencies.registry).unwrap(), + ) + } else { + AvailabilityRecoverySubsystem::with_chunks_only( + collation_req_receiver, + Metrics::try_register(&dependencies.registry).unwrap(), + ) + }; + + let (overseer, overseer_handle) = build_overseer( + dependencies.task_manager.spawn_handle(), + runtime_api, + av_store, + network_bridge_tx, + subsystem, + ); + + (TestEnvironment::new(dependencies, config, network, overseer, overseer_handle), req_cfg) +} + +#[derive(Clone)] +pub struct TestState { + // Full test configuration + config: TestConfiguration, + // A cycle iterator on all PoV sizes used in the test. + pov_sizes: Cycle<std::vec::IntoIter<usize>>, + // Generated candidate receipts to be used in the test + candidates: Cycle<std::vec::IntoIter<CandidateReceipt>>, + // Map from pov size to candidate index + pov_size_to_candidate: HashMap<usize, usize>, + // Map from generated candidate hashes to candidate index in `available_data` + // and `chunks`. + candidate_hashes: HashMap<CandidateHash, usize>, + // Per candidate index receipts. + candidate_receipt_templates: Vec<CandidateReceipt>, + // Per candidate index `AvailableData` + available_data: Vec<AvailableData>, + // Per candidate index chunks + chunks: Vec<Vec<ErasureChunk>>, +} + +impl TestState { + fn config(&self) -> &TestConfiguration { + &self.config + } + + pub fn next_candidate(&mut self) -> Option<CandidateReceipt> { + let candidate = self.candidates.next(); + let candidate_hash = candidate.as_ref().unwrap().hash(); + gum::trace!(target: LOG_TARGET, "Next candidate selected {:?}", candidate_hash); + candidate + } + + /// Generate candidates to be used in the test.
+ fn generate_candidates(&mut self) { + let count = self.config.n_cores * self.config.num_blocks; + gum::info!(target: LOG_TARGET,"{}", format!("Pre-generating {} candidates.", count).bright_blue()); + + // Generate all candidates + self.candidates = (0..count) + .map(|index| { + let pov_size = self.pov_sizes.next().expect("This is a cycle; qed"); + let candidate_index = *self + .pov_size_to_candidate + .get(&pov_size) + .expect("pov_size always exists; qed"); + let mut candidate_receipt = + self.candidate_receipt_templates[candidate_index].clone(); + + // Make it unique. + candidate_receipt.descriptor.relay_parent = Hash::from_low_u64_be(index as u64); + // Store the new candidate in the state + self.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); + + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); + + candidate_receipt + }) + .collect::>() + .into_iter() + .cycle(); + } + + pub fn new(config: &TestConfiguration) -> Self { + let config = config.clone(); + + let mut chunks = Vec::new(); + let mut available_data = Vec::new(); + let mut candidate_receipt_templates = Vec::new(); + let mut pov_size_to_candidate = HashMap::new(); + + // we use it for all candidates. + let persisted_validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: Default::default(), + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + + // For each unique pov we create a candidate receipt. + for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { + gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); + + let mut candidate_receipt = dummy_candidate_receipt(dummy_hash()); + let pov = PoV { block_data: BlockData(vec![index as u8; pov_size]) }; + + let new_available_data = AvailableData { + validation_data: persisted_validation_data.clone(), + pov: Arc::new(pov), + }; + + let (new_chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + config.n_validators, + &new_available_data, + |_, _| {}, + ); + + candidate_receipt.descriptor.erasure_root = erasure_root; + + chunks.push(new_chunks); + available_data.push(new_available_data); + pov_size_to_candidate.insert(pov_size, index); + candidate_receipt_templates.push(candidate_receipt); + } + + let pov_sizes = config.pov_sizes().to_owned(); + let pov_sizes = pov_sizes.into_iter().cycle(); + gum::info!(target: LOG_TARGET, "{}","Created test environment.".bright_blue()); + + let mut _self = Self { + config, + available_data, + candidate_receipt_templates, + chunks, + pov_size_to_candidate, + pov_sizes, + candidate_hashes: HashMap::new(), + candidates: Vec::new().into_iter().cycle(), + }; + + _self.generate_candidates(); + _self + } +} + +pub async fn benchmark_availability_read(env: &mut TestEnvironment, mut state: TestState) { + let config = env.config().clone(); + + env.import_block(new_block_import_info(Hash::repeat_byte(1), 1)).await; + + let start_marker = Instant::now(); + let mut batch = FuturesUnordered::new(); + let mut availability_bytes = 0u128; + + env.metrics().set_n_validators(config.n_validators); + env.metrics().set_n_cores(config.n_cores); + + for block_num in 0..env.config().num_blocks { + gum::info!(target: LOG_TARGET, "Current block {}/{}", block_num + 1, env.config().num_blocks); + env.metrics().set_current_block(block_num); + + let block_start_ts = Instant::now(); + for candidate_num in 0..config.n_cores as u64 { + let candidate = + 
state.next_candidate().expect("We always send up to n_cores*num_blocks; qed"); + let (tx, rx) = oneshot::channel(); + batch.push(rx); + + let message = AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData( + candidate.clone(), + 1, + Some(GroupIndex( + candidate_num as u32 % (std::cmp::max(5, config.n_cores) / 5) as u32, + )), + tx, + ), + ); + env.send_message(message).await; + } + + gum::info!("{}", format!("{} recoveries pending", batch.len()).bright_black()); + while let Some(completed) = batch.next().await { + let available_data = completed.unwrap().unwrap(); + env.metrics().on_pov_size(available_data.encoded_size()); + availability_bytes += available_data.encoded_size() as u128; + } + + let block_time = Instant::now().sub(block_start_ts).as_millis() as u64; + env.metrics().set_block_time(block_time); + gum::info!("All work for block completed in {}", format!("{:?}ms", block_time).cyan()); + } + + let duration: u128 = start_marker.elapsed().as_millis(); + let availability_bytes = availability_bytes / 1024; + gum::info!("All blocks processed in {}", format!("{:?}ms", duration).cyan()); + gum::info!( + "Throughput: {}", + format!("{} KiB/block", availability_bytes / env.config().num_blocks as u128).bright_red() + ); + gum::info!( + "Block time: {}", + format!("{} ms", start_marker.elapsed().as_millis() / env.config().num_blocks as u128) + .red() + ); + + gum::info!("{}", &env); + env.stop().await; +} diff --git a/polkadot/node/subsystem-bench/src/cli.rs b/polkadot/node/subsystem-bench/src/cli.rs new file mode 100644 index 0000000000000000000000000000000000000000..3352f33a3503bcdb53cd4ba5f0bc789b9d4cf159 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/cli.rs @@ -0,0 +1,60 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +use super::availability::DataAvailabilityReadOptions; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct TestSequenceOptions { + #[clap(short, long, ignore_case = true)] + pub path: String, +} + +/// Define the supported benchmarks targets +#[derive(Debug, Clone, clap::Parser, Serialize, Deserialize)] +#[command(rename_all = "kebab-case")] +pub enum TestObjective { + /// Benchmark availability recovery strategies. + DataAvailabilityRead(DataAvailabilityReadOptions), + /// Run a test sequence specified in a file + TestSequence(TestSequenceOptions), +} + +#[derive(Debug, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct StandardTestOptions { + #[clap(long, ignore_case = true, default_value_t = 100)] + /// Number of cores to fetch availability for. + pub n_cores: usize, + + #[clap(long, ignore_case = true, default_value_t = 500)] + /// Number of validators to fetch chunks from. 
+	pub n_validators: usize,
+
+	#[clap(long, ignore_case = true, default_value_t = 5120)]
+	/// The minimum pov size in KiB
+	pub min_pov_size: usize,
+
+	#[clap(long, ignore_case = true, default_value_t = 5120)]
+	/// The maximum pov size in KiB
+	pub max_pov_size: usize,
+
+	#[clap(short, long, ignore_case = true, default_value_t = 1)]
+	/// The number of blocks the test is going to run.
+	pub num_blocks: usize,
+}
diff --git a/polkadot/node/subsystem-bench/src/core/configuration.rs b/polkadot/node/subsystem-bench/src/core/configuration.rs
new file mode 100644
index 0000000000000000000000000000000000000000..164addb51900656a278dba2eafc19a7ef558037b
--- /dev/null
+++ b/polkadot/node/subsystem-bench/src/core/configuration.rs
@@ -0,0 +1,262 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+//
+//! Test configuration definition and helpers.
+use super::*;
+use keyring::Keyring;
+use std::{path::Path, time::Duration};
+
+pub use crate::cli::TestObjective;
+use polkadot_primitives::{AuthorityDiscoveryId, ValidatorId};
+use rand::{distributions::Uniform, prelude::Distribution, thread_rng};
+use serde::{Deserialize, Serialize};
+
+pub fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize {
+	random_uniform_sample(min_pov_size, max_pov_size)
+}
+
+fn random_uniform_sample<T: Into<usize> + From<usize>>(min_value: T, max_value: T) -> T {
+	Uniform::from(min_value.into()..=max_value.into())
+		.sample(&mut thread_rng())
+		.into()
+}
+
+/// Peer response latency configuration.
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+pub struct PeerLatency {
+	/// Min latency for `NetworkAction` completion.
+	pub min_latency: Duration,
+	/// Max latency for `NetworkAction` completion.
+	pub max_latency: Duration,
+}
+
+// Default PoV size in KiB.
+fn default_pov_size() -> usize {
+	5120
+}
+
+// Default bandwidth in bytes
+fn default_bandwidth() -> usize {
+	52428800
+}
+
+// Default connectivity percentage
+fn default_connectivity() -> usize {
+	100
+}
+
+/// The test input parameters
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct TestConfiguration {
+	/// The test objective
+	pub objective: TestObjective,
+	/// Number of validators
+	pub n_validators: usize,
+	/// Number of cores
+	pub n_cores: usize,
+	/// The min PoV size
+	#[serde(default = "default_pov_size")]
+	pub min_pov_size: usize,
+	/// The max PoV size
+	#[serde(default = "default_pov_size")]
+	pub max_pov_size: usize,
+	/// Randomly sampled pov_sizes
+	#[serde(skip)]
+	pov_sizes: Vec<usize>,
+	/// The amount of bandwidth remote validators have.
+	#[serde(default = "default_bandwidth")]
+	pub peer_bandwidth: usize,
+	/// The amount of bandwidth our node has.
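+	/// Expressed in bytes per second; when omitted it falls back to
+	/// `default_bandwidth()` (52428800, i.e. 50 MiB/s).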
+	#[serde(default = "default_bandwidth")]
+	pub bandwidth: usize,
+	/// Optional peer emulation latency
+	#[serde(default)]
+	pub latency: Option<PeerLatency>,
+	/// Error probability, applies to sending messages to the emulated network peers
+	#[serde(default)]
+	pub error: usize,
+	/// Connectivity ratio: the percentage of peers we are connected to, out of all peers
+	/// in the topology.
+	#[serde(default = "default_connectivity")]
+	pub connectivity: usize,
+	/// Number of blocks to run the test for
+	pub num_blocks: usize,
+}
+
+fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec<usize> {
+	(0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect()
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct TestSequence {
+	#[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))]
+	test_configurations: Vec<TestConfiguration>,
+}
+
+impl TestSequence {
+	pub fn into_vec(self) -> Vec<TestConfiguration> {
+		self.test_configurations
+			.into_iter()
+			.map(|mut config| {
+				config.pov_sizes =
+					generate_pov_sizes(config.n_cores, config.min_pov_size, config.max_pov_size);
+				config
+			})
+			.collect()
+	}
+}
+
+impl TestSequence {
+	pub fn new_from_file(path: &Path) -> std::io::Result<TestSequence> {
+		let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8");
+		Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YAML"))
+	}
+}
+
+/// Helper struct for authority related state.
+#[derive(Clone)]
+pub struct TestAuthorities {
+	pub keyrings: Vec<Keyring>,
+	pub validator_public: Vec<ValidatorId>,
+	pub validator_authority_id: Vec<AuthorityDiscoveryId>,
+}
+
+impl TestConfiguration {
+	#[allow(unused)]
+	pub fn write_to_disk(&self) {
+		// Serialize a slice of configurations
+		let yaml = serde_yaml::to_string(&TestSequence { test_configurations: vec![self.clone()] })
+			.unwrap();
+		std::fs::write("last_test.yaml", yaml).unwrap();
+	}
+
+	pub fn pov_sizes(&self) -> &[usize] {
+		&self.pov_sizes
+	}
+
+	/// Generates the authority keys we need for the network emulation.
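+	/// Keys are derived deterministically from `//Node{index}` seed strings, so repeated
+	/// runs over the same configuration see the same emulated validator set. A brief
+	/// illustrative sketch (not part of the benchmark code itself):
+	///
+	/// ```ignore
+	/// let authorities_a = config.generate_authorities();
+	/// let authorities_b = config.generate_authorities();
+	/// assert_eq!(authorities_a.validator_public, authorities_b.validator_public);
+	/// ```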
+ pub fn generate_authorities(&self) -> TestAuthorities { + let keyrings = (0..self.n_validators) + .map(|peer_index| Keyring::new(format!("Node{}", peer_index))) + .collect::>(); + + // Generate `AuthorityDiscoveryId`` for each peer + let validator_public: Vec = keyrings + .iter() + .map(|keyring: &Keyring| keyring.clone().public().into()) + .collect::>(); + + let validator_authority_id: Vec = keyrings + .iter() + .map(|keyring| keyring.clone().public().into()) + .collect::>(); + + TestAuthorities { keyrings, validator_public, validator_authority_id } + } + + /// An unconstrained standard configuration matching Polkadot/Kusama + pub fn ideal_network( + objective: TestObjective, + num_blocks: usize, + n_validators: usize, + n_cores: usize, + min_pov_size: usize, + max_pov_size: usize, + ) -> TestConfiguration { + Self { + objective, + n_cores, + n_validators, + pov_sizes: generate_pov_sizes(n_cores, min_pov_size, max_pov_size), + bandwidth: 50 * 1024 * 1024, + peer_bandwidth: 50 * 1024 * 1024, + // No latency + latency: None, + error: 0, + num_blocks, + min_pov_size, + max_pov_size, + connectivity: 100, + } + } + + pub fn healthy_network( + objective: TestObjective, + num_blocks: usize, + n_validators: usize, + n_cores: usize, + min_pov_size: usize, + max_pov_size: usize, + ) -> TestConfiguration { + Self { + objective, + n_cores, + n_validators, + pov_sizes: generate_pov_sizes(n_cores, min_pov_size, max_pov_size), + bandwidth: 50 * 1024 * 1024, + peer_bandwidth: 50 * 1024 * 1024, + latency: Some(PeerLatency { + min_latency: Duration::from_millis(1), + max_latency: Duration::from_millis(100), + }), + error: 3, + num_blocks, + min_pov_size, + max_pov_size, + connectivity: 95, + } + } + + pub fn degraded_network( + objective: TestObjective, + num_blocks: usize, + n_validators: usize, + n_cores: usize, + min_pov_size: usize, + max_pov_size: usize, + ) -> TestConfiguration { + Self { + objective, + n_cores, + n_validators, + pov_sizes: generate_pov_sizes(n_cores, min_pov_size, max_pov_size), + bandwidth: 50 * 1024 * 1024, + peer_bandwidth: 50 * 1024 * 1024, + latency: Some(PeerLatency { + min_latency: Duration::from_millis(10), + max_latency: Duration::from_millis(500), + }), + error: 33, + num_blocks, + min_pov_size, + max_pov_size, + connectivity: 67, + } + } +} + +/// Produce a randomized duration between `min` and `max`. +pub fn random_latency(maybe_peer_latency: Option<&PeerLatency>) -> Option { + maybe_peer_latency.map(|peer_latency| { + Uniform::from(peer_latency.min_latency..=peer_latency.max_latency).sample(&mut thread_rng()) + }) +} + +/// Generate a random error based on `probability`. +/// `probability` should be a number between 0 and 100. +pub fn random_error(probability: usize) -> bool { + Uniform::from(0..=99).sample(&mut thread_rng()) < probability +} diff --git a/polkadot/node/subsystem-bench/src/core/display.rs b/polkadot/node/subsystem-bench/src/core/display.rs new file mode 100644 index 0000000000000000000000000000000000000000..d600cc484c14a45361c19621213dbf666475a778 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/core/display.rs @@ -0,0 +1,191 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+//
+//! Display implementations and helper methods for parsing prometheus metrics
+//! to a format that can be displayed in the CLI.
+//!
+//! Currently histogram buckets are skipped.
+use super::{configuration::TestConfiguration, LOG_TARGET};
+use colored::Colorize;
+use prometheus::{
+	proto::{MetricFamily, MetricType},
+	Registry,
+};
+use std::fmt::Display;
+
+#[derive(Default)]
+pub struct MetricCollection(Vec<TestMetric>);
+
+impl From<Vec<TestMetric>> for MetricCollection {
+	fn from(metrics: Vec<TestMetric>) -> Self {
+		MetricCollection(metrics)
+	}
+}
+
+impl MetricCollection {
+	pub fn all(&self) -> &Vec<TestMetric> {
+		&self.0
+	}
+
+	/// Sums up all metrics with the given name in the collection
+	pub fn sum_by(&self, name: &str) -> f64 {
+		self.all()
+			.iter()
+			.filter(|metric| metric.name == name)
+			.map(|metric| metric.value)
+			.sum()
+	}
+
+	pub fn subset_with_label_value(&self, label_name: &str, label_value: &str) -> MetricCollection {
+		self.0
+			.iter()
+			.filter_map(|metric| {
+				if let Some(index) = metric.label_names.iter().position(|label| label == label_name)
+				{
+					if Some(&String::from(label_value)) == metric.label_values.get(index) {
+						Some(metric.clone())
+					} else {
+						None
+					}
+				} else {
+					None
+				}
+			})
+			.collect::<Vec<_>>()
+			.into()
+	}
+}
+
+impl Display for MetricCollection {
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		writeln!(f)?;
+		let metrics = self.all();
+		for metric in metrics {
+			writeln!(f, "{}", metric)?;
+		}
+		Ok(())
+	}
+}
+#[derive(Debug, Clone)]
+pub struct TestMetric {
+	name: String,
+	label_names: Vec<String>,
+	label_values: Vec<String>,
+	value: f64,
+}
+
+impl Display for TestMetric {
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		write!(
+			f,
+			"({} = {}) [{:?}, {:?}]",
+			self.name.cyan(),
+			format!("{}", self.value).white(),
+			self.label_names,
+			self.label_values
+		)
+	}
+}
+
+// Returns `false` if metric should be skipped.
+fn check_metric_family(mf: &MetricFamily) -> bool {
+	if mf.get_metric().is_empty() {
+		gum::error!(target: LOG_TARGET, "MetricFamily has no metrics: {:?}", mf);
+		return false
+	}
+	if mf.get_name().is_empty() {
+		gum::error!(target: LOG_TARGET, "MetricFamily has no name: {:?}", mf);
+		return false
+	}
+
+	true
+}
+
+pub fn parse_metrics(registry: &Registry) -> MetricCollection {
+	let metric_families = registry.gather();
+	let mut test_metrics = Vec::new();
+	for mf in metric_families {
+		if !check_metric_family(&mf) {
+			continue
+		}
+
+		let name: String = mf.get_name().into();
+		let metric_type = mf.get_field_type();
+		for m in mf.get_metric() {
+			let (label_names, label_values): (Vec<String>, Vec<String>) = m
+				.get_label()
+				.iter()
+				.map(|pair| (String::from(pair.get_name()), String::from(pair.get_value())))
+				.unzip();
+
+			match metric_type {
+				MetricType::COUNTER => {
+					test_metrics.push(TestMetric {
+						name: name.clone(),
+						label_names,
+						label_values,
+						value: m.get_counter().get_value(),
+					});
+				},
+				MetricType::GAUGE => {
+					test_metrics.push(TestMetric {
+						name: name.clone(),
+						label_names,
+						label_values,
+						value: m.get_gauge().get_value(),
+					});
+				},
+				MetricType::HISTOGRAM => {
+					let h = m.get_histogram();
+					let h_name = name.clone() + "_sum";
+					test_metrics.push(TestMetric {
+						name: h_name,
+						label_names: label_names.clone(),
+						label_values: label_values.clone(),
+						value: h.get_sample_sum(),
+					});
+
+					let h_name = name.clone() + "_count";
+					test_metrics.push(TestMetric {
+						name: h_name,
+						label_names,
+						label_values,
+						value: h.get_sample_count() as f64,
+					});
+				},
+				MetricType::SUMMARY => {
+					unimplemented!();
+				},
+				MetricType::UNTYPED => {
+					unimplemented!();
+				},
+			}
+		}
+	}
+	test_metrics.into()
+}
+
+pub fn display_configuration(test_config: &TestConfiguration) {
+	gum::info!(
+		"{}, {}, {}, {}, {}",
+		format!("n_validators = {}", test_config.n_validators).blue(),
+		format!("n_cores = {}", test_config.n_cores).blue(),
+		format!("pov_size = {} - {}", test_config.min_pov_size, test_config.max_pov_size)
+			.bright_black(),
+		format!("error = {}", test_config.error).bright_black(),
+		format!("latency = {:?}", test_config.latency).bright_black(),
+	);
+}
diff --git a/polkadot/node/subsystem-bench/src/core/environment.rs b/polkadot/node/subsystem-bench/src/core/environment.rs
new file mode 100644
index 0000000000000000000000000000000000000000..247596474078ef73a74f1762c30b56b52ce4417f
--- /dev/null
+++ b/polkadot/node/subsystem-bench/src/core/environment.rs
@@ -0,0 +1,333 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+//! Test environment implementation
+use crate::{
+	core::{mock::AlwaysSupportsParachains, network::NetworkEmulator},
+	TestConfiguration,
+};
+use colored::Colorize;
+use core::time::Duration;
+use futures::FutureExt;
+use polkadot_overseer::{BlockInfo, Handle as OverseerHandle};
+
+use polkadot_node_subsystem::{messages::AllMessages, Overseer, SpawnGlue, TimeoutExt};
+use polkadot_node_subsystem_types::Hash;
+use polkadot_node_subsystem_util::metrics::prometheus::{
+	self, Gauge, Histogram, PrometheusError, Registry, U64,
+};
+
+use sc_network::peer_store::LOG_TARGET;
+use sc_service::{SpawnTaskHandle, TaskManager};
+use std::{
+	fmt::Display,
+	net::{Ipv4Addr, SocketAddr},
+};
+use tokio::runtime::Handle;
+
+const MIB: f64 = 1024.0 * 1024.0;
+
+/// Test environment/configuration metrics
+#[derive(Clone)]
+pub struct TestEnvironmentMetrics {
+	/// Total number of validators in the test.
+	n_validators: Gauge<U64>,
+	/// Number of cores we fetch availability for in each block.
+	n_cores: Gauge<U64>,
+	/// PoV size
+	pov_size: Histogram,
+	/// Current block
+	current_block: Gauge<U64>,
+	/// Time to complete all requests in a block (ms).
+	block_time: Gauge<U64>,
+}
+
+impl TestEnvironmentMetrics {
+	pub fn new(registry: &Registry) -> Result<Self, PrometheusError> {
+		let mut buckets = prometheus::exponential_buckets(16384.0, 2.0, 9)
+			.expect("arguments are always valid; qed");
+		buckets.extend(vec![5.0 * MIB, 6.0 * MIB, 7.0 * MIB, 8.0 * MIB, 9.0 * MIB, 10.0 * MIB]);
+
+		Ok(Self {
+			n_validators: prometheus::register(
+				Gauge::new(
+					"subsystem_benchmark_n_validators",
+					"Total number of validators in the test",
+				)?,
+				registry,
+			)?,
+			n_cores: prometheus::register(
+				Gauge::new(
+					"subsystem_benchmark_n_cores",
+					"Number of cores we fetch availability for each block",
+				)?,
+				registry,
+			)?,
+			current_block: prometheus::register(
+				Gauge::new("subsystem_benchmark_current_block", "The current test block")?,
+				registry,
+			)?,
+			block_time: prometheus::register(
+				Gauge::new("subsystem_benchmark_block_time", "The time it takes for the target subsystems(s) to complete all the requests in a block")?,
+				registry,
+			)?,
+			pov_size: prometheus::register(
+				Histogram::with_opts(
+					prometheus::HistogramOpts::new(
+						"subsystem_benchmark_pov_size",
+						"The compressed size of the proof of validity of a candidate",
+					)
+					.buckets(buckets),
+				)?,
+				registry,
+			)?,
+		})
+	}
+
+	pub fn set_n_validators(&self, n_validators: usize) {
+		self.n_validators.set(n_validators as u64);
+	}
+
+	pub fn set_n_cores(&self, n_cores: usize) {
+		self.n_cores.set(n_cores as u64);
+	}
+
+	pub fn set_current_block(&self, current_block: usize) {
+		self.current_block.set(current_block as u64);
+	}
+
+	pub fn set_block_time(&self, block_time_ms: u64) {
+		self.block_time.set(block_time_ms);
+	}
+
+	pub fn on_pov_size(&self, pov_size: usize) {
+		self.pov_size.observe(pov_size as f64);
+	}
+}
+
+fn new_runtime() -> tokio::runtime::Runtime {
+	tokio::runtime::Builder::new_multi_thread()
+		.thread_name("subsystem-bench")
+		.enable_all()
+		.thread_stack_size(3 * 1024 * 1024)
+		.build()
+		.unwrap()
+}
+
+/// Wrapper for dependencies
+pub struct TestEnvironmentDependencies {
+	pub registry: Registry,
+	pub task_manager: TaskManager,
+	pub runtime: tokio::runtime::Runtime,
+}
+
+impl Default for TestEnvironmentDependencies {
+	fn default() -> Self {
+		let runtime = new_runtime();
+		let registry = Registry::new();
+		let task_manager: TaskManager =
+			TaskManager::new(runtime.handle().clone(), Some(&registry)).unwrap();
+
+		Self { runtime, registry, task_manager }
+	}
+}
+
+// A dummy genesis hash
+pub const GENESIS_HASH: Hash =
Hash::repeat_byte(0xff); + +// We use this to bail out sending messages to the subsystem if it is overloaded such that +// the time of flight is breaches 5s. +// This should eventually be a test parameter. +const MAX_TIME_OF_FLIGHT: Duration = Duration::from_millis(5000); + +/// The test environment is the high level wrapper of all things required to test +/// a certain subsystem. +/// +/// ## Mockups +/// The overseer is passed in during construction and it can host an arbitrary number of +/// real subsystems instances and the corresponding mocked instances such that the real +/// subsystems can get their messages answered. +/// +/// As the subsystem's performance depends on network connectivity, the test environment +/// emulates validator nodes on the network, see `NetworkEmulator`. The network emulation +/// is configurable in terms of peer bandwidth, latency and connection error rate using +/// uniform distribution sampling. +/// +/// +/// ## Usage +/// `TestEnvironment` is used in tests to send `Overseer` messages or signals to the subsystem +/// under test. +/// +/// ## Collecting test metrics +/// +/// ### Prometheus +/// A prometheus endpoint is exposed while the test is running. A local Prometheus instance +/// can scrape it every 1s and a Grafana dashboard is the preferred way of visualizing +/// the performance characteristics of the subsystem. +/// +/// ### CLI +/// A subset of the Prometheus metrics are printed at the end of the test. +pub struct TestEnvironment { + /// Test dependencies + dependencies: TestEnvironmentDependencies, + /// A runtime handle + runtime_handle: tokio::runtime::Handle, + /// A handle to the lovely overseer + overseer_handle: OverseerHandle, + /// The test configuration. + config: TestConfiguration, + /// A handle to the network emulator. + network: NetworkEmulator, + /// Configuration/env metrics + metrics: TestEnvironmentMetrics, +} + +impl TestEnvironment { + /// Create a new test environment + pub fn new( + dependencies: TestEnvironmentDependencies, + config: TestConfiguration, + network: NetworkEmulator, + overseer: Overseer, AlwaysSupportsParachains>, + overseer_handle: OverseerHandle, + ) -> Self { + let metrics = TestEnvironmentMetrics::new(&dependencies.registry) + .expect("Metrics need to be registered"); + + let spawn_handle = dependencies.task_manager.spawn_handle(); + spawn_handle.spawn_blocking("overseer", "overseer", overseer.run().boxed()); + + let registry_clone = dependencies.registry.clone(); + dependencies.task_manager.spawn_handle().spawn_blocking( + "prometheus", + "test-environment", + async move { + prometheus_endpoint::init_prometheus( + SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999), + registry_clone, + ) + .await + .unwrap(); + }, + ); + + TestEnvironment { + runtime_handle: dependencies.runtime.handle().clone(), + dependencies, + overseer_handle, + config, + network, + metrics, + } + } + + pub fn config(&self) -> &TestConfiguration { + &self.config + } + + pub fn network(&self) -> &NetworkEmulator { + &self.network + } + + pub fn registry(&self) -> &Registry { + &self.dependencies.registry + } + + pub fn metrics(&self) -> &TestEnvironmentMetrics { + &self.metrics + } + + pub fn runtime(&self) -> Handle { + self.runtime_handle.clone() + } + + // Send a message to the subsystem under test environment. 
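+	// The call is bounded by `MAX_TIME_OF_FLIGHT`: if the subsystem under test cannot
+	// accept the message within that window, the benchmark panics instead of silently
+	// queueing more work, so results are not skewed by unbounded buffering.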
+ pub async fn send_message(&mut self, msg: AllMessages) { + self.overseer_handle + .send_msg(msg, LOG_TARGET) + .timeout(MAX_TIME_OF_FLIGHT) + .await + .unwrap_or_else(|| { + panic!("{}ms maximum time of flight breached", MAX_TIME_OF_FLIGHT.as_millis()) + }); + } + + // Send an `ActiveLeavesUpdate` signal to all subsystems under test. + pub async fn import_block(&mut self, block: BlockInfo) { + self.overseer_handle + .block_imported(block) + .timeout(MAX_TIME_OF_FLIGHT) + .await + .unwrap_or_else(|| { + panic!("{}ms maximum time of flight breached", MAX_TIME_OF_FLIGHT.as_millis()) + }); + } + + // Stop overseer and subsystems. + pub async fn stop(&mut self) { + self.overseer_handle.stop().await; + } +} + +impl Display for TestEnvironment { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let stats = self.network().stats(); + + writeln!(f, "\n")?; + writeln!( + f, + "Total received from network: {}", + format!( + "{} MiB", + stats + .iter() + .enumerate() + .map(|(_index, stats)| stats.tx_bytes_total as u128) + .sum::() / (1024 * 1024) + ) + .cyan() + )?; + writeln!( + f, + "Total sent to network: {}", + format!("{} KiB", stats[0].tx_bytes_total / (1024)).cyan() + )?; + + let test_metrics = super::display::parse_metrics(self.registry()); + let subsystem_cpu_metrics = + test_metrics.subset_with_label_value("task_group", "availability-recovery"); + let total_cpu = subsystem_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum"); + writeln!(f, "Total subsystem CPU usage {}", format!("{:.2}s", total_cpu).bright_purple())?; + writeln!( + f, + "CPU usage per block {}", + format!("{:.2}s", total_cpu / self.config().num_blocks as f64).bright_purple() + )?; + + let test_env_cpu_metrics = + test_metrics.subset_with_label_value("task_group", "test-environment"); + let total_cpu = test_env_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum"); + writeln!( + f, + "Total test environment CPU usage {}", + format!("{:.2}s", total_cpu).bright_purple() + )?; + writeln!( + f, + "CPU usage per block {}", + format!("{:.2}s", total_cpu / self.config().num_blocks as f64).bright_purple() + ) + } +} diff --git a/polkadot/node/subsystem-bench/src/core/keyring.rs b/polkadot/node/subsystem-bench/src/core/keyring.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d9aa348a922bf8cf136e307dcbd6ecca3d3e49c --- /dev/null +++ b/polkadot/node/subsystem-bench/src/core/keyring.rs @@ -0,0 +1,40 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub use sp_core::sr25519; +use sp_core::{ + sr25519::{Pair, Public}, + Pair as PairT, +}; +/// Set of test accounts. 
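+/// Keys are derived from an `sr25519` dev seed of the form `//<name>`, so the same name
+/// always maps to the same key pair. Illustrative sketch (not part of the benchmark code):
+///
+/// ```ignore
+/// let node0 = Keyring::new("Node0".into());
+/// let public = node0.public();
+/// ```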
+#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Keyring { + name: String, +} + +impl Keyring { + pub fn new(name: String) -> Keyring { + Self { name } + } + + pub fn pair(self) -> Pair { + Pair::from_string(&format!("//{}", self.name), None).expect("input is always good; qed") + } + + pub fn public(self) -> Public { + self.pair().public() + } +} diff --git a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs b/polkadot/node/subsystem-bench/src/core/mock/av_store.rs new file mode 100644 index 0000000000000000000000000000000000000000..a471230f1b3f0e5be27494988f04590ff4aaa78e --- /dev/null +++ b/polkadot/node/subsystem-bench/src/core/mock/av_store.rs @@ -0,0 +1,137 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +//! +//! A generic av store subsystem mockup suitable to be used in benchmarks. + +use parity_scale_codec::Encode; +use polkadot_primitives::CandidateHash; + +use std::collections::HashMap; + +use futures::{channel::oneshot, FutureExt}; + +use polkadot_node_primitives::ErasureChunk; + +use polkadot_node_subsystem::{ + messages::AvailabilityStoreMessage, overseer, SpawnedSubsystem, SubsystemError, +}; + +use polkadot_node_subsystem_types::OverseerSignal; + +pub struct AvailabilityStoreState { + candidate_hashes: HashMap, + chunks: Vec>, +} + +const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; + +/// A mock of the availability store subsystem. 
This one also generates all the +/// candidates that a +pub struct MockAvailabilityStore { + state: AvailabilityStoreState, +} + +impl MockAvailabilityStore { + pub fn new( + chunks: Vec>, + candidate_hashes: HashMap, + ) -> MockAvailabilityStore { + Self { state: AvailabilityStoreState { chunks, candidate_hashes } } + } + + async fn respond_to_query_all_request( + &self, + candidate_hash: CandidateHash, + send_chunk: impl Fn(usize) -> bool, + tx: oneshot::Sender>, + ) { + let candidate_index = self + .state + .candidate_hashes + .get(&candidate_hash) + .expect("candidate was generated previously; qed"); + gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); + + let v = self + .state + .chunks + .get(*candidate_index) + .unwrap() + .iter() + .filter(|c| send_chunk(c.index.0 as usize)) + .cloned() + .collect(); + + let _ = tx.send(v); + } +} + +#[overseer::subsystem(AvailabilityStore, error=SubsystemError, prefix=self::overseer)] +impl MockAvailabilityStore { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = self.run(ctx).map(|_| Ok(())).boxed(); + + SpawnedSubsystem { name: "test-environment", future } + } +} + +#[overseer::contextbounds(AvailabilityStore, prefix = self::overseer)] +impl MockAvailabilityStore { + async fn run(self, mut ctx: Context) { + gum::debug!(target: LOG_TARGET, "Subsystem running"); + loop { + let msg = ctx.recv().await.expect("Overseer never fails us"); + + match msg { + orchestra::FromOrchestra::Signal(signal) => + if signal == OverseerSignal::Conclude { + return + }, + orchestra::FromOrchestra::Communication { msg } => match msg { + AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx) => { + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryAvailableData"); + + // We never have the full available data. + let _ = tx.send(None); + }, + AvailabilityStoreMessage::QueryAllChunks(candidate_hash, tx) => { + // We always have our own chunk. + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryAllChunks"); + self.respond_to_query_all_request(candidate_hash, |index| index == 0, tx) + .await; + }, + AvailabilityStoreMessage::QueryChunkSize(candidate_hash, tx) => { + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryChunkSize"); + + let candidate_index = self + .state + .candidate_hashes + .get(&candidate_hash) + .expect("candidate was generated previously; qed"); + gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); + + let chunk_size = + self.state.chunks.get(*candidate_index).unwrap()[0].encoded_size(); + let _ = tx.send(Some(chunk_size)); + }, + _ => { + unimplemented!("Unexpected av-store message") + }, + }, + } + } + } +} diff --git a/polkadot/node/subsystem-bench/src/core/mock/dummy.rs b/polkadot/node/subsystem-bench/src/core/mock/dummy.rs new file mode 100644 index 0000000000000000000000000000000000000000..0628368a49c08af69077ba558b5dc8b34f8b57bd --- /dev/null +++ b/polkadot/node/subsystem-bench/src/core/mock/dummy.rs @@ -0,0 +1,98 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+//! Dummy subsystem mocks.
+use paste::paste;
+
+use futures::FutureExt;
+use polkadot_node_subsystem::{overseer, SpawnedSubsystem, SubsystemError};
+use std::time::Duration;
+use tokio::time::sleep;
+
+const LOG_TARGET: &str = "subsystem-bench::mockery";
+
+macro_rules! mock {
+	// Just query by relay parent
+	($subsystem_name:ident) => {
+		paste! {
+			pub struct [<Mock $subsystem_name>] {}
+			#[overseer::subsystem($subsystem_name, error=SubsystemError, prefix=self::overseer)]
+			impl<Context> [<Mock $subsystem_name>] {
+				fn start(self, ctx: Context) -> SpawnedSubsystem {
+					let future = self.run(ctx).map(|_| Ok(())).boxed();
+
+					// The name will appear in substrate CPU task metrics as `task_group`.
+					SpawnedSubsystem { name: "test-environment", future }
+				}
+			}
+
+			#[overseer::contextbounds($subsystem_name, prefix = self::overseer)]
+			impl<Context> [<Mock $subsystem_name>] {
+				async fn run(self, mut ctx: Context) {
+					let mut count_total_msg = 0;
+					loop {
+						futures::select!{
+							msg = ctx.recv().fuse() => {
+								match msg.unwrap() {
+									orchestra::FromOrchestra::Signal(signal) => {
+										match signal {
+											polkadot_node_subsystem_types::OverseerSignal::Conclude => {return},
+											_ => {}
+										}
+									},
+									orchestra::FromOrchestra::Communication { msg } => {
+										gum::debug!(target: LOG_TARGET, msg = ?msg, "mocked subsystem received message");
+									}
+								}
+
+								count_total_msg +=1;
+							}
+							_ = sleep(Duration::from_secs(6)).fuse() => {
+								if count_total_msg > 0 {
+									gum::trace!(target: LOG_TARGET, "Subsystem {} processed {} messages since last time", stringify!($subsystem_name), count_total_msg);
+								}
+								count_total_msg = 0;
+							}
+						}
+					}
+				}
+			}
+		}
+	};
+}
+
+mock!(AvailabilityStore);
+mock!(StatementDistribution);
+mock!(BitfieldSigning);
+mock!(BitfieldDistribution);
+mock!(Provisioner);
+mock!(NetworkBridgeRx);
+mock!(CollationGeneration);
+mock!(CollatorProtocol);
+mock!(GossipSupport);
+mock!(DisputeDistribution);
+mock!(DisputeCoordinator);
+mock!(ProspectiveParachains);
+mock!(PvfChecker);
+mock!(CandidateBacking);
+mock!(AvailabilityDistribution);
+mock!(CandidateValidation);
+mock!(AvailabilityRecovery);
+mock!(NetworkBridgeTx);
+mock!(ChainApi);
+mock!(ChainSelection);
+mock!(ApprovalVoting);
+mock!(ApprovalDistribution);
+mock!(RuntimeApi);
diff --git a/polkadot/node/subsystem-bench/src/core/mock/mod.rs b/polkadot/node/subsystem-bench/src/core/mock/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d59642e9605861bd18628b2660664a25865ed28e
--- /dev/null
+++ b/polkadot/node/subsystem-bench/src/core/mock/mod.rs
@@ -0,0 +1,77 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+ +use polkadot_node_subsystem::HeadSupportsParachains; +use polkadot_node_subsystem_types::Hash; + +pub mod av_store; +pub mod dummy; +pub mod network_bridge; +pub mod runtime_api; + +pub use av_store::*; +pub use network_bridge::*; +pub use runtime_api::*; + +pub struct AlwaysSupportsParachains {} +#[async_trait::async_trait] +impl HeadSupportsParachains for AlwaysSupportsParachains { + async fn head_supports_parachains(&self, _head: &Hash) -> bool { + true + } +} + +// An orchestra with dummy subsystems +macro_rules! dummy_builder { + ($spawn_task_handle: ident) => {{ + use super::core::mock::dummy::*; + + // Initialize a mock overseer. + // All subsystem except approval_voting and approval_distribution are mock subsystems. + Overseer::builder() + .approval_voting(MockApprovalVoting {}) + .approval_distribution(MockApprovalDistribution {}) + .availability_recovery(MockAvailabilityRecovery {}) + .candidate_validation(MockCandidateValidation {}) + .chain_api(MockChainApi {}) + .chain_selection(MockChainSelection {}) + .dispute_coordinator(MockDisputeCoordinator {}) + .runtime_api(MockRuntimeApi {}) + .network_bridge_tx(MockNetworkBridgeTx {}) + .availability_distribution(MockAvailabilityDistribution {}) + .availability_store(MockAvailabilityStore {}) + .pvf_checker(MockPvfChecker {}) + .candidate_backing(MockCandidateBacking {}) + .statement_distribution(MockStatementDistribution {}) + .bitfield_signing(MockBitfieldSigning {}) + .bitfield_distribution(MockBitfieldDistribution {}) + .provisioner(MockProvisioner {}) + .network_bridge_rx(MockNetworkBridgeRx {}) + .collation_generation(MockCollationGeneration {}) + .collator_protocol(MockCollatorProtocol {}) + .gossip_support(MockGossipSupport {}) + .dispute_distribution(MockDisputeDistribution {}) + .prospective_parachains(MockProspectiveParachains {}) + .activation_external_listeners(Default::default()) + .span_per_active_leaf(Default::default()) + .active_leaves(Default::default()) + .metrics(Default::default()) + .supports_parachains(AlwaysSupportsParachains {}) + .spawner(SpawnGlue($spawn_task_handle)) + }}; +} + +pub(crate) use dummy_builder; diff --git a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs new file mode 100644 index 0000000000000000000000000000000000000000..b106b832011a81e69c7ea9258b9f4d72cf71ae84 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs @@ -0,0 +1,323 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +//! +//! A generic av store subsystem mockup suitable to be used in benchmarks. 
+ +use futures::Future; +use parity_scale_codec::Encode; +use polkadot_node_subsystem_types::OverseerSignal; +use std::{collections::HashMap, pin::Pin}; + +use futures::FutureExt; + +use polkadot_node_primitives::{AvailableData, ErasureChunk}; + +use polkadot_primitives::CandidateHash; +use sc_network::{OutboundFailure, RequestFailure}; + +use polkadot_node_subsystem::{ + messages::NetworkBridgeTxMessage, overseer, SpawnedSubsystem, SubsystemError, +}; + +use polkadot_node_network_protocol::request_response::{ + self as req_res, v1::ChunkResponse, Requests, +}; +use polkadot_primitives::AuthorityDiscoveryId; + +use crate::core::{ + configuration::{random_error, random_latency, TestConfiguration}, + network::{NetworkAction, NetworkEmulator, RateLimit}, +}; + +/// The availability store state of all emulated peers. +/// The network bridge tx mock will respond to requests as if the request is being serviced +/// by a remote peer on the network +pub struct NetworkAvailabilityState { + pub candidate_hashes: HashMap, + pub available_data: Vec, + pub chunks: Vec>, +} + +const LOG_TARGET: &str = "subsystem-bench::network-bridge-tx-mock"; + +/// A mock of the network bridge tx subsystem. +pub struct MockNetworkBridgeTx { + /// The test configurationg + config: TestConfiguration, + /// The network availability state + availabilty: NetworkAvailabilityState, + /// A network emulator instance + network: NetworkEmulator, +} + +impl MockNetworkBridgeTx { + pub fn new( + config: TestConfiguration, + availabilty: NetworkAvailabilityState, + network: NetworkEmulator, + ) -> MockNetworkBridgeTx { + Self { config, availabilty, network } + } + + fn not_connected_response( + &self, + authority_discovery_id: &AuthorityDiscoveryId, + future: Pin + Send>>, + ) -> NetworkAction { + // The network action will send the error after a random delay expires. + return NetworkAction::new( + authority_discovery_id.clone(), + future, + 0, + // Generate a random latency based on configuration. + random_latency(self.config.latency.as_ref()), + ) + } + /// Returns an `NetworkAction` corresponding to the peer sending the response. If + /// the peer is connected, the error is sent with a randomized latency as defined in + /// configuration. + fn respond_to_send_request( + &mut self, + request: Requests, + ingress_tx: &mut tokio::sync::mpsc::UnboundedSender, + ) -> NetworkAction { + let ingress_tx = ingress_tx.clone(); + + match request { + Requests::ChunkFetchingV1(outgoing_request) => { + let authority_discovery_id = match outgoing_request.peer { + req_res::Recipient::Authority(authority_discovery_id) => authority_discovery_id, + _ => unimplemented!("Peer recipient not supported yet"), + }; + // Account our sent request bytes. + self.network.peer_stats(0).inc_sent(outgoing_request.payload.encoded_size()); + + // If peer is disconnected return an error + if !self.network.is_peer_connected(&authority_discovery_id) { + // We always send `NotConnected` error and we ignore `IfDisconnected` value in + // the caller. + let future = async move { + let _ = outgoing_request + .pending_response + .send(Err(RequestFailure::NotConnected)); + } + .boxed(); + return self.not_connected_response(&authority_discovery_id, future) + } + + // Account for remote received request bytes. 
+ self.network + .peer_stats_by_id(&authority_discovery_id) + .inc_received(outgoing_request.payload.encoded_size()); + + let validator_index: usize = outgoing_request.payload.index.0 as usize; + let candidate_hash = outgoing_request.payload.candidate_hash; + + let candidate_index = self + .availabilty + .candidate_hashes + .get(&candidate_hash) + .expect("candidate was generated previously; qed"); + gum::warn!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); + + let chunk: ChunkResponse = self.availabilty.chunks.get(*candidate_index).unwrap() + [validator_index] + .clone() + .into(); + let mut size = chunk.encoded_size(); + + let response = if random_error(self.config.error) { + // Error will not account to any bandwidth used. + size = 0; + Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)) + } else { + Ok(req_res::v1::ChunkFetchingResponse::from(Some(chunk)).encode()) + }; + + let authority_discovery_id_clone = authority_discovery_id.clone(); + + let future = async move { + let _ = outgoing_request.pending_response.send(response); + } + .boxed(); + + let future_wrapper = async move { + // Forward the response to the ingress channel of our node. + // On receive side we apply our node receiving rate limit. + let action = + NetworkAction::new(authority_discovery_id_clone, future, size, None); + ingress_tx.send(action).unwrap(); + } + .boxed(); + + NetworkAction::new( + authority_discovery_id, + future_wrapper, + size, + // Generate a random latency based on configuration. + random_latency(self.config.latency.as_ref()), + ) + }, + Requests::AvailableDataFetchingV1(outgoing_request) => { + let candidate_hash = outgoing_request.payload.candidate_hash; + let candidate_index = self + .availabilty + .candidate_hashes + .get(&candidate_hash) + .expect("candidate was generated previously; qed"); + gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); + + let authority_discovery_id = match outgoing_request.peer { + req_res::Recipient::Authority(authority_discovery_id) => authority_discovery_id, + _ => unimplemented!("Peer recipient not supported yet"), + }; + + // Account our sent request bytes. + self.network.peer_stats(0).inc_sent(outgoing_request.payload.encoded_size()); + + // If peer is disconnected return an error + if !self.network.is_peer_connected(&authority_discovery_id) { + let future = async move { + let _ = outgoing_request + .pending_response + .send(Err(RequestFailure::NotConnected)); + } + .boxed(); + return self.not_connected_response(&authority_discovery_id, future) + } + + // Account for remote received request bytes. + self.network + .peer_stats_by_id(&authority_discovery_id) + .inc_received(outgoing_request.payload.encoded_size()); + + let available_data = + self.availabilty.available_data.get(*candidate_index).unwrap().clone(); + + let size = available_data.encoded_size(); + + let response = if random_error(self.config.error) { + Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)) + } else { + Ok(req_res::v1::AvailableDataFetchingResponse::from(Some(available_data)) + .encode()) + }; + + let future = async move { + let _ = outgoing_request.pending_response.send(response); + } + .boxed(); + + let authority_discovery_id_clone = authority_discovery_id.clone(); + + let future_wrapper = async move { + // Forward the response to the ingress channel of our node. + // On receive side we apply our node receiving rate limit. 
+ let action = + NetworkAction::new(authority_discovery_id_clone, future, size, None); + ingress_tx.send(action).unwrap(); + } + .boxed(); + + NetworkAction::new( + authority_discovery_id, + future_wrapper, + size, + // Generate a random latency based on configuration. + random_latency(self.config.latency.as_ref()), + ) + }, + _ => panic!("received an unexpected request"), + } + } +} + +#[overseer::subsystem(NetworkBridgeTx, error=SubsystemError, prefix=self::overseer)] +impl MockNetworkBridgeTx { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = self.run(ctx).map(|_| Ok(())).boxed(); + + SpawnedSubsystem { name: "test-environment", future } + } +} + +#[overseer::contextbounds(NetworkBridgeTx, prefix = self::overseer)] +impl MockNetworkBridgeTx { + async fn run(mut self, mut ctx: Context) { + let (mut ingress_tx, mut ingress_rx) = + tokio::sync::mpsc::unbounded_channel::(); + + // Initialize our node bandwidth limits. + let mut rx_limiter = RateLimit::new(10, self.config.bandwidth); + + let our_network = self.network.clone(); + + // This task will handle node messages receipt from the simulated network. + ctx.spawn_blocking( + "network-receive", + async move { + while let Some(action) = ingress_rx.recv().await { + let size = action.size(); + + // account for our node receiving the data. + our_network.inc_received(size); + rx_limiter.reap(size).await; + action.run().await; + } + } + .boxed(), + ) + .expect("We never fail to spawn tasks"); + + // Main subsystem loop. + loop { + let msg = ctx.recv().await.expect("Overseer never fails us"); + + match msg { + orchestra::FromOrchestra::Signal(signal) => + if signal == OverseerSignal::Conclude { + return + }, + orchestra::FromOrchestra::Communication { msg } => match msg { + NetworkBridgeTxMessage::SendRequests(requests, _if_disconnected) => { + for request in requests { + gum::debug!(target: LOG_TARGET, request = ?request, "Processing request"); + self.network.inc_sent(request_size(&request)); + let action = self.respond_to_send_request(request, &mut ingress_tx); + + // Will account for our node sending the request over the emulated + // network. + self.network.submit_peer_action(action.peer(), action); + } + }, + _ => { + unimplemented!("Unexpected network bridge message") + }, + }, + } + } + } +} + +// A helper to determine the request payload size. +fn request_size(request: &Requests) -> usize { + match request { + Requests::ChunkFetchingV1(outgoing_request) => outgoing_request.payload.encoded_size(), + Requests::AvailableDataFetchingV1(outgoing_request) => + outgoing_request.payload.encoded_size(), + _ => unimplemented!("received an unexpected request"), + } +} diff --git a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs new file mode 100644 index 0000000000000000000000000000000000000000..d664ebead3cc416c502d32c0a1922b49b408eb29 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs @@ -0,0 +1,110 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +//! +//! A generic runtime api subsystem mockup suitable to be used in benchmarks. + +use polkadot_primitives::{GroupIndex, IndexedVec, SessionInfo, ValidatorIndex}; + +use polkadot_node_subsystem::{ + messages::{RuntimeApiMessage, RuntimeApiRequest}, + overseer, SpawnedSubsystem, SubsystemError, +}; +use polkadot_node_subsystem_types::OverseerSignal; + +use crate::core::configuration::{TestAuthorities, TestConfiguration}; +use futures::FutureExt; + +const LOG_TARGET: &str = "subsystem-bench::runtime-api-mock"; + +pub struct RuntimeApiState { + authorities: TestAuthorities, +} + +pub struct MockRuntimeApi { + state: RuntimeApiState, + config: TestConfiguration, +} + +impl MockRuntimeApi { + pub fn new(config: TestConfiguration, authorities: TestAuthorities) -> MockRuntimeApi { + Self { state: RuntimeApiState { authorities }, config } + } + + fn session_info(&self) -> SessionInfo { + let all_validators = (0..self.config.n_validators) + .map(|i| ValidatorIndex(i as _)) + .collect::>(); + + let validator_groups = all_validators.chunks(5).map(Vec::from).collect::>(); + + SessionInfo { + validators: self.state.authorities.validator_public.clone().into(), + discovery_keys: self.state.authorities.validator_authority_id.clone(), + validator_groups: IndexedVec::>::from(validator_groups), + assignment_keys: vec![], + n_cores: self.config.n_cores as u32, + zeroth_delay_tranche_width: 0, + relay_vrf_modulo_samples: 0, + n_delay_tranches: 0, + no_show_slots: 0, + needed_approvals: 0, + active_validator_indices: vec![], + dispute_period: 6, + random_seed: [0u8; 32], + } + } +} + +#[overseer::subsystem(RuntimeApi, error=SubsystemError, prefix=self::overseer)] +impl MockRuntimeApi { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = self.run(ctx).map(|_| Ok(())).boxed(); + + SpawnedSubsystem { name: "test-environment", future } + } +} + +#[overseer::contextbounds(RuntimeApi, prefix = self::overseer)] +impl MockRuntimeApi { + async fn run(self, mut ctx: Context) { + loop { + let msg = ctx.recv().await.expect("Overseer never fails us"); + + match msg { + orchestra::FromOrchestra::Signal(signal) => + if signal == OverseerSignal::Conclude { + return + }, + orchestra::FromOrchestra::Communication { msg } => { + gum::debug!(target: LOG_TARGET, msg=?msg, "recv message"); + + match msg { + RuntimeApiMessage::Request( + _request, + RuntimeApiRequest::SessionInfo(_session_index, sender), + ) => { + let _ = sender.send(Ok(Some(self.session_info()))); + }, + // Long term TODO: implement more as needed. + _ => { + unimplemented!("Unexpected runtime-api message") + }, + } + }, + } + } + } +} diff --git a/polkadot/node/subsystem-bench/src/core/mod.rs b/polkadot/node/subsystem-bench/src/core/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..282788d143b44a9a2444533f1eda756e0385c0a2 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/core/mod.rs @@ -0,0 +1,24 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +const LOG_TARGET: &str = "subsystem-bench::core"; + +pub mod configuration; +pub mod display; +pub mod environment; +pub mod keyring; +pub mod mock; +pub mod network; diff --git a/polkadot/node/subsystem-bench/src/core/network.rs b/polkadot/node/subsystem-bench/src/core/network.rs new file mode 100644 index 0000000000000000000000000000000000000000..c4e20b421d342fc50a0fa36fb7c8ab6d959a5fff --- /dev/null +++ b/polkadot/node/subsystem-bench/src/core/network.rs @@ -0,0 +1,485 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +use super::{ + configuration::{TestAuthorities, TestConfiguration}, + environment::TestEnvironmentDependencies, + *, +}; +use colored::Colorize; +use polkadot_primitives::AuthorityDiscoveryId; +use prometheus_endpoint::U64; +use rand::{seq::SliceRandom, thread_rng}; +use sc_service::SpawnTaskHandle; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; +use tokio::sync::mpsc::UnboundedSender; + +// An emulated node egress traffic rate_limiter. +#[derive(Debug)] +pub struct RateLimit { + // How often we refill credits in buckets + tick_rate: usize, + // Total ticks + total_ticks: usize, + // Max refill per tick + max_refill: usize, + // Available credit. We allow for bursts over 1/tick_rate of `cps` budget, but we + // account it by negative credit. + credits: isize, + // When last refilled. + last_refill: Instant, +} + +impl RateLimit { + // Create a new `RateLimit` from a `cps` (credits per second) budget and + // `tick_rate`. + pub fn new(tick_rate: usize, cps: usize) -> Self { + // Compute how much refill for each tick + let max_refill = cps / tick_rate; + RateLimit { + tick_rate, + total_ticks: 0, + max_refill, + // A fresh start + credits: max_refill as isize, + last_refill: Instant::now(), + } + } + + pub async fn refill(&mut self) { + // If this is called to early, we need to sleep until next tick. + let now = Instant::now(); + let next_tick_delta = + (self.last_refill + Duration::from_millis(1000 / self.tick_rate as u64)) - now; + + // Sleep until next tick. + if !next_tick_delta.is_zero() { + gum::trace!(target: LOG_TARGET, "need to sleep {}ms", next_tick_delta.as_millis()); + tokio::time::sleep(next_tick_delta).await; + } + + self.total_ticks += 1; + self.credits += self.max_refill as isize; + self.last_refill = Instant::now(); + } + + // Reap credits from the bucket. + // Blocks if credits budged goes negative during call. 
+	pub async fn reap(&mut self, amount: usize) {
+		self.credits -= amount as isize;
+
+		if self.credits >= 0 {
+			return
+		}
+
+		while self.credits < 0 {
+			gum::trace!(target: LOG_TARGET, "Before refill: {:?}", &self);
+			self.refill().await;
+			gum::trace!(target: LOG_TARGET, "After refill: {:?}", &self);
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use std::time::Instant;
+
+	use super::RateLimit;
+
+	#[tokio::test]
+	async fn test_expected_rate() {
+		let tick_rate = 200;
+		let budget = 1_000_000;
+		// The rate must not exceed `budget` credits per second.
+		let mut rate_limiter = RateLimit::new(tick_rate, budget);
+		let mut total_sent = 0usize;
+		let start = Instant::now();
+
+		let mut reap_amount = 0;
+		while rate_limiter.total_ticks < tick_rate {
+			reap_amount += 1;
+			reap_amount %= 100;
+
+			rate_limiter.reap(reap_amount).await;
+			total_sent += reap_amount;
+		}
+
+		let end = Instant::now();
+
+		println!("duration: {}", (end - start).as_millis());
+
+		// Allow up to `budget/max_refill` error tolerance
+		let lower_bound = budget as u128 * ((end - start).as_millis() / 1000u128);
+		let upper_bound = budget as u128 *
+			((end - start).as_millis() / 1000u128 + rate_limiter.max_refill as u128);
+		assert!(total_sent as u128 >= lower_bound);
+		assert!(total_sent as u128 <= upper_bound);
+	}
+}
+
+// A network peer emulator. It spawns a task that accepts `NetworkAction`s and
+// executes them with configurable delay and bandwidth constraints. Typically
+// these actions wrap a future that performs a channel send to the subsystem(s) under test.
+#[derive(Clone)]
+struct PeerEmulator {
+	// The queue of requests waiting to be served by the emulator
+	actions_tx: UnboundedSender<NetworkAction>,
+}
+
+impl PeerEmulator {
+	pub fn new(
+		bandwidth: usize,
+		spawn_task_handle: SpawnTaskHandle,
+		stats: Arc<PeerEmulatorStats>,
+	) -> Self {
+		let (actions_tx, mut actions_rx) = tokio::sync::mpsc::unbounded_channel();
+
+		spawn_task_handle
+			.clone()
+			.spawn("peer-emulator", "test-environment", async move {
+				// Rate limit peer send.
+				let mut rate_limiter = RateLimit::new(10, bandwidth);
+				loop {
+					let stats_clone = stats.clone();
+					let maybe_action: Option<NetworkAction> = actions_rx.recv().await;
+					if let Some(action) = maybe_action {
+						let size = action.size();
+						rate_limiter.reap(size).await;
+						if let Some(latency) = action.latency {
+							spawn_task_handle.spawn(
+								"peer-emulator-latency",
+								"test-environment",
+								async move {
+									tokio::time::sleep(latency).await;
+									action.run().await;
+									stats_clone.inc_sent(size);
+								},
+							)
+						} else {
+							action.run().await;
+							stats_clone.inc_sent(size);
+						}
+					} else {
+						break
+					}
+				}
+			});
+
+		Self { actions_tx }
+	}
+
+	// Queue a send request from the emulated peer.
+	pub fn send(&mut self, action: NetworkAction) {
+		self.actions_tx.send(action).expect("peer emulator task lives");
+	}
+}
+
+pub type ActionFuture = std::pin::Pin<Box<dyn futures::Future<Output = ()> + std::marker::Send>>;
+/// A network action to be completed by the emulator task.
+pub struct NetworkAction {
+	// The function that performs the action
+	run: ActionFuture,
+	// The payload size that we simulate sending/receiving from a peer
+	size: usize,
+	// Peer which should run the action.
+	peer: AuthorityDiscoveryId,
+	// The amount of time to delay the polling of `run`
+	latency: Option<Duration>,
+}
+
+unsafe impl Send for NetworkAction {}
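For orientation, here is a minimal, self-contained sketch of the token-bucket pattern that `RateLimit` and `PeerEmulator` above implement: spend credits per payload and sleep one tick whenever the balance goes negative. The names (`Bucket`, `pace`, the 1 MiB/s figures) are made up for illustration and are not part of the patch; the sketch only assumes a `tokio` dependency with the `rt`, `macros` and `time` features.

```rust
use std::time::{Duration, Instant};

/// Simplified token bucket: `cps` credits per second, refilled in `tick_rate` steps.
struct Bucket {
    refill_per_tick: isize,
    tick: Duration,
    credits: isize,
}

impl Bucket {
    fn new(cps: usize, tick_rate: usize) -> Self {
        let refill_per_tick = (cps / tick_rate) as isize;
        Self {
            refill_per_tick,
            tick: Duration::from_millis(1000 / tick_rate as u64),
            credits: refill_per_tick,
        }
    }

    /// Spend `amount` credits, sleeping tick by tick until the balance recovers.
    /// This is what bounds the average throughput to roughly `cps` bytes/s.
    async fn reap(&mut self, amount: usize) {
        self.credits -= amount as isize;
        while self.credits < 0 {
            tokio::time::sleep(self.tick).await;
            self.credits += self.refill_per_tick;
        }
    }
}

#[tokio::main]
async fn main() {
    // A 1 MiB/s budget refilled 10 times per second, like `PeerEmulator` above.
    let mut bucket = Bucket::new(1024 * 1024, 10);
    let start = Instant::now();
    for _ in 0..100 {
        // Pretend each send is a 64 KiB payload.
        bucket.reap(64 * 1024).await;
    }
    // ~6.4 MiB at ~1 MiB/s should take roughly 6-7 seconds.
    println!("sent 100 x 64 KiB in {:?}", start.elapsed());
}
```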
+/// Book keeping of sent and received bytes.
+pub struct PeerEmulatorStats {
+	rx_bytes_total: AtomicU64,
+	tx_bytes_total: AtomicU64,
+	metrics: Metrics,
+	peer_index: usize,
+}
+
+impl PeerEmulatorStats {
+	pub(crate) fn new(peer_index: usize, metrics: Metrics) -> Self {
+		Self {
+			metrics,
+			rx_bytes_total: AtomicU64::from(0),
+			tx_bytes_total: AtomicU64::from(0),
+			peer_index,
+		}
+	}
+
+	pub fn inc_sent(&self, bytes: usize) {
+		self.tx_bytes_total.fetch_add(bytes as u64, Ordering::Relaxed);
+		self.metrics.on_peer_sent(self.peer_index, bytes);
+	}
+
+	pub fn inc_received(&self, bytes: usize) {
+		self.rx_bytes_total.fetch_add(bytes as u64, Ordering::Relaxed);
+		self.metrics.on_peer_received(self.peer_index, bytes);
+	}
+
+	pub fn sent(&self) -> u64 {
+		self.tx_bytes_total.load(Ordering::Relaxed)
+	}
+
+	pub fn received(&self) -> u64 {
+		self.rx_bytes_total.load(Ordering::Relaxed)
+	}
+}
+
+#[derive(Debug, Default)]
+pub struct PeerStats {
+	pub rx_bytes_total: u64,
+	pub tx_bytes_total: u64,
+}
+impl NetworkAction {
+	pub fn new(
+		peer: AuthorityDiscoveryId,
+		run: ActionFuture,
+		size: usize,
+		latency: Option<Duration>,
+	) -> Self {
+		Self { run, size, peer, latency }
+	}
+
+	pub fn size(&self) -> usize {
+		self.size
+	}
+
+	pub async fn run(self) {
+		self.run.await;
+	}
+
+	pub fn peer(&self) -> AuthorityDiscoveryId {
+		self.peer.clone()
+	}
+}
+
+/// The state of a peer on the emulated network.
+#[derive(Clone)]
+enum Peer {
+	Connected(PeerEmulator),
+	Disconnected(PeerEmulator),
+}
+
+impl Peer {
+	pub fn disconnect(&mut self) {
+		let new_self = match self {
+			Peer::Connected(peer) => Peer::Disconnected(peer.clone()),
+			_ => return,
+		};
+		*self = new_self;
+	}
+
+	pub fn is_connected(&self) -> bool {
+		matches!(self, Peer::Connected(_))
+	}
+
+	pub fn emulator(&mut self) -> &mut PeerEmulator {
+		match self {
+			Peer::Connected(ref mut emulator) => emulator,
+			Peer::Disconnected(ref mut emulator) => emulator,
+		}
+	}
+}
+
+/// Mocks the network bridge and an arbitrary number of connected peer nodes.
+/// Implements network latency, bandwidth and connection errors.
+#[derive(Clone)]
+pub struct NetworkEmulator {
+	// Per peer network emulation.
+	peers: Vec<Peer>,
+	/// Per peer stats.
+	stats: Vec<Arc<PeerEmulatorStats>>,
+	/// Each emulated peer is a validator.
+	validator_authority_ids: HashMap<AuthorityDiscoveryId, usize>,
+}
+
+impl NetworkEmulator {
+	pub fn new(
+		config: &TestConfiguration,
+		dependencies: &TestEnvironmentDependencies,
+		authorities: &TestAuthorities,
+	) -> Self {
+		let n_peers = config.n_validators;
+		gum::info!(target: LOG_TARGET, "{}", format!("Initializing emulation for a {} peer network.", n_peers).bright_blue());
+		gum::info!(target: LOG_TARGET, "{}", format!("connectivity {}%, error {}%", config.connectivity, config.error).bright_black());
+
+		let metrics =
+			Metrics::new(&dependencies.registry).expect("Metrics always register successfully");
+		let mut validator_authority_id_mapping = HashMap::new();
+
+		// Create a `PeerEmulator` for each peer.
+		let (stats, mut peers): (_, Vec<_>) = (0..n_peers)
+			.zip(authorities.validator_authority_id.clone())
+			.map(|(peer_index, authority_id)| {
+				validator_authority_id_mapping.insert(authority_id, peer_index);
+				let stats = Arc::new(PeerEmulatorStats::new(peer_index, metrics.clone()));
+				(
+					stats.clone(),
+					Peer::Connected(PeerEmulator::new(
+						config.peer_bandwidth,
+						dependencies.task_manager.spawn_handle(),
+						stats,
+					)),
+				)
+			})
+			.unzip();
+
+		// Compute how many peers stay connected, e.g. 90% connectivity over 500
+		// validators keeps 450 peers connected and disconnects the remaining 50.
+		let connected_count = config.n_validators as f64 / (100.0 / config.connectivity as f64);
+
+		let (_connected, to_disconnect) =
+			peers.partial_shuffle(&mut thread_rng(), connected_count as usize);
+
+		for peer in to_disconnect {
+			peer.disconnect();
+		}
+
+		gum::info!(target: LOG_TARGET, "{}", format!("Network created, connected validator count {}", connected_count).bright_black());
+
+		Self { peers, stats, validator_authority_ids: validator_authority_id_mapping }
+	}
+
+	pub fn is_peer_connected(&self, peer: &AuthorityDiscoveryId) -> bool {
+		self.peer(peer).is_connected()
+	}
+
+	pub fn submit_peer_action(&mut self, peer: AuthorityDiscoveryId, action: NetworkAction) {
+		let index = self
+			.validator_authority_ids
+			.get(&peer)
+			.expect("all test authorities are valid; qed");
+
+		let peer = self.peers.get_mut(*index).expect("We just retrieved the index above; qed");
+
+		// Only actions of size 0 are allowed on disconnected peers.
+		// Typically these are delayed error response sends.
+		if action.size() > 0 && !peer.is_connected() {
+			gum::warn!(target: LOG_TARGET, peer_index = index, "Attempted to send data from a disconnected peer, operation ignored");
+			return
+		}
+
+		peer.emulator().send(action);
+	}
+
+	// Returns the sent/received stats for `peer_index`.
+	pub fn peer_stats(&self, peer_index: usize) -> Arc<PeerEmulatorStats> {
+		self.stats[peer_index].clone()
+	}
+
+	// Helper to get peer index by `AuthorityDiscoveryId`
+	fn peer_index(&self, peer: &AuthorityDiscoveryId) -> usize {
+		*self
+			.validator_authority_ids
+			.get(peer)
+			.expect("all test authorities are valid; qed")
+	}
+
+	// Return the `Peer` entry for a given `AuthorityDiscoveryId`.
+	fn peer(&self, peer: &AuthorityDiscoveryId) -> &Peer {
+		&self.peers[self.peer_index(peer)]
+	}
+	// Returns the sent/received stats for `peer`.
+	pub fn peer_stats_by_id(&mut self, peer: &AuthorityDiscoveryId) -> Arc<PeerEmulatorStats> {
+		let peer_index = self.peer_index(peer);
+
+		self.stats[peer_index].clone()
+	}
+
+	// Returns the sent/received stats for all peers.
+	pub fn stats(&self) -> Vec<PeerStats> {
+		let r = self
+			.stats
+			.iter()
+			.map(|stats| PeerStats {
+				rx_bytes_total: stats.received(),
+				tx_bytes_total: stats.sent(),
+			})
+			.collect::<Vec<_>>();
+		r
+	}
+
+	// Increment bytes sent by our node (the node that contains the subsystem under test).
+	pub fn inc_sent(&self, bytes: usize) {
+		// Our node is always peer 0.
+		self.peer_stats(0).inc_sent(bytes);
+	}
+
+	// Increment bytes received by our node (the node that contains the subsystem under test).
+	pub fn inc_received(&self, bytes: usize) {
+		// Our node is always peer 0.
+		self.peer_stats(0).inc_received(bytes);
+	}
+}
+
+use polkadot_node_subsystem_util::metrics::prometheus::{
+	self, CounterVec, Opts, PrometheusError, Registry,
+};
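The `Metrics` type that follows wires these per-peer byte counters into Prometheus: one `CounterVec` per direction, labelled by peer, incremented by the payload size. As a rough standalone illustration of that pattern using the plain `prometheus` crate (the patch itself goes through the Substrate re-exports above, so the concrete types differ slightly; the metric and variable names below are ad hoc):

```rust
use prometheus::{IntCounterVec, Opts, Registry};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();

    // One counter family, keyed by the emulated peer's label.
    let peer_bytes_sent = IntCounterVec::new(
        Opts::new("peer_total_bytes_sent", "Total number of bytes a peer has sent."),
        &["peer"],
    )?;
    registry.register(Box::new(peer_bytes_sent.clone()))?;

    // Roughly what `Metrics::on_peer_sent(3, 1024)` does below.
    peer_bytes_sent.with_label_values(&["node3"]).inc_by(1024);

    // Each labelled series shows up independently when the registry is gathered.
    for family in registry.gather() {
        println!("{}: {} series", family.get_name(), family.get_metric().len());
    }
    Ok(())
}
```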
+/// Emulated network metrics.
+#[derive(Clone)]
+pub(crate) struct Metrics {
+	/// Number of bytes sent per peer.
+	peer_total_sent: CounterVec<U64>,
+	/// Number of bytes received per peer.
+	peer_total_received: CounterVec<U64>,
+}
+
+impl Metrics {
+	pub fn new(registry: &Registry) -> Result<Self, PrometheusError> {
+		Ok(Self {
+			peer_total_sent: prometheus::register(
+				CounterVec::new(
+					Opts::new(
+						"subsystem_benchmark_network_peer_total_bytes_sent",
+						"Total number of bytes a peer has sent.",
+					),
+					&["peer"],
+				)?,
+				registry,
+			)?,
+			peer_total_received: prometheus::register(
+				CounterVec::new(
+					Opts::new(
+						"subsystem_benchmark_network_peer_total_bytes_received",
+						"Total number of bytes a peer has received.",
+					),
+					&["peer"],
+				)?,
+				registry,
+			)?,
+		})
+	}
+
+	/// Increment total sent for a peer.
+	pub fn on_peer_sent(&self, peer_index: usize, bytes: usize) {
+		self.peer_total_sent
+			.with_label_values(vec![format!("node{}", peer_index).as_str()].as_slice())
+			.inc_by(bytes as u64);
+	}
+
+	/// Increment total received for a peer.
+	pub fn on_peer_received(&self, peer_index: usize, bytes: usize) {
+		self.peer_total_received
+			.with_label_values(vec![format!("node{}", peer_index).as_str()].as_slice())
+			.inc_by(bytes as u64);
+	}
+}
diff --git a/polkadot/node/subsystem-bench/src/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/subsystem-bench.rs
new file mode 100644
index 0000000000000000000000000000000000000000..29b62b27855a2f4867540ccd5dc19d1fa72cd5bd
--- /dev/null
+++ b/polkadot/node/subsystem-bench/src/subsystem-bench.rs
@@ -0,0 +1,215 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! A tool for running subsystem benchmark tests designed for development and
+//! CI regression testing.
+use clap::Parser;
+use color_eyre::eyre;
+use pyroscope::PyroscopeAgent;
+use pyroscope_pprofrs::{pprof_backend, PprofConfig};
+
+use colored::Colorize;
+use std::{path::Path, time::Duration};
+
+pub(crate) mod availability;
+pub(crate) mod cli;
+pub(crate) mod core;
+
+use availability::{prepare_test, NetworkEmulation, TestState};
+use cli::TestObjective;
+
+use core::{
+	configuration::TestConfiguration,
+	environment::{TestEnvironment, GENESIS_HASH},
+};
+
+use clap_num::number_range;
+
+use crate::core::display::display_configuration;
+
+fn le_100(s: &str) -> Result<usize, String> {
+	number_range(s, 0, 100)
+}
+
+fn le_5000(s: &str) -> Result<u64, String> {
+	number_range(s, 0, 5000)
+}
+
+#[derive(Debug, Parser)]
+#[allow(missing_docs)]
+struct BenchCli {
+	#[arg(long, value_enum, ignore_case = true, default_value_t = NetworkEmulation::Ideal)]
+	/// The type of network to be emulated
+	pub network: NetworkEmulation,
+
+	#[clap(flatten)]
+	pub standard_configuration: cli::StandardTestOptions,
+
+	#[clap(short, long)]
+	/// The bandwidth of simulated remote peers in KiB
+	pub peer_bandwidth: Option<usize>,
+
+	#[clap(short, long)]
+	/// The bandwidth of our simulated node in KiB
+	pub bandwidth: Option<usize>,
+
+	#[clap(long, value_parser=le_100)]
+	/// Simulated connection error ratio [0-100].
+ pub peer_error: Option, + + #[clap(long, value_parser=le_5000)] + /// Minimum remote peer latency in milliseconds [0-5000]. + pub peer_min_latency: Option, + + #[clap(long, value_parser=le_5000)] + /// Maximum remote peer latency in milliseconds [0-5000]. + pub peer_max_latency: Option, + + #[clap(long, default_value_t = false)] + /// Enable CPU Profiling with Pyroscope + pub profile: bool, + + #[clap(long, requires = "profile", default_value_t = String::from("http://localhost:4040"))] + /// Pyroscope Server URL + pub pyroscope_url: String, + + #[clap(long, requires = "profile", default_value_t = 113)] + /// Pyroscope Sample Rate + pub pyroscope_sample_rate: u32, + + #[command(subcommand)] + pub objective: cli::TestObjective, +} + +impl BenchCli { + fn launch(self) -> eyre::Result<()> { + let agent_running = if self.profile { + let agent = PyroscopeAgent::builder(self.pyroscope_url.as_str(), "subsystem-bench") + .backend(pprof_backend(PprofConfig::new().sample_rate(self.pyroscope_sample_rate))) + .build()?; + + Some(agent.start()?) + } else { + None + }; + + let configuration = self.standard_configuration; + let mut test_config = match self.objective { + TestObjective::TestSequence(options) => { + let test_sequence = + core::configuration::TestSequence::new_from_file(Path::new(&options.path)) + .expect("File exists") + .into_vec(); + let num_steps = test_sequence.len(); + gum::info!( + "{}", + format!("Sequence contains {} step(s)", num_steps).bright_purple() + ); + for (index, test_config) in test_sequence.into_iter().enumerate() { + gum::info!("{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),); + display_configuration(&test_config); + + let mut state = TestState::new(&test_config); + let (mut env, _protocol_config) = prepare_test(test_config, &mut state); + env.runtime() + .block_on(availability::benchmark_availability_read(&mut env, state)); + } + return Ok(()) + }, + TestObjective::DataAvailabilityRead(ref _options) => match self.network { + NetworkEmulation::Healthy => TestConfiguration::healthy_network( + self.objective, + configuration.num_blocks, + configuration.n_validators, + configuration.n_cores, + configuration.min_pov_size, + configuration.max_pov_size, + ), + NetworkEmulation::Degraded => TestConfiguration::degraded_network( + self.objective, + configuration.num_blocks, + configuration.n_validators, + configuration.n_cores, + configuration.min_pov_size, + configuration.max_pov_size, + ), + NetworkEmulation::Ideal => TestConfiguration::ideal_network( + self.objective, + configuration.num_blocks, + configuration.n_validators, + configuration.n_cores, + configuration.min_pov_size, + configuration.max_pov_size, + ), + }, + }; + + let mut latency_config = test_config.latency.clone().unwrap_or_default(); + + if let Some(latency) = self.peer_min_latency { + latency_config.min_latency = Duration::from_millis(latency); + } + + if let Some(latency) = self.peer_max_latency { + latency_config.max_latency = Duration::from_millis(latency); + } + + if let Some(error) = self.peer_error { + test_config.error = error; + } + + if let Some(bandwidth) = self.peer_bandwidth { + // CLI expects bw in KiB + test_config.peer_bandwidth = bandwidth * 1024; + } + + if let Some(bandwidth) = self.bandwidth { + // CLI expects bw in KiB + test_config.bandwidth = bandwidth * 1024; + } + + display_configuration(&test_config); + + let mut state = TestState::new(&test_config); + let (mut env, _protocol_config) = prepare_test(test_config, &mut state); + // test_config.write_to_disk(); + 
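A quick sanity check of the bandwidth unit conversion just above: the CLI flags take KiB/s, while the emulator's `RateLimit` budget is expressed in bytes/s, hence the multiplication by 1024. The snippet below is illustrative only; the variable names are made up.

```rust
fn main() {
    // CLI value is KiB/s; the network emulator budgets in bytes/s.
    let cli_peer_bandwidth_kib: u64 = 500;
    let bytes_per_second = cli_peer_bandwidth_kib * 1024;
    assert_eq!(bytes_per_second, 512_000);
    println!("--peer-bandwidth {cli_peer_bandwidth_kib} => {bytes_per_second} bytes/s");
}
```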
env.runtime() + .block_on(availability::benchmark_availability_read(&mut env, state)); + + if let Some(agent_running) = agent_running { + let agent_ready = agent_running.stop()?; + agent_ready.shutdown(); + } + + Ok(()) + } +} + +fn main() -> eyre::Result<()> { + color_eyre::install()?; + env_logger::builder() + .filter(Some("hyper"), log::LevelFilter::Info) + // Avoid `Terminating due to subsystem exit subsystem` warnings + .filter(Some("polkadot_overseer"), log::LevelFilter::Error) + .filter(None, log::LevelFilter::Info) + // .filter(None, log::LevelFilter::Trace) + .try_init() + .unwrap(); + + let cli: BenchCli = BenchCli::parse(); + cli.launch()?; + Ok(()) +} diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml index 9087ca11f5d22ee5307fbc3877c88e1be5a72e27..d0be9af4ed639a70d3fbba59cba523d04e857072 100644 --- a/polkadot/node/subsystem-test-helpers/Cargo.toml +++ b/polkadot/node/subsystem-test-helpers/Cargo.toml @@ -7,13 +7,19 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" futures = "0.3.21" parking_lot = "0.12.0" polkadot-node-subsystem = { path = "../subsystem" } +polkadot-erasure-coding = { path = "../../erasure-coding" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-primitives = { path = "../../primitives" } +polkadot-node-primitives = { path = "../primitives" } + sc-client-api = { path = "../../../substrate/client/api" } sc-utils = { path = "../../../substrate/client/utils" } sp-core = { path = "../../../substrate/primitives/core" } diff --git a/polkadot/node/subsystem-test-helpers/src/lib.rs b/polkadot/node/subsystem-test-helpers/src/lib.rs index 3f92513498c4129f418690946c2a2e2ac85605cc..dfa78e04b8c963c10f8a0ce0e4d6e3d361935810 100644 --- a/polkadot/node/subsystem-test-helpers/src/lib.rs +++ b/polkadot/node/subsystem-test-helpers/src/lib.rs @@ -18,11 +18,14 @@ #![warn(missing_docs)] +use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; +use polkadot_node_primitives::{AvailableData, ErasureChunk, Proof}; use polkadot_node_subsystem::{ messages::AllMessages, overseer, FromOrchestra, OverseerSignal, SpawnGlue, SpawnedSubsystem, SubsystemError, SubsystemResult, TrySendError, }; use polkadot_node_subsystem_util::TimeoutExt; +use polkadot_primitives::{Hash, ValidatorIndex}; use futures::{channel::mpsc, poll, prelude::*}; use parking_lot::Mutex; @@ -440,6 +443,34 @@ impl Future for Yield { } } +/// Helper for chunking available data. 
+pub fn derive_erasure_chunks_with_proofs_and_root( + n_validators: usize, + available_data: &AvailableData, + alter_chunk: impl Fn(usize, &mut Vec), +) -> (Vec, Hash) { + let mut chunks: Vec> = obtain_chunks(n_validators, available_data).unwrap(); + + for (i, chunk) in chunks.iter_mut().enumerate() { + alter_chunk(i, chunk) + } + + // create proofs for each erasure chunk + let branches = branches(chunks.as_ref()); + + let root = branches.root(); + let erasure_chunks = branches + .enumerate() + .map(|(index, (proof, chunk))| ErasureChunk { + chunk: chunk.to_vec(), + index: ValidatorIndex(index as _), + proof: Proof::try_from(proof).unwrap(), + }) + .collect::>(); + + (erasure_chunks, root) +} + #[cfg(test)] mod tests { use super::*; diff --git a/polkadot/node/subsystem-test-helpers/src/mock.rs b/polkadot/node/subsystem-test-helpers/src/mock.rs index 522bc3c2cc4f4ec91357de9b373f3588a96bde33..14026960ac13ec285c0dc972c26fbf7dd1b9b4b9 100644 --- a/polkadot/node/subsystem-test-helpers/src/mock.rs +++ b/polkadot/node/subsystem-test-helpers/src/mock.rs @@ -16,7 +16,7 @@ use std::sync::Arc; -use polkadot_node_subsystem::{jaeger, ActivatedLeaf}; +use polkadot_node_subsystem::{jaeger, ActivatedLeaf, BlockInfo}; use sc_client_api::UnpinHandle; use sc_keystore::LocalKeystore; use sc_utils::mpsc::tracing_unbounded; @@ -59,3 +59,8 @@ pub fn new_leaf(hash: Hash, number: BlockNumber) -> ActivatedLeaf { span: Arc::new(jaeger::Span::Disabled), } } + +/// Create a new leaf with the given hash and number. +pub fn new_block_import_info(hash: Hash, number: BlockNumber) -> BlockInfo { + BlockInfo { hash, parent_hash: Hash::default(), number, unpin_handle: dummy_unpin_handle(hash) } +} diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 9fd3775da5917dd7a571ae2e5f7acec7ecdb29ed..6713e9031234aad219d780918eb18af1b8b08bc8 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] derive_more = "0.99.17" futures = "0.3.21" @@ -14,15 +17,17 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-statement-table = { path = "../../statement-table" } polkadot-node-jaeger = { path = "../jaeger" } -orchestra = { version = "0.3.3", default-features = false, features=["futures_channel"] } +orchestra = { version = "0.3.3", default-features = false, features = ["futures_channel"] } sc-network = { path = "../../../substrate/client/network" } sp-api = { path = "../../../substrate/primitives/api" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } +sp-runtime = { path = "../../../substrate/primitives/runtime" } sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } sc-client-api = { path = "../../../substrate/client/api" } sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } smallvec = "1.8.0" substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } thiserror = "1.0.48" -async-trait = "0.1.57" +async-trait = "0.1.74" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/polkadot/node/subsystem-types/src/lib.rs b/polkadot/node/subsystem-types/src/lib.rs index 
e3d6e4decf20e1c28e99b7d821d04421a1a2211b..cd39aa03e56736399ab491705cc9568a52b2784e 100644 --- a/polkadot/node/subsystem-types/src/lib.rs +++ b/polkadot/node/subsystem-types/src/lib.rs @@ -40,7 +40,7 @@ pub mod errors; pub mod messages; mod runtime_client; -pub use runtime_client::{DefaultSubsystemClient, RuntimeApiSubsystemClient}; +pub use runtime_client::{ChainApiBackend, DefaultSubsystemClient, RuntimeApiSubsystemClient}; pub use jaeger::*; pub use polkadot_node_jaeger as jaeger; diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 4ddffc6dc5e830e056a2cab202c33a0b29f8040b..c7675c84b91c007eb05136ef25e900b748372b51 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -33,8 +33,8 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::{ approval::{ - v1::{BlockApprovalMeta, IndirectSignedApprovalVote}, - v2::{CandidateBitfield, IndirectAssignmentCertV2}, + v1::BlockApprovalMeta, + v2::{CandidateBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2}, }, AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig, CollationSecondedSignal, DisputeMessage, DisputeStatus, ErasureChunk, PoV, @@ -42,14 +42,15 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, - CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, - CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, - SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, + AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, + CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState, + DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Header as BlockHeader, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, PvfExecKind, SessionIndex, + SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -150,8 +151,8 @@ pub enum CandidateValidationMessage { pov: Arc, /// Session's executor parameters executor_params: ExecutorParams, - /// Execution timeout kind (backing/approvals) - exec_timeout_kind: PvfExecTimeoutKind, + /// Execution kind, used for timeouts and retries (backing/approvals) + exec_kind: PvfExecKind, /// The sending side of the response channel response_sender: oneshot::Sender>, }, @@ -175,8 +176,8 @@ pub enum CandidateValidationMessage { pov: Arc, /// Session's executor parameters executor_params: ExecutorParams, - /// Execution timeout kind (backing/approvals) - exec_timeout_kind: PvfExecTimeoutKind, + /// Execution kind, used for timeouts and retries (backing/approvals) + exec_kind: PvfExecKind, /// The sending side of the response channel response_sender: 
oneshot::Sender>, }, @@ -718,6 +719,11 @@ pub enum RuntimeApiRequest { /// /// If it's not supported by the Runtime, the async backing is said to be disabled. AsyncBackingParams(RuntimeApiSender), + /// Get the node features. + NodeFeatures(SessionIndex, RuntimeApiSender), + /// Approval voting params + /// `V10` + ApprovalVotingParams(SessionIndex, RuntimeApiSender), } impl RuntimeApiRequest { @@ -746,6 +752,12 @@ impl RuntimeApiRequest { /// `DisabledValidators` pub const DISABLED_VALIDATORS_RUNTIME_REQUIREMENT: u32 = 8; + + /// `Node features` + pub const NODE_FEATURES_RUNTIME_REQUIREMENT: u32 = 9; + + /// `approval_voting_params` + pub const APPROVAL_VOTING_PARAMS_REQUIREMENT: u32 = 10; } /// A message to the Runtime API subsystem. @@ -931,7 +943,7 @@ pub enum ApprovalVotingMessage { /// protocol. /// /// Should not be sent unless the block hash within the indirect vote is known. - CheckAndImportApproval(IndirectSignedApprovalVote, oneshot::Sender), + CheckAndImportApproval(IndirectSignedApprovalVoteV2, oneshot::Sender), /// Returns the highest possible ancestor hash of the provided block hash which is /// acceptable to vote on finality for. /// The `BlockNumber` provided is the number of the block's ancestor which is the @@ -947,7 +959,7 @@ pub enum ApprovalVotingMessage { /// requires calling into `approval-distribution`: Calls should be infrequent and bounded. GetApprovalSignaturesForCandidate( CandidateHash, - oneshot::Sender>, + oneshot::Sender, ValidatorSignature)>>, ), } @@ -963,7 +975,7 @@ pub enum ApprovalDistributionMessage { /// Distribute an approval vote for the local validator. The approval vote is assumed to be /// valid, relevant, and the corresponding approval already issued. /// If not, the subsystem is free to drop the message. - DistributeApproval(IndirectSignedApprovalVote), + DistributeApproval(IndirectSignedApprovalVoteV2), /// An update from the network bridge. #[from] NetworkBridgeUpdate(NetworkBridgeEvent), @@ -971,7 +983,7 @@ pub enum ApprovalDistributionMessage { /// Get all approval signatures for all chains a candidate appeared in. GetApprovalSignatures( HashSet<(Hash, CandidateIndex)>, - oneshot::Sender>, + oneshot::Sender, ValidatorSignature)>>, ), /// Approval checking lag update measured in blocks. 
ApprovalCheckingLagUpdate(BlockNumber), diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index f7adcf9862b5d09829cb1d0edbb3f068b9c55713..7f6183076101b4474e8059435b42a69b108fbb05 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,19 +16,76 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, runtime_api::ParachainHost, slashing, Block, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, + runtime_api::ParachainHost, + slashing, + vstaging::{self, ApprovalVotingParams}, + Block, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + Header, Id, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; +use sc_client_api::HeaderBackend; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; use sp_authority_discovery::AuthorityDiscoveryApi; +use sp_blockchain::Info; use sp_consensus_babe::{BabeApi, Epoch}; +use sp_runtime::traits::{Header as HeaderT, NumberFor}; use std::{collections::BTreeMap, sync::Arc}; +/// Offers header utilities. +/// +/// This is a async wrapper trait for ['HeaderBackend'] to be used with the +/// `ChainApiSubsystem`. +// This trait was introduced to suit the needs of collators. Depending on their operating mode, they +// might not have a client of the relay chain that can supply a synchronous HeaderBackend +// implementation. +#[async_trait] +pub trait ChainApiBackend: Send + Sync { + /// Get block header. Returns `None` if block is not found. + async fn header(&self, hash: Hash) -> sp_blockchain::Result>; + /// Get blockchain info. + async fn info(&self) -> sp_blockchain::Result>; + /// Get block number by hash. Returns `None` if the header is not in the chain. + async fn number( + &self, + hash: Hash, + ) -> sp_blockchain::Result::Number>>; + /// Get block hash by number. Returns `None` if the header is not in the chain. + async fn hash(&self, number: NumberFor) -> sp_blockchain::Result>; +} + +#[async_trait] +impl ChainApiBackend for T +where + T: HeaderBackend, +{ + /// Get block header. Returns `None` if block is not found. + async fn header(&self, hash: Hash) -> sp_blockchain::Result> { + HeaderBackend::header(self, hash) + } + + /// Get blockchain info. + async fn info(&self) -> sp_blockchain::Result> { + Ok(HeaderBackend::info(self)) + } + + /// Get block number by hash. Returns `None` if the header is not in the chain. + async fn number( + &self, + hash: Hash, + ) -> sp_blockchain::Result::Number>> { + HeaderBackend::number(self, hash) + } + + /// Get block hash by number. Returns `None` if the header is not in the chain. 
+ async fn hash(&self, number: NumberFor) -> sp_blockchain::Result> { + HeaderBackend::hash(self, number) + } +} + /// Exposes all runtime calls that are used by the runtime API subsystem. #[async_trait] pub trait RuntimeApiSubsystemClient { @@ -257,8 +314,21 @@ pub trait RuntimeApiSubsystemClient { ) -> Result, ApiError>; // === v8 === + /// Gets the disabled validators at a specific block height async fn disabled_validators(&self, at: Hash) -> Result, ApiError>; + + // === v9 === + /// Get the node features. + async fn node_features(&self, at: Hash) -> Result; + + // == v10: Approval voting params == + /// Approval voting configuration parameters + async fn approval_voting_params( + &self, + at: Hash, + session_index: SessionIndex, + ) -> Result; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. @@ -508,7 +578,20 @@ where self.client.runtime_api().async_backing_params(at) } + async fn node_features(&self, at: Hash) -> Result { + self.client.runtime_api().node_features(at) + } + async fn disabled_validators(&self, at: Hash) -> Result, ApiError> { self.client.runtime_api().disabled_validators(at) } + + /// Approval voting configuration parameters + async fn approval_voting_params( + &self, + at: Hash, + _session_index: SessionIndex, + ) -> Result { + self.client.runtime_api().approval_voting_params(at) + } } diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index d9364e2c2c0f15158b861ef0e8c44121a7e2d650..6668430d3b71857248e7428679c8ea0c3ab30511 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -6,8 +6,11 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" futures = "0.3.21" futures-channel = "0.3.23" itertools = "0.10" @@ -29,7 +32,7 @@ polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } polkadot-overseer = { path = "../overseer" } -metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features = ["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } @@ -37,7 +40,7 @@ sp-keystore = { path = "../../../substrate/primitives/keystore" } sc-client-api = { path = "../../../substrate/client/api" } kvdb = "0.13.0" -parity-db = { version = "0.4.8"} +parity-db = { version = "0.4.12" } [dev-dependencies] assert_matches = "1.4.0" diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 8d7cef88a70e082779cd6e0892502f27e486c7c6..0e44423b4e34338b0de2f56710695928c0ef89c3 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,10 +30,12 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - slashing, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, - ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, - ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, - ValidationCode, ValidationCodeHash, 
ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, + slashing, + vstaging::{node_features::FeatureIndex, NodeFeatures}, + AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, ExecutorParams, + GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, + SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; use crate::{ @@ -92,6 +94,8 @@ pub struct ExtendedSessionInfo { pub validator_info: ValidatorInfo, /// Session executor parameters pub executor_params: ExecutorParams, + /// Node features + pub node_features: NodeFeatures, } /// Information about ourselves, in case we are an `Authority`. @@ -202,7 +206,20 @@ impl RuntimeInfo { let validator_info = self.get_validator_info(&session_info)?; - let full_info = ExtendedSessionInfo { session_info, validator_info, executor_params }; + let node_features = request_node_features(parent, session_index, sender) + .await? + .unwrap_or(NodeFeatures::EMPTY); + let last_set_index = node_features.iter_ones().last().unwrap_or_default(); + if last_set_index >= FeatureIndex::FirstUnassigned as usize { + gum::warn!(target: LOG_TARGET, "Runtime requires feature bit {} that node doesn't support, please upgrade node version", last_set_index); + } + + let full_info = ExtendedSessionInfo { + session_info, + validator_info, + executor_params, + node_features, + }; self.session_info_cache.insert(session_index, full_info); } @@ -507,3 +524,32 @@ pub async fn request_min_backing_votes( min_backing_votes_res } } + +/// Request the node features enabled in the runtime. +/// Pass in the session index for caching purposes, as it should only change on session boundaries. +/// Prior to runtime API version 9, just return `None`. +pub async fn request_node_features( + parent: Hash, + session_index: SessionIndex, + sender: &mut impl overseer::SubsystemSender, +) -> Result> { + let res = recv_runtime( + request_from_runtime(parent, sender, |tx| { + RuntimeApiRequest::NodeFeatures(session_index, tx) + }) + .await, + ) + .await; + + if let Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. 
})) = res { + gum::trace!( + target: LOG_TARGET, + ?parent, + "Querying the node features from the runtime is not supported by the current Runtime API", + ); + + Ok(None) + } else { + res.map(Some) + } +} diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml index 9b77359517c9926511eda509c7976386540619b8..b0b396d7f62b91fc1c97e34a743264d0c11f9667 100644 --- a/polkadot/node/subsystem/Cargo.toml +++ b/polkadot/node/subsystem/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] polkadot-overseer = { path = "../overseer" } polkadot-node-subsystem-types = { path = "../subsystem-types" } diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index bc4ff74be4bb89587408ee91f54cf9d00d97abe3..36748c3b455b90315ab445c7c8b612b5a0d4ab0b 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -38,7 +41,7 @@ sp-keyring = { path = "../../../../substrate/primitives/keyring" } futures = "0.3.21" [features] -runtime-benchmarks= [ +runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-test-runtime/runtime-benchmarks", diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 437fa66b75a2218efdc7069ab5fe001db3b71c52..3199dc262bb2e57d5dd08a1b524bb02124df5e8d 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] futures = "0.3.21" hex = "0.4.3" @@ -63,8 +66,8 @@ substrate-test-utils = { path = "../../../../substrate/test-utils" } tokio = { version = "1.24.2", features = ["macros"] } [features] -runtime-metrics=[ "polkadot-test-runtime/runtime-metrics" ] -runtime-benchmarks= [ +runtime-metrics = ["polkadot-test-runtime/runtime-metrics"] +runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-staking/runtime-benchmarks", diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index be2746daf32121651d7d5c1bbf395ff3e243976a..e9423d513bf023c59887c2c8c459eef2299ee269 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -80,11 +80,12 @@ pub fn new_full( config, polkadot_service::NewFullParams { is_parachain_node, - grandpa_pause: None, enable_beefy: true, + force_authoring_backoff: false, jaeger_agent: None, telemetry_worker_handle: None, node_version: None, + secure_validator_mode: false, workers_path, workers_names: None, overseer_gen: polkadot_service::RealOverseerGen, diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml index b1b330913440bdae890c4b0a6092d0c191ff0ced..486346e1fe1c00c7d5fa37b4a2a58540b003bc62 100644 --- a/polkadot/node/tracking-allocator/Cargo.toml +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -5,3 +5,6 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true + +[lints] +workspace = true diff --git a/polkadot/node/tracking-allocator/src/lib.rs b/polkadot/node/tracking-allocator/src/lib.rs 
index ab8597b5c382d80dc90e55d7f2a2e0ef2c906ea6..33f110ce711978c83474550b0f436ca712a413ed 100644 --- a/polkadot/node/tracking-allocator/src/lib.rs +++ b/polkadot/node/tracking-allocator/src/lib.rs @@ -226,7 +226,7 @@ unsafe impl GlobalAlloc for TrackingAllocator
{ } #[inline] - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) -> () { + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { let guard = ALLOCATOR_DATA.lock(); TrackingAllocatorData::track_and_check_limits(guard, -(layout.size() as isize)); self.0.dealloc(ptr, layout) diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index c1b08b4a2bb94a0a5fce172f54f64dcdf3cd653c..e81ab2db14bbd76827e612eb8808a5fc6ddec99e 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -8,6 +8,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] tokio = { version = "1.24.2", default-features = false, features = ["macros", "net", "rt-multi-thread", "sync"] } url = "2.3.1" diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 27aa117a87f0bb932617cf159bd9e8814ed87a3d..0521af3bf2dbefef3705497df18c9e1f2175624c 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -6,11 +6,14 @@ edition.workspace = true license.workspace = true version = "1.0.0" +[lints] +workspace = true + [dependencies] # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive" ] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } @@ -21,10 +24,10 @@ derive_more = "0.99.11" bounded-collections = { version = "0.1.8", default-features = false, features = ["serde"] } # all optional crates. -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } [features] -default = [ "std" ] +default = ["std"] wasm-api = [] std = [ "bounded-collections/std", @@ -37,4 +40,4 @@ std = [ "sp-std/std", "sp-weights/std", ] -runtime-benchmarks = [ "sp-runtime/runtime-benchmarks" ] +runtime-benchmarks = ["sp-runtime/runtime-benchmarks"] diff --git a/polkadot/parachain/src/lib.rs b/polkadot/parachain/src/lib.rs index 913d887e4a8a73a4a35aea9dd5b62edc579efce3..bd75296bf837128c7b10b8a89a823cb66dc1ca8b 100644 --- a/polkadot/parachain/src/lib.rs +++ b/polkadot/parachain/src/lib.rs @@ -14,8 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -#![warn(unused_crate_dependencies)] - //! Defines primitive types for creating or validating a parachain. //! //! 
When compiled with standard library support, this crate exports a `wasm` diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index 3252d1f83cd3ab2e72b9fb8945ee14b060c037f8..6acdedf67ff2e4be34da2caa70c572a41f861eca 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -7,6 +7,9 @@ edition.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -18,5 +21,5 @@ halt = { package = "test-parachain-halt", path = "halt" } sp-core = { path = "../../../substrate/primitives/core" } [features] -default = [ "std" ] -std = [ "adder/std", "halt/std", "parity-scale-codec/std" ] +default = ["std"] +std = ["adder/std", "halt/std", "parity-scale-codec/std"] diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index 1a47328b28e9caedc93b99d8c752f7fd0a397342..eec19ef788aad510d7ea9ef6d2ab61d7c6aeb8f9 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -8,19 +8,22 @@ version = "1.0.0" authors.workspace = true publish = false +[lints] +workspace = true + [dependencies] -parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = [ "wasm-api" ] } +parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } -dlmalloc = { version = "0.2.4", features = [ "global" ] } +dlmalloc = { version = "0.2.4", features = ["global"] } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = [ "disable_allocator" ] } +sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = ["disable_allocator"] } [build-dependencies] substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] -std = [ "parachain/std", "parity-scale-codec/std", "sp-io/std", "sp-std/std" ] +default = ["std"] +std = ["parachain/std", "parity-scale-codec/std", "sp-io/std", "sp-std/std"] diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 70f2ae769a8f4cd5f59aaf2cc707053f0d3f35e2..b8f0c579b8b194cbd4fd7323c39677136f32e11e 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -7,13 +7,16 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [[bin]] name = "adder-collator" path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" diff --git 
a/polkadot/parachain/test-parachains/adder/collator/src/cli.rs b/polkadot/parachain/test-parachains/adder/collator/src/cli.rs index 14b259706835f3649d93af8b3b814f19755f8e62..f81e4cc0fff62dae630c48b932a87bbc4eca904a 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/cli.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/cli.rs @@ -24,16 +24,16 @@ use sc_cli::SubstrateCli; pub enum Subcommand { /// Export the genesis state of the parachain. #[command(name = "export-genesis-state")] - ExportGenesisState(ExportGenesisStateCommand), + ExportGenesisState(ExportGenesisHeadCommand), /// Export the genesis wasm of the parachain. #[command(name = "export-genesis-wasm")] ExportGenesisWasm(ExportGenesisWasmCommand), } -/// Command for exporting the genesis state of the parachain +/// Command for exporting the genesis head data of the parachain #[derive(Debug, Parser)] -pub struct ExportGenesisStateCommand {} +pub struct ExportGenesisHeadCommand {} /// Command for exporting the genesis wasm file. #[derive(Debug, Parser)] diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs index dfaa1973206c24b27f87cec591bd249c83754e43..6ce93ef4ad148341b4aece7668261cb5d1284751 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs @@ -62,13 +62,14 @@ fn main() -> Result<()> { is_parachain_node: polkadot_service::IsParachainNode::Collator( collator.collator_key(), ), - grandpa_pause: None, enable_beefy: false, + force_authoring_backoff: false, jaeger_agent: None, telemetry_worker_handle: None, // Collators don't spawn PVF workers, so we can disable version checks. node_version: None, + secure_validator_mode: false, workers_path: None, workers_names: None, diff --git a/polkadot/parachain/test-parachains/halt/Cargo.toml b/polkadot/parachain/test-parachains/halt/Cargo.toml index cb2918273eb03a8db6cb76fa72d7e6507f263ae5..1bdd4392ad313dbdcf62d36bd04cab7330fdf3fb 100644 --- a/polkadot/parachain/test-parachains/halt/Cargo.toml +++ b/polkadot/parachain/test-parachains/halt/Cargo.toml @@ -8,6 +8,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] [build-dependencies] @@ -15,5 +18,5 @@ substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } rustversion = "1.0.6" [features] -default = [ "std" ] +default = ["std"] std = [] diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 273eef4b63a0e8de50ff4a9e1b4ecb3762510d85..19e1261db1e7c4f17308061929d559df34943159 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -8,22 +8,25 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] -parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = [ "wasm-api" ] } +parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } -dlmalloc = { version = "0.2.4", features = [ "global" ] } +dlmalloc = 
{ version = "0.2.4", features = ["global"] } log = { version = "0.4.17", default-features = false } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = [ "disable_allocator" ] } +sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = ["disable_allocator"] } [build-dependencies] substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "log/std", "parachain/std", diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 578c3d6715dc30c1130833680465a0398f4c495c..4ef24ca83dc2d8c05e75202f2381acd9fde12ec3 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -7,13 +7,16 @@ version = "1.0.0" authors.workspace = true publish = false +[lints] +workspace = true + [[bin]] name = "undying-collator" path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" @@ -39,6 +42,3 @@ sc-service = { path = "../../../../../substrate/client/service" } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } tokio = { version = "1.24.2", features = ["macros"] } - -[features] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] diff --git a/polkadot/parachain/test-parachains/undying/collator/src/cli.rs b/polkadot/parachain/test-parachains/undying/collator/src/cli.rs index cd16133dbf197019bac15e0bca381759eabce5b6..9572887a51a2a195a01e6ceced60ee711288ead2 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/cli.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/cli.rs @@ -18,22 +18,27 @@ use clap::Parser; use sc_cli::SubstrateCli; +use std::path::PathBuf; /// Sub-commands supported by the collator. #[derive(Debug, Parser)] pub enum Subcommand { /// Export the genesis state of the parachain. #[command(name = "export-genesis-state")] - ExportGenesisState(ExportGenesisStateCommand), + ExportGenesisState(ExportGenesisHeadCommand), /// Export the genesis wasm of the parachain. #[command(name = "export-genesis-wasm")] ExportGenesisWasm(ExportGenesisWasmCommand), } -/// Command for exporting the genesis state of the parachain +/// Command for exporting the genesis head data of the parachain #[derive(Debug, Parser)] -pub struct ExportGenesisStateCommand { +pub struct ExportGenesisHeadCommand { + /// Output file name or stdout if unspecified. + #[arg()] + pub output: Option, + /// Id of the parachain this collator collates for. #[arg(long, default_value_t = 100)] pub parachain_id: u32, @@ -50,7 +55,11 @@ pub struct ExportGenesisStateCommand { /// Command for exporting the genesis wasm file. #[derive(Debug, Parser)] -pub struct ExportGenesisWasmCommand {} +pub struct ExportGenesisWasmCommand { + /// Output file name or stdout if unspecified. 
+ #[arg()] + pub output: Option, +} #[allow(missing_docs)] #[derive(Debug, Parser)] diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index e564e221f01376ff8b573ec6a1fd1f2937156f77..4a15cdd697c4c1fae5a530224884a7293ff82b6a 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -22,6 +22,10 @@ use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProt use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; use sp_core::hexdisplay::HexDisplay; +use std::{ + fs, + io::{self, Write}, +}; use test_parachain_undying_collator::Collator; mod cli; @@ -35,14 +39,30 @@ fn main() -> Result<()> { // `pov_size` and `pvf_complexity` need to match the ones that we start the collator // with. let collator = Collator::new(params.pov_size, params.pvf_complexity); - println!("0x{:?}", HexDisplay::from(&collator.genesis_head())); + + let output_buf = + format!("0x{:?}", HexDisplay::from(&collator.genesis_head())).into_bytes(); + + if let Some(output) = params.output { + std::fs::write(output, output_buf)?; + } else { + std::io::stdout().write_all(&output_buf)?; + } Ok::<_, Error>(()) }, - Some(cli::Subcommand::ExportGenesisWasm(_params)) => { + Some(cli::Subcommand::ExportGenesisWasm(params)) => { // We pass some dummy values for `pov_size` and `pvf_complexity` as these don't // matter for `wasm` export. - println!("0x{:?}", HexDisplay::from(&Collator::default().validation_code())); + let output_buf = + format!("0x{:?}", HexDisplay::from(&Collator::default().validation_code())) + .into_bytes(); + + if let Some(output) = params.output { + fs::write(output, output_buf)?; + } else { + io::stdout().write_all(&output_buf)?; + } Ok(()) }, @@ -62,13 +82,14 @@ fn main() -> Result<()> { is_parachain_node: polkadot_service::IsParachainNode::Collator( collator.collator_key(), ), - grandpa_pause: None, enable_beefy: false, + force_authoring_backoff: false, jaeger_agent: None, telemetry_worker_handle: None, // Collators don't spawn PVF workers, so we can disable version checks. 
node_version: None, + secure_validator_mode: false, workers_path: None, workers_names: None, diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index b318c2d4be7ed5df3f1885dda9bc5b94138415c8..de6df85051a0d08ba6b9abe7b3e4bc1bde6896df 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -6,12 +6,15 @@ edition.workspace = true license.workspace = true description = "Shared primitives used by Polkadot runtime" +[lints] +workspace = true + [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } inherents = { package = "sp-inherents", path = "../../substrate/primitives/inherents", default-features = false } @@ -30,7 +33,7 @@ polkadot-core-primitives = { path = "../core-primitives", default-features = fal polkadot-parachain-primitives = { path = "../parachain", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "application-crypto/std", "bitvec/std", diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 4ba8b8b031fcc330bb66d1ec4bd329809958db07..2570bcadf606ab8ef0809a1f258a6068263f1db1 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -48,11 +48,11 @@ pub use v6::{ HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, - PersistedValidationData, PvfCheckStatement, PvfExecTimeoutKind, PvfPrepTimeoutKind, - RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, - RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, - SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, + PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, + RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, + RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature, + Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement, + SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 5ec897c8cbb40a7b3fe6a36ead8c72d3479b13e2..d661005e32ffc9ce146e96ad984fb3f60025ca4c 100644 --- 
a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,13 +114,14 @@ //! separated from the stable primitives. use crate::{ - async_backing, slashing, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, + vstaging::{self, ApprovalVotingParams}, + AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, + SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; -use parity_scale_codec::{Decode, Encode}; + use polkadot_core_primitives as pcp; use polkadot_parachain_primitives::primitives as ppp; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -128,18 +129,18 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. #[api_version(5)] - pub trait ParachainHost { + pub trait ParachainHost { /// Get the current validators. fn validators() -> Vec; /// Returns the validator groups and rotation info localized based on the hypothetical child /// of a block whose state this is invoked on. Note that `now` in the `GroupRotationInfo` /// should be the successor of the number of the block. - fn validator_groups() -> (Vec>, GroupRotationInfo); + fn validator_groups() -> (Vec>, GroupRotationInfo); /// Yields information on all availability cores as relevant to the child block. /// Cores are either free or occupied. Free cores can have paras assigned to them. - fn availability_cores() -> Vec>; + fn availability_cores() -> Vec>; /// Yields the persisted validation data for the given `ParaId` along with an assumption that /// should be used if the para currently occupies a core. @@ -147,15 +148,15 @@ sp_api::decl_runtime_apis! { /// Returns `None` if either the para is not registered or the assumption is `Freed` /// and the para already occupies a core. fn persisted_validation_data(para_id: ppp::Id, assumption: OccupiedCoreAssumption) - -> Option>; + -> Option>; /// Returns the persisted validation data for the given `ParaId` along with the corresponding /// validation code hash. Instead of accepting assumption about the para, matches the validation /// data hash against an expected one and yields `None` if they're not equal. fn assumed_validation_data( para_id: ppp::Id, - expected_persisted_validation_data_hash: pcp::v2::Hash, - ) -> Option<(PersistedValidationData, ppp::ValidationCodeHash)>; + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ppp::ValidationCodeHash)>; /// Checks if the given validation outputs pass the acceptance criteria. fn check_validation_outputs(para_id: ppp::Id, outputs: CandidateCommitments) -> bool; @@ -169,30 +170,34 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if either the para is not registered or the assumption is `Freed` /// and the para already occupies a core. 
- fn validation_code(para_id: ppp::Id, assumption: OccupiedCoreAssumption) - -> Option; + fn validation_code( + para_id: ppp::Id, + assumption: OccupiedCoreAssumption, + ) -> Option; /// Get the receipt of a candidate pending availability. This returns `Some` for any paras /// assigned to occupied cores in `availability_cores` and `None` otherwise. - fn candidate_pending_availability(para_id: ppp::Id) -> Option>; + fn candidate_pending_availability(para_id: ppp::Id) -> Option>; /// Get a vector of events concerning candidates that occurred within a block. - fn candidate_events() -> Vec>; + fn candidate_events() -> Vec>; /// Get all the pending inbound messages in the downward message queue for a para. fn dmq_contents( recipient: ppp::Id, - ) -> Vec>; + ) -> Vec>; /// Get the contents of all channels addressed to the given recipient. Channels that have no /// messages in them are also included. - fn inbound_hrmp_channels_contents(recipient: ppp::Id) -> BTreeMap>>; + fn inbound_hrmp_channels_contents( + recipient: ppp::Id, + ) -> BTreeMap>>; /// Get the validation code from its hash. fn validation_code_by_hash(hash: ppp::ValidationCodeHash) -> Option; /// Scrape dispute relevant from on-chain, backing votes and resolved disputes. - fn on_chain_votes() -> Option>; + fn on_chain_votes() -> Option>; /***** Added in v2 *****/ @@ -253,7 +258,7 @@ sp_api::decl_runtime_apis! { /// Returns the state of parachain backing for a given para. #[api_version(7)] - fn para_backing_state(_: ppp::Id) -> Option>; + fn para_backing_state(_: ppp::Id) -> Option>; /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. #[api_version(7)] @@ -264,5 +269,17 @@ sp_api::decl_runtime_apis! { /// Returns a list of all disabled validators at the given block. #[api_version(8)] fn disabled_validators() -> Vec; + + /***** Added in v9 *****/ + + /// Get node features. + /// This is a staging method! Do not use on production runtimes! + #[api_version(9)] + fn node_features() -> vstaging::NodeFeatures; + + /***** Added in v10 *****/ + /// Approval voting configuration parameters + #[api_version(10)] + fn approval_voting_params() -> ApprovalVotingParams; } } diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs index bb9980f68796297848ad216cf12de4cf0084b8e9..112a529f62b0570e9270d1f09e397e6b9dc358b0 100644 --- a/polkadot/primitives/src/v6/executor_params.rs +++ b/polkadot/primitives/src/v6/executor_params.rs @@ -21,7 +21,7 @@ //! by the first element of the vector). Decoding to a usable semantics structure is //! done in `polkadot-node-core-pvf`. -use crate::{BlakeTwo256, HashT as _, PvfExecTimeoutKind, PvfPrepTimeoutKind}; +use crate::{BlakeTwo256, HashT as _, PvfExecKind, PvfPrepKind}; use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives::Hash; use scale_info::TypeInfo; @@ -45,7 +45,7 @@ pub const PRECHECK_MEM_MAX_LO: u64 = 256 * 1024 * 1024; pub const PRECHECK_MEM_MAX_HI: u64 = 16 * 1024 * 1024 * 1024; // Default PVF timeouts. Must never be changed! Use executor environment parameters to adjust them. -// See also `PvfPrepTimeoutKind` and `PvfExecTimeoutKind` docs. +// See also `PvfPrepKind` and `PvfExecKind` docs. /// Default PVF preparation timeout for prechecking requests. pub const DEFAULT_PRECHECK_PREPARATION_TIMEOUT: Duration = Duration::from_secs(60); @@ -99,12 +99,12 @@ pub enum ExecutorParam { /// Always ensure that `precheck_timeout` < `lenient_timeout`. /// When absent, the default values will be used. 
#[codec(index = 5)] - PvfPrepTimeout(PvfPrepTimeoutKind, u64), + PvfPrepTimeout(PvfPrepKind, u64), /// PVF execution timeouts, in millisecond. /// Always ensure that `backing_timeout` < `approval_timeout`. /// When absent, the default values will be used. #[codec(index = 6)] - PvfExecTimeout(PvfExecTimeoutKind, u64), + PvfExecTimeout(PvfExecKind, u64), /// Enables WASM bulk memory proposal #[codec(index = 7)] WasmExtBulkMemory, @@ -174,7 +174,7 @@ impl ExecutorParams { } /// Returns a PVF preparation timeout, if any - pub fn pvf_prep_timeout(&self, kind: PvfPrepTimeoutKind) -> Option { + pub fn pvf_prep_timeout(&self, kind: PvfPrepKind) -> Option { for param in &self.0 { if let ExecutorParam::PvfPrepTimeout(k, timeout) = param { if kind == *k { @@ -186,7 +186,7 @@ impl ExecutorParams { } /// Returns a PVF execution timeout, if any - pub fn pvf_exec_timeout(&self, kind: PvfExecTimeoutKind) -> Option { + pub fn pvf_exec_timeout(&self, kind: PvfExecKind) -> Option { for param in &self.0 { if let ExecutorParam::PvfExecTimeout(k, timeout) = param { if kind == *k { @@ -242,12 +242,12 @@ impl ExecutorParams { StackNativeMax(_) => "StackNativeMax", PrecheckingMaxMemory(_) => "PrecheckingMaxMemory", PvfPrepTimeout(kind, _) => match kind { - PvfPrepTimeoutKind::Precheck => "PvfPrepTimeoutKind::Precheck", - PvfPrepTimeoutKind::Lenient => "PvfPrepTimeoutKind::Lenient", + PvfPrepKind::Precheck => "PvfPrepKind::Precheck", + PvfPrepKind::Prepare => "PvfPrepKind::Prepare", }, PvfExecTimeout(kind, _) => match kind { - PvfExecTimeoutKind::Backing => "PvfExecTimeoutKind::Backing", - PvfExecTimeoutKind::Approval => "PvfExecTimeoutKind::Approval", + PvfExecKind::Backing => "PvfExecKind::Backing", + PvfExecKind::Approval => "PvfExecKind::Approval", }, WasmExtBulkMemory => "WasmExtBulkMemory", }; @@ -297,30 +297,23 @@ impl ExecutorParams { } if let (Some(precheck), Some(lenient)) = ( - seen.get("PvfPrepTimeoutKind::Precheck") + seen.get("PvfPrepKind::Precheck") .or(Some(&DEFAULT_PRECHECK_PREPARATION_TIMEOUT_MS)), - seen.get("PvfPrepTimeoutKind::Lenient") + seen.get("PvfPrepKind::Prepare") .or(Some(&DEFAULT_LENIENT_PREPARATION_TIMEOUT_MS)), ) { if *precheck >= *lenient { - return Err(IncompatibleValues( - "PvfPrepTimeoutKind::Precheck", - "PvfPrepTimeoutKind::Lenient", - )) + return Err(IncompatibleValues("PvfPrepKind::Precheck", "PvfPrepKind::Prepare")) } } if let (Some(backing), Some(approval)) = ( - seen.get("PvfExecTimeoutKind::Backing") - .or(Some(&DEFAULT_BACKING_EXECUTION_TIMEOUT_MS)), - seen.get("PvfExecTimeoutKind::Approval") + seen.get("PvfExecKind::Backing").or(Some(&DEFAULT_BACKING_EXECUTION_TIMEOUT_MS)), + seen.get("PvfExecKind::Approval") .or(Some(&DEFAULT_APPROVAL_EXECUTION_TIMEOUT_MS)), ) { if *backing >= *approval { - return Err(IncompatibleValues( - "PvfExecTimeoutKind::Backing", - "PvfExecTimeoutKind::Approval", - )) + return Err(IncompatibleValues("PvfExecKind::Backing", "PvfExecKind::Approval")) } } diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 9371b3db406b3e6247f4ac9c12dc5ba053a77f01..c3a947644fff60a2958f5e9b618e584a18688dcc 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -1070,6 +1070,26 @@ impl ApprovalVote { } } +/// A vote of approval for multiple candidates. +#[derive(Clone, RuntimeDebug)] +pub struct ApprovalVoteMultipleCandidates<'a>(pub &'a [CandidateHash]); + +impl<'a> ApprovalVoteMultipleCandidates<'a> { + /// Yields the signing payload for this approval vote. 
+ pub fn signing_payload(&self, session_index: SessionIndex) -> Vec { + const MAGIC: [u8; 4] = *b"APPR"; + // Make this backwards compatible with `ApprovalVote` so if we have just one candidate the + signature will look the same. + // This gives us the nice benefit that old nodes can still check signatures when len is 1 + and new nodes can check the signature coming from old nodes. + if self.0.len() == 1 { + (MAGIC, self.0.first().expect("QED: we just checked"), session_index).encode() + } else { + (MAGIC, &self.0, session_index).encode() + } + } +} + /// Custom validity errors used in Polkadot while validating transactions. #[repr(u8)] pub enum ValidityError { @@ -1246,25 +1266,42 @@ pub enum DisputeStatement { impl DisputeStatement { /// Get the payload data for this type of dispute statement. - pub fn payload_data(&self, candidate_hash: CandidateHash, session: SessionIndex) -> Vec { - match *self { + /// + /// Returns an error if the candidate_hash is not included in the list of signed + /// candidates from ApprovalCheckingMultipleCandidates. + pub fn payload_data( + &self, + candidate_hash: CandidateHash, + session: SessionIndex, + ) -> Result, ()> { + match self { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) => - ExplicitDisputeStatement { valid: true, candidate_hash, session }.signing_payload(), + Ok(ExplicitDisputeStatement { valid: true, candidate_hash, session } + .signing_payload()), DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded( inclusion_parent, - )) => CompactStatement::Seconded(candidate_hash).signing_payload(&SigningContext { + )) => Ok(CompactStatement::Seconded(candidate_hash).signing_payload(&SigningContext { session_index: session, - parent_hash: inclusion_parent, - }), + parent_hash: *inclusion_parent, + })), DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid(inclusion_parent)) => - CompactStatement::Valid(candidate_hash).signing_payload(&SigningContext { + Ok(CompactStatement::Valid(candidate_hash).signing_payload(&SigningContext { session_index: session, - parent_hash: inclusion_parent, - }), + parent_hash: *inclusion_parent, + })), DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) => - ApprovalVote(candidate_hash).signing_payload(session), + Ok(ApprovalVote(candidate_hash).signing_payload(session)), + DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes), + ) => + if candidate_hashes.contains(&candidate_hash) { + Ok(ApprovalVoteMultipleCandidates(candidate_hashes).signing_payload(session)) + } else { + Err(()) + }, DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit) => - ExplicitDisputeStatement { valid: false, candidate_hash, session }.signing_payload(), + Ok(ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload()), } } @@ -1276,7 +1313,7 @@ impl DisputeStatement { session: SessionIndex, validator_signature: &ValidatorSignature, ) -> Result<(), ()> { - let payload = self.payload_data(candidate_hash, session); + let payload = self.payload_data(candidate_hash, session)?; if validator_signature.verify(&payload[..], &validator_public) { Ok(()) @@ -1308,13 +1345,14 @@ impl DisputeStatement { Self::Valid(ValidDisputeStatementKind::BackingValid(_)) => true, Self::Valid(ValidDisputeStatementKind::Explicit) | Self::Valid(ValidDisputeStatementKind::ApprovalChecking) | + Self::Valid(ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(_)) | Self::Invalid(_) => false, } } } /// Different kinds of
statements of validity on a candidate. -#[derive(Encode, Decode, Copy, Clone, PartialEq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)] pub enum ValidDisputeStatementKind { /// An explicit statement issued as part of a dispute. #[codec(index = 0)] @@ -1328,6 +1366,12 @@ pub enum ValidDisputeStatementKind { /// An approval vote from the approval checking phase. #[codec(index = 3)] ApprovalChecking, + /// An approval vote from the new version. + /// We can't create this version until all nodes + /// have been updated to support it and max_approval_coalesce_count + /// is set to more than 1. + #[codec(index = 4)] + ApprovalCheckingMultipleCandidates(Vec), } /// Different kinds of statements of invalidity on a candidate. @@ -1781,30 +1825,22 @@ impl WellKnownKey { } } -/// Type discriminator for PVF preparation timeouts +/// Type discriminator for PVF preparation. #[derive(Encode, Decode, TypeInfo, Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum PvfPrepTimeoutKind { - /// For prechecking requests, the time period after which the preparation worker is considered - /// unresponsive and will be killed. +pub enum PvfPrepKind { + /// For prechecking requests. Precheck, - /// For execution and heads-up requests, the time period after which the preparation worker is - /// considered unresponsive and will be killed. More lenient than the timeout for prechecking - /// to prevent honest validators from timing out on valid PVFs. - Lenient, + /// For execution and heads-up requests. + Prepare, } -/// Type discriminator for PVF execution timeouts +/// Type discriminator for PVF execution. #[derive(Encode, Decode, TypeInfo, Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum PvfExecTimeoutKind { - /// The amount of time to spend on execution during backing. +pub enum PvfExecKind { + /// For backing requests. Backing, - - /// The amount of time to spend on execution during approval or disputes. - /// - /// This should be much longer than the backing execution timeout to ensure that in the - /// absence of extremely large disparities between hardware, blocks that pass backing are - /// considered executable by approval checkers or dispute participants. + /// For approval and dispute requests. Approval, } diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 1429b0c326aceef4b9088bd4ddef6828f8dcfbd8..630bcf8679ad3046ce734042a1557058ea440110 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,3 +17,56 @@ //! Staging Primitives. // Put any primitives used by staging APIs functions here +pub use crate::v6::*; +use sp_std::prelude::*; + +use parity_scale_codec::{Decode, Encode}; +use primitives::RuntimeDebug; +use scale_info::TypeInfo; + +/// Approval voting configuration parameters +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] +pub struct ApprovalVotingParams { + /// The maximum number of candidates `approval-voting` can vote for with + /// a single signature. + /// + /// Setting it to 1 means we send the approval as soon as we have it available. + pub max_approval_coalesce_count: u32, +} + +impl Default for ApprovalVotingParams { + fn default() -> Self { + Self { max_approval_coalesce_count: 1 } + } +} + +use bitvec::vec::BitVec; + +/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features.
+pub type NodeFeatures = BitVec; + +/// Module containing feature-specific bit indices into the `NodeFeatures` bitvec. +pub mod node_features { + /// A feature index used to identify a bit in the node_features array stored + /// in the HostConfiguration. + #[repr(u8)] + pub enum FeatureIndex { + /// Tells if tranche0 assignments could be sent in a single certificate. + /// Reserved for: `` + EnableAssignmentsV2 = 0, + /// First unassigned feature bit. + /// Every time a new feature flag is assigned it should take this value, + /// and this should be incremented. + FirstUnassigned = 1, + } +} diff --git a/polkadot/primitives/test-helpers/Cargo.toml b/polkadot/primitives/test-helpers/Cargo.toml index 8215b842ba47aba53162427dd6807762cd8b2ca6..fab9480cfdeb9876c2556ae78a690775bf16d7a8 100644 --- a/polkadot/primitives/test-helpers/Cargo.toml +++ b/polkadot/primitives/test-helpers/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] sp-keyring = { path = "../../../substrate/primitives/keyring" } sp-application-crypto = { package = "sp-application-crypto", path = "../../../substrate/primitives/application-crypto", default-features = false } diff --git a/polkadot/roadmap/implementers-guide/README.md b/polkadot/roadmap/implementers-guide/README.md index e03c0c45ddba009821861cc5ba39a77c7c03dd59..abff017138c06120a9a1e7a85a393eb156ad2181 100644 --- a/polkadot/roadmap/implementers-guide/README.md +++ b/polkadot/roadmap/implementers-guide/README.md @@ -8,7 +8,7 @@ This is available [here](https://paritytech.github.io/polkadot-sdk/book/). ## Local build -To view it locally from the repo root: +To view it locally, run the following (from the `polkadot/` directory): Ensure graphviz is installed: diff --git a/polkadot/roadmap/implementers-guide/book.toml b/polkadot/roadmap/implementers-guide/book.toml index 1e6680f6f4b7e3b356af787fe0412fbab177e902..f91591ff1708c6b7e1eb54721a3c50345bc4d1f5 100644 --- a/polkadot/roadmap/implementers-guide/book.toml +++ b/polkadot/roadmap/implementers-guide/book.toml @@ -15,8 +15,8 @@ renderer = ["html"] [output.html] additional-css = ["last-changed.css"] -additional-js = ["mermaid.min.js", "mermaid-init.js"] +additional-js = ["mermaid-init.js", "mermaid.min.js"] # Repository URL used in the last-changed link. -git-repository-url = "https://github.com/paritytech/polkadot" +git-repository-url = "https://github.com/paritytech/polkadot-sdk" [output.linkcheck] diff --git a/polkadot/roadmap/implementers-guide/src/glossary.md b/polkadot/roadmap/implementers-guide/src/glossary.md index b2365ba51c5ce80fd0d60b53c592b1037f540a52..ac2392b14d2ae1a75a6690567f32407ebc3a1637 100644 --- a/polkadot/roadmap/implementers-guide/src/glossary.md +++ b/polkadot/roadmap/implementers-guide/src/glossary.md @@ -48,10 +48,13 @@ has exactly one downward message queue. - **Proof-of-Validity (PoV):** A stateless-client proof that a parachain candidate is valid, with respect to some validation function. - **PVF:** Parachain Validation Function. The validation code that is run by validators on parachains. -- **PVF Prechecking:** This is the process of initially checking the PVF when it is first added. We attempt preparation - of the PVF and make sure it succeeds within a given timeout, plus some additional checks. +- **PVF Prechecking:** This is the process of checking a PVF when it appears + on-chain, either when the parachain is onboarded or when it signalled an + upgrade of its validation code.
We attempt preparation of the PVF and make + sure that it succeeds within a given timeout, plus some additional checks. - **PVF Preparation:** This is the process of preparing the WASM blob and includes both prevalidation and compilation. - As there is no prevalidation right now, preparation just consists of compilation. +- **PVF Prevalidation:** Some basic checks for correctness of the PVF blob. The + first step of PVF preparation, before compilation. - **Relay Parent:** A block in the relay chain, referred to in a context where work is being done in the context of the state at this block. - **Runtime:** The relay-chain state machine. diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md index 1a17f90d9ba37e1eb8722a28cee22669d08d948e..345b3d2e6970403f3096272cc51f903e0566a22e 100644 --- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -4,10 +4,13 @@ Reading the [section on the approval protocol](../../protocol-approval.md) will aims of this subsystem. Approval votes are split into two parts: Assignments and Approvals. Validators first broadcast their assignment to -indicate intent to check a candidate. Upon successfully checking, they broadcast an approval vote. If a validator -doesn't broadcast their approval vote shortly after issuing an assignment, this is an indication that they are being -prevented from recovering or validating the block data and that more validators should self-select to check the -candidate. This is known as a "no-show". +indicate intent to check a candidate. Upon successfully checking, they don't immediately send the vote; instead, +they queue the check for a short period of time (`MAX_APPROVAL_COALESCE_WAIT_TICKS`) to give the +validator the opportunity to vote for more than one candidate. Once `MAX_APPROVAL_COALESCE_WAIT_TICKS` have passed or at least +`MAX_APPROVAL_COALESCE_COUNT` candidates are ready, they broadcast an approval vote for all candidates. If a validator +doesn't broadcast their approval vote shortly after issuing an assignment, this is an indication that they are +being prevented from recovering or validating the block data and that more validators should self-select to +check the candidate. This is known as a "no-show". The core of this subsystem is a Tick-based timer loop, where Ticks are 500ms. We also reason about time in terms of `DelayTranche`s, which measure the number of ticks elapsed since a block was produced. We track metadata for all @@ -120,6 +123,13 @@ struct BlockEntry { // this block. The block can be considered approved has all bits set to 1 approved_bitfield: Bitfield, children: Vec, + // A list of candidates we have checked, but have not yet signed and + // advertised the vote for. + candidates_pending_signature: BTreeMap, + // Assignments we already distributed. A 1 bit means the candidate index for which + // we already have sent out an assignment. We need this to avoid distributing + // multiple core assignments more than once. + distributed_assignments: Bitfield, } // slot_duration * 2 + DelayTranche gives the number of delay tranches since the @@ -303,12 +313,12 @@ entry. The cert itself contains information necessary to determine the candidate On receiving a `CheckAndImportApproval(indirect_approval_vote, response_channel)` message: * Fetch the `BlockEntry` from the indirect approval vote's `block_hash`.
If none, return `ApprovalCheckResult::Bad`. - * Fetch the `CandidateEntry` from the indirect approval vote's `candidate_index`. If the block did not trigger + * Fetch all `CandidateEntry` from the indirect approval vote's `candidate_indices`. If the block did not trigger inclusion of enough candidates, return `ApprovalCheckResult::Bad`. - * Construct a `SignedApprovalVote` using the candidate hash and check against the validator's approval key, based on - the session info of the block. If invalid or no such validator, return `ApprovalCheckResult::Bad`. + * Construct a `SignedApprovalVote` using the candidate hashes and check against the validator's approval key, + based on the session info of the block. If invalid or no such validator, return `ApprovalCheckResult::Bad`. * Send `ApprovalCheckResult::Accepted` - * [Import the checked approval vote](#import-checked-approval) + * [Import the checked approval vote](#import-checked-approval) for all candidates #### `ApprovalVotingMessage::ApprovedAncestor` @@ -402,10 +412,25 @@ On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`: #### Issue Approval Vote * Fetch the block entry and candidate entry. Ignore if `None` - we've probably just lost a race with finality. - * Construct a `SignedApprovalVote` with the validator index for the session. * [Import the checked approval vote](#import-checked-approval). It is "checked" as we've just issued the signature. - * Construct a `IndirectSignedApprovalVote` using the information about the vote. - * Dispatch `ApprovalDistributionMessage::DistributeApproval`. + * IF `MAX_APPROVAL_COALESCE_COUNT` candidates are in the waiting queue + * Construct a `SignedApprovalVote` with the validator index for the session and all candidate hashes in the waiting queue. + * Construct an `IndirectSignedApprovalVote` using the information about the vote. + * Dispatch `ApprovalDistributionMessage::DistributeApproval`. + * ELSE + * Queue the candidate in `BlockEntry::candidates_pending_signature`. + * Arm a per-`BlockEntry` timer with the latest tick at which we can send the vote. + +### Delayed vote distribution + * [Issue Approval Vote](#issue-approval-vote) arms a per-block timer once if there is no requirement to send the + vote immediately. + * When the timer wakes up it will either: + * IF there is a candidate in the queue past its sending tick: + * Construct a `SignedApprovalVote` with the validator index for the session and all candidate hashes in the waiting queue. + * Construct an `IndirectSignedApprovalVote` using the information about the vote. + * Dispatch `ApprovalDistributionMessage::DistributeApproval`. + * ELSE + * Re-arm the timer with the latest tick at which we have to send the vote. ### Determining Approval of Candidate diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md b/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md index e252ec237b7971116f505a8dbd4ae1f708bee577..1a3ff1c6aff061ad845dcaaba4258129eb1673bb 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md @@ -5,6 +5,31 @@ This subsystem is responsible for handling candidate validation requests. It is A variety of subsystems want to know if a parachain block candidate is valid. None of them care about the detailed mechanics of how a candidate gets validated, just the results. This subsystem handles those details.
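The queueing rules in the "Issue Approval Vote" and "Delayed vote distribution" sections above can be made concrete with a small editorial sketch. This is not the subsystem's actual code: the types are simplified stand-ins, the two constants only mirror the names used in the guide (their values here are arbitrary), and the "last third of the no-show period" condition from the approval protocol is omitted for brevity.

```rust
// Minimal sketch of the approval-coalescing decision described above.
// All types and constant values here are hypothetical stand-ins.
type Tick = u64;
type CandidateHash = [u8; 32];

const MAX_APPROVAL_COALESCE_COUNT: usize = 10;
const MAX_APPROVAL_COALESCE_WAIT_TICKS: Tick = 12;

#[derive(Default)]
struct PendingSignatures {
    // Candidates we have checked but not yet signed, with the tick at which each was queued.
    queued: Vec<(CandidateHash, Tick)>,
}

enum Action {
    // Sign and distribute one vote covering all of these candidates.
    SignAndDistribute(Vec<CandidateHash>),
    // Nothing to send yet; wake up again at this tick.
    WakeAt(Tick),
}

impl PendingSignatures {
    fn note_checked(&mut self, candidate: CandidateHash, now: Tick) -> Action {
        self.queued.push((candidate, now));
        self.decide(now)
    }

    fn decide(&mut self, now: Tick) -> Action {
        let oldest = match self.queued.iter().map(|(_, t)| *t).min() {
            Some(t) => t,
            None => return Action::WakeAt(now + MAX_APPROVAL_COALESCE_WAIT_TICKS),
        };
        let deadline = oldest + MAX_APPROVAL_COALESCE_WAIT_TICKS;
        if self.queued.len() >= MAX_APPROVAL_COALESCE_COUNT || now >= deadline {
            // Enough candidates gathered, or the oldest one has waited long enough:
            // sign a single vote for everything in the queue.
            Action::SignAndDistribute(self.queued.drain(..).map(|(c, _)| c).collect())
        } else {
            // Keep waiting, but no longer than the oldest candidate's deadline.
            Action::WakeAt(deadline)
        }
    }
}

fn main() {
    let mut pending = PendingSignatures::default();
    // First checked candidate at tick 100: below the count threshold, so we wait.
    match pending.note_checked([1u8; 32], 100) {
        Action::WakeAt(tick) => assert_eq!(tick, 100 + MAX_APPROVAL_COALESCE_WAIT_TICKS),
        Action::SignAndDistribute(_) => unreachable!("queue is still below the count threshold"),
    }
    // Once the oldest candidate's deadline passes, the queued votes are flushed together.
    match pending.decide(100 + MAX_APPROVAL_COALESCE_WAIT_TICKS) {
        Action::SignAndDistribute(candidates) => assert_eq!(candidates.len(), 1),
        Action::WakeAt(_) => unreachable!("deadline has passed"),
    }
}
```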
+## High-Level Flow + +```dot process +digraph { + rankdir="LR"; + + pre [label = "Pvf-Checker"; shape = square] + bac [label = "Backing"; shape = square] + app [label = "Approval\nVoting"; shape = square] + dis [label = "Dispute\nCoordinator"; shape = square] + + can [label = "Candidate\nValidation"; shape = square] + + pvf [label = "PVF Host"; shape = square] + + pre -> can [style = dashed] + bac -> can + app -> can + dis -> can + + can -> pvf [label = "Precheck"; style = dashed] + can -> pvf [label = "Validate"] +} +``` + ## Protocol Input: [`CandidateValidationMessage`](../../types/overseer-protocol.md#validation-request-type) diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index 52129f9eb80aff9974717d2f6a8d49060b4dacc4..e0984bd58d1dd8ea22b145a15b4a3bab8779de80 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -1,9 +1,83 @@ # PVF Host and Workers The PVF host is responsible for handling requests to prepare and execute PVF -code blobs, which it sends to PVF workers running in their own child processes. +code blobs, which it sends to PVF **workers** running in their own child +processes. These workers are spawned from the `polkadot-prepare-worker` and +`polkadot-execute-worker` binaries. -This system has two high-levels goals that we will touch on here: *determinism* +While the workers are generally long-living, they also spawn one-off secure +**job processes** that perform the jobs. See "Job Processes" section below. + +## High-Level Flow + +```dot process +digraph { + rankdir="LR"; + + can [label = "Candidate\nValidation\nSubsystem"; shape = square] + + pvf [label = "PVF Host"; shape = square] + + pq [label = "Prepare\nQueue"; shape = square] + eq [label = "Execute\nQueue"; shape = square] + pp [label = "Prepare\nPool"; shape = square] + + subgraph "cluster partial_sandbox_prep" { + label = "polkadot-prepare-worker\n(Partial Sandbox)\n\n\n"; + labelloc = "t"; + + pw [label = "Prepare\nWorker"; shape = square] + + subgraph "cluster full_sandbox_prep" { + label = "Fully Isolated Sandbox\n\n\n"; + labelloc = "t"; + + pj [label = "Prepare\nJob"; shape = square] + } + } + + subgraph "cluster partial_sandbox_exec" { + label = "polkadot-execute-worker\n(Partial Sandbox)\n\n\n"; + labelloc = "t"; + + ew [label = "Execute\nWorker"; shape = square] + + subgraph "cluster full_sandbox_exec" { + label = "Fully Isolated Sandbox\n\n\n"; + labelloc = "t"; + + ej [label = "Execute\nJob"; shape = square] + } + } + + can -> pvf [label = "Precheck"; style = dashed] + can -> pvf [label = "Validate"] + + pvf -> pq [label = "Prepare"; style = dashed] + pvf -> eq [label = "Execute";] + pvf -> pvf [label = "see (2) and (3)"; style = dashed] + pq -> pp [style = dashed] + + pp -> pw [style = dashed] + eq -> ew + + pw -> pj [style = dashed] + ew -> ej +} +``` + +Some notes about the graph: + +1. Once a job has finished, the response will flow back up the way it came. +2. In the case of execution, the host will send a request for preparation to the + Prepare Queue if needed. In that case, only after the preparation succeeds + does the Execute Queue continue with validation. +3. Multiple requests for preparing the same artifact are coalesced, so that the + work is only done once. 
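Note (3) above, the coalescing of duplicate preparation requests, is easy to picture with a small sketch: pending waiters are keyed by the artifact they need, only the first request actually spawns work, and every waiter receives the result when that single job finishes. The types and channel layout below are hypothetical stand-ins, not the host's real queue implementation (which, roughly, identifies an artifact by the PVF code hash plus the executor parameters it is prepared with).

```rust
use std::collections::HashMap;
use std::sync::mpsc::{channel, Sender};

// Hypothetical identifiers and result type for illustration only.
type ArtifactId = [u8; 32];
type PrepareResult = Result<(), String>;

#[derive(Default)]
struct PrepareQueue {
    // All requests waiting on the same artifact share one in-flight preparation.
    pending: HashMap<ArtifactId, Vec<Sender<PrepareResult>>>,
}

impl PrepareQueue {
    /// Returns `true` if a new preparation job actually needs to be spawned.
    fn enqueue(&mut self, artifact: ArtifactId, waiter: Sender<PrepareResult>) -> bool {
        let waiters = self.pending.entry(artifact).or_default();
        waiters.push(waiter);
        // Only the first request for this artifact spawns work; the rest coalesce.
        waiters.len() == 1
    }

    /// Called when a preparation job finishes: every coalesced waiter gets the result.
    fn on_finished(&mut self, artifact: ArtifactId, result: PrepareResult) {
        for waiter in self.pending.remove(&artifact).unwrap_or_default() {
            let _ = waiter.send(result.clone());
        }
    }
}

fn main() {
    let mut queue = PrepareQueue::default();
    let (tx1, rx1) = channel();
    let (tx2, rx2) = channel();
    let artifact = [0u8; 32];

    assert!(queue.enqueue(artifact, tx1)); // first request spawns the job
    assert!(!queue.enqueue(artifact, tx2)); // second request is coalesced
    queue.on_finished(artifact, Ok(()));
    assert_eq!(rx1.recv().unwrap(), Ok(()));
    assert_eq!(rx2.recv().unwrap(), Ok(()));
}
```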
+ +## Goals + +This system has two high-level goals that we will touch on here: *determinism* and *security*. ## Determinism @@ -27,17 +101,30 @@ hopefully resolve. We use a more brief delay here (1 second as opposed to 15 minutes for preparation (see above)), because a successful execution must happen in a short amount of time. +If the execution fails during the backing phase, we won't retry to reduce the chance of +supporting nondeterministic candidates. This reduces the chance of nondeterministic blocks +getting backed and honest backers getting slashed. + We currently know of the following specific cases that will lead to a retried execution request: -1. **OOM:** The host might have been temporarily low on memory due to other - processes running on the same machine. **NOTE:** This case will lead to - voting against the candidate (and possibly a dispute) if the retry is still - not successful. -2. **Artifact missing:** The prepared artifact might have been deleted due to +1. **OOM:** We have memory limits to try to prevent attackers from exhausting + host memory. If the memory limit is hit, we kill the job process and retry + the job. Alternatively, the host might have been temporarily low on memory + due to other processes running on the same machine. **NOTE:** This case will + lead to voting against the candidate (and possibly a dispute) if the retry is + still not successful. +2. **Syscall violations:** If the job attempts a system call that is blocked by + the sandbox's security policy, the job process is immediately killed and we + retry. **NOTE:** In the future, if we have a proper way to detect that the + job died due to a security violation, it might make sense not to retry in + this case. +3. **Artifact missing:** The prepared artifact might have been deleted due to operator error or some bug in the system. -3. **Panic:** The worker thread panicked for some indeterminate reason, which - may or may not be independent of the candidate or PVF. +4. **Job errors:** For example, the job process panicked for some indeterminate + reason, which may or may not be independent of the candidate or PVF. +5. **Internal errors:** See "Internal Errors" section. In this case, after the + retry we abstain from voting. ### Preparation timeouts @@ -62,10 +149,16 @@ more than the CPU time. ### Internal errors +An internal, or local, error is one that we treat as independent of the PVF +and/or candidate, i.e. local to the running machine. If this happens, then we +will first retry the job and if the errors persists, then we simply do not vote. +This prevents slashes, since otherwise our vote may not agree with that of the +other validators. + In general, for errors not raising a dispute we have to be very careful. This is -only sound, if we either: +only sound, if either: -1. Ruled out that error in pre-checking. If something is not checked in +1. We ruled out that error in pre-checking. If something is not checked in pre-checking, even if independent of the candidate and PVF, we must raise a dispute. 2. We are 100% confident that it is a hardware/local issue: Like corrupted file, @@ -75,11 +168,11 @@ Reasoning: Otherwise it would be possible to register a PVF where candidates can not be checked, but we don't get a dispute - so nobody gets punished. Second, we end up with a finality stall that is not going to resolve! -There are some error conditions where we can't be sure whether the candidate is -really invalid or some internal glitch occurred, e.g. panics. 
Whenever we are -unsure, we can never treat an error as internal as we would abstain from voting. -So we will first retry the candidate, and if the issue persists we are forced to -vote invalid. +Note that any error from the job process we cannot treat as internal. The job +runs untrusted code and an attacker can therefore return arbitrary errors. If +they were to return errors that we treat as internal, they could make us abstain +from voting. Since we are unsure if such errors are legitimate, we will first +retry the candidate, and if the issue persists we are forced to vote invalid. ## Security @@ -119,6 +212,25 @@ So what are we actually worried about? Things that come to mind: 6. **Intercepting and manipulating packages** - Effect very similar to the above, hard to do without also being able to do 4 or 5. +We do not protect against (1), (2), and (3), because there are too many sources +of randomness for an attacker to exploit. + +We provide very good protection against (4), (5), and (6). + +### Job Processes + +As mentioned above, our architecture includes long-living **worker processes** +and one-off **job processes**. This separation is important so that the handling +of untrusted code can be limited to the job processes. A hijacked job process +can therefore not interfere with other jobs running in separate processes. + +Furthermore, if an unexpected execution error occurred in the execution worker +and not the job itself, we generally can be confident that it has nothing to do +with the candidate, so we can abstain from voting. On the other hand, a hijacked +job is able to send back erroneous responses for candidates, so we know that we +should not abstain from voting on such errors from jobs. Otherwise, an attacker +could trigger a finality stall. (See "Internal Errors" section above.) + ### Restricting file-system access A basic security mechanism is to make sure that any process directly interfacing @@ -128,16 +240,14 @@ data on the host machine. *Currently this is only supported on Linux.* - - - +### Restricting networking - - - - +We also disable networking on PVF threads by disabling certain syscalls, such as +the creation of sockets. This prevents attackers from either downloading +payloads or communicating sensitive data from the validator's machine to the +outside world. - +*Currently this is only supported on Linux.* ### Clearing env vars diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md index f0de50f2267b943f65ee2d9783869cd180eb07a9..7f6fef7ddf631f10eadd7c1cc5f4d7d4b7f9cd04 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md @@ -8,9 +8,9 @@ pre-checking. Head over to [overview] for the PVF pre-checking process overview. There is no dedicated input mechanism for PVF pre-checker. Instead, PVF pre-checker looks on the `ActiveLeavesUpdate` event stream for work. -This subsytem does not produce any output messages either. The subsystem will, however, send messages to the [Runtime -API] subsystem to query for the pending PVFs and to submit votes. In addition to that, it will also communicate with -[Candidate Validation] Subsystem to request PVF pre-check. +This subsytem does not produce any output messages either. The subsystem will, however, send messages to the +[Runtime API] subsystem to query for the pending PVFs and to submit votes. 
In addition to that, it will also + communicate with [Candidate Validation] Subsystem to request PVF pre-check. ## Functionality diff --git a/polkadot/roadmap/implementers-guide/src/protocol-approval.md b/polkadot/roadmap/implementers-guide/src/protocol-approval.md index 70bc0233d65a09edbf3bddff960d52982aa54581..b6aa16646ad25f339af3a6b3db9d7b2d65997c4d 100644 --- a/polkadot/roadmap/implementers-guide/src/protocol-approval.md +++ b/polkadot/roadmap/implementers-guide/src/protocol-approval.md @@ -296,6 +296,18 @@ provide somewhat more security. TODO: When? Is this optimal for the network? etc. +## Approval coalescing +To reduce the necessary network bandwidth and CPU time when a validator has more than one candidate to approve, we +make a best effort to send a single message that approves all available candidates with a single signature. +The implemented heuristic is that each time we are ready to create a signature and send a vote for a candidate, we +delay sending it until one of three things happens: +- We have gathered a maximum of `MAX_APPROVAL_COALESCE_COUNT` candidates that we have already checked and we are + ready to sign approval for. +- `MAX_APPROVAL_COALESCE_WAIT_TICKS` have passed since checking the oldest candidate and we were ready to sign + and send the approval message. +- We are already in the last third of the no-show period in order to avoid creating accidental no-shows, which in + turn might trigger other assignments. + ## On-chain verification We should verify approval on-chain to reward approval checkers. We therefore require the "no show" timeout to be longer diff --git a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md index 26058c446cb927fa78abd892d416597364a04ab0..32a7fe652dbcb0ee60aa7d828d3a8226dcb80730 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md @@ -182,6 +182,7 @@ struct CoreAssignment { core: CoreIndex, para_id: ParaId, kind: AssignmentKind, + group_idx: GroupIndex, } // reasons a core might be freed. enum FreedReason { diff --git a/polkadot/roadmap/phase-1.toml b/polkadot/roadmap/phase-1.toml index 50ef1f741fe9d448a29e9a4018746305b3fb2586..3a5f0d752debee41c51a2124f8e91f3e03dc9d68 100644 --- a/polkadot/roadmap/phase-1.toml +++ b/polkadot/roadmap/phase-1.toml @@ -14,7 +14,7 @@ requires = ["phase-0"] items = [ { label = "Buffer submitted parachain candidate until considered available."
}, { label = "Validators submit signed bitfields re: availability of parachains" }, - { label = "relay chain fully includes candidate once considered available" } + { label = "relay chain fully includes candidate once considered available" }, ] [[group]] @@ -23,8 +23,8 @@ label = "Secondary checks and self-selection by validators" requires = ["two-phase-inclusion"] items = [ { label = "Extract #VCheck for all checkable candidates" }, - { label = "Maintain a frontier of candidates that are likely to be checked soon" }, { label = "Listen for new reports on candidates and new checks to update frontier" }, + { label = "Maintain a frontier of candidates that are likely to be checked soon" }, ] [[group]] @@ -32,8 +32,8 @@ name = "runtime-availability-validity-slashing" label = "Availability and Validity slashing in the runtime" requires = ["two-phase-inclusion"] items = [ + { label = "Submit secondary checks to runtime", port = "submitsecondary", requires = ["secondary-checking"] }, { label = "Track all candidates within the slash period as well as their session" }, - { label = "Submit secondary checks to runtime", port = "submitsecondary", requires = ["secondary-checking"]}, { label = "Track reports and attestatations for candidates" }, ] @@ -41,10 +41,10 @@ items = [ name = "non-direct-ancestor" label = "Allow candidates with non-direct ancestor" items = [ - { label = "Extend GlobalValidationData with random seed and session index"}, { label = "Block author can provide minimally-attested candidate with older relay parent" }, - { label = "Runtime can accept and process candidates with older relay-parent" }, + { label = "Extend GlobalValidationData with random seed and session index" }, { label = "Revise availability-store pruning to ensure only needed data is kept" }, + { label = "Runtime can accept and process candidates with older relay-parent" }, ] [[group]] @@ -52,13 +52,13 @@ name = "grandpa-voting-rule" label = "GRANDPA voting rule to follow valid/available chains" requires = ["runtime-availability-validity-slashing"] items = [ - { label = "Add a utility to flag a block and all of its ancestors as abandoned" }, { label = "Accept new blocks on abandoned but mark them abandoned as well." }, + { label = "Add a utility to flag a block and all of its ancestors as abandoned" }, { label = "Do not vote or build on abandoned chains" }, ] [[group]] name = "phase-1" label = "Phase 1: Availability and Validity" -requires = ["non-direct-ancestor", "grandpa-voting-rule", "runtime-availability-validity-slashing"] +requires = ["grandpa-voting-rule", "non-direct-ancestor", "runtime-availability-validity-slashing"] items = [] diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index ce11b26e5549719e7005e026239b034478e5c2dc..8c582c623baf143a16068627de8af9c23178a28a 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Polkadot specific RPC functionality." 
+[lints] +workspace = true + [dependencies] jsonrpsee = { version = "0.16.2", features = ["server"] } polkadot-primitives = { path = "../primitives" } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 0882e555aafeb89c0dde20990063a714318491c8..c841c0847c0f64f7a97d27df7b36b568f6c709d3 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } @@ -13,7 +16,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc"] } serde_derive = { version = "1.0.117" } static_assertions = "1.1.0" @@ -21,15 +24,17 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false , features=["serde"]} +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features=["serde"] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false , features=["serde"]} -sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false, features=["serde"] } +sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } +sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } +sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false, features = ["serde"] } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } +pallet-broker = { path = "../../../substrate/frame/broker", default-features = false } pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } +pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } @@ -69,7 +74,7 @@ libsecp256k1 = "0.7.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } [features] -default = [ "std" ] +default = ["std"] no_std = [] std = [ "bitvec/std", @@ 
-83,8 +88,10 @@ std = [ "pallet-asset-rate?/std", "pallet-authorship/std", "pallet-balances/std", + "pallet-broker/std", "pallet-election-provider-multi-phase/std", "pallet-fast-unstake/std", + "pallet-identity/std", "pallet-session/std", "pallet-staking-reward-fn/std", "pallet-staking/std", @@ -122,8 +129,10 @@ runtime-benchmarks = [ "pallet-asset-rate/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-broker/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-fast-unstake/runtime-benchmarks", + "pallet-identity/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", @@ -145,8 +154,10 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-babe?/try-runtime", "pallet-balances/try-runtime", + "pallet-broker/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-fast-unstake/try-runtime", + "pallet-identity/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", "pallet-timestamp/try-runtime", diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 59c76a6cabb3c92cd45dc739330921aca11ca9bf..3a402d011961f041214f82ca0d40d20318a9d88f 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Helper crate for generating slot ranges for the Polkadot runtime." +[lints] +workspace = true + [dependencies] paste = "1.0" enumn = "0.1.12" @@ -14,5 +17,5 @@ sp-std = { package = "sp-std", path = "../../../../substrate/primitives/std", de sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } [features] -default = [ "std" ] -std = [ "parity-scale-codec/std", "sp-runtime/std", "sp-std/std" ] +default = ["std"] +std = ["parity-scale-codec/std", "sp-runtime/std", "sp-std/std"] diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index cb2e5083b0ac64e30e3579c7a1b4bdc387bb9eba..cb56cb8a118c4dff7d8e0c9830ee6f5d1605d838 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -636,7 +636,7 @@ mod tests { use crate::{assigned_slots, mock::TestRegistrar, slots}; use ::test_helpers::{dummy_head_data, dummy_validation_code}; - use frame_support::{assert_noop, assert_ok, parameter_types}; + use frame_support::{assert_noop, assert_ok, derive_impl, parameter_types}; use frame_system::EnsureRoot; use pallet_balances; use primitives::BlockNumber; @@ -679,6 +679,8 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -741,6 +743,7 @@ mod tests { type QueueFootprinter = (); type NextSessionRotation = crate::mock::TestNextSessionRotation; type OnNewHead = (); + type AssignCoretime = (); } impl parachains_shared::Config for Test {} diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index 267413eb1badda7c4e802b32c9b144451dd97f55..baa66d83a3ff804337d141c83e205ec3634e46a9 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -677,7 +677,8 @@ mod tests { use crate::{auctions, mock::TestRegistrar}; use ::test_helpers::{dummy_hash, dummy_head_data, dummy_validation_code}; use frame_support::{ - assert_noop, assert_ok, assert_storage_noop, ord_parameter_types, parameter_types, + assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, + parameter_types, traits::{ConstU32, EitherOfDiverse, OnFinalize, OnInitialize}, }; use frame_system::{EnsureRoot, EnsureSignedBy}; @@ -705,6 +706,8 @@ mod tests { parameter_types! { pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 548adc6fbd5a77db2ab8e0ad6b41dfc20a8753ec..d15e04a660f736608b836917e3de3ac7f77eed0a 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -561,7 +561,7 @@ impl Pallet { } // We first need to deposit the balance to ensure that the account exists. - CurrencyOf::::deposit_creating(&dest, balance_due); + let _ = CurrencyOf::::deposit_creating(&dest, balance_due); // Check if this claim should have a vesting schedule. if let Some(vs) = vesting { @@ -710,7 +710,7 @@ mod tests { use crate::claims; use claims::Call as ClaimsCall; use frame_support::{ - assert_err, assert_noop, assert_ok, + assert_err, assert_noop, assert_ok, derive_impl, dispatch::{GetDispatchInfo, Pays}, ord_parameter_types, parameter_types, traits::{ConstU32, ExistenceRequirement, WithdrawReasons}, @@ -739,6 +739,8 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -799,6 +801,7 @@ mod tests { type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; const MAX_VESTING_SCHEDULES: u32 = 28; } diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index f67fc12a67f219573f70e9f0102f39de8cd83c4b..77ef406e57983d7108b7eac0ed9caf12c52b55ee 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -863,7 +863,7 @@ mod tests { use super::*; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, OnFinalize, OnInitialize}, }; use primitives::Id as ParaId; @@ -900,6 +900,7 @@ mod tests { type BlockNumber = u64; + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc2c3ce7773c250efb72f9d26ed7604a6bd756e9 --- /dev/null +++ b/polkadot/runtime/common/src/identity_migrator.rs @@ -0,0 +1,305 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This pallet is designed to go into a source chain and destination chain to migrate data. The +//! design motivations are: +//! +//! - Call some function on the source chain that executes some migration (clearing state, +//! forwarding an XCM program). +//! - Call some function (probably from an XCM program) on the destination chain. +//! - Avoid cluttering the source pallet with new dispatchables that are unrelated to its +//! functionality and only used for migration. +//! +//! After the migration is complete, the pallet may be removed from both chains' runtimes as well as +//! the `polkadot-runtime-common` crate. 
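The migration hook that this new pallet exposes to runtimes, `OnReapIdentity`, is declared further down in this file. As a hedged sketch only (the real wiring is expected to forward an XCM program that teleports the deposit to the destination chain, as the module docs above describe), a minimal runtime-side implementation could look like the following. The handler name is hypothetical, and the import path assumes the module is exported as `identity_migrator` from `polkadot-runtime-common`.

```rust
use frame_support::dispatch::DispatchResult;
// Assumes the new module is exported as `identity_migrator` from the
// `polkadot-runtime-common` crate, as the file added above suggests.
use polkadot_runtime_common::identity_migrator::OnReapIdentity;

/// Hypothetical handler: a real runtime would compute the remote deposit from
/// `bytes` and `subs` and send it to the destination chain (e.g. via an XCM
/// teleport plus a `poke_deposit` transact) instead of just logging.
pub struct LogOnlyReapHandler;

impl<AccountId: core::fmt::Debug> OnReapIdentity<AccountId> for LogOnlyReapHandler {
	fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult {
		log::info!(
			"identity reaped for {:?}: {} bytes of identity info, {} sub-accounts",
			who,
			bytes,
			subs,
		);
		Ok(())
	}
}
```

A runtime using this sketch would then point the pallet's `Config` at it via `type ReapIdentityHandler = LogOnlyReapHandler;`.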
+ +use frame_support::{dispatch::DispatchResult, traits::Currency, weights::Weight}; +pub use pallet::*; +use pallet_identity; +use sp_core::Get; + +#[cfg(feature = "runtime-benchmarks")] +use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; + +pub trait WeightInfo { + fn reap_identity(r: u32, s: u32) -> Weight; + fn poke_deposit() -> Weight; +} + +impl WeightInfo for () { + fn reap_identity(_r: u32, _s: u32) -> Weight { + Weight::MAX + } + fn poke_deposit() -> Weight { + Weight::MAX + } +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn reap_identity(_r: u32, _s: u32) -> Weight { + Weight::zero() + } + fn poke_deposit() -> Weight { + Weight::zero() + } +} + +// Must use the same `Balance` as `T`'s Identity pallet to handle deposits. +type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + dispatch::{DispatchResultWithPostInfo, PostDispatchInfo}, + pallet_prelude::*, + traits::EnsureOrigin, + }; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_identity::Config { + /// Overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The origin that can reap identities. Expected to be `EnsureSigned` on the + /// source chain such that anyone can all this function. + type Reaper: EnsureOrigin; + + /// A handler for what to do when an identity is reaped. + type ReapIdentityHandler: OnReapIdentity; + + /// Weight information for the extrinsics in the pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// The identity and all sub accounts were reaped for `who`. + IdentityReaped { who: T::AccountId }, + /// The deposits held for `who` were updated. `identity` is the new deposit held for + /// identity info, and `subs` is the new deposit held for the sub-accounts. + DepositUpdated { who: T::AccountId, identity: BalanceOf, subs: BalanceOf }, + } + + #[pallet::call] + impl Pallet { + /// Reap the `IdentityInfo` of `who` from the Identity pallet of `T`, unreserving any + /// deposits held and removing storage items associated with `who`. + #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::reap_identity( + T::MaxRegistrars::get(), + T::MaxSubAccounts::get() + ))] + pub fn reap_identity( + origin: OriginFor, + who: T::AccountId, + ) -> DispatchResultWithPostInfo { + T::Reaper::ensure_origin(origin)?; + // - number of registrars (required to calculate weight) + // - byte size of `IdentityInfo` (required to calculate remote deposit) + // - number of sub accounts (required to calculate both weight and remote deposit) + let (registrars, bytes, subs) = pallet_identity::Pallet::::reap_identity(&who)?; + T::ReapIdentityHandler::on_reap_identity(&who, bytes, subs)?; + Self::deposit_event(Event::IdentityReaped { who }); + let post = PostDispatchInfo { + actual_weight: Some(::WeightInfo::reap_identity( + registrars, subs, + )), + pays_fee: Pays::No, + }; + Ok(post) + } + + /// Update the deposit of `who`. Meant to be called by the system with an XCM `Transact` + /// Instruction. 
+ #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::poke_deposit())] + pub fn poke_deposit(origin: OriginFor, who: T::AccountId) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let (id_deposit, subs_deposit) = pallet_identity::Pallet::::poke_deposit(&who)?; + Self::deposit_event(Event::DepositUpdated { + who, + identity: id_deposit, + subs: subs_deposit, + }); + Ok(Pays::No.into()) + } + } +} + +/// Trait to handle reaping identity from state. +pub trait OnReapIdentity { + /// What to do when an identity is reaped. For example, the implementation could send an XCM + /// program to another chain. Concretely, a type implementing this trait in the Polkadot + /// runtime would teleport enough DOT to the People Chain to cover the Identity deposit there. + /// + /// This could also directly include `Transact { poke_deposit(..), ..}`. + /// + /// Inputs + /// - `who`: Whose identity was reaped. + /// - `bytes`: The byte size of `IdentityInfo`. + /// - `subs`: The number of sub-accounts they had. + fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult; +} + +impl OnReapIdentity for () { + fn on_reap_identity(_who: &AccountId, _bytes: u32, _subs: u32) -> DispatchResult { + Ok(()) + } +} + +#[cfg(feature = "runtime-benchmarks")] +#[benchmarks] +mod benchmarks { + use super::*; + use frame_support::traits::EnsureOrigin; + use frame_system::RawOrigin; + use pallet_identity::{Data, IdentityInformationProvider, Judgement, Pallet as Identity}; + use parity_scale_codec::Encode; + use sp_runtime::{ + traits::{Bounded, Hash, StaticLookup}, + Saturating, + }; + use sp_std::{boxed::Box, vec::Vec, *}; + + const SEED: u32 = 0; + + fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); + } + + #[benchmark] + fn reap_identity( + r: Linear<0, { T::MaxRegistrars::get() }>, + s: Linear<0, { T::MaxSubAccounts::get() }>, + ) -> Result<(), BenchmarkError> { + // set up target + let target: T::AccountId = account("target", 0, SEED); + let target_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(target.clone())); + let target_lookup = T::Lookup::unlookup(target.clone()); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + + // set identity + let info = ::IdentityInformation::create_identity_info(); + Identity::::set_identity( + RawOrigin::Signed(target.clone()).into(), + Box::new(info.clone()), + )?; + + // create and set subs + let mut subs = Vec::new(); + let data = Data::Raw(vec![0; 32].try_into().unwrap()); + for ii in 0..s { + let sub_account = account("sub", ii, SEED); + subs.push((sub_account, data.clone())); + } + Identity::::set_subs(target_origin.clone(), subs.clone())?; + + // add registrars and provide judgements + let registrar_origin = T::RegistrarOrigin::try_successful_origin() + .expect("RegistrarOrigin has no successful origin required for the benchmark"); + for ii in 0..r { + // registrar account + let registrar: T::AccountId = account("registrar", ii, SEED); + let registrar_lookup = T::Lookup::unlookup(registrar.clone()); + let _ = ::Currency::make_free_balance_be( + ®istrar, + ::Currency::minimum_balance(), + ); + + // add registrar + Identity::::add_registrar(registrar_origin.clone(), registrar_lookup)?; + Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), ii, 10u32.into())?; + let fields = ::IdentityInformation::all_fields(); + Identity::::set_fields(RawOrigin::Signed(registrar.clone()).into(), ii, fields)?; + + // request and provide judgement + Identity::::request_judgement(target_origin.clone(), ii, 10u32.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(registrar).into(), + ii, + target_lookup.clone(), + Judgement::Reasonable, + ::Hashing::hash_of(&info), + )?; + } + + let origin = T::Reaper::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, target.clone()); + + assert_last_event::(Event::::IdentityReaped { who: target.clone() }.into()); + + let fields = ::IdentityInformation::all_fields(); + assert!(!Identity::::has_identity(&target, fields)); + assert_eq!(Identity::::subs(&target).len(), 0); + + Ok(()) + } + + #[benchmark] + fn poke_deposit() -> Result<(), BenchmarkError> { + let target: T::AccountId = account("target", 0, SEED); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + let info = ::IdentityInformation::create_identity_info(); + + let _ = Identity::::set_identity_no_deposit(&target, info.clone()); + + let sub_account: T::AccountId = account("sub", 0, SEED); + let _ = Identity::::set_sub_no_deposit(&target, sub_account.clone()); + + // expected deposits + let expected_id_deposit = ::BasicDeposit::get() + .saturating_add( + ::ByteDeposit::get() + .saturating_mul(>::from(info.encoded_size() as u32)), + ); + // only 1 sub + let expected_sub_deposit = ::SubAccountDeposit::get(); + + #[extrinsic_call] + _(RawOrigin::Root, target.clone()); + + assert_last_event::( + Event::::DepositUpdated { + who: target, + identity: expected_id_deposit, + subs: expected_sub_deposit, + } + .into(), + ); + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); +} 
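For context on the `OnReapIdentity` handler introduced in the file above: an implementation on the source chain would typically use the returned `bytes` and `subs` counts to work out how large a deposit the destination chain needs before moving funds there, mirroring the `BasicDeposit + ByteDeposit * encoded_size` plus per-sub-account arithmetic used in the `poke_deposit` benchmark. The following is a minimal, standalone sketch of that arithmetic only; the `remote_identity_deposit` helper and its constants are hypothetical and are not part of this diff.

// Hypothetical sketch (not part of the diff): deposit arithmetic an
// `OnReapIdentity` implementation might perform before sending funds
// to the destination chain.
fn remote_identity_deposit(bytes: u32, subs: u32) -> u128 {
    // Illustrative placeholder values; a real runtime would take these from
    // the destination chain's Identity pallet configuration.
    const BASIC_DEPOSIT: u128 = 1_000;
    const BYTE_DEPOSIT: u128 = 10;
    const SUB_ACCOUNT_DEPOSIT: u128 = 500;

    BASIC_DEPOSIT
        .saturating_add(BYTE_DEPOSIT.saturating_mul(bytes as u128))
        .saturating_add(SUB_ACCOUNT_DEPOSIT.saturating_mul(subs as u128))
}

fn main() {
    // e.g. an identity encoded in 120 bytes with 2 sub-accounts.
    assert_eq!(remote_identity_deposit(120, 2), 1_000 + 10 * 120 + 2 * 500);
}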
diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index e50ffb634b305eda8feb3c1dcf87c9f3366defb5..d71c626cd98dd70e01633b82517604c81510cb17 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -149,8 +149,11 @@ impl TryConvert<&VersionedMultiLocation, xcm::latest::MultiLocation> #[cfg(feature = "runtime-benchmarks")] pub mod benchmarks { use super::VersionedLocatableAsset; + use core::marker::PhantomData; + use frame_support::traits::Get; use pallet_asset_rate::AssetKindFactory; use pallet_treasury::ArgumentsFactory as TreasuryArgumentsFactory; + use sp_core::{ConstU32, ConstU8}; use xcm::prelude::*; /// Provides a factory method for the [`VersionedLocatableAsset`]. @@ -172,12 +175,22 @@ pub mod benchmarks { /// Provide factory methods for the [`VersionedLocatableAsset`] and the `Beneficiary` of the /// [`VersionedMultiLocation`]. The location of the asset is determined as a Parachain with an /// ID equal to the passed seed. - pub struct TreasuryArguments; - impl TreasuryArgumentsFactory - for TreasuryArguments + pub struct TreasuryArguments, ParaId = ConstU32<0>>( + PhantomData<(Parents, ParaId)>, + ); + impl, ParaId: Get> + TreasuryArgumentsFactory + for TreasuryArguments { fn create_asset_kind(seed: u32) -> VersionedLocatableAsset { - AssetRateArguments::create_asset_kind(seed) + VersionedLocatableAsset::V3 { + location: xcm::v3::MultiLocation::new(Parents::get(), X1(Parachain(ParaId::get()))), + asset_id: xcm::v3::MultiLocation::new( + 0, + X2(PalletInstance(seed.try_into().unwrap()), GeneralIndex(seed.into())), + ) + .into(), + } } fn create_beneficiary(seed: [u8; 32]) -> VersionedMultiLocation { VersionedMultiLocation::V3(xcm::v3::MultiLocation::new( @@ -192,6 +205,7 @@ pub mod benchmarks { mod tests { use super::*; use frame_support::{ + derive_impl, dispatch::DispatchClass, parameter_types, traits::{ @@ -237,6 +251,7 @@ mod tests { pub const AvailableBlockRatio: Perbill = Perbill::one(); } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index d5a32775fd49640228fb948727dd7b3aaf4a8392..4870432d22f93e100c31766d6db1b0a406b1f992 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -17,7 +17,7 @@ //! Mocking utilities for testing with real pallets. 
use crate::{ - auctions, crowdloan, + auctions, crowdloan, identity_migrator, mock::{conclude_pvf_checking, validators_public_keys}, paras_registrar, slot_range::SlotRange, @@ -25,13 +25,14 @@ use crate::{ traits::{AuctionStatus, Auctioneer, Leaser, Registrar as RegistrarT}, }; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, Currency, OnFinalize, OnInitialize}, weights::Weight, PalletId, }; use frame_support_test::TestRandomness; use frame_system::EnsureRoot; +use pallet_identity::{self, legacy::IdentityInfo}; use parity_scale_codec::Encode; use primitives::{ BlockNumber, HeadData, Id as ParaId, SessionIndex, ValidationCode, LOWEST_PUBLIC_ID, @@ -88,6 +89,10 @@ frame_support::construct_runtime!( Auctions: auctions::{Pallet, Call, Storage, Event}, Crowdloan: crowdloan::{Pallet, Call, Storage, Event}, Slots: slots::{Pallet, Call, Storage, Event}, + + // Migrators + Identity: pallet_identity::{Pallet, Call, Storage, Event}, + IdentityMigrator: identity_migrator::{Pallet, Call, Event}, } ); @@ -109,6 +114,7 @@ parameter_types! { ); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; @@ -206,6 +212,7 @@ impl paras::Config for Test { type QueueFootprinter = (); type NextSessionRotation = crate::mock::TestNextSessionRotation; type OnNewHead = (); + type AssignCoretime = (); } parameter_types! { @@ -274,6 +281,28 @@ impl crowdloan::Config for Test { type WeightInfo = crate::crowdloan::TestWeightInfo; } +impl pallet_identity::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type Slashed = (); + type BasicDeposit = ConstU32<100>; + type ByteDeposit = ConstU32<10>; + type SubAccountDeposit = ConstU32<100>; + type MaxSubAccounts = ConstU32<2>; + type IdentityInformation = IdentityInfo>; + type MaxRegistrars = ConstU32<20>; + type RegistrarOrigin = EnsureRoot; + type ForceOrigin = EnsureRoot; + type WeightInfo = (); +} + +impl identity_migrator::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Reaper = EnsureRoot; + type ReapIdentityHandler = (); + type WeightInfo = crate::identity_migrator::TestWeightInfo; +} + /// Create a new set of test externalities. 
pub fn new_test_ext() -> TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); diff --git a/polkadot/runtime/common/src/lib.rs b/polkadot/runtime/common/src/lib.rs index 70722d5098878bd47d14bfcc129c36c0b77abfb2..bd49d3cccc9cac99f3a11b698629dfd51b847816 100644 --- a/polkadot/runtime/common/src/lib.rs +++ b/polkadot/runtime/common/src/lib.rs @@ -23,6 +23,7 @@ pub mod auctions; pub mod claims; pub mod crowdloan; pub mod elections; +pub mod identity_migrator; pub mod impls; pub mod paras_registrar; pub mod paras_sudo_wrapper; diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 2d33cf28993dffdd8ae12f28c45464ba69608b06..9719f02677dc3d732369dfad9969b21cd9471154 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -699,7 +699,7 @@ mod tests { mock::conclude_pvf_checking, paras_registrar, traits::Registrar as RegistrarTrait, }; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, error::BadOrigin, parameter_types, traits::{ConstU32, OnFinalize, OnInitialize}, @@ -751,6 +751,7 @@ mod tests { limits::BlockLength::max_with_normal_ratio(4 * 1024 * 1024, NORMAL_RATIO); } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; @@ -813,6 +814,7 @@ mod tests { type QueueFootprinter = (); type NextSessionRotation = crate::mock::TestNextSessionRotation; type OnNewHead = (); + type AssignCoretime = (); } impl configuration::Config for Test { diff --git a/polkadot/runtime/common/src/paras_sudo_wrapper.rs b/polkadot/runtime/common/src/paras_sudo_wrapper.rs index 0fc2644b2a0b0e21b07e8dca719e7c702c9f40a4..4735c176329192abc586e1765b08a0712cbe6fc0 100644 --- a/polkadot/runtime/common/src/paras_sudo_wrapper.rs +++ b/polkadot/runtime/common/src/paras_sudo_wrapper.rs @@ -23,7 +23,7 @@ use parity_scale_codec::Encode; use primitives::Id as ParaId; use runtime_parachains::{ configuration, dmp, hrmp, - paras::{self, ParaGenesisArgs}, + paras::{self, AssignCoretime, ParaGenesisArgs}, ParaLifecycle, }; use sp_std::boxed::Box; @@ -58,6 +58,8 @@ pub mod pallet { CannotUpgrade, /// Cannot downgrade lease holding parachain to on-demand. CannotDowngrade, + /// There are more cores than supported by the runtime. + TooManyCores, } #[pallet::hooks] @@ -66,6 +68,10 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Schedule a para to be initialized at the start of the next session. + /// + /// This should only be used for TESTING and not on PRODUCTION chains. It automatically + /// assigns Coretime to the chain and increases the number of cores. Thus, there is no + /// running coretime chain required. 
#[pallet::call_index(0)] #[pallet::weight((1_000, DispatchClass::Operational))] pub fn sudo_schedule_para_initialize( @@ -76,6 +82,9 @@ pub mod pallet { ensure_root(origin)?; runtime_parachains::schedule_para_initialize::(id, genesis) .map_err(|_| Error::::ParaAlreadyExists)?; + + T::AssignCoretime::assign_coretime(id)?; + Ok(()) } diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index bc95483dd7ede3a6eb29e841d75a7ec59c2595b7..f43f16b838cbc91745889089df3d29525bce2180 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -484,7 +484,7 @@ mod tests { // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use crate::purchase; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{Currency, WithdrawReasons}, }; use sp_runtime::{ @@ -511,6 +511,8 @@ mod tests { parameter_types! { pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -571,6 +573,7 @@ mod tests { type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; const MAX_VESTING_SCHEDULES: u32 = 28; } diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 01f6365b791f6fe36d6cf5260a2d86b9eada4cca..6a8cddd8d914cf45656b6fed8e3d5117ee7fe55f 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -326,6 +326,18 @@ impl Pallet { tracker.into_iter().collect() } + + /// Current lease index and how many blocks we are already in. + pub fn lease_period_index_plus_progress( + b: BlockNumberFor, + ) -> Option<(>>::LeasePeriod, BlockNumberFor)> { + // Note that blocks before `LeaseOffset` do not count as any lease period. + let offset_block_now = b.checked_sub(&T::LeaseOffset::get())?; + let lease_period = offset_block_now / T::LeasePeriod::get(); + let in_lease = offset_block_now % T::LeasePeriod::get(); + + Some((lease_period, in_lease)) + } } impl crate::traits::OnSwap for Pallet { @@ -449,12 +461,8 @@ impl Leaser> for Pallet { } fn lease_period_index(b: BlockNumberFor) -> Option<(Self::LeasePeriod, bool)> { - // Note that blocks before `LeaseOffset` do not count as any lease period. - let offset_block_now = b.checked_sub(&T::LeaseOffset::get())?; - let lease_period = offset_block_now / T::LeasePeriod::get(); - let first_block = (offset_block_now % T::LeasePeriod::get()).is_zero(); - - Some((lease_period, first_block)) + Self::lease_period_index_plus_progress(b) + .map(|(period, progress)| (period, progress.is_zero())) } fn already_leased( @@ -505,7 +513,7 @@ mod tests { use crate::{mock::TestRegistrar, slots}; use ::test_helpers::{dummy_head_data, dummy_validation_code}; - use frame_support::{assert_noop, assert_ok, parameter_types}; + use frame_support::{assert_noop, assert_ok, derive_impl, parameter_types}; use frame_system::EnsureRoot; use pallet_balances; use primitives::BlockNumber; @@ -529,6 +537,8 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index cdfab82d00c1850ecb6c5e240bf3d4df92ccc75a..9a16749bf602f9c27bcc39c6ebe71138eeb4cc07 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -6,8 +6,11 @@ edition.workspace = true license.workspace = true description = "Runtime metric interface for the Polkadot node" +[lints] +workspace = true + [dependencies] -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false} +sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } @@ -16,7 +19,7 @@ frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-f bs58 = { version = "0.5.0", default-features = false, features = ["alloc"] } [features] -default = [ "std" ] +default = ["std"] std = [ "bs58/std", "frame-benchmarking?/std", @@ -25,4 +28,4 @@ std = [ "sp-std/std", "sp-tracing/std", ] -runtime-metrics = [ "frame-benchmarking", "sp-tracing/with-tracing" ] +runtime-metrics = ["frame-benchmarking", "sp-tracing/with-tracing"] diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index b6800fc0844dfacc424ec642d7a06cbb456e360b..1f381400cf5382e0ce42aebfc954c8e4f9ba8292 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } @@ -13,7 +16,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } derive_more = "0.99.17" bitflags = "1.3.2" @@ -21,18 +24,20 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features=["serde"] } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features=["serde"] } -sp-core = { path = 
"../../../substrate/primitives/core", default-features = false, features=["serde"] } +sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } +sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } sp-keystore = { path = "../../../substrate/primitives/keystore", optional = true } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false, optional = true } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false, optional = true } +sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } +pallet-broker = { path = "../../../substrate/frame/broker", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } @@ -50,7 +55,7 @@ rand = { version = "0.8.5", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } static_assertions = { version = "1.1.0", optional = true } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -polkadot-runtime-metrics = { path = "../metrics", default-features = false} +polkadot-runtime-metrics = { path = "../metrics", default-features = false } polkadot-core-primitives = { path = "../../core-primitives", default-features = false } [dev-dependencies] @@ -66,7 +71,7 @@ assert_matches = "1" serde_json = "1.0.108" [features] -default = [ "std" ] +default = ["std"] no_std = [] std = [ "bitvec/std", @@ -79,6 +84,7 @@ std = [ "pallet-authorship/std", "pallet-babe/std", "pallet-balances/std", + "pallet-broker/std", "pallet-message-queue/std", "pallet-session/std", "pallet-staking/std", @@ -96,6 +102,7 @@ std = [ "serde/std", "sp-api/std", "sp-application-crypto?/std", + "sp-arithmetic/std", "sp-core/std", "sp-io/std", "sp-keystore", @@ -112,6 +119,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-broker/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", @@ -132,6 +140,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-babe/try-runtime", "pallet-balances/try-runtime", + "pallet-broker/try-runtime", "pallet-message-queue/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", diff --git a/polkadot/runtime/parachains/src/assigner.rs b/polkadot/runtime/parachains/src/assigner.rs deleted file mode 100644 index 9e408df61dc18d3da7205a7a4ef5f8b0d9386c03..0000000000000000000000000000000000000000 --- a/polkadot/runtime/parachains/src/assigner.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The Polkadot multiplexing assignment provider. -//! Provides blockspace assignments for both bulk and on demand parachains. -use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{CoreIndex, Id as ParaId}; - -use crate::{ - configuration, paras, - scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, -}; - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config + configuration::Config + paras::Config { - type ParachainsAssignmentProvider: AssignmentProvider>; - type OnDemandAssignmentProvider: AssignmentProvider>; - } -} - -// Aliases to make the impl more readable. -type ParachainAssigner = ::ParachainsAssignmentProvider; -type OnDemandAssigner = ::OnDemandAssignmentProvider; - -impl Pallet { - // Helper fn for the AssignmentProvider implementation. - // Assumes that the first allocation of cores is to bulk parachains. - // This function will return false if there are no cores assigned to the bulk parachain - // assigner. - fn is_bulk_core(core_idx: &CoreIndex) -> bool { - let parachain_cores = - as AssignmentProvider>>::session_core_count(); - - core_idx.0 < parachain_cores - } -} - -impl AssignmentProvider> for Pallet { - fn session_core_count() -> u32 { - let parachain_cores = - as AssignmentProvider>>::session_core_count(); - let on_demand_cores = - as AssignmentProvider>>::session_core_count(); - - parachain_cores.saturating_add(on_demand_cores) - } - - /// Pops an `Assignment` from a specified `CoreIndex` - fn pop_assignment_for_core( - core_idx: CoreIndex, - concluded_para: Option, - ) -> Option { - if Pallet::::is_bulk_core(&core_idx) { - as AssignmentProvider>>::pop_assignment_for_core( - core_idx, - concluded_para, - ) - } else { - as AssignmentProvider>>::pop_assignment_for_core( - core_idx, - concluded_para, - ) - } - } - - fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment) { - if Pallet::::is_bulk_core(&core_idx) { - as AssignmentProvider>>::push_assignment_for_core( - core_idx, assignment, - ) - } else { - as AssignmentProvider>>::push_assignment_for_core( - core_idx, assignment, - ) - } - } - - fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig> { - if Pallet::::is_bulk_core(&core_idx) { - as AssignmentProvider>>::get_provider_config( - core_idx, - ) - } else { - as AssignmentProvider>>::get_provider_config( - core_idx, - ) - } - } -} diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..71c3f1fa39f7c6c127ac954b8299fb16a323e72d --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs @@ -0,0 +1,87 @@ +// Copyright (C) Parity 
Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Helper functions for tests, also used in runtime-benchmarks. + +#![cfg(test)] + +use super::*; + +use crate::{ + mock::MockGenesisConfig, + paras::{ParaGenesisArgs, ParaKind}, +}; +use sp_runtime::Perbill; + +use primitives::{Balance, HeadData, ValidationCode}; + +fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { ..Default::default() }, + }, + ..Default::default() + } +} + +#[derive(Debug)] +pub struct GenesisConfigBuilder { + pub on_demand_cores: u32, + pub on_demand_base_fee: Balance, + pub on_demand_fee_variability: Perbill, + pub on_demand_max_queue_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub onboarded_on_demand_chains: Vec, +} + +impl Default for GenesisConfigBuilder { + fn default() -> Self { + Self { + on_demand_cores: 10, + on_demand_base_fee: 10_000, + on_demand_fee_variability: Perbill::from_percent(1), + on_demand_max_queue_size: 100, + on_demand_target_queue_utilization: Perbill::from_percent(25), + onboarded_on_demand_chains: vec![], + } + } +} + +impl GenesisConfigBuilder { + pub(super) fn build(self) -> MockGenesisConfig { + let mut genesis = default_genesis_config(); + let config = &mut genesis.configuration.config; + config.coretime_cores = self.on_demand_cores; + config.on_demand_base_fee = self.on_demand_base_fee; + config.on_demand_fee_variability = self.on_demand_fee_variability; + config.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + + let paras = &mut genesis.paras.paras; + for para_id in self.onboarded_on_demand_chains { + paras.push(( + para_id, + ParaGenesisArgs { + genesis_head: HeadData::from(vec![0u8]), + validation_code: ValidationCode::from(vec![0u8]), + para_kind: ParaKind::Parathread, + }, + )) + } + + genesis + } +} diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..9da81dc816cabeb7019e44b8f88c0f526582830d --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs @@ -0,0 +1,496 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The parachain coretime assignment module. +//! +//! Handles scheduling of assignments coming from the coretime/broker chain. For on-demand +//! assignments it relies on the separate on-demand assignment provider, where it forwards requests +//! to. +//! +//! `CoreDescriptor` contains pointers to the begin and the end of a list of schedules, together +//! with the currently active assignments. + +mod mock_helpers; +#[cfg(test)] +mod tests; + +use crate::{ + assigner_on_demand, configuration, + paras::AssignCoretime, + scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, + ParaId, +}; + +use frame_support::{defensive, pallet_prelude::*}; +use frame_system::pallet_prelude::*; +use pallet_broker::CoreAssignment; +use primitives::CoreIndex; +use sp_runtime::traits::{One, Saturating}; + +use sp_std::prelude::*; + +pub use pallet::*; + +/// Fraction expressed as a nominator with an assumed denominator of 57,600. +#[derive(RuntimeDebug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, TypeInfo)] +pub struct PartsOf57600(u16); + +impl PartsOf57600 { + pub const ZERO: Self = Self(0); + pub const FULL: Self = Self(57600); + + pub fn new_saturating(v: u16) -> Self { + Self::ZERO.saturating_add(Self(v)) + } + + pub fn is_full(&self) -> bool { + *self == Self::FULL + } + + pub fn saturating_add(self, rhs: Self) -> Self { + let inner = self.0.saturating_add(rhs.0); + if inner > 57600 { + Self(57600) + } else { + Self(inner) + } + } + + pub fn saturating_sub(self, rhs: Self) -> Self { + Self(self.0.saturating_sub(rhs.0)) + } + + pub fn checked_add(self, rhs: Self) -> Option { + let inner = self.0.saturating_add(rhs.0); + if inner > 57600 { + None + } else { + Some(Self(inner)) + } + } +} + +/// Assignments as they are scheduled by block number +/// +/// for a particular core. +#[derive(Encode, Decode, TypeInfo)] +#[cfg_attr(test, derive(PartialEq, RuntimeDebug))] +struct Schedule { + // Original assignments + assignments: Vec<(CoreAssignment, PartsOf57600)>, + /// When do our assignments become invalid, if at all? + /// + /// If this is `Some`, then this `CoreState` will be dropped at that block number. If this is + /// `None`, then we will keep serving our core assignments in a circle until a new set of + /// assignments is scheduled. + end_hint: Option, + + /// The next queued schedule for this core. + /// + /// Schedules are forming a queue. + next_schedule: Option, +} + +/// Descriptor for a core. +/// +/// Contains pointers to first and last schedule into `CoreSchedules` for that core and keeps track +/// of the currently active work as well. +#[derive(Encode, Decode, TypeInfo, Default)] +#[cfg_attr(test, derive(PartialEq, RuntimeDebug, Clone))] +struct CoreDescriptor { + /// Meta data about the queued schedules for this core. + queue: Option>, + /// Currently performed work. + current_work: Option>, +} + +/// Pointers into `CoreSchedules` for a particular core. +/// +/// Schedules in `CoreSchedules` form a queue. `Schedule::next_schedule` always pointing to the next +/// item. +#[derive(Encode, Decode, TypeInfo, Copy, Clone)] +#[cfg_attr(test, derive(PartialEq, RuntimeDebug))] +struct QueueDescriptor { + /// First scheduled item, that is not yet active. + first: N, + /// Last scheduled item. 
+ last: N, +} + +#[derive(Encode, Decode, TypeInfo)] +#[cfg_attr(test, derive(PartialEq, RuntimeDebug, Clone))] +struct WorkState { + /// Assignments with current state. + /// + /// Assignments and book keeping on how much has been served already. We keep track of serviced + /// assignments in order to adhere to the specified ratios. + assignments: Vec<(CoreAssignment, AssignmentState)>, + /// When do our assignments become invalid if at all? + /// + /// If this is `Some`, then this `CoreState` will be dropped at that block number. If this is + /// `None`, then we will keep serving our core assignments in a circle until a new set of + /// assignments is scheduled. + end_hint: Option, + /// Position in the assignments we are currently in. + /// + /// Aka which core assignment will be popped next on + /// `AssignmentProvider::pop_assignment_for_core`. + pos: u16, + /// Step width + /// + /// How much we subtract from `AssignmentState::remaining` for a core served. + step: PartsOf57600, +} + +#[derive(Encode, Decode, TypeInfo)] +#[cfg_attr(test, derive(PartialEq, RuntimeDebug, Clone, Copy))] +struct AssignmentState { + /// Ratio of the core this assignment has. + /// + /// As initially received via `assign_core`. + ratio: PartsOf57600, + /// How many parts are remaining in this round? + /// + /// At the end of each round (in preparation for the next), ratio will be added to remaining. + /// Then every time we get scheduled we subtract a core worth of points. Once we reach 0 or a + /// number lower than what a core is worth (`CoreState::step` size), we move on to the next + /// item in the `Vec`. + /// + /// The first round starts with remaining = ratio. + remaining: PartsOf57600, +} + +impl From> for WorkState { + fn from(schedule: Schedule) -> Self { + let Schedule { assignments, end_hint, next_schedule: _ } = schedule; + let step = + if let Some(min_step_assignment) = assignments.iter().min_by(|a, b| a.1.cmp(&b.1)) { + min_step_assignment.1 + } else { + // Assignments empty, should not exist. In any case step size does not matter here: + log::debug!("assignments of a `Schedule` should never be empty."); + PartsOf57600(1) + }; + let assignments = assignments + .into_iter() + .map(|(a, ratio)| (a, AssignmentState { ratio, remaining: ratio })) + .collect(); + + Self { assignments, end_hint, pos: 0, step } + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: + frame_system::Config + configuration::Config + assigner_on_demand::Config + { + } + + /// Scheduled assignment sets. + /// + /// Assignments as of the given block number. They will go into state once the block number is + /// reached (and replace whatever was in there before). + #[pallet::storage] + pub(super) type CoreSchedules = StorageMap< + _, + Twox256, + (BlockNumberFor, CoreIndex), + Schedule>, + OptionQuery, + >; + + /// Assignments which are currently active. + /// + /// They will be picked from `PendingAssignments` once we reach the scheduled block number in + /// `PendingAssignments`. + #[pallet::storage] + pub(super) type CoreDescriptors = StorageMap< + _, + Twox256, + CoreIndex, + CoreDescriptor>, + ValueQuery, + GetDefault, + >; + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::error] + pub enum Error { + AssignmentsEmpty, + /// Assignments together exceeded 57600. 
+ OverScheduled, + /// Assignments together less than 57600 + UnderScheduled, + /// assign_core is only allowed to append new assignments at the end of already existing + /// ones. + DisallowedInsert, + /// Tried to insert a schedule for the same core and block number as an existing schedule + DuplicateInsert, + /// Tried to add an unsorted set of assignments + AssignmentsNotSorted, + } +} + +impl AssignmentProvider> for Pallet { + fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { + let now = >::block_number(); + + CoreDescriptors::::mutate(core_idx, |core_state| { + Self::ensure_workload(now, core_idx, core_state); + + let work_state = core_state.current_work.as_mut()?; + + // Wrap around: + work_state.pos = work_state.pos % work_state.assignments.len() as u16; + let (a_type, a_state) = &mut work_state + .assignments + .get_mut(work_state.pos as usize) + .expect("We limited pos to the size of the vec one line above. qed"); + + // advance for next pop: + a_state.remaining = a_state.remaining.saturating_sub(work_state.step); + if a_state.remaining < work_state.step { + // Assignment exhausted, need to move to the next and credit remaining for + // next round. + work_state.pos += 1; + // Reset to ratio + still remaining "credits": + a_state.remaining = a_state.remaining.saturating_add(a_state.ratio); + } + + match a_type { + CoreAssignment::Idle => None, + CoreAssignment::Pool => + assigner_on_demand::Pallet::::pop_assignment_for_core(core_idx), + CoreAssignment::Task(para_id) => Some(Assignment::Bulk((*para_id).into())), + } + }) + } + + fn report_processed(assignment: Assignment) { + match assignment { + Assignment::Pool { para_id, core_index } => + assigner_on_demand::Pallet::::report_processed(para_id, core_index), + Assignment::Bulk(_) => {}, + } + } + + /// Push an assignment back to the front of the queue. + /// + /// The assignment has not been processed yet. Typically used on session boundaries. + /// Parameters: + /// - `assignment`: The on demand assignment. + fn push_back_assignment(assignment: Assignment) { + match assignment { + Assignment::Pool { para_id, core_index } => + assigner_on_demand::Pallet::::push_back_assignment(para_id, core_index), + Assignment::Bulk(_) => { + // Session changes are rough. We just drop assignments that did not make it on a + // session boundary. This seems sensible as bulk is region based. Meaning, even if + // we made the effort catching up on those dropped assignments, this would very + // likely lead to other assignments not getting served at the "end" (when our + // assignment set gets replaced). + }, + } + } + + fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { + let config = >::config(); + AssignmentProviderConfig { + max_availability_timeouts: config.on_demand_retries, + ttl: config.on_demand_ttl, + } + } + + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment { + // Given that we are not tracking anything in `Bulk` assignments, it is safe to always + // return a bulk assignment. + Assignment::Bulk(para_id) + } + + fn session_core_count() -> u32 { + let config = >::config(); + config.coretime_cores + } +} + +impl Pallet { + /// Ensure given workload for core is up to date. + fn ensure_workload( + now: BlockNumberFor, + core_idx: CoreIndex, + descriptor: &mut CoreDescriptor>, + ) { + // Workload expired? 
+ if descriptor + .current_work + .as_ref() + .and_then(|w| w.end_hint) + .map_or(false, |e| e <= now) + { + descriptor.current_work = None; + } + + let Some(queue) = descriptor.queue else { + // No queue. + return + }; + + let mut next_scheduled = queue.first; + + if next_scheduled > now { + // Not yet ready. + return + } + + // Update is needed: + let update = loop { + let Some(update) = CoreSchedules::::take((next_scheduled, core_idx)) else { + break None + }; + // Still good? + if update.end_hint.map_or(true, |e| e > now) { + break Some(update) + } + // Move on if possible: + if let Some(n) = update.next_schedule { + next_scheduled = n; + } else { + break None + } + }; + + let new_first = update.as_ref().and_then(|u| u.next_schedule); + descriptor.current_work = update.map(Into::into); + + descriptor.queue = new_first.map(|new_first| { + QueueDescriptor { + first: new_first, + // `last` stays unaffected, if not empty: + last: queue.last, + } + }); + } + + /// Append another assignment for a core. + /// + /// Important only appending is allowed. Meaning, all already existing assignments must have a + /// begin smaller than the one passed here. This restriction exists, because it makes the + /// insertion O(1) and the author could not think of a reason, why this restriction should be + /// causing any problems. Inserting arbitrarily causes a `DispatchError::DisallowedInsert` + /// error. This restriction could easily be lifted if need be and in fact an implementation is + /// available + /// [here](https://github.com/paritytech/polkadot-sdk/pull/1694/commits/c0c23b01fd2830910cde92c11960dad12cdff398#diff-0c85a46e448de79a5452395829986ee8747e17a857c27ab624304987d2dde8baR386). + /// The problem is that insertion complexity then depends on the size of the existing queue, + /// which makes determining weights hard and could lead to issues like overweight blocks (at + /// least in theory). + pub fn assign_core( + core_idx: CoreIndex, + begin: BlockNumberFor, + assignments: Vec<(CoreAssignment, PartsOf57600)>, + end_hint: Option>, + ) -> Result<(), DispatchError> { + // There should be at least one assignment. + ensure!(!assignments.is_empty(), Error::::AssignmentsEmpty); + + // Checking for sort and unique manually, since we don't have access to iterator tools. + // This way of checking uniqueness only works since we also check sortedness. 
+ assignments.iter().map(|x| &x.0).try_fold(None, |prev, cur| { + if prev.map_or(false, |p| p >= cur) { + Err(Error::::AssignmentsNotSorted) + } else { + Ok(Some(cur)) + } + })?; + + // Check that the total parts between all assignments are equal to 57600 + let parts_sum = assignments + .iter() + .map(|assignment| assignment.1) + .try_fold(PartsOf57600::ZERO, |sum, parts| { + sum.checked_add(parts).ok_or(Error::::OverScheduled) + })?; + ensure!(parts_sum.is_full(), Error::::UnderScheduled); + + CoreDescriptors::::mutate(core_idx, |core_descriptor| { + let new_queue = match core_descriptor.queue { + Some(queue) => { + ensure!(begin > queue.last, Error::::DisallowedInsert); + + CoreSchedules::::try_mutate((queue.last, core_idx), |schedule| { + if let Some(schedule) = schedule.as_mut() { + debug_assert!(schedule.next_schedule.is_none(), "queue.end was supposed to be the end, so the next item must be `None`!"); + schedule.next_schedule = Some(begin); + } else { + defensive!("Queue end entry does not exist?"); + } + CoreSchedules::::try_mutate((begin, core_idx), |schedule| { + // It should already be impossible to overwrite an existing schedule due + // to strictly increasing block number. But we check here for safety and + // in case the design changes. + ensure!(schedule.is_none(), Error::::DuplicateInsert); + *schedule = + Some(Schedule { assignments, end_hint, next_schedule: None }); + Ok::<(), DispatchError>(()) + })?; + Ok::<(), DispatchError>(()) + })?; + + QueueDescriptor { first: queue.first, last: begin } + }, + None => { + // Queue empty, just insert: + CoreSchedules::::insert( + (begin, core_idx), + Schedule { assignments, end_hint, next_schedule: None }, + ); + QueueDescriptor { first: begin, last: begin } + }, + }; + core_descriptor.queue = Some(new_queue); + Ok(()) + }) + } +} + +impl AssignCoretime for Pallet { + fn assign_coretime(id: ParaId) -> DispatchResult { + let current_block = frame_system::Pallet::::block_number(); + + // Add a new core and assign the para to it. + let mut config = >::config(); + let core = config.coretime_cores; + config.coretime_cores.saturating_inc(); + + // `assign_coretime` is only called at genesis or by root, so setting the active + // config here is fine. + configuration::Pallet::::force_set_active_config(config); + + let begin = current_block + One::one(); + let assignment = vec![(pallet_broker::CoreAssignment::Task(id.into()), PartsOf57600::FULL)]; + Pallet::::assign_core(CoreIndex(core), begin, assignment, None) + } +} diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..998e39670f97d6f6e48d079e4544e547d702509b --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -0,0 +1,817 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
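As a reading aid for the tests that follow: the `WorkState` bookkeeping in `pop_assignment_for_core` above subtracts one `step` (the smallest ratio) from the current assignment's `remaining` on every pop and, once `remaining` drops below `step`, credits the ratio back and advances to the next assignment. A minimal standalone sketch of that loop is given below; `Slot` and `pop` are illustrative stand-ins and not types from this diff. With a 2/3 : 1/3 split of 57600 the resulting pattern is A, A, B, A, A, B, ...

// Hypothetical sketch (not part of the diff) of the round-robin credit scheme
// described above.
#[derive(Clone, Copy)]
struct Slot {
    name: char,
    ratio: u16,     // share of 57600 assigned to this slot
    remaining: u16, // credit left in the current round
}

fn pop(slots: &mut [Slot], pos: &mut usize, step: u16) -> char {
    *pos %= slots.len();
    let slot = &mut slots[*pos];
    // Every pop consumes one `step` worth of the slot's credit.
    slot.remaining = slot.remaining.saturating_sub(step);
    let name = slot.name;
    if slot.remaining < step {
        // Exhausted for this round: credit the ratio back and move on.
        slot.remaining = slot.remaining.saturating_add(slot.ratio);
        *pos += 1;
    }
    name
}

fn main() {
    let step = 19_200; // the smallest ratio, as chosen in `WorkState::from(Schedule)`
    let mut slots = [
        Slot { name: 'A', ratio: 38_400, remaining: 38_400 }, // 2/3 of 57600
        Slot { name: 'B', ratio: 19_200, remaining: 19_200 }, // 1/3 of 57600
    ];
    let mut pos = 0;
    let order: String = (0..6).map(|_| pop(&mut slots, &mut pos, step)).collect();
    assert_eq!(order, "AABAAB");
}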
+ +use super::*; + +use crate::{ + assigner_coretime::{mock_helpers::GenesisConfigBuilder, pallet::Error, Schedule}, + initializer::SessionChangeNotification, + mock::{ + new_test_ext, Balances, CoretimeAssigner, OnDemandAssigner, Paras, ParasShared, + RuntimeOrigin, Scheduler, System, Test, + }, + paras::{ParaGenesisArgs, ParaKind}, + scheduler::common::Assignment, +}; +use frame_support::{assert_noop, assert_ok, pallet_prelude::*, traits::Currency}; +use pallet_broker::TaskId; +use primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; +use sp_std::collections::btree_map::BTreeMap; + +fn schedule_blank_para(id: ParaId, parakind: ParaKind) { + let validation_code: ValidationCode = vec![1, 2, 3].into(); + assert_ok!(Paras::schedule_para_initialize( + id, + ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: validation_code.clone(), + para_kind: parakind, + } + )); + + assert_ok!(Paras::add_trusted_validation_code(RuntimeOrigin::root(), validation_code)); +} + +fn run_to_block( + to: BlockNumber, + new_session: impl Fn(BlockNumber) -> Option>, +) { + while System::block_number() < to { + let b = System::block_number(); + + Scheduler::initializer_finalize(); + Paras::initializer_finalize(b); + + if let Some(notification) = new_session(b + 1) { + let mut notification_with_session_index = notification; + // We will make every session change trigger an action queue. Normally this may require + // 2 or more session changes. + if notification_with_session_index.session_index == SessionIndex::default() { + notification_with_session_index.session_index = ParasShared::scheduled_session(); + } + Paras::initializer_on_new_session(¬ification_with_session_index); + Scheduler::initializer_on_new_session(¬ification_with_session_index); + } + + System::on_finalize(b); + + System::on_initialize(b + 1); + System::set_block_number(b + 1); + + Paras::initializer_initialize(b + 1); + Scheduler::initializer_initialize(b + 1); + + // In the real runtime this is expected to be called by the `InclusionInherent` pallet. 
+ Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); + } +} + +fn default_test_assignments() -> Vec<(CoreAssignment, PartsOf57600)> { + vec![(CoreAssignment::Idle, PartsOf57600::FULL)] +} + +fn default_test_schedule() -> Schedule> { + Schedule { assignments: default_test_assignments(), end_hint: None, next_schedule: None } +} + +#[test] +// Should create new QueueDescriptor and add new schedule to CoreSchedules +fn assign_core_works_with_no_prior_schedule() { + let core_idx = CoreIndex(0); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + + // Call assign_core + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + default_test_assignments(), + None, + )); + + // Check CoreSchedules + assert_eq!( + CoreSchedules::::get((BlockNumberFor::::from(11u32), core_idx)), + Some(default_test_schedule()) + ); + + // Check QueueDescriptor + assert_eq!( + CoreDescriptors::::get(core_idx) + .queue + .as_ref() + .and_then(|q| Some(q.first)), + Some(BlockNumberFor::::from(11u32)) + ); + assert_eq!( + CoreDescriptors::::get(core_idx).queue.as_ref().and_then(|q| Some(q.last)), + Some(BlockNumberFor::::from(11u32)) + ); + }); +} + +#[test] +fn end_hint_is_properly_honored() { + let core_idx = CoreIndex(0); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + vec![(CoreAssignment::Task(1), PartsOf57600::FULL)], + Some(15u32), + )); + + assert!( + CoretimeAssigner::pop_assignment_for_core(core_idx).is_none(), + "No assignment yet in effect" + ); + + run_to_block(11, |_| None); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(1.into())), + "Assignment should now be present" + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(1.into())), + "Nothing changed, assignment should still be present" + ); + + run_to_block(15, |_| None); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + None, + "Assignment should now be gone" + ); + + // Insert assignment that is already dead: + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + vec![(CoreAssignment::Task(1), PartsOf57600::FULL)], + Some(15u32), + )); + + // Core should still be empty: + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + None, + "Assignment should now be gone" + ); + }); +} + +#[test] +// Should update last in QueueDescriptor and add new schedule to CoreSchedules +fn assign_core_works_with_prior_schedule() { + let core_idx = CoreIndex(0); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + let default_with_next_schedule = + Schedule { next_schedule: Some(15u32), ..default_test_schedule() }; + + // Call assign_core twice + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + default_test_assignments(), + None, + )); + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(15u32), + default_test_assignments(), + None, + )); + + // Check CoreSchedules for two entries + assert_eq!( + CoreSchedules::::get((BlockNumberFor::::from(11u32), core_idx)), + Some(default_with_next_schedule) + 
); + assert_eq!( + CoreSchedules::::get((BlockNumberFor::::from(15u32), core_idx)), + Some(default_test_schedule()) + ); + + // Check QueueDescriptor + assert_eq!( + CoreDescriptors::::get(core_idx) + .queue + .as_ref() + .and_then(|q| Some(q.first)), + Some(BlockNumberFor::::from(11u32)) + ); + assert_eq!( + CoreDescriptors::::get(core_idx).queue.as_ref().and_then(|q| Some(q.last)), + Some(BlockNumberFor::::from(15u32)) + ); + }); +} + +#[test] +// Invariants: We assume that CoreSchedules is append only and consumed. In other words new +// schedules inserted for a core must have a higher block number than all of the already existing +// schedules. +fn assign_core_enforces_higher_block_number() { + let core_idx = CoreIndex(0); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + + // Call assign core twice to establish some schedules + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(12u32), + default_test_assignments(), + None, + )); + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(15u32), + default_test_assignments(), + None, + )); + + // Call assign core with block number before QueueDescriptor first, expecting an error + assert_noop!( + CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + default_test_assignments(), + None, + ), + Error::::DisallowedInsert + ); + + // Call assign core with block number between already scheduled assignments, expecting an + // error + assert_noop!( + CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(13u32), + default_test_assignments(), + None, + ), + Error::::DisallowedInsert + ); + }); +} + +#[test] +fn assign_core_enforces_well_formed_schedule() { + let para_id = ParaId::from(1u32); + let core_idx = CoreIndex(0); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + + let empty_assignments: Vec<(CoreAssignment, PartsOf57600)> = vec![]; + let overscheduled = vec![ + (CoreAssignment::Pool, PartsOf57600::FULL), + (CoreAssignment::Task(para_id.into()), PartsOf57600::FULL), + ]; + let underscheduled = vec![(CoreAssignment::Pool, PartsOf57600(30000))]; + let not_unique = vec![ + (CoreAssignment::Pool, PartsOf57600::FULL / 2), + (CoreAssignment::Pool, PartsOf57600::FULL / 2), + ]; + let not_sorted = vec![ + (CoreAssignment::Task(para_id.into()), PartsOf57600(19200)), + (CoreAssignment::Pool, PartsOf57600(19200)), + (CoreAssignment::Idle, PartsOf57600(19200)), + ]; + + // Attempting assign_core with malformed assignments such that all error cases + // are tested + assert_noop!( + CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + empty_assignments, + None, + ), + Error::::AssignmentsEmpty + ); + assert_noop!( + CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + overscheduled, + None, + ), + Error::::OverScheduled + ); + assert_noop!( + CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + underscheduled, + None, + ), + Error::::UnderScheduled + ); + assert_noop!( + CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + not_unique, + None, + ), + Error::::AssignmentsNotSorted + ); + assert_noop!( + CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + not_sorted, + None, + ), + Error::::AssignmentsNotSorted + ); + }); +} + +#[test] 
+fn next_schedule_always_points_to_next_work_plan_item() { + let core_idx = CoreIndex(0); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + let start_1 = 15u32; + let start_2 = 20u32; + let start_3 = 25u32; + let start_4 = 30u32; + let start_5 = 35u32; + + let expected_schedule_3 = + Schedule { next_schedule: Some(start_4), ..default_test_schedule() }; + let expected_schedule_4 = + Schedule { next_schedule: Some(start_5), ..default_test_schedule() }; + let expected_schedule_5 = default_test_schedule(); + + // Call assign_core for each of five schedules + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(start_1), + default_test_assignments(), + None, + )); + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(start_2), + default_test_assignments(), + None, + )); + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(start_3), + default_test_assignments(), + None, + )); + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(start_4), + default_test_assignments(), + None, + )); + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(start_5), + default_test_assignments(), + None, + )); + + // Rotate through the first two schedules + run_to_block(start_1, |n| if n == start_1 { Some(Default::default()) } else { None }); + CoretimeAssigner::pop_assignment_for_core(core_idx); + run_to_block(start_2, |n| if n == start_2 { Some(Default::default()) } else { None }); + CoretimeAssigner::pop_assignment_for_core(core_idx); + + // Use saved starting block numbers to check that schedules chain + // together correctly + assert_eq!( + CoreSchedules::::get((BlockNumberFor::::from(start_3), core_idx)), + Some(expected_schedule_3) + ); + assert_eq!( + CoreSchedules::::get((BlockNumberFor::::from(start_4), core_idx)), + Some(expected_schedule_4) + ); + assert_eq!( + CoreSchedules::::get((BlockNumberFor::::from(start_5), core_idx)), + Some(expected_schedule_5) + ); + + // Check QueueDescriptor + assert_eq!( + CoreDescriptors::::get(core_idx) + .queue + .as_ref() + .and_then(|q| Some(q.first)), + Some(start_3) + ); + assert_eq!( + CoreDescriptors::::get(core_idx).queue.as_ref().and_then(|q| Some(q.last)), + Some(start_5) + ); + }); +} + +#[test] +fn ensure_workload_works() { + let core_idx = CoreIndex(0); + let test_assignment_state = + AssignmentState { ratio: PartsOf57600::FULL, remaining: PartsOf57600::FULL }; + + let empty_descriptor: CoreDescriptor> = + CoreDescriptor { queue: None, current_work: None }; + let assignments_queued_descriptor = CoreDescriptor { + queue: Some(QueueDescriptor { + first: BlockNumberFor::::from(11u32), + last: BlockNumberFor::::from(11u32), + }), + current_work: None, + }; + let assignments_active_descriptor = CoreDescriptor { + queue: None, + current_work: Some(WorkState { + assignments: vec![(CoreAssignment::Pool, test_assignment_state)], + end_hint: Some(BlockNumberFor::::from(15u32)), + pos: 0, + step: PartsOf57600::FULL, + }), + }; + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let mut core_descriptor: CoreDescriptor> = empty_descriptor.clone(); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + + // Case 1: No new schedule in CoreSchedules for core + CoretimeAssigner::ensure_workload(10u32, core_idx, &mut core_descriptor); + assert_eq!(core_descriptor, empty_descriptor); + + 
// Case 2: New schedule exists in CoreSchedules for core, but new + // schedule start is not yet reached. + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + vec![(CoreAssignment::Pool, PartsOf57600::FULL)], + Some(BlockNumberFor::::from(15u32)), + )); + + // Propagate changes from storage to Core_Descriptor handle. Normally + // pop_assignment_for_core would handle this. + core_descriptor = CoreDescriptors::::get(core_idx); + + CoretimeAssigner::ensure_workload(10u32, core_idx, &mut core_descriptor); + assert_eq!(core_descriptor, assignments_queued_descriptor); + + // Case 3: Next schedule exists in CoreSchedules for core. Next starting + // block has been reached. Swaps new WorkState into CoreDescriptors from + // CoreSchedules. + CoretimeAssigner::ensure_workload(11u32, core_idx, &mut core_descriptor); + assert_eq!(core_descriptor, assignments_active_descriptor); + + // Case 4: end_hint reached but new schedule start not yet reached. WorkState in + // CoreDescriptor is cleared + CoretimeAssigner::ensure_workload(15u32, core_idx, &mut core_descriptor); + assert_eq!(core_descriptor, empty_descriptor); + }); +} + +#[test] +fn pop_assignment_for_core_works() { + let para_id = ParaId::from(1); + let core_idx = CoreIndex(0); + let alice = 1u64; + let amt = 10_000_000u128; + + let assignments_pool = vec![(CoreAssignment::Pool, PartsOf57600::FULL)]; + let assignments_task = vec![(CoreAssignment::Task(para_id.into()), PartsOf57600::FULL)]; + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Initialize the parathread, wait for it to be ready, then add an + // on demand order to later pop with our Coretime assigner. + schedule_blank_para(para_id, ParaKind::Parathread); + Balances::make_free_balance_be(&alice, amt); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + assert_ok!(OnDemandAssigner::place_order_allow_death( + RuntimeOrigin::signed(alice), + amt, + para_id + )); + + // Case 1: Assignment idle + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + default_test_assignments(), // Default is Idle + None, + )); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + assert_eq!(CoretimeAssigner::pop_assignment_for_core(core_idx), None); + + // Case 2: Assignment pool + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(21u32), + assignments_pool, + None, + )); + + run_to_block(21, |n| if n == 21 { Some(Default::default()) } else { None }); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Pool { para_id, core_index: 0.into() }) + ); + + // Case 3: Assignment task + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(31u32), + assignments_task, + None, + )); + + run_to_block(31, |n| if n == 31 { Some(Default::default()) } else { None }); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(para_id)) + ); + }); +} + +#[test] +fn assignment_proportions_in_core_state_work() { + let core_idx = CoreIndex(0); + let task_1 = TaskId::from(1u32); + let task_2 = TaskId::from(2u32); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + + // Task 1 gets 2/3 core usage, while task 2 gets 1/3 + let test_assignments = vec![ + (CoreAssignment::Task(task_1), PartsOf57600::FULL / 3 * 2), + (CoreAssignment::Task(task_2), 
PartsOf57600::FULL / 3), + ]; + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + test_assignments, + None, + )); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + // Case 1: Current assignment remaining >= step after pop + { + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_1.into())) + ); + + assert_eq!( + CoreDescriptors::::get(core_idx) + .current_work + .as_ref() + .and_then(|w| Some(w.pos)), + Some(0u16) + ); + // Consumed step should be 1/3 of core parts, leaving 1/3 remaining + assert_eq!( + CoreDescriptors::::get(core_idx) + .current_work + .as_ref() + .and_then(|w| Some(w.assignments[0].1.remaining)), + Some(PartsOf57600::FULL / 3) + ); + } + + // Case 2: Current assignment remaning < step after pop + { + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_1.into())) + ); + // Pos should have incremented, as assignment had remaining < step + assert_eq!( + CoreDescriptors::::get(core_idx) + .current_work + .as_ref() + .and_then(|w| Some(w.pos)), + Some(1u16) + ); + // Remaining should have started at 1/3 of core work parts. We then subtract + // step (1/3) and add back ratio (2/3), leaving us with 2/3 of core work parts. + assert_eq!( + CoreDescriptors::::get(core_idx) + .current_work + .as_ref() + .and_then(|w| Some(w.assignments[0].1.remaining)), + Some(PartsOf57600::FULL / 3 * 2) + ); + } + + // Final check, task 2's turn to be served + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_2.into())) + ); + }); +} + +#[test] +fn equal_assignments_served_equally() { + let core_idx = CoreIndex(0); + let task_1 = TaskId::from(1u32); + let task_2 = TaskId::from(2u32); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + + // Tasks 1 and 2 get equal work parts + let test_assignments = vec![ + (CoreAssignment::Task(task_1), PartsOf57600::FULL / 2), + (CoreAssignment::Task(task_2), PartsOf57600::FULL / 2), + ]; + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + test_assignments, + None, + )); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + // Test that popped assignments alternate between tasks 1 and 2 + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_1.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_2.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_1.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_2.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_1.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_2.into())) + ); + }); +} + +#[test] +// Checks that core is shared fairly, even in case of `ratio` not being +// divisible by `step` (over multiple rounds). 
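// Worked example (illustrative, not part of this diff): with ratios of 3/5 and
// 2/5 of 57600 (34560 and 23040 parts) the step is 23040, the smaller ratio,
// as noted in the comment above. Each pop charges the currently served
// assignment one step; once its remaining parts drop below the step, it is
// topped up by its ratio and the position advances. Starting from (34560, 23040):
//   pop 1: task 1, remaining (11520, 23040) -> top up task 1 to 46080, advance
//   pop 2: task 2, remaining (46080,     0) -> top up task 2 to 23040, advance
//   pop 3: task 1, remaining (23040, 23040)
//   pop 4: task 1, remaining (    0, 23040) -> top up task 1 to 34560, advance
//   pop 5: task 2, remaining (34560,     0) -> top up task 2 to 23040, advance
// This is the serving order 1, 2, 1, 1, 2 asserted below, with both `remaining`
// values back at their ratios after five pops.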
+fn assignment_proportions_indivisible_by_step_work() { + let core_idx = CoreIndex(0); + let task_1 = TaskId::from(1u32); + let ratio_1 = PartsOf57600::FULL / 5 * 3; + let ratio_2 = PartsOf57600::FULL / 5 * 2; + let task_2 = TaskId::from(2u32); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + + // Task 1 gets 3/5 core usage, while task 2 gets 2/5. That way + // step is set to 2/5 and task 1 is indivisible by step. + let test_assignments = + vec![(CoreAssignment::Task(task_1), ratio_1), (CoreAssignment::Task(task_2), ratio_2)]; + + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + test_assignments, + None, + )); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + // Pop 5 assignments. Should Result in the the following work ordering: + // 1, 2, 1, 1, 2. The remaining parts for each assignment should be same + // at the end as in the beginning. + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_1.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_2.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_1.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_1.into())) + ); + + assert_eq!( + CoretimeAssigner::pop_assignment_for_core(core_idx), + Some(Assignment::Bulk(task_2.into())) + ); + + // Remaining should equal ratio for both assignments. + assert_eq!( + CoreDescriptors::::get(core_idx) + .current_work + .as_ref() + .and_then(|w| Some(w.assignments[0].1.remaining)), + Some(ratio_1) + ); + assert_eq!( + CoreDescriptors::::get(core_idx) + .current_work + .as_ref() + .and_then(|w| Some(w.assignments[1].1.remaining)), + Some(ratio_2) + ); + }); +} + +#[cfg(test)] +impl std::ops::Div for PartsOf57600 { + type Output = Self; + + fn div(self, rhs: u16) -> Self::Output { + if rhs == 0 { + panic!("Cannot divide by zero!"); + } + + Self(self.0 / rhs) + } +} + +#[cfg(test)] +impl std::ops::Mul for PartsOf57600 { + type Output = Self; + + fn mul(self, rhs: u16) -> Self { + Self(self.0 * rhs) + } +} + +#[test] +fn parts_of_57600_ops() { + assert!(PartsOf57600::new_saturating(57601).is_full()); + assert!(PartsOf57600::FULL.saturating_add(PartsOf57600(1)).is_full()); + assert_eq!(PartsOf57600::ZERO.saturating_sub(PartsOf57600(1)), PartsOf57600::ZERO); + assert_eq!(PartsOf57600::FULL.checked_add(PartsOf57600(0)), Some(PartsOf57600::FULL)); + assert_eq!(PartsOf57600::FULL.checked_add(PartsOf57600(1)), None); +} diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index 42ca94d5185fc99c10930d9a242f53dfe25a4cdb..5a6060cd2b4eab88867088dee30b6fb1047bdf20 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -43,7 +43,7 @@ where { ParasShared::::set_session_index(SESSION_INDEX); let mut config = HostConfiguration::default(); - config.on_demand_cores = 1; + config.coretime_cores = 1; ConfigurationPallet::::force_set_active_config(config); let mut parachains = ParachainsCache::new(); ParasPallet::::initialize_para_now( @@ -70,11 +70,10 @@ mod benchmarks { let para_id = ParaId::from(111u32); 
init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let assignment = Assignment::new(para_id); + let order = EnqueuedOrder::new(para_id); for _ in 0..s { - Pallet::::add_on_demand_assignment(assignment.clone(), QueuePushDirection::Back) - .unwrap(); + Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); } #[extrinsic_call] @@ -88,11 +87,10 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let assignment = Assignment::new(para_id); + let order = EnqueuedOrder::new(para_id); for _ in 0..s { - Pallet::::add_on_demand_assignment(assignment.clone(), QueuePushDirection::Back) - .unwrap(); + Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); } #[extrinsic_call] diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs index acfb24cbf1943e9cc40e67338b448814c3ab7adf..de30330ac84e0a7715799d71d26fb42ce48efff8 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs @@ -27,7 +27,7 @@ use crate::{ use primitives::{Balance, HeadData, ValidationCode}; -pub fn default_genesis_config() -> MockGenesisConfig { +fn default_genesis_config() -> MockGenesisConfig { MockGenesisConfig { configuration: crate::configuration::GenesisConfig { config: crate::configuration::HostConfiguration { ..Default::default() }, @@ -63,7 +63,7 @@ impl GenesisConfigBuilder { pub(super) fn build(self) -> MockGenesisConfig { let mut genesis = default_genesis_config(); let config = &mut genesis.configuration.config; - config.on_demand_cores = self.on_demand_cores; + config.coretime_cores = self.on_demand_cores; config.on_demand_base_fee = self.on_demand_base_fee; config.on_demand_fee_variability = self.on_demand_fee_variability; config.on_demand_queue_max_size = self.on_demand_max_queue_size; diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 75c29bd6fbe4f79532823468a4c0a596942491a6..1b746e88694c9f105db119d351a76e336fd3fdba 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -32,10 +32,7 @@ mod mock_helpers; #[cfg(test)] mod tests; -use crate::{ - configuration, paras, - scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, -}; +use crate::{configuration, paras, scheduler::common::Assignment}; use frame_support::{ pallet_prelude::*, @@ -79,7 +76,7 @@ impl WeightInfo for TestWeightInfo { /// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a /// specific `ParaId`. #[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(test, derive(PartialEq, RuntimeDebug))] pub struct CoreAffinityCount { core_idx: CoreIndex, count: u32, @@ -107,6 +104,18 @@ pub enum SpotTrafficCalculationErr { Division, } +/// Internal representation of an order after it has been enqueued already. 
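// Note (illustrative, not part of this diff): unlike the old `Assignment`
// entries, an `EnqueuedOrder` records only the `para_id`. The core is chosen
// when the order is popped, at which point the on demand pallet surfaces it as
// `Assignment::Pool { para_id, core_index }` (see `pop_assignment_for_core`
// further down in this file's diff).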
+#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone)] +pub(super) struct EnqueuedOrder { + pub para_id: ParaId, +} + +impl EnqueuedOrder { + pub fn new(para_id: ParaId) -> Self { + Self { para_id } + } +} + #[frame_support::pallet] pub mod pallet { @@ -140,7 +149,7 @@ pub mod pallet { /// Creates an empty on demand queue if one isn't present in storage already. #[pallet::type_value] - pub fn OnDemandQueueOnEmpty() -> VecDeque { + pub(super) fn OnDemandQueueOnEmpty() -> VecDeque { VecDeque::new() } @@ -153,8 +162,8 @@ pub mod pallet { /// The order storage entry. Uses a VecDeque to be able to push to the front of the /// queue from the scheduler on session boundaries. #[pallet::storage] - pub type OnDemandQueue = - StorageValue<_, VecDeque, ValueQuery, OnDemandQueueOnEmpty>; + pub(super) type OnDemandQueue = + StorageValue<_, VecDeque, ValueQuery, OnDemandQueueOnEmpty>; /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in /// it's lookahead. Keeping track of this affinity prevents parallel execution of the same @@ -182,9 +191,6 @@ pub mod pallet { /// The current spot price is higher than the max amount specified in the `place_order` /// call, making it invalid. SpotPriceHigherThanMaxAmount, - /// There are no on demand cores available. `place_order` will not add anything to the - /// queue. - NoOnDemandCores, } #[pallet::hooks] @@ -248,7 +254,6 @@ pub mod pallet { /// - `InvalidParaId` /// - `QueueFull` /// - `SpotPriceHigherThanMaxAmount` - /// - `NoOnDemandCores` /// /// Events: /// - `SpotOrderPlaced` @@ -276,7 +281,6 @@ pub mod pallet { /// - `InvalidParaId` /// - `QueueFull` /// - `SpotPriceHigherThanMaxAmount` - /// - `NoOnDemandCores` /// /// Events: /// - `SpotOrderPlaced` @@ -311,7 +315,6 @@ where /// - `InvalidParaId` /// - `QueueFull` /// - `SpotPriceHigherThanMaxAmount` - /// - `NoOnDemandCores` /// /// Events: /// - `SpotOrderPlaced` @@ -323,9 +326,6 @@ where ) -> DispatchResult { let config = >::config(); - // Are there any schedulable cores in this session - ensure!(config.on_demand_cores > 0, Error::::NoOnDemandCores); - // Traffic always falls back to 1.0 let traffic = SpotTraffic::::get(); @@ -337,19 +337,22 @@ where ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); // Charge the sending account the spot price - T::Currency::withdraw(&sender, spot_price, WithdrawReasons::FEE, existence_requirement)?; + let _ = T::Currency::withdraw( + &sender, + spot_price, + WithdrawReasons::FEE, + existence_requirement, + )?; - let assignment = Assignment::new(para_id); + let order = EnqueuedOrder::new(para_id); - let res = Pallet::::add_on_demand_assignment(assignment, QueuePushDirection::Back); + let res = Pallet::::add_on_demand_order(order, QueuePushDirection::Back); - match res { - Ok(_) => { - Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, spot_price }); - return Ok(()) - }, - Err(err) => return Err(err), + if res.is_ok() { + Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, spot_price }); } + + res } /// The spot price multiplier. This is based on the transaction fee calculations defined in: @@ -423,10 +426,10 @@ where } } - /// Adds an assignment to the on demand queue. + /// Adds an order to the on demand queue. /// /// Paramenters: - /// - `assignment`: The on demand assignment to add to the queue. + /// - `order`: The `EnqueuedOrder` to add to the queue. /// - `location`: Whether to push this entry to the back or the front of the queue. 
Pushing an /// entry to the front of the queue is only used when the scheduler wants to push back an /// entry it has already popped. @@ -436,12 +439,12 @@ where /// Errors: /// - `InvalidParaId` /// - `QueueFull` - pub fn add_on_demand_assignment( - assignment: Assignment, + fn add_on_demand_order( + order: EnqueuedOrder, location: QueuePushDirection, ) -> Result<(), DispatchError> { // Only parathreads are valid paraids for on the go parachains. - ensure!(>::is_parathread(assignment.para_id), Error::::InvalidParaId); + ensure!(>::is_parathread(order.para_id), Error::::InvalidParaId); let config = >::config(); @@ -449,8 +452,8 @@ where // Abort transaction if queue is too large ensure!(Self::queue_size() < config.on_demand_queue_max_size, Error::::QueueFull); match location { - QueuePushDirection::Back => queue.push_back(assignment), - QueuePushDirection::Front => queue.push_front(assignment), + QueuePushDirection::Back => queue.push_back(order), + QueuePushDirection::Front => queue.push_front(order), }; Ok(()) }) @@ -475,7 +478,8 @@ where } /// Getter for the order queue. - pub fn get_queue() -> VecDeque { + #[cfg(test)] + fn get_queue() -> VecDeque { OnDemandQueue::::get() } @@ -523,12 +527,7 @@ where } } -impl AssignmentProvider> for Pallet { - fn session_core_count() -> u32 { - let config = >::config(); - config.on_demand_cores - } - +impl Pallet { /// Take the next queued entry that is available for a given core index. /// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread` /// but only in [0..P] range slice of the order queue, where P is the element that is @@ -536,20 +535,8 @@ impl AssignmentProvider> for Pallet { /// /// Parameters: /// - `core_idx`: The core index - /// - `previous_paraid`: Which paraid was previously processed on the requested core. Is None if - /// nothing was processed on the core. - fn pop_assignment_for_core( - core_idx: CoreIndex, - previous_para: Option, - ) -> Option { - // Only decrease the affinity of the previous para if it exists. - // A nonexistant `ParaId` indicates that the scheduler has not processed any - // `ParaId` this session. - if let Some(previous_para_id) = previous_para { - Pallet::::decrease_affinity(previous_para_id, core_idx) - } - - let mut queue: VecDeque = OnDemandQueue::::get(); + pub fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { + let mut queue: VecDeque = OnDemandQueue::::get(); let mut invalidated_para_id_indexes: Vec = vec![]; @@ -586,28 +573,28 @@ impl AssignmentProvider> for Pallet { // Write changes to storage. OnDemandQueue::::set(queue); - popped + popped.map(|p| Assignment::Pool { para_id: p.para_id, core_index: core_idx }) } - /// Push an assignment back to the queue. - /// Typically used on session boundaries. + /// Report that the `para_id` & `core_index` combination was processed. + pub fn report_processed(para_id: ParaId, core_index: CoreIndex) { + Pallet::::decrease_affinity(para_id, core_index) + } + + /// Push an assignment back to the front of the queue. + /// + /// The assignment has not been processed yet. Typically used on session boundaries. /// Parameters: - /// - `core_idx`: The core index /// - `assignment`: The on demand assignment. 
- fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment) { - Pallet::::decrease_affinity(assignment.para_id, core_idx); + pub fn push_back_assignment(para_id: ParaId, core_index: CoreIndex) { + Pallet::::decrease_affinity(para_id, core_index); // Skip the queue on push backs from scheduler - match Pallet::::add_on_demand_assignment(assignment, QueuePushDirection::Front) { + match Pallet::::add_on_demand_order( + EnqueuedOrder::new(para_id), + QueuePushDirection::Front, + ) { Ok(_) => {}, Err(_) => {}, } } - - fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { - let config = >::config(); - AssignmentProviderConfig { - max_availability_timeouts: config.on_demand_retries, - ttl: config.on_demand_ttl, - } - } } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index d07964b691654b9de57b3f108b6439e2c720514f..8404700780c84e493d6436c5f3174f814c1082ef 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -24,7 +24,6 @@ use crate::{ System, Test, }, paras::{ParaGenesisArgs, ParaKind}, - scheduler::common::Assignment, }; use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use pallet_balances::Error as BalancesError; @@ -75,7 +74,7 @@ fn run_to_block( Scheduler::initializer_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::update_claimqueue(BTreeMap::new(), b + 1); + Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); } } @@ -280,9 +279,9 @@ fn place_order_keep_alive_keeps_alive() { } #[test] -fn add_on_demand_assignment_works() { +fn add_on_demand_order_works() { let para_a = ParaId::from(111); - let assignment = Assignment::new(para_a); + let order = EnqueuedOrder::new(para_a); let mut genesis = GenesisConfigBuilder::default(); genesis.on_demand_max_queue_size = 1; @@ -292,10 +291,7 @@ fn add_on_demand_assignment_works() { // `para_a` is not onboarded as a parathread yet. assert_noop!( - OnDemandAssigner::add_on_demand_assignment( - assignment.clone(), - QueuePushDirection::Back - ), + OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back), Error::::InvalidParaId ); @@ -304,14 +300,11 @@ fn add_on_demand_assignment_works() { assert!(Paras::is_parathread(para_a)); // `para_a` is now onboarded as a valid parathread. - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment.clone(), - QueuePushDirection::Back - )); + assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); // Max queue size is 1, queue should be full. 
assert_noop!( - OnDemandAssigner::add_on_demand_assignment(assignment, QueuePushDirection::Back), + OnDemandAssigner::add_on_demand_order(order, QueuePushDirection::Back), Error::::QueueFull ); }); @@ -330,29 +323,131 @@ fn spotqueue_push_directions() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let assignment_a = Assignment { para_id: para_a }; - let assignment_b = Assignment { para_id: para_b }; - let assignment_c = Assignment { para_id: para_c }; + let order_a = EnqueuedOrder::new(para_a); + let order_b = EnqueuedOrder::new(para_b); + let order_c = EnqueuedOrder::new(para_c); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_a.clone(), + assert_ok!(OnDemandAssigner::add_on_demand_order( + order_a.clone(), QueuePushDirection::Front )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_b.clone(), + assert_ok!(OnDemandAssigner::add_on_demand_order( + order_b.clone(), QueuePushDirection::Front )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_c.clone(), + assert_ok!(OnDemandAssigner::add_on_demand_order( + order_c.clone(), QueuePushDirection::Back )); assert_eq!(OnDemandAssigner::queue_size(), 3); + assert_eq!(OnDemandAssigner::get_queue(), VecDeque::from(vec![order_b, order_a, order_c])) + }); +} + +#[test] +fn pop_assignment_for_core_works() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let para_b = ParaId::from(110); + schedule_blank_para(para_a, ParaKind::Parathread); + schedule_blank_para(para_b, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + let order_a = EnqueuedOrder::new(para_a); + let order_b = EnqueuedOrder::new(para_b); + let assignment_a = Assignment::Pool { para_id: para_a, core_index: CoreIndex(0) }; + let assignment_b = Assignment::Pool { para_id: para_b, core_index: CoreIndex(1) }; + + // Pop should return none with empty queue + assert_eq!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), None); + + // Add enough assignments to the order queue. 
+ for _ in 0..2 { + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + } + + // Queue should contain orders a, b, a, b + { + let queue: Vec = OnDemandQueue::::get().into_iter().collect(); + assert_eq!( + queue, + vec![order_a.clone(), order_b.clone(), order_a.clone(), order_b.clone()] + ); + } + + // Popped assignments should be for the correct paras and cores assert_eq!( - OnDemandAssigner::get_queue(), - VecDeque::from(vec![assignment_b, assignment_a, assignment_c]) - ) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), + Some(assignment_a.clone()) + ); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)), + Some(assignment_b.clone()) + ); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), + Some(assignment_a.clone()) + ); + + // Queue should contain one left over order + { + let queue: Vec = OnDemandQueue::::get().into_iter().collect(); + assert_eq!(queue, vec![order_b.clone(),]); + } + }); +} + +#[test] +fn push_back_assignment_works() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let para_b = ParaId::from(110); + schedule_blank_para(para_a, ParaKind::Parathread); + schedule_blank_para(para_b, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + let order_a = EnqueuedOrder::new(para_a); + let order_b = EnqueuedOrder::new(para_b); + + // Add enough assignments to the order queue. + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + // Pop order a + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); + + // Para a should have affinity for core 0 + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); + + // Queue should still contain order b + { + let queue: Vec = OnDemandQueue::::get().into_iter().collect(); + assert_eq!(queue, vec![order_b.clone()]); + } + + // Push back order a + OnDemandAssigner::push_back_assignment(para_a, CoreIndex(0)); + + // Para a should have no affinity + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).is_none(), true); + + // Queue should contain orders a, b. A in front of b. + { + let queue: Vec = OnDemandQueue::::get().into_iter().collect(); + assert_eq!(queue, vec![order_a.clone(), order_b.clone()]); + } }); } @@ -360,39 +455,38 @@ fn spotqueue_push_directions() { fn affinity_changes_work() { new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { let para_a = ParaId::from(111); + let core_index = CoreIndex(0); schedule_blank_para(para_a, ParaKind::Parathread); + let order_a = EnqueuedOrder::new(para_a); run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let assignment_a = Assignment { para_id: para_a }; // There should be no affinity before starting. assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); // Add enough assignments to the order queue. 
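// Note (illustrative, not part of this diff): the affinity bookkeeping the
// assertions below rely on is that every successful pop for a core bumps the
// para's affinity count on that core by one, while `report_processed`
// decrements it. Alternating `report_processed` with a pop therefore keeps the
// count stable, and popping several times without reporting lets it grow.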
for _ in 0..10 { - OnDemandAssigner::add_on_demand_assignment( - assignment_a.clone(), - QueuePushDirection::Front, - ) - .expect("Invalid paraid or queue full"); + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Front) + .expect("Invalid paraid or queue full"); } // There should be no affinity before the scheduler pops. assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + OnDemandAssigner::pop_assignment_for_core(core_index); // Affinity count is 1 after popping. assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::pop_assignment_for_core(core_index); // Affinity count is 1 after popping with a previous para. assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); assert_eq!(OnDemandAssigner::queue_size(), 8); for _ in 0..3 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + OnDemandAssigner::pop_assignment_for_core(core_index); } // Affinity count is 4 after popping 3 times without a previous para. @@ -400,7 +494,8 @@ fn affinity_changes_work() { assert_eq!(OnDemandAssigner::queue_size(), 5); for _ in 0..5 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::pop_assignment_for_core(core_index); } // Affinity count should still be 4 but queue should be empty. @@ -409,12 +504,14 @@ fn affinity_changes_work() { // Pop 4 times and get to exactly 0 (None) affinity. for _ in 0..4 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::pop_assignment_for_core(core_index); } assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); // Decreasing affinity beyond 0 should still be None. - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::pop_assignment_for_core(core_index); assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); }); } @@ -430,28 +527,28 @@ fn affinity_prohibits_parallel_scheduling() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let assignment_a = Assignment { para_id: para_a }; - let assignment_b = Assignment { para_id: para_b }; + let order_a = EnqueuedOrder::new(para_a); + let order_b = EnqueuedOrder::new(para_b); // There should be no affinity before starting. assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); // Add 2 assignments for para_a for every para_b. 
- OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) .expect("Invalid paraid or queue full"); - OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) .expect("Invalid paraid or queue full"); - OnDemandAssigner::add_on_demand_assignment(assignment_b.clone(), QueuePushDirection::Back) + OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) .expect("Invalid paraid or queue full"); assert_eq!(OnDemandAssigner::queue_size(), 3); // Approximate having 1 core. for _ in 0..3 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); } // Affinity on one core is meaningless. @@ -463,24 +560,25 @@ fn affinity_prohibits_parallel_scheduling() { ); // Clear affinity - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_b)); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::report_processed(para_b, 0.into()); // Add 2 assignments for para_a for every para_b. - OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) .expect("Invalid paraid or queue full"); - OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) .expect("Invalid paraid or queue full"); - OnDemandAssigner::add_on_demand_assignment(assignment_b.clone(), QueuePushDirection::Back) + OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) .expect("Invalid paraid or queue full"); - // Approximate having 2 cores. + // Approximate having 3 cores. CoreIndex 2 should be unable to obtain an assignment for _ in 0..3 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); - OnDemandAssigner::pop_assignment_for_core(CoreIndex(1), None); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)); + assert_eq!(None, OnDemandAssigner::pop_assignment_for_core(CoreIndex(2))); } // Affinity should be the same as before, but on different cores. 
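// Illustrative sketch (not part of this diff): a simplified model of the
// affinity filter that `pop_assignment_for_core` applies in the loop above. A
// para that is currently affine to some core may only be served on that core,
// so a pop for any other core skips its orders, which is why `CoreIndex(2)`
// never obtains an assignment here. Names and types are hypothetical stand-ins
// for the pallet's order queue and affinity map.
fn pop_for_core(queue: &mut Vec<u32>, affinity: &[(u32, u32)], core: u32) -> Option<u32> {
    // `affinity` lists (para id, core it is affine to); paras without an entry
    // may be served on any core.
    let allowed = |para: &u32| {
        affinity.iter().find(|(p, _)| p == para).map_or(true, |(_, c)| *c == core)
    };
    let idx = queue.iter().position(allowed)?;
    Some(queue.remove(idx))
}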
@@ -488,38 +586,23 @@ fn affinity_prohibits_parallel_scheduling() { assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx, CoreIndex(1)); - }); -} - -#[test] -fn cannot_place_order_when_no_on_demand_cores() { - let mut genesis = GenesisConfigBuilder::default(); - genesis.on_demand_cores = 0; - let para_id = ParaId::from(10); - let alice = 1u64; - let amt = 10_000_000u128; - - new_test_ext(genesis.build()).execute_with(|| { - schedule_blank_para(para_id, ParaKind::Parathread); - Balances::make_free_balance_be(&alice, amt); - assert!(!Paras::is_parathread(para_id)); - - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - - assert!(Paras::is_parathread(para_id)); + // Clear affinity + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::report_processed(para_b, 1.into()); - assert_noop!( - OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id), - Error::::NoOnDemandCores - ); + // There should be no affinity after clearing. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); }); } #[test] fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { let para_id = ParaId::from(10); - let assignment = Assignment { para_id }; + let core_index = CoreIndex(0); + let order = EnqueuedOrder::new(para_id); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { // Register the para_id as a parathread @@ -530,17 +613,14 @@ fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { assert!(Paras::is_parathread(para_id)); // Add two assignments for a para_id with a valid lifecycle. - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment.clone(), - QueuePushDirection::Back - )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment.clone(), - QueuePushDirection::Back - )); + assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); + assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); // First pop is fine - assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None) == Some(assignment)); + assert!( + OnDemandAssigner::pop_assignment_for_core(core_index) == + Some(Assignment::Pool { para_id, core_index }) + ); // Deregister para assert_ok!(Paras::schedule_para_cleanup(para_id)); @@ -551,6 +631,7 @@ fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { assert!(!Paras::is_parathread(para_id)); // Second pop should be None. - assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_id)) == None); + OnDemandAssigner::report_processed(para_id, core_index); + assert_eq!(OnDemandAssigner::pop_assignment_for_core(core_index), None); }); } diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs index 866e8290052a8ebdaf7f154def8ab073005978d8..34b5d3c1ec51811e8e4c50255592b1e9344014d8 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -17,13 +17,20 @@ //! The bulk (parachain slot auction) blockspace assignment provider. //! This provider is tightly coupled with the configuration and paras modules. 
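// Note (illustrative, not part of this diff): the bulk provider below treats
// the core index as an index into the list of registered lease holding
// parachains and wraps the para found there in `Assignment::Bulk`. As long as
// a parachain is registered at that index, popping never runs dry, which is
// exactly what `parachains_assigner_pop_assignment_is_always_some` in the new
// tests module checks.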
+#[cfg(test)] +mod mock_helpers; +#[cfg(test)] +mod tests; + +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::CoreIndex; + use crate::{ configuration, paras, scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, }; -use frame_system::pallet_prelude::BlockNumberFor; + pub use pallet::*; -use primitives::{CoreIndex, Id as ParaId}; #[frame_support::pallet] pub mod pallet { @@ -38,23 +45,18 @@ pub mod pallet { } impl AssignmentProvider> for Pallet { - fn session_core_count() -> u32 { - paras::Parachains::::decode_len().unwrap_or(0) as u32 - } - - fn pop_assignment_for_core( - core_idx: CoreIndex, - _concluded_para: Option, - ) -> Option { + fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { >::parachains() .get(core_idx.0 as usize) .copied() - .map(|para_id| Assignment::new(para_id)) + .map(Assignment::Bulk) } + fn report_processed(_: Assignment) {} + /// Bulk assignment has no need to push the assignment back on a session change, /// this is a no-op in the case of a bulk assignment slot. - fn push_assignment_for_core(_: CoreIndex, _: Assignment) {} + fn push_back_assignment(_: Assignment) {} fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { AssignmentProviderConfig { @@ -65,4 +67,13 @@ impl AssignmentProvider> for Pallet { ttl: 10u32.into(), } } + + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment { + Assignment::Bulk(para_id) + } + + fn session_core_count() -> u32 { + paras::Parachains::::decode_len().unwrap_or(0) as u32 + } } diff --git a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..e6e9fb074aa97e87e6fe92819cc57e5bfa2ca656 --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs @@ -0,0 +1,83 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Helper functions for tests + +use crate::{ + mock::MockGenesisConfig, + paras::{ParaGenesisArgs, ParaKind}, +}; + +use primitives::{Balance, HeadData, ValidationCode}; +use sp_runtime::Perbill; + +fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { ..Default::default() }, + }, + ..Default::default() + } +} + +#[derive(Debug)] +pub struct GenesisConfigBuilder { + pub on_demand_cores: u32, + pub on_demand_base_fee: Balance, + pub on_demand_fee_variability: Perbill, + pub on_demand_max_queue_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub onboarded_on_demand_chains: Vec, +} + +impl Default for GenesisConfigBuilder { + fn default() -> Self { + Self { + on_demand_cores: 10, + on_demand_base_fee: 10_000, + on_demand_fee_variability: Perbill::from_percent(1), + on_demand_max_queue_size: 100, + on_demand_target_queue_utilization: Perbill::from_percent(25), + onboarded_on_demand_chains: vec![], + } + } +} + +impl GenesisConfigBuilder { + pub(super) fn build(self) -> MockGenesisConfig { + let mut genesis = default_genesis_config(); + let config = &mut genesis.configuration.config; + config.coretime_cores = self.on_demand_cores; + config.on_demand_base_fee = self.on_demand_base_fee; + config.on_demand_fee_variability = self.on_demand_fee_variability; + config.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + + let paras = &mut genesis.paras.paras; + for para_id in self.onboarded_on_demand_chains { + paras.push(( + para_id, + ParaGenesisArgs { + genesis_head: HeadData::from(vec![0u8]), + validation_code: ValidationCode::from(vec![0u8]), + para_kind: ParaKind::Parathread, + }, + )) + } + + genesis + } +} diff --git a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..a110686aaeb08d6bcbb77c0bf42ba2ef4ab7adbf --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs @@ -0,0 +1,112 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use super::*; +use crate::{ + assigner_parachains::mock_helpers::GenesisConfigBuilder, + initializer::SessionChangeNotification, + mock::{ + new_test_ext, ParachainsAssigner, Paras, ParasShared, RuntimeOrigin, Scheduler, System, + }, + paras::{ParaGenesisArgs, ParaKind}, +}; +use frame_support::{assert_ok, pallet_prelude::*}; +use primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; +use sp_std::collections::btree_map::BTreeMap; + +fn schedule_blank_para(id: ParaId, parakind: ParaKind) { + let validation_code: ValidationCode = vec![1, 2, 3].into(); + assert_ok!(Paras::schedule_para_initialize( + id, + ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: validation_code.clone(), + para_kind: parakind, + } + )); + + assert_ok!(Paras::add_trusted_validation_code(RuntimeOrigin::root(), validation_code)); +} + +fn run_to_block( + to: BlockNumber, + new_session: impl Fn(BlockNumber) -> Option>, +) { + while System::block_number() < to { + let b = System::block_number(); + + Scheduler::initializer_finalize(); + Paras::initializer_finalize(b); + + if let Some(notification) = new_session(b + 1) { + let mut notification_with_session_index = notification; + // We will make every session change trigger an action queue. Normally this may require + // 2 or more session changes. + if notification_with_session_index.session_index == SessionIndex::default() { + notification_with_session_index.session_index = ParasShared::scheduled_session(); + } + Paras::initializer_on_new_session(¬ification_with_session_index); + Scheduler::initializer_on_new_session(¬ification_with_session_index); + } + + System::on_finalize(b); + + System::on_initialize(b + 1); + System::set_block_number(b + 1); + + Paras::initializer_initialize(b + 1); + Scheduler::initializer_initialize(b + 1); + + // In the real runtime this is expected to be called by the `InclusionInherent` pallet. + Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); + } +} + +// This and the scheduler test schedule_schedules_including_just_freed together +// ensure that next_up_on_available and next_up_on_time_out will always be +// filled with scheduler claims for lease holding parachains. 
(Removes the need +// for two other scheduler tests) +#[test] +fn parachains_assigner_pop_assignment_is_always_some() { + let core_index = CoreIndex(0); + let para_id = ParaId::from(10); + let expected_assignment = Assignment::Bulk(para_id); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Register the para_id as a lease holding parachain + schedule_blank_para(para_id, ParaKind::Parachain); + + assert!(!Paras::is_parachain(para_id)); + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + assert!(Paras::is_parachain(para_id)); + + for _ in 0..20 { + assert!( + ParachainsAssigner::pop_assignment_for_core(core_index) == + Some(expected_assignment.clone()) + ); + } + + run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); + + for _ in 0..20 { + assert!( + ParachainsAssigner::pop_assignment_for_core(core_index) == + Some(expected_assignment.clone()) + ); + } + }); +} diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index dced24df0aec83d1f3aba619426f607f0dd88d1b..016b3fca589a5b845110d9f25199cc8b8aef5bfe 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -20,7 +20,7 @@ use crate::{ paras_inherent, scheduler::{ self, - common::{Assignment, AssignmentProviderConfig}, + common::{AssignmentProvider, AssignmentProviderConfig}, CoreOccupied, ParasEntry, }, session_info, shared, @@ -96,6 +96,8 @@ pub(crate) struct BenchBuilder { /// Make every candidate include a code upgrade by setting this to `Some` where the interior /// value is the byte length of the new code. code_upgrade: Option, + /// Specifies whether the claimqueue should be filled. + fill_claimqueue: bool, _phantom: sp_std::marker::PhantomData, } @@ -122,6 +124,7 @@ impl BenchBuilder { dispute_sessions: Default::default(), backed_and_concluding_cores: Default::default(), code_upgrade: None, + fill_claimqueue: true, _phantom: sp_std::marker::PhantomData::, } } @@ -225,6 +228,13 @@ impl BenchBuilder { self.max_validators() / self.max_validators_per_core() } + /// Set whether the claim queue should be filled. + #[cfg(not(feature = "runtime-benchmarks"))] + pub(crate) fn set_fill_claimqueue(mut self, f: bool) -> Self { + self.fill_claimqueue = f; + self + } + /// Get the minimum number of validity votes in order for a backed candidate to be included. #[cfg(feature = "runtime-benchmarks")] pub(crate) fn fallback_min_validity_votes() -> u32 { @@ -636,14 +646,14 @@ impl BenchBuilder { } else { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) }; - let data = dispute_statement.payload_data(candidate_hash, session); + let data = dispute_statement.payload_data(candidate_hash, session).unwrap(); let statement_sig = validator_public.sign(&data).unwrap(); (dispute_statement, ValidatorIndex(validator_index), statement_sig) }) .collect(); - DisputeStatementSet { candidate_hash: candidate_hash, session, statements } + DisputeStatementSet { candidate_hash, session, statements } }) .collect() } @@ -663,14 +673,18 @@ impl BenchBuilder { inclusion::PendingAvailability::::remove_all(None); // We don't allow a core to have both disputes and be marked fully available at this block. 
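// Note (illustrative, not part of this diff): with the new `fill_claimqueue`
// flag enabled (the default), the benchmark builder below seeds the scheduler's
// claim queue with one mock assignment per used core via `get_mock_assignment`,
// in addition to populating `AvailabilityCores`, so benchmarks start from a
// state in which every core already has something to work on.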
- let cores = self.max_cores(); + let max_cores = self.max_cores(); let used_cores = (self.dispute_sessions.len() + self.backed_and_concluding_cores.len()) as u32; - assert!(used_cores <= cores); + assert!(used_cores <= max_cores); + let fill_claimqueue = self.fill_claimqueue; // NOTE: there is an n+2 session delay for these actions to take effect. // We are currently in Session 0, so these changes will take effect in Session 2. Self::setup_para_ids(used_cores); + configuration::ActiveConfig::::mutate(|c| { + c.coretime_cores = used_cores; + }); let validator_ids = Self::generate_validator_pairs(self.max_validators()); let target_session = SessionIndex::from(self.target_session); @@ -702,13 +716,33 @@ impl BenchBuilder { .map(|i| { let AssignmentProviderConfig { ttl, .. } = scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); - CoreOccupied::Paras(ParasEntry::new( - Assignment::new(ParaId::from(i as u32)), - now + ttl, - )) + // Load an assignment into provider so that one is present to pop + let assignment = ::AssignmentProvider::get_mock_assignment( + CoreIndex(i), + ParaId::from(i), + ); + CoreOccupied::Paras(ParasEntry::new(assignment, now + ttl)) }) .collect(); scheduler::AvailabilityCores::::set(cores); + if fill_claimqueue { + // Add items to claim queue as well: + let cores = (0..used_cores) + .into_iter() + .map(|i| { + let AssignmentProviderConfig { ttl, .. } = + scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + // Load an assignment into provider so that one is present to pop + let assignment = + ::AssignmentProvider::get_mock_assignment( + CoreIndex(i), + ParaId::from(i), + ); + (CoreIndex(i), [ParasEntry::new(assignment, now + ttl)].into()) + }) + .collect(); + scheduler::ClaimQueue::::set(cores); + } Bench:: { data: ParachainsInherentData { diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index d85c267496f425b32702e6f5d9ed4fc8a7275c99..4619313590ebc2d5d92dfc85a4e845c381c61607 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,6 +26,7 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ + vstaging::{ApprovalVotingParams, NodeFeatures}, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, @@ -171,8 +172,8 @@ pub struct HostConfiguration { /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes /// have concluded. pub code_retention_period: BlockNumber, - /// The amount of execution cores to dedicate to on demand execution. - pub on_demand_cores: u32, + /// How many cores are managed by the coretime chain. + pub coretime_cores: u32, /// The number of retries that a on demand author has to submit their block. pub on_demand_retries: u32, /// The maximum queue size of the pay as you go module. @@ -261,6 +262,10 @@ pub struct HostConfiguration { /// The minimum number of valid backing statements required to consider a parachain candidate /// backable. pub minimum_backing_votes: u32, + /// Node features enablement. 
+ pub node_features: NodeFeatures, + /// Params used by approval-voting + pub approval_voting_params: ApprovalVotingParams, } impl> Default for HostConfiguration { @@ -279,7 +284,7 @@ impl> Default for HostConfiguration> Default for HostConfiguration Weight; fn set_config_with_executor_params() -> Weight; fn set_config_with_perbill() -> Weight; + fn set_node_feature() -> Weight; } pub struct TestWeightInfo; @@ -488,6 +496,9 @@ impl WeightInfo for TestWeightInfo { fn set_config_with_perbill() -> Weight { Weight::MAX } + fn set_node_feature() -> Weight { + Weight::MAX + } } #[frame_support::pallet] @@ -496,18 +507,20 @@ pub mod pallet { /// The current storage version. /// - /// v0-v1: - /// v1-v2: - /// v2-v3: - /// v3-v4: - /// v4-v5: - /// + - /// + - /// v5-v6: (remove UMP dispatch queue) - /// v6-v7: - /// v7-v8: - /// v8-v9: - const STORAGE_VERSION: StorageVersion = StorageVersion::new(9); + /// v0-v1: + /// v1-v2: + /// v2-v3: + /// v3-v4: + /// v4-v5: + /// + + /// + + /// v5-v6: (remove UMP dispatch queue) + /// v6-v7: + /// v7-v8: + /// v8-v9: + /// v9-v10: + /// v10-11: + const STORAGE_VERSION: StorageVersion = StorageVersion::new(11); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -652,17 +665,18 @@ pub mod pallet { }) } - /// Set the number of on demand execution cores. + /// Set the number of coretime execution cores. + /// + /// Note that this configuration is managed by the coretime chain. Only manually change + /// this, if you really know what you are doing! #[pallet::call_index(6)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), DispatchClass::Operational, ))] - pub fn set_on_demand_cores(origin: OriginFor, new: u32) -> DispatchResult { + pub fn set_coretime_cores(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.on_demand_cores = new; - }) + Self::set_coretime_cores_unchecked(new) } /// Set the number of retries for a particular on demand. @@ -1183,6 +1197,7 @@ pub mod pallet { config.on_demand_ttl = new; }) } + /// Set the minimum backing votes threshold. #[pallet::call_index(52)] #[pallet::weight(( @@ -1195,6 +1210,51 @@ pub mod pallet { config.minimum_backing_votes = new; }) } + + /// Set/Unset a node feature. + #[pallet::call_index(53)] + #[pallet::weight(( + T::WeightInfo::set_node_feature(), + DispatchClass::Operational + ))] + pub fn set_node_feature(origin: OriginFor, index: u8, value: bool) -> DispatchResult { + ensure_root(origin)?; + + Self::schedule_config_update(|config| { + let index = usize::from(index); + if config.node_features.len() <= index { + config.node_features.resize(index + 1, false); + } + config.node_features.set(index, value); + }) + } + + /// Set approval-voting-params. + #[pallet::call_index(54)] + #[pallet::weight(( + T::WeightInfo::set_config_with_executor_params(), + DispatchClass::Operational, + ))] + pub fn set_approval_voting_params( + origin: OriginFor, + new: ApprovalVotingParams, + ) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.approval_voting_params = new; + }) + } + } + + impl Pallet { + /// Set coretime cores. + /// + /// To be used if authorization is checked otherwise. 
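// Worked example (illustrative, not part of this diff): `set_node_feature`
// above grows the `node_features` bitfield on demand before flipping a single
// bit. Setting index 3 on an empty bitfield first resizes it to 4 entries (all
// `false`) and then sets bit 3, and the benchmark call
// `set_node_feature(RawOrigin::Root, 255, true)` grows it to 256 entries with
// only the last bit set once the scheduled config update is applied.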
+ pub fn set_coretime_cores_unchecked(new: u32) -> DispatchResult { + Self::schedule_config_update(|config| { + config.coretime_cores = new; + }) + } } #[pallet::hooks] diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index d9d11ab56e496980e8090a99ed88452a351e970e..67daf1c459884d42378056b6528605da29578c95 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -17,7 +17,7 @@ use crate::configuration::*; use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; use frame_system::RawOrigin; -use primitives::{ExecutorParam, ExecutorParams, PvfExecTimeoutKind, PvfPrepTimeoutKind}; +use primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; use sp_runtime::traits::One; benchmarks! { @@ -41,14 +41,16 @@ benchmarks! { ExecutorParam::StackNativeMax(256 * 1024 * 1024), ExecutorParam::WasmExtBulkMemory, ExecutorParam::PrecheckingMaxMemory(2 * 1024 * 1024 * 1024), - ExecutorParam::PvfPrepTimeout(PvfPrepTimeoutKind::Precheck, 60_000), - ExecutorParam::PvfPrepTimeout(PvfPrepTimeoutKind::Lenient, 360_000), - ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Backing, 2_000), - ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Approval, 12_000), + ExecutorParam::PvfPrepTimeout(PvfPrepKind::Precheck, 60_000), + ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 360_000), + ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2_000), + ExecutorParam::PvfExecTimeout(PvfExecKind::Approval, 12_000), ][..])) set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)) + set_node_feature{}: set_node_feature(RawOrigin::Root, 255, true) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/polkadot/runtime/parachains/src/configuration/migration.rs b/polkadot/runtime/parachains/src/configuration/migration.rs index 26f8a85b496d5a76385c6c85b52f66d719d88c82..2838b73092dbab4a029a684948a85123aa489906 100644 --- a/polkadot/runtime/parachains/src/configuration/migration.rs +++ b/polkadot/runtime/parachains/src/configuration/migration.rs @@ -16,6 +16,8 @@ //! A module that is responsible for migration of storage. +pub mod v10; +pub mod v11; pub mod v6; pub mod v7; pub mod v8; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs new file mode 100644 index 0000000000000000000000000000000000000000..cf228610e5c9cec1dde8cfb31880fa2b1f68821f --- /dev/null +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -0,0 +1,381 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. 
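Before the field-by-field code below, it may help to see the shape of the migration in isolation: decode the old layout, copy every existing field across unchanged, and give any newly added field an explicit default. A minimal, self-contained sketch of that pattern (toy structs, not the real `HostConfiguration`; the `node_features_len` field is purely illustrative):

// Toy "before" layout: only fields that already existed on chain.
#[derive(Debug, Clone, PartialEq)]
struct OldConfig {
    max_code_size: u32,
    needed_approvals: u32,
}

// Toy "after" layout: the same fields plus one newly introduced field.
#[derive(Debug, Clone, PartialEq)]
struct NewConfig {
    max_code_size: u32,
    needed_approvals: u32,
    node_features_len: u32, // new field, absent in the old encoding
}

// The translate step copies old values verbatim and fills in the new field.
fn translate(old: OldConfig) -> NewConfig {
    NewConfig {
        max_code_size: old.max_code_size,
        needed_approvals: old.needed_approvals,
        node_features_len: 0,
    }
}

fn main() {
    let migrated = translate(OldConfig { max_code_size: 3_145_728, needed_approvals: 30 });
    assert_eq!(migrated.node_features_len, 0);
}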
+ +use crate::configuration::{Config, Pallet}; +use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{ + vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParams, SessionIndex, + LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; +use sp_runtime::Perbill; +use sp_std::vec::Vec; + +use frame_support::traits::OnRuntimeUpgrade; + +use super::v9::V9HostConfiguration; +// All configuration of the runtime with respect to paras. +#[derive(Clone, Encode, PartialEq, Decode, Debug)] +pub struct V10HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: BlockNumber, + pub on_demand_cores: u32, + pub on_demand_retries: u32, + pub on_demand_queue_max_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub on_demand_fee_variability: Perbill, + pub on_demand_base_fee: Balance, + pub on_demand_ttl: BlockNumber, + pub group_rotation_frequency: BlockNumber, + pub paras_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, + pub minimum_backing_votes: u32, + pub node_features: NodeFeatures, +} + +impl> Default for V10HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + on_demand_cores: Default::default(), + on_demand_retries: Default::default(), + scheduling_lookahead: 1, + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: 
Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + on_demand_queue_max_size: ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32.into(), + minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, + node_features: NodeFeatures::EMPTY, + } + } +} + +mod v9 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V9HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V9HostConfiguration>)>, + OptionQuery, + >; +} + +mod v10 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V10HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V10HostConfiguration>)>, + OptionQuery, + >; +} + +pub struct VersionUncheckedMigrateToV10(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for VersionUncheckedMigrateToV10 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV10"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + migrate_to_v10::() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV10"); + ensure!( + Pallet::::on_chain_storage_version() >= StorageVersion::new(10), + "Storage version should be >= 10 after the migration" + ); + + Ok(()) + } +} + +pub type MigrateToV10 = frame_support::migrations::VersionedMigration< + 9, + 10, + VersionUncheckedMigrateToV10, + Pallet, + ::DbWeight, +>; + +// Unusual formatting is justified: +// - make it easier to verify that fields assign what they supposed to assign. +// - this code is transient and will be removed after all migrations are done. +// - this code is important enough to optimize for legibility sacrificing consistency. 
+#[rustfmt::skip] +fn translate(pre: V9HostConfiguration>) -> V10HostConfiguration> { + V10HostConfiguration { + max_code_size : pre.max_code_size, + max_head_data_size : pre.max_head_data_size, + max_upward_queue_count : pre.max_upward_queue_count, + max_upward_queue_size : pre.max_upward_queue_size, + max_upward_message_size : pre.max_upward_message_size, + max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, + hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, + validation_upgrade_cooldown : pre.validation_upgrade_cooldown, + validation_upgrade_delay : pre.validation_upgrade_delay, + max_pov_size : pre.max_pov_size, + max_downward_message_size : pre.max_downward_message_size, + hrmp_sender_deposit : pre.hrmp_sender_deposit, + hrmp_recipient_deposit : pre.hrmp_recipient_deposit, + hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, + hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, + hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, + hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, + hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, + code_retention_period : pre.code_retention_period, + on_demand_cores : pre.on_demand_cores, + on_demand_retries : pre.on_demand_retries, + group_rotation_frequency : pre.group_rotation_frequency, + paras_availability_period : pre.paras_availability_period, + scheduling_lookahead : pre.scheduling_lookahead, + max_validators_per_core : pre.max_validators_per_core, + max_validators : pre.max_validators, + dispute_period : pre.dispute_period, + dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, + no_show_slots : pre.no_show_slots, + n_delay_tranches : pre.n_delay_tranches, + zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, + needed_approvals : pre.needed_approvals, + relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, + pvf_voting_ttl : pre.pvf_voting_ttl, + minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, + async_backing_params : pre.async_backing_params, + executor_params : pre.executor_params, + on_demand_queue_max_size : pre.on_demand_queue_max_size, + on_demand_base_fee : pre.on_demand_base_fee, + on_demand_fee_variability : pre.on_demand_fee_variability, + on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, + on_demand_ttl : pre.on_demand_ttl, + minimum_backing_votes : pre.minimum_backing_votes, + node_features : NodeFeatures::EMPTY + } +} + +fn migrate_to_v10() -> Weight { + let v9 = v9::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v10 = translate::(v9); + v10::ActiveConfig::::set(Some(v10)); + + // Allowed to be empty. + let pending_v9 = v9::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v10 = Vec::new(); + + for (session, v9) in pending_v9.into_iter() { + let v10 = translate::(v9); + pending_v10.push((session, v10)); + } + v10::PendingConfigs::::set(Some(pending_v10.clone())); + + let num_configs = (pending_v10.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{new_test_ext, Test}; + use primitives::LEGACY_MIN_BACKING_VOTES; + + #[test] + fn v10_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. 
Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![" + 0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000000000000000001027000080b2e60e80c3c90180969800000000000000000000000000050000001400000004000000010000000101000000000600000064000000020000001900000000000000020000000200000002000000050000000200000000" + ]; + + let v10 = + V10HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v10.max_code_size, 3_145_728); + assert_eq!(v10.validation_upgrade_cooldown, 2); + assert_eq!(v10.max_pov_size, 5_242_880); + assert_eq!(v10.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v10.n_delay_tranches, 25); + assert_eq!(v10.minimum_validation_upgrade_delay, 5); + assert_eq!(v10.group_rotation_frequency, 20); + assert_eq!(v10.on_demand_cores, 0); + assert_eq!(v10.on_demand_base_fee, 10_000_000); + assert_eq!(v10.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES); + assert_eq!(v10.node_features, NodeFeatures::EMPTY); + } + + // Test that `migrate_to_v10`` correctly applies the `translate` function to current and pending + // configs. + #[test] + fn test_migrate_to_v10() { + // Host configuration has lots of fields. However, in this migration we only add one + // field. The most important part to check are a couple of the last fields. We also pick + // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and + // also their type. + // + // We specify only the picked fields and the rest should be provided by the `Default` + // implementation. That implementation is copied over between the two types and should work + // fine. + let v9 = V9HostConfiguration:: { + needed_approvals: 69, + paras_availability_period: 55, + hrmp_recipient_deposit: 1337, + max_pov_size: 1111, + minimum_validation_upgrade_delay: 20, + ..Default::default() + }; + + let mut pending_configs = Vec::new(); + pending_configs.push((100, v9.clone())); + pending_configs.push((300, v9.clone())); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v9 version in the state. 
+ v9::ActiveConfig::::set(Some(v9.clone())); + v9::PendingConfigs::::set(Some(pending_configs)); + + migrate_to_v10::(); + + let v10 = translate::(v9); + let mut configs_to_check = v10::PendingConfigs::::get().unwrap(); + configs_to_check.push((0, v10::ActiveConfig::::get().unwrap())); + + for (_, config) in configs_to_check { + assert_eq!(config, v10); + assert_eq!(config.node_features, NodeFeatures::EMPTY); + } + }); + } + + // Test that migration doesn't panic in case there're no pending configurations upgrades in + // pallet's storage. + #[test] + fn test_migrate_to_v10_no_pending() { + let v9 = V9HostConfiguration::::default(); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v9 version in the state. + v9::ActiveConfig::::set(Some(v9)); + // Ensure there're no pending configs. + v9::PendingConfigs::::set(None); + + // Shouldn't fail. + migrate_to_v10::(); + }); + } +} diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs new file mode 100644 index 0000000000000000000000000000000000000000..f4db9196b1a089723cd061897f4a6638ce1c615f --- /dev/null +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -0,0 +1,329 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. 
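As with `MigrateToV10` above, the module below exports a `VersionedMigration` alias that only runs when the pallet's on-chain storage version equals the `FROM` version and bumps it to `TO` afterwards. A hedged sketch of how a runtime might chain the two (the `Migrations` and `Runtime` identifiers here are illustrative and not taken from this diff):

// Illustrative wiring only; concrete runtimes may name and order things differently.
pub type Migrations = (
    // Runs only if the configuration pallet is at storage version 9, then bumps it to 10.
    parachains_configuration::migration::v10::MigrateToV10<Runtime>,
    // Runs only if the pallet is at storage version 10, then bumps it to 11.
    parachains_configuration::migration::v11::MigrateToV11<Runtime>,
);

// The tuple is handed to `Executive`, which applies each entry in order on runtime upgrade.
pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations,
>;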
+ +use crate::configuration::{self, Config, Pallet}; +use frame_support::{ + migrations::VersionedMigration, pallet_prelude::*, traits::Defensive, weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{vstaging::ApprovalVotingParams, SessionIndex}; +use sp_std::vec::Vec; + +use frame_support::traits::OnRuntimeUpgrade; + +use super::v10::V10HostConfiguration; +type V11HostConfiguration = configuration::HostConfiguration; + +mod v10 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V10HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V10HostConfiguration>)>, + OptionQuery, + >; +} + +mod v11 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V11HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V11HostConfiguration>)>, + OptionQuery, + >; +} + +pub type MigrateToV11 = VersionedMigration< + 10, + 11, + UncheckedMigrateToV11, + Pallet, + ::DbWeight, +>; + +pub struct UncheckedMigrateToV11(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for UncheckedMigrateToV11 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV11"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV11 started"); + let weight_consumed = migrate_to_v11::(); + + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV11 executed successfully"); + + weight_consumed + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV11"); + ensure!( + StorageVersion::get::>() >= 11, + "Storage version should be >= 11 after the migration" + ); + + Ok(()) + } +} + +fn migrate_to_v11() -> Weight { + // Unusual formatting is justified: + // - make it easier to verify that fields assign what they supposed to assign. + // - this code is transient and will be removed after all migrations are done. + // - this code is important enough to optimize for legibility sacrificing consistency. 
+ #[rustfmt::skip] + let translate = + |pre: V10HostConfiguration>| -> + V11HostConfiguration> + { + V11HostConfiguration { +max_code_size : pre.max_code_size, +max_head_data_size : pre.max_head_data_size, +max_upward_queue_count : pre.max_upward_queue_count, +max_upward_queue_size : pre.max_upward_queue_size, +max_upward_message_size : pre.max_upward_message_size, +max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, +hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, +validation_upgrade_cooldown : pre.validation_upgrade_cooldown, +validation_upgrade_delay : pre.validation_upgrade_delay, +max_pov_size : pre.max_pov_size, +max_downward_message_size : pre.max_downward_message_size, +hrmp_sender_deposit : pre.hrmp_sender_deposit, +hrmp_recipient_deposit : pre.hrmp_recipient_deposit, +hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, +hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, +hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, +hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, +hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, +code_retention_period : pre.code_retention_period, +coretime_cores : pre.on_demand_cores, +on_demand_retries : pre.on_demand_retries, +group_rotation_frequency : pre.group_rotation_frequency, +paras_availability_period : pre.paras_availability_period, +scheduling_lookahead : pre.scheduling_lookahead, +max_validators_per_core : pre.max_validators_per_core, +max_validators : pre.max_validators, +dispute_period : pre.dispute_period, +dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, +no_show_slots : pre.no_show_slots, +n_delay_tranches : pre.n_delay_tranches, +zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, +needed_approvals : pre.needed_approvals, +relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, +pvf_voting_ttl : pre.pvf_voting_ttl, +minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, +async_backing_params : pre.async_backing_params, +executor_params : pre.executor_params, +on_demand_queue_max_size : pre.on_demand_queue_max_size, +on_demand_base_fee : pre.on_demand_base_fee, +on_demand_fee_variability : pre.on_demand_fee_variability, +on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, +on_demand_ttl : pre.on_demand_ttl, +minimum_backing_votes : pre.minimum_backing_votes, +node_features : pre.node_features, +approval_voting_params : ApprovalVotingParams { + max_approval_coalesce_count: 1, + } + } + }; + + let v10 = v10::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v11 = translate(v10); + v11::ActiveConfig::::set(Some(v11)); + + // Allowed to be empty. + let pending_v9 = v10::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v10 = Vec::new(); + + for (session, v10) in pending_v9.into_iter() { + let v11 = translate(v10); + pending_v10.push((session, v11)); + } + v11::PendingConfigs::::set(Some(pending_v10.clone())); + + let num_configs = (pending_v10.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use primitives::LEGACY_MIN_BACKING_VOTES; + + use super::*; + use crate::mock::{new_test_ext, Test}; + + #[test] + fn v11_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. 
Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![" + 0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000000000000000001027000080b2e60e80c3c9018096980000000000000000000000000005000000140000000400000001000000010100000000060000006400000002000000190000000000000002000000020000000200000005000000020000000001000000" + ]; + + let v11 = + V11HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v11.max_code_size, 3_145_728); + assert_eq!(v11.validation_upgrade_cooldown, 2); + assert_eq!(v11.max_pov_size, 5_242_880); + assert_eq!(v11.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v11.n_delay_tranches, 25); + assert_eq!(v11.minimum_validation_upgrade_delay, 5); + assert_eq!(v11.group_rotation_frequency, 20); + assert_eq!(v11.coretime_cores, 0); + assert_eq!(v11.on_demand_base_fee, 10_000_000); + assert_eq!(v11.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES); + assert_eq!(v11.approval_voting_params.max_approval_coalesce_count, 1); + } + + #[test] + fn test_migrate_to_v11() { + // Host configuration has lots of fields. However, in this migration we only add one + // field. The most important part to check are a couple of the last fields. We also pick + // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and + // also their type. + // + // We specify only the picked fields and the rest should be provided by the `Default` + // implementation. That implementation is copied over between the two types and should work + // fine. + let v10 = V10HostConfiguration:: { + needed_approvals: 69, + paras_availability_period: 55, + hrmp_recipient_deposit: 1337, + max_pov_size: 1111, + minimum_validation_upgrade_delay: 20, + ..Default::default() + }; + + let mut pending_configs = Vec::new(); + pending_configs.push((100, v10.clone())); + pending_configs.push((300, v10.clone())); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v10 version in the state. 
+ v10::ActiveConfig::::set(Some(v10)); + v10::PendingConfigs::::set(Some(pending_configs)); + + migrate_to_v11::(); + + let v11 = v11::ActiveConfig::::get().unwrap(); + assert_eq!(v11.approval_voting_params.max_approval_coalesce_count, 1); + + let mut configs_to_check = v11::PendingConfigs::::get().unwrap(); + configs_to_check.push((0, v11.clone())); + + for (_, v10) in configs_to_check { + #[rustfmt::skip] + { + assert_eq!(v10.max_code_size , v11.max_code_size); + assert_eq!(v10.max_head_data_size , v11.max_head_data_size); + assert_eq!(v10.max_upward_queue_count , v11.max_upward_queue_count); + assert_eq!(v10.max_upward_queue_size , v11.max_upward_queue_size); + assert_eq!(v10.max_upward_message_size , v11.max_upward_message_size); + assert_eq!(v10.max_upward_message_num_per_candidate , v11.max_upward_message_num_per_candidate); + assert_eq!(v10.hrmp_max_message_num_per_candidate , v11.hrmp_max_message_num_per_candidate); + assert_eq!(v10.validation_upgrade_cooldown , v11.validation_upgrade_cooldown); + assert_eq!(v10.validation_upgrade_delay , v11.validation_upgrade_delay); + assert_eq!(v10.max_pov_size , v11.max_pov_size); + assert_eq!(v10.max_downward_message_size , v11.max_downward_message_size); + assert_eq!(v10.hrmp_max_parachain_outbound_channels , v11.hrmp_max_parachain_outbound_channels); + assert_eq!(v10.hrmp_sender_deposit , v11.hrmp_sender_deposit); + assert_eq!(v10.hrmp_recipient_deposit , v11.hrmp_recipient_deposit); + assert_eq!(v10.hrmp_channel_max_capacity , v11.hrmp_channel_max_capacity); + assert_eq!(v10.hrmp_channel_max_total_size , v11.hrmp_channel_max_total_size); + assert_eq!(v10.hrmp_max_parachain_inbound_channels , v11.hrmp_max_parachain_inbound_channels); + assert_eq!(v10.hrmp_channel_max_message_size , v11.hrmp_channel_max_message_size); + assert_eq!(v10.code_retention_period , v11.code_retention_period); + assert_eq!(v10.coretime_cores , v11.coretime_cores); + assert_eq!(v10.on_demand_retries , v11.on_demand_retries); + assert_eq!(v10.group_rotation_frequency , v11.group_rotation_frequency); + assert_eq!(v10.paras_availability_period , v11.paras_availability_period); + assert_eq!(v10.scheduling_lookahead , v11.scheduling_lookahead); + assert_eq!(v10.max_validators_per_core , v11.max_validators_per_core); + assert_eq!(v10.max_validators , v11.max_validators); + assert_eq!(v10.dispute_period , v11.dispute_period); + assert_eq!(v10.no_show_slots , v11.no_show_slots); + assert_eq!(v10.n_delay_tranches , v11.n_delay_tranches); + assert_eq!(v10.zeroth_delay_tranche_width , v11.zeroth_delay_tranche_width); + assert_eq!(v10.needed_approvals , v11.needed_approvals); + assert_eq!(v10.relay_vrf_modulo_samples , v11.relay_vrf_modulo_samples); + assert_eq!(v10.pvf_voting_ttl , v11.pvf_voting_ttl); + assert_eq!(v10.minimum_validation_upgrade_delay , v11.minimum_validation_upgrade_delay); + assert_eq!(v10.async_backing_params.allowed_ancestry_len, v11.async_backing_params.allowed_ancestry_len); + assert_eq!(v10.async_backing_params.max_candidate_depth , v11.async_backing_params.max_candidate_depth); + assert_eq!(v10.executor_params , v11.executor_params); + assert_eq!(v10.minimum_backing_votes , v11.minimum_backing_votes); + }; // ; makes this a statement. `rustfmt::skip` cannot be put on an expression. + } + }); + } + + // Test that migration doesn't panic in case there're no pending configurations upgrades in + // pallet's storage. 
+ #[test] + fn test_migrate_to_v11_no_pending() { + let v10 = V10HostConfiguration::::default(); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v10 version in the state. + v10::ActiveConfig::::set(Some(v10)); + // Ensure there're no pending configs. + v11::PendingConfigs::::set(None); + + // Shouldn't fail. + migrate_to_v11::(); + }); + } +} diff --git a/polkadot/runtime/parachains/src/configuration/migration/v8.rs b/polkadot/runtime/parachains/src/configuration/migration/v8.rs index d1bc9005112529d55749a82de78fe17ff28391f7..537dfa9abd77040f7017be6bd46150aa29bfef3c 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v8.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v8.rs @@ -250,7 +250,7 @@ on_demand_fee_variability : Perbill::from_percent(3), on_demand_target_queue_utilization : Perbill::from_percent(25), on_demand_ttl : 5u32.into(), } - }; +}; let v7 = v7::ActiveConfig::::get() .defensive_proof("Could not decode old config") diff --git a/polkadot/runtime/parachains/src/configuration/migration/v9.rs b/polkadot/runtime/parachains/src/configuration/migration/v9.rs index e37f0b9b0e3d06801cbede3e99b143ee132b6a36..ca4bbd9dacef3fb645c41a060798e0298a0efc27 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v9.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v9.rs @@ -23,13 +23,116 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{SessionIndex, LEGACY_MIN_BACKING_VOTES}; +use primitives::{ + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; +use sp_runtime::Perbill; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; use super::v8::V8HostConfiguration; -type V9HostConfiguration = configuration::HostConfiguration; +/// All configuration of the runtime with respect to paras. 
+#[derive(Clone, Encode, Decode, Debug)] +pub struct V9HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: BlockNumber, + pub on_demand_cores: u32, + pub on_demand_retries: u32, + pub on_demand_queue_max_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub on_demand_fee_variability: Perbill, + pub on_demand_base_fee: Balance, + pub on_demand_ttl: BlockNumber, + pub group_rotation_frequency: BlockNumber, + pub paras_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, + pub minimum_backing_votes: u32, +} + +impl> Default for V9HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + on_demand_cores: Default::default(), + on_demand_retries: Default::default(), + scheduling_lookahead: 1, + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + on_demand_queue_max_size: 
ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32.into(), + minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, + } + } +} mod v8 { use super::*; diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index ea39628c95876a92b9072bd5adc18584e48df0b5..c915eb12a0ca1712a1d923d126daac959a40cd09 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -16,6 +16,7 @@ use super::*; use crate::mock::{new_test_ext, Configuration, ParasShared, RuntimeOrigin, Test}; +use bitvec::{bitvec, prelude::Lsb0}; use frame_support::{assert_err, assert_noop, assert_ok}; fn on_new_session(session_index: SessionIndex) -> (HostConfiguration, HostConfiguration) { @@ -282,7 +283,7 @@ fn setting_pending_config_members() { max_code_size: 100_000, max_pov_size: 1024, max_head_data_size: 1_000, - on_demand_cores: 2, + coretime_cores: 2, on_demand_retries: 5, group_rotation_frequency: 20, paras_availability_period: 10, @@ -312,12 +313,14 @@ fn setting_pending_config_members() { pvf_voting_ttl: 3, minimum_validation_upgrade_delay: 20, executor_params: Default::default(), + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 }, on_demand_queue_max_size: 10_000u32, on_demand_base_fee: 10_000_000u128, on_demand_fee_variability: Perbill::from_percent(3), on_demand_target_queue_utilization: Perbill::from_percent(25), on_demand_ttl: 5u32, minimum_backing_votes: 5, + node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], }; Configuration::set_validation_upgrade_cooldown( @@ -339,7 +342,7 @@ fn setting_pending_config_members() { Configuration::set_max_pov_size(RuntimeOrigin::root(), new_config.max_pov_size).unwrap(); Configuration::set_max_head_data_size(RuntimeOrigin::root(), new_config.max_head_data_size) .unwrap(); - Configuration::set_on_demand_cores(RuntimeOrigin::root(), new_config.on_demand_cores) + Configuration::set_coretime_cores(RuntimeOrigin::root(), new_config.coretime_cores) .unwrap(); Configuration::set_on_demand_retries(RuntimeOrigin::root(), new_config.on_demand_retries) .unwrap(); @@ -473,6 +476,12 @@ fn setting_pending_config_members() { new_config.minimum_backing_votes, ) .unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 1, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 1, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 3, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 10, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 10, false).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 11, true).unwrap(); assert_eq!(PendingConfigs::::get(), vec![(shared::SESSION_DELAY, new_config)],); }) diff --git a/polkadot/runtime/parachains/src/coretime/benchmarking.rs b/polkadot/runtime/parachains/src/coretime/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..d1ac71f580ee0e70015bf130b6836519005ee280 --- /dev/null +++ b/polkadot/runtime/parachains/src/coretime/benchmarking.rs @@ -0,0 +1,73 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Coretime pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_benchmarking::v2::*; +use frame_support::traits::OriginTrait; +use pallet_broker::CoreIndex as BrokerCoreIndex; + +#[benchmarks] +mod benchmarks { + use super::*; + use assigner_coretime::PartsOf57600; + + #[benchmark] + fn request_core_count() { + // Setup + let root_origin = ::RuntimeOrigin::root(); + + #[extrinsic_call] + _( + root_origin as ::RuntimeOrigin, + // random core count + 100, + ) + } + + #[benchmark] + fn assign_core(s: Linear<1, 100>) { + // Setup + let root_origin = ::RuntimeOrigin::root(); + + // Use parameterized assignment count + let mut assignments: Vec<(CoreAssignment, PartsOf57600)> = vec![0u16; s as usize - 1] + .into_iter() + .enumerate() + .map(|(index, parts)| { + (CoreAssignment::Task(index as u32), PartsOf57600::new_saturating(parts)) + }) + .collect(); + // Parts must add up to exactly 57600. Here we add all the parts in one assignment, as + // it won't affect the weight and splitting up the parts into even groupings may not + // work for every value `s`. + assignments.push((CoreAssignment::Task(s as u32), PartsOf57600::FULL)); + + let core_index: BrokerCoreIndex = 0; + + #[extrinsic_call] + _( + root_origin as ::RuntimeOrigin, + core_index, + BlockNumberFor::::from(5u32), + assignments, + Some(BlockNumberFor::::from(20u32)), + ) + } +} diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs new file mode 100644 index 0000000000000000000000000000000000000000..64c10f731988902987b47768d69696e8b96fc72a --- /dev/null +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -0,0 +1,285 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Migrations for the Coretime pallet.
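The benchmark above and the migration below both express core schedules as `(CoreAssignment, PartsOf57600)` pairs whose parts must sum to the full 57,600 parts of a core. A hedged sketch of an `assign_core` call that splits one core evenly between a parachain task and the on-demand pool (`Runtime`, `RuntimeOrigin` and the task id are placeholders, not values from this diff):

// Placeholder runtime/origin names; only types used elsewhere in this diff are assumed.
let half = PartsOf57600::new_saturating(57_600 / 2); // 28_800 parts each
coretime::Pallet::<Runtime>::assign_core(
    RuntimeOrigin::root(),        // root or the broker system parachain may call this
    0,                            // BrokerCoreIndex of the core being scheduled
    5u32.into(),                  // `begin`: block height at which the schedule takes effect
    vec![
        (CoreAssignment::Task(2000), half), // half of the core to para/task 2000
        (CoreAssignment::Pool, half),       // the other half to the on-demand pool
    ],
    None,                         // no `end_hint`
)?;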
+ +pub use v_coretime::{GetLegacyLease, MigrateToCoretime}; + +mod v_coretime { + #[cfg(feature = "try-runtime")] + use crate::scheduler::common::AssignmentProvider; + use crate::{ + assigner_coretime, configuration, + coretime::{mk_coretime_call, Config, PartsOf57600, WeightInfo}, + paras, + }; + #[cfg(feature = "try-runtime")] + use frame_support::ensure; + use frame_support::{ + traits::{OnRuntimeUpgrade, PalletInfoAccess, StorageVersion}, + weights::Weight, + }; + use frame_system::pallet_prelude::BlockNumberFor; + use pallet_broker::{CoreAssignment, CoreMask, ScheduleItem}; + #[cfg(feature = "try-runtime")] + use parity_scale_codec::Decode; + #[cfg(feature = "try-runtime")] + use parity_scale_codec::Encode; + use polkadot_parachain_primitives::primitives::IsSystem; + use primitives::{CoreIndex, Id as ParaId}; + use sp_arithmetic::traits::SaturatedConversion; + use sp_core::Get; + use sp_runtime::BoundedVec; + #[cfg(feature = "try-runtime")] + use sp_std::vec::Vec; + use sp_std::{iter, prelude::*, result}; + use xcm::v3::{ + send_xcm, Instruction, Junction, Junctions, MultiLocation, SendError, WeightLimit, Xcm, + }; + + /// Return information about a legacy lease of a parachain. + pub trait GetLegacyLease { + /// If the parachain is a lease holding parachain, return the block at which the lease expires. + fn get_parachain_lease_in_blocks(para: ParaId) -> Option; + } + + /// Migrate a chain to use coretime. + /// + /// This assumes that the `Coretime` and the `AssignerCoretime` pallets are added at the same + /// time to a runtime. + pub struct MigrateToCoretime( + sp_std::marker::PhantomData<(T, SendXcm, LegacyLease)>, + ); + + impl>> + MigrateToCoretime + { + fn already_migrated() -> bool { + // We are using the assigner coretime because the coretime pallet doesn't have any + // storage data. But both pallets are introduced at the same time, so this is fine. + let name_hash = assigner_coretime::Pallet::::name_hash(); + let mut next_key = name_hash.to_vec(); + let storage_version_key = StorageVersion::storage_key::>(); + + loop { + match sp_io::storage::next_key(&next_key) { + // StorageVersion is initialized before, so we need to ignore it. + Some(key) if &key == &storage_version_key => { + next_key = key; + }, + // If there is any other key with the prefix of the pallet, + // we have already executed the migration. + Some(key) if key.starts_with(&name_hash) => { + log::info!("`MigrateToCoretime` already executed!"); + return true + }, + // Any other key/no key means that we have not yet migrated.
+ None | Some(_) => return false, + } + } + } + } + + impl< + T: Config + crate::dmp::Config, + SendXcm: xcm::v3::SendXcm, + LegacyLease: GetLegacyLease>, + > OnRuntimeUpgrade for MigrateToCoretime + { + fn on_runtime_upgrade() -> Weight { + if Self::already_migrated() { + return Weight::zero() + } + + log::info!("Migrating existing parachains to coretime."); + migrate_to_coretime::() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::DispatchError> { + if Self::already_migrated() { + return Ok(Vec::new()) + } + + let legacy_paras = paras::Parachains::::get(); + let config = >::config(); + let total_core_count = config.coretime_cores + legacy_paras.len() as u32; + + let dmp_queue_size = + crate::dmp::Pallet::::dmq_contents(T::BrokerId::get().into()).len() as u32; + + let total_core_count = total_core_count as u32; + + Ok((total_core_count, dmp_queue_size).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { + if state.is_empty() { + return Ok(()) + } + + log::trace!("Running post_upgrade()"); + + let (prev_core_count, prev_dmp_queue_size) = + <(u32, u32)>::decode(&mut &state[..]).unwrap(); + + let dmp_queue_size = + crate::dmp::Pallet::::dmq_contents(T::BrokerId::get().into()).len() as u32; + let new_core_count = assigner_coretime::Pallet::::session_core_count(); + ensure!(new_core_count == prev_core_count, "Total number of cores must not change."); + ensure!( + dmp_queue_size == prev_dmp_queue_size + 1, + "Exactly one DMP message should have been enqueued." + ); + + Ok(()) + } + } + + // Migrate to Coretime. + // + // NOTE: Also migrates coretime_cores config value in configuration::ActiveConfig. + fn migrate_to_coretime< + T: Config, + SendXcm: xcm::v3::SendXcm, + LegacyLease: GetLegacyLease>, + >() -> Weight { + let legacy_paras = paras::Pallet::::parachains(); + let legacy_count = legacy_paras.len() as u32; + let now = >::block_number(); + for (core, para_id) in legacy_paras.into_iter().enumerate() { + let r = assigner_coretime::Pallet::::assign_core( + CoreIndex(core as u32), + now, + vec![(CoreAssignment::Task(para_id.into()), PartsOf57600::FULL)], + None, + ); + if let Err(err) = r { + log::error!( + "Creating assignment for existing para failed: {:?}, error: {:?}", + para_id, + err + ); + } + } + + let config = >::config(); + // coretime_cores was on_demand_cores until now: + for on_demand in 0..config.coretime_cores { + let core = CoreIndex(legacy_count.saturating_add(on_demand as _)); + let r = assigner_coretime::Pallet::::assign_core( + core, + now, + vec![(CoreAssignment::Pool, PartsOf57600::FULL)], + None, + ); + if let Err(err) = r { + log::error!("Creating assignment for existing on-demand core failed: {:?}", err); + } + } + let total_cores = config.coretime_cores + legacy_count; + configuration::ActiveConfig::::mutate(|c| { + c.coretime_cores = total_cores; + }); + + if let Err(err) = migrate_send_assignments_to_coretime_chain::() { + log::error!("Sending legacy chain data to coretime chain failed: {:?}", err); + } + + let single_weight = ::WeightInfo::assign_core(1); + single_weight + .saturating_mul(u64::from(legacy_count.saturating_add(config.coretime_cores))) + // Second read from sending assignments to the coretime chain.
+ .saturating_add(T::DbWeight::get().reads_writes(2, 1)) + } + + fn migrate_send_assignments_to_coretime_chain< + T: Config, + SendXcm: xcm::v3::SendXcm, + LegacyLease: GetLegacyLease>, + >() -> result::Result<(), SendError> { + let legacy_paras = paras::Pallet::::parachains(); + let legacy_paras_count = legacy_paras.len(); + let (system_chains, lease_holding): (Vec<_>, Vec<_>) = + legacy_paras.into_iter().partition(IsSystem::is_system); + + let reservations = system_chains.into_iter().map(|p| { + let schedule = BoundedVec::truncate_from(vec![ScheduleItem { + mask: CoreMask::complete(), + assignment: CoreAssignment::Task(p.into()), + }]); + mk_coretime_call(crate::coretime::CoretimeCalls::Reserve(schedule)) + }); + + let leases = lease_holding.into_iter().filter_map(|p| { + log::trace!(target: "coretime-migration", "Preparing sending of lease holding para {:?}", p); + let Some(valid_until) = LegacyLease::get_parachain_lease_in_blocks(p) else { + log::error!("Lease holding chain with no lease information?!"); + return None + }; + let valid_until: u32 = match valid_until.try_into() { + Ok(val) => val, + Err(_) => { + log::error!("Converting block number to u32 failed!"); + return None + }, + }; + // We assume the coretime chain set this parameter to the recommened value in RFC-1: + const TIME_SLICE_PERIOD: u32 = 80; + let round_up = if valid_until % TIME_SLICE_PERIOD > 0 { 1 } else { 0 }; + let time_slice = valid_until / TIME_SLICE_PERIOD + TIME_SLICE_PERIOD * round_up; + log::trace!(target: "coretime-migration", "Sending of lease holding para {:?}, valid_until: {:?}, time_slice: {:?}", p, valid_until, time_slice); + Some(mk_coretime_call(crate::coretime::CoretimeCalls::SetLease(p.into(), time_slice))) + }); + + let core_count: u16 = configuration::Pallet::::config().coretime_cores.saturated_into(); + let set_core_count = iter::once(mk_coretime_call( + crate::coretime::CoretimeCalls::NotifyCoreCount(core_count), + )); + + let pool = (legacy_paras_count..core_count.into()).map(|_| { + let schedule = BoundedVec::truncate_from(vec![ScheduleItem { + mask: CoreMask::complete(), + assignment: CoreAssignment::Pool, + }]); + // Reserved cores will come before lease cores, so cores will change their assignments + // when coretime chain sends us their assign_core calls -> Good test. + mk_coretime_call(crate::coretime::CoretimeCalls::Reserve(schedule)) + }); + + let message_content = iter::once(Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }) + .chain(reservations) + .chain(pool) + .chain(leases) + .chain(set_core_count) + .collect(); + + let message = Xcm(message_content); + + send_xcm::( + MultiLocation { + parents: 0, + interior: Junctions::X1(Junction::Parachain(T::BrokerId::get())), + }, + message, + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..d5b044c0631e9362c4c0484497081aa5d3341df8 --- /dev/null +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -0,0 +1,251 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Extrinsics implementing the relay chain side of the Coretime interface. +//! +//! + +use sp_std::{prelude::*, result}; + +use frame_support::{pallet_prelude::*, traits::Currency}; +use frame_system::pallet_prelude::*; +pub use pallet::*; +use pallet_broker::{CoreAssignment, CoreIndex as BrokerCoreIndex}; +use primitives::{CoreIndex, Id as ParaId}; +use sp_arithmetic::traits::SaturatedConversion; +use xcm::v3::{ + send_xcm, Instruction, Junction, Junctions, MultiLocation, OriginKind, SendXcm, Xcm, +}; + +use crate::{ + assigner_coretime::{self, PartsOf57600}, + initializer::{OnNewSession, SessionChangeNotification}, + origin::{ensure_parachain, Origin}, +}; + +mod benchmarking; +pub mod migration; + +pub trait WeightInfo { + fn request_core_count() -> Weight; + //fn request_revenue_info_at() -> Weight; + //fn credit_account() -> Weight; + fn assign_core(s: u32) -> Weight; +} + +/// A weight info that is only suitable for testing. +pub struct TestWeightInfo; + +impl WeightInfo for TestWeightInfo { + fn request_core_count() -> Weight { + Weight::MAX + } + // TODO: Add real benchmarking functionality for each of these to + // benchmarking.rs, then uncomment here and in trait definition. + /*fn request_revenue_info_at() -> Weight { + Weight::MAX + } + fn credit_account() -> Weight { + Weight::MAX + }*/ + fn assign_core(_s: u32) -> Weight { + Weight::MAX + } +} + +/// Broker pallet index on the coretime chain. Used to +/// +/// construct remote calls. The codec index must correspond to the index of `Broker` in the +/// `construct_runtime` of the coretime chain. +#[derive(Encode, Decode)] +enum BrokerRuntimePallets { + #[codec(index = 50)] + Broker(CoretimeCalls), +} + +/// Call encoding for the calls needed from the Broker pallet. +#[derive(Encode, Decode)] +enum CoretimeCalls { + #[codec(index = 1)] + Reserve(pallet_broker::Schedule), + #[codec(index = 3)] + SetLease(pallet_broker::TaskId, pallet_broker::Timeslice), + #[codec(index = 19)] + NotifyCoreCount(u16), +} + +#[frame_support::pallet] +pub mod pallet { + use crate::configuration; + + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + assigner_coretime::Config { + type RuntimeOrigin: From<::RuntimeOrigin> + + Into::RuntimeOrigin>>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// The runtime's definition of a Currency. + type Currency: Currency; + /// The ParaId of the broker system parachain. + #[pallet::constant] + type BrokerId: Get; + /// Something that provides the weight of this pallet. + type WeightInfo: WeightInfo; + type SendXcm: SendXcm; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// The broker chain has asked for revenue information for a specific block. + RevenueInfoRequested { when: BlockNumberFor }, + /// A core has received a new assignment from the broker chain. + CoreAssigned { core: CoreIndex }, + } + + #[pallet::error] + pub enum Error { + /// The paraid making the call is not the coretime brokerage system parachain. 
+ NotBroker, + } + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(::WeightInfo::request_core_count())] + #[pallet::call_index(1)] + pub fn request_core_count(origin: OriginFor, count: u16) -> DispatchResult { + // Ignore requests not coming from the broker parachain or root. + Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; + + configuration::Pallet::::set_coretime_cores_unchecked(u32::from(count)) + } + + //// TODO Impl me! + ////#[pallet::weight(::WeightInfo::request_revenue_info_at())] + //#[pallet::call_index(2)] + //pub fn request_revenue_info_at( + // origin: OriginFor, + // _when: BlockNumberFor, + //) -> DispatchResult { + // // Ignore requests not coming from the broker parachain or root. + // Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; + // Ok(()) + //} + + //// TODO Impl me! + ////#[pallet::weight(::WeightInfo::credit_account())] + //#[pallet::call_index(3)] + //pub fn credit_account( + // origin: OriginFor, + // _who: T::AccountId, + // _amount: BalanceOf, + //) -> DispatchResult { + // // Ignore requests not coming from the broker parachain or root. + // Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; + // Ok(()) + //} + + /// Receive instructions from the `ExternalBrokerOrigin`, detailing how a specific core is + /// to be used. + /// + /// Parameters: + /// -`origin`: The `ExternalBrokerOrigin`, assumed to be the Broker system parachain. + /// -`core`: The core that should be scheduled. + /// -`begin`: The starting blockheight of the instruction. + /// -`assignment`: How the blockspace should be utilised. + /// -`end_hint`: An optional hint as to when this particular set of instructions will end. + // The broker pallet's `CoreIndex` definition is `u16` but on the relay chain it's `struct + // CoreIndex(u32)` + #[pallet::call_index(4)] + #[pallet::weight(::WeightInfo::assign_core(assignment.len() as u32))] + pub fn assign_core( + origin: OriginFor, + core: BrokerCoreIndex, + begin: BlockNumberFor, + assignment: Vec<(CoreAssignment, PartsOf57600)>, + end_hint: Option>, + ) -> DispatchResult { + // Ignore requests not coming from the broker parachain or root. + Self::ensure_root_or_para(origin, T::BrokerId::get().into())?; + + let core = u32::from(core).into(); + + >::assign_core(core, begin, assignment, end_hint)?; + Self::deposit_event(Event::::CoreAssigned { core }); + Ok(()) + } + } +} + +impl Pallet { + /// Ensure the origin is one of Root or the `para` itself. + fn ensure_root_or_para( + origin: ::RuntimeOrigin, + id: ParaId, + ) -> DispatchResult { + if let Ok(caller_id) = ensure_parachain(::RuntimeOrigin::from(origin.clone())) + { + // Check if matching para id... + ensure!(caller_id == id, Error::::NotBroker); + } else { + // Check if root... 
+ ensure_root(origin.clone())?; + } + Ok(()) + } + + pub fn initializer_on_new_session(notification: &SessionChangeNotification>) { + let old_core_count = notification.prev_config.coretime_cores; + let new_core_count = notification.new_config.coretime_cores; + if new_core_count != old_core_count { + let core_count: u16 = new_core_count.saturated_into(); + let message = Xcm(vec![mk_coretime_call( + crate::coretime::CoretimeCalls::NotifyCoreCount(core_count), + )]); + if let Err(err) = send_xcm::( + MultiLocation { + parents: 0, + interior: Junctions::X1(Junction::Parachain(T::BrokerId::get())), + }, + message, + ) { + log::error!("Sending `NotifyCoreCount` to coretime chain failed: {:?}", err); + } + } + } +} + +impl OnNewSession> for Pallet { + fn on_new_session(notification: &SessionChangeNotification>) { + Self::initializer_on_new_session(notification); + } +} + +fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruction<()> { + Instruction::Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: Weight::from_parts(1000000000, 200000), + call: BrokerRuntimePallets::Broker(call).encode().into(), + } +} diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index cf2e99e7359abf59d47d3d0c03667a860ab57559..c2383dad3053882b7abec959446b23d149aa4a5f 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -25,11 +25,11 @@ use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use polkadot_runtime_metrics::get_current_time; use primitives::{ - byzantine_threshold, supermajority_threshold, ApprovalVote, CandidateHash, - CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CompactStatement, ConsensusLog, - DisputeState, DisputeStatement, DisputeStatementSet, ExplicitDisputeStatement, - InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, SigningContext, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + byzantine_threshold, supermajority_threshold, vstaging::ApprovalVoteMultipleCandidates, + ApprovalVote, CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, + CompactStatement, ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, + ExplicitDisputeStatement, InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, + SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -952,6 +952,8 @@ impl Pallet { None => return StatementSetFilter::RemoveAll, }; + let config = >::config(); + let n_validators = session_info.validators.len(); // Check for ancient. @@ -1015,7 +1017,14 @@ impl Pallet { set.session, statement, signature, + // This is here to prevent malicious nodes of generating + // `ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates` before that + // is enabled, via setting `max_approval_coalesce_count` in the parachain host + // config. 
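An aside on the `coretime.rs` code above: the `#[codec(index = ...)]` attributes on `BrokerRuntimePallets` and `CoretimeCalls` are what keep the calls built by `mk_coretime_call` decodable on the coretime chain, because SCALE encodes an enum variant as its index byte followed by its fields. A minimal, illustrative sketch of that property (the two enums are mirrored locally here rather than imported from the pallet):

use parity_scale_codec::Encode;

#[derive(Encode)]
enum CoretimeCalls {
	#[codec(index = 19)]
	NotifyCoreCount(u16),
}

#[derive(Encode)]
enum BrokerRuntimePallets {
	#[codec(index = 50)]
	Broker(CoretimeCalls),
}

fn main() {
	let call = BrokerRuntimePallets::Broker(CoretimeCalls::NotifyCoreCount(5));
	// Pallet index first, then the call index, then the `u16` argument in little endian.
	assert_eq!(call.encode(), vec![50u8, 19, 5, 0]);
}

If index 50 ever stops matching `Broker`'s position in the coretime chain's `construct_runtime`, or 19 stops matching the target call, the XCM `Transact` sent by `initializer_on_new_session` would dispatch the wrong call, which is exactly what the doc comment on `BrokerRuntimePallets` warns about.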
+ config.approval_voting_params.max_approval_coalesce_count > 1, ) { + log::warn!("Failed to check dispute signature"); + importer.undo(undo); filter.remove_index(i); continue @@ -1260,22 +1269,31 @@ fn check_signature( session: SessionIndex, statement: &DisputeStatement, validator_signature: &ValidatorSignature, + approval_multiple_candidates_enabled: bool, ) -> Result<(), ()> { - let payload = match *statement { + let payload = match statement { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) => ExplicitDisputeStatement { valid: true, candidate_hash, session }.signing_payload(), DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded(inclusion_parent)) => CompactStatement::Seconded(candidate_hash).signing_payload(&SigningContext { session_index: session, - parent_hash: inclusion_parent, + parent_hash: *inclusion_parent, }), DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid(inclusion_parent)) => CompactStatement::Valid(candidate_hash).signing_payload(&SigningContext { session_index: session, - parent_hash: inclusion_parent, + parent_hash: *inclusion_parent, }), DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) => ApprovalVote(candidate_hash).signing_payload(session), + DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates( + candidates, + )) => + if approval_multiple_candidates_enabled && candidates.contains(&candidate_hash) { + ApprovalVoteMultipleCandidates(candidates).signing_payload(session) + } else { + return Err(()) + }, DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit) => ExplicitDisputeStatement { valid: false, candidate_hash, session }.signing_payload(), }; diff --git a/polkadot/runtime/parachains/src/disputes/tests.rs b/polkadot/runtime/parachains/src/disputes/tests.rs index 0757084084f64349e14e9f9df192fb3a479db7db..1f3f00132d680ce16b9fdf65e21a5af92f89caea 100644 --- a/polkadot/runtime/parachains/src/disputes/tests.rs +++ b/polkadot/runtime/parachains/src/disputes/tests.rs @@ -1500,7 +1500,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_1 + &signed_1, + true, ) .is_ok()); assert!(check_signature( @@ -1508,7 +1509,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_1 + &signed_1, + true ) .is_err()); assert!(check_signature( @@ -1516,7 +1518,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_1, - &signed_1 + &signed_1, + true, ) .is_err()); assert!(check_signature( @@ -1524,7 +1527,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_1, - &signed_1 + &signed_1, + true ) .is_err()); assert!(check_signature( @@ -1532,7 +1536,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_1 + &signed_1, + true, ) .is_err()); assert!(check_signature( @@ -1540,7 +1545,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_1 + &signed_1, + true ) .is_err()); assert!(check_signature( @@ -1548,7 +1554,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_1 + &signed_1, + true ) .is_err()); assert!(check_signature( @@ -1556,7 +1563,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_1 + &signed_1, + true, ) .is_err()); @@ -1565,7 +1573,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_2 + &signed_2, + true, ) .is_ok()); assert!(check_signature( @@ -1573,7 +1582,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_2 + 
&signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1581,7 +1591,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_2, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1589,7 +1600,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_2, - &signed_2 + &signed_2, + true ) .is_err()); assert!(check_signature( @@ -1597,7 +1609,8 @@ fn test_check_signature() { candidate_hash, session, &wrong_statement_2, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1605,7 +1618,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1613,7 +1627,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1621,7 +1636,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1629,7 +1645,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_2 + &signed_2, + true, ) .is_err()); @@ -1638,7 +1655,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_3 + &signed_3, + true, ) .is_ok()); assert!(check_signature( @@ -1646,7 +1664,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1654,7 +1673,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_3, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1662,7 +1682,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_3, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1670,7 +1691,8 @@ fn test_check_signature() { candidate_hash, session, &wrong_statement_3, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1678,7 +1700,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1686,7 +1709,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_3 + &signed_3, + true ) .is_err()); assert!(check_signature( @@ -1694,7 +1718,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1702,7 +1727,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_3 + &signed_3, + true, ) .is_err()); @@ -1711,7 +1737,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_4 + &signed_4, + true, ) .is_ok()); assert!(check_signature( @@ -1719,7 +1746,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1727,7 +1755,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_4, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1735,7 +1764,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_4, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1743,7 +1773,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1751,7 +1782,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_4 + &signed_4, + true, 
) .is_err()); assert!(check_signature( @@ -1759,7 +1791,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1767,7 +1800,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_4 + &signed_4, + true, ) .is_err()); @@ -1776,7 +1810,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_5 + &signed_5, + true, ) .is_ok()); assert!(check_signature( @@ -1784,7 +1819,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1792,7 +1828,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_5, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1800,7 +1837,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_5, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1808,7 +1846,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1816,7 +1855,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1824,7 +1864,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1832,7 +1873,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_5 + &signed_5, + true, ) .is_err()); } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 6bb731671f6f8afbb350dff2a2821f6ab31da5a0..232e65d78ed2aef86aa7903d88cbe99236ba9ec1 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -47,7 +47,7 @@ use test_helpers::{dummy_collator, dummy_collator_signature, dummy_validation_co fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); - config.on_demand_cores = 1; + config.coretime_cores = 1; config.max_code_size = 0b100000; config.max_head_data_size = 0b100000; config.group_rotation_frequency = u32::MAX; @@ -218,7 +218,7 @@ pub(crate) fn run_to_block( } pub(crate) fn expected_bits() -> usize { - Paras::parachains().len() + Configuration::config().on_demand_cores as usize + Paras::parachains().len() + Configuration::config().coretime_cores as usize } fn default_bitfield() -> AvailabilityBitfield { diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs index b4f8721be5188e6fd8840e18accdfcaababdf61a..3c8ab7c4726fe1687cebd7ca516584f6cd35e431 100644 --- a/polkadot/runtime/parachains/src/initializer.rs +++ b/polkadot/runtime/parachains/src/initializer.rs @@ -60,6 +60,16 @@ pub struct SessionChangeNotification { pub session_index: SessionIndex, } +/// Inform something about a new session. +pub trait OnNewSession { + /// A new session was started. + fn on_new_session(notification: &SessionChangeNotification); +} + +impl OnNewSession for () { + fn on_new_session(_: &SessionChangeNotification) {} +} + /// Number of validators (not only parachain) in a session. pub type ValidatorSetCount = u32; @@ -120,6 +130,10 @@ pub mod pallet { type Randomness: Randomness>; /// An origin which is allowed to force updates to parachains. 
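The `OnNewSession` hook added to `initializer.rs` above is intentionally minimal: the initializer just forwards its `SessionChangeNotification` to whatever type the runtime plugs in. A self-contained sketch of the pattern, with a simplified notification type standing in for the real `SessionChangeNotification<BlockNumber>` and a made-up `CountSessions` handler:

// Simplified stand-in for the real notification type; illustration only.
#[allow(dead_code)]
pub struct SessionChangeNotification<N> {
	pub session_index: u32,
	pub block_number: N,
}

pub trait OnNewSession<N> {
	fn on_new_session(notification: &SessionChangeNotification<N>);
}

// Chains without a coretime pallet wire in `()`, which simply ignores the notification.
impl<N> OnNewSession<N> for () {
	fn on_new_session(_: &SessionChangeNotification<N>) {}
}

// Hypothetical handler, playing the role `Coretime` plays via
// `type CoretimeOnNewSession = Coretime;` in the test runtime further down.
pub struct CountSessions;
impl<N> OnNewSession<N> for CountSessions {
	fn on_new_session(n: &SessionChangeNotification<N>) {
		println!("new session: {}", n.session_index);
	}
}

fn main() {
	let note = SessionChangeNotification { session_index: 7, block_number: 42u32 };
	<() as OnNewSession<u32>>::on_new_session(&note);
	CountSessions::on_new_session(&note);
}

With this shape the initializer only needs the single added call `T::CoretimeOnNewSession::on_new_session(&notification)`, and dropping the temporary associated type later is a one-line change per runtime.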
type ForceOrigin: EnsureOrigin<::RuntimeOrigin>; + /// Temporary hack to call `Coretime::on_new_session` on chains that support `Coretime` or + /// to disable it on the ones that don't support it. Can be removed and replaced by a simple + /// bound to `coretime::Config` once all chains support it. + type CoretimeOnNewSession: OnNewSession>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -271,6 +285,7 @@ impl Pallet { T::SlashingHandler::initializer_on_new_session(session_index); dmp::Pallet::::initializer_on_new_session(¬ification, &outgoing_paras); hrmp::Pallet::::initializer_on_new_session(¬ification, &outgoing_paras); + T::CoretimeOnNewSession::on_new_session(¬ification); } /// Should be called when a new session occurs. Buffers the session notification to be applied diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 2509edbee3cbe00420bf734fe0df920d122460cc..b0dc27b72863fe76d5d23a9bf18408907d2b89dd 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -23,10 +23,11 @@ #![cfg_attr(feature = "runtime-benchmarks", recursion_limit = "256")] #![cfg_attr(not(feature = "std"), no_std)] -pub mod assigner; +pub mod assigner_coretime; pub mod assigner_on_demand; pub mod assigner_parachains; pub mod configuration; +pub mod coretime; pub mod disputes; pub mod dmp; pub mod hrmp; diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 9df54bf29d3ebbc04fcd21b8f76a3a0c73781c37..fbaab1d24aafcf686582789ec06988f3179d267b 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -17,15 +17,20 @@ //! Mocks for all the traits. use crate::{ - assigner, assigner_on_demand, assigner_parachains, configuration, disputes, dmp, hrmp, + assigner_coretime, assigner_on_demand, assigner_parachains, configuration, coretime, disputes, + dmp, hrmp, inclusion::{self, AggregateMessageOrigin, UmpQueueId}, initializer, origin, paras, paras::ParaKind, - paras_inherent, scheduler, session_info, shared, ParaId, + paras_inherent, scheduler, + scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, + session_info, shared, ParaId, }; +use frame_support::pallet_prelude::*; +use primitives::CoreIndex; use frame_support::{ - assert_ok, parameter_types, + assert_ok, derive_impl, parameter_types, traits::{ Currency, ProcessMessage, ProcessMessageError, ValidatorSet, ValidatorSetWithIdentification, }, @@ -45,7 +50,9 @@ use sp_runtime::{ transaction_validity::TransactionPriority, BuildStorage, FixedU128, Perbill, Permill, }; +use sp_std::collections::vec_deque::VecDeque; use std::{cell::RefCell, collections::HashMap}; +use xcm::v3::{MultiAssets, MultiLocation, SendError, SendResult, SendXcm, Xcm, XcmHash}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlockU32; @@ -62,9 +69,11 @@ frame_support::construct_runtime!( ParaInclusion: inclusion, ParaInherent: paras_inherent, Scheduler: scheduler, - Assigner: assigner, - OnDemandAssigner: assigner_on_demand, + MockAssigner: mock_assigner, ParachainsAssigner: assigner_parachains, + OnDemandAssigner: assigner_on_demand, + CoretimeAssigner: assigner_coretime, + Coretime: coretime, Initializer: initializer, Dmp: dmp, Hrmp: hrmp, @@ -94,6 +103,7 @@ parameter_types! 
{ pub type AccountId = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; @@ -177,6 +187,7 @@ impl crate::initializer::Config for Test { type Randomness = TestRandomness; type ForceOrigin = frame_system::EnsureRoot; type WeightInfo = (); + type CoretimeOnNewSession = Coretime; } impl crate::configuration::Config for Test { @@ -216,6 +227,7 @@ impl crate::paras::Config for Test { type QueueFootprinter = ParaInclusion; type NextSessionRotation = TestNextSessionRotation; type OnNewHead = (); + type AssignCoretime = (); } impl crate::dmp::Config for Test {} @@ -287,7 +299,7 @@ impl crate::disputes::SlashingHandler for Test { } impl crate::scheduler::Config for Test { - type AssignmentProvider = Assigner; + type AssignmentProvider = MockAssigner; } pub struct TestMessageQueueWeight; @@ -341,17 +353,12 @@ impl pallet_message_queue::Config for Test { type ServiceWeight = MessageQueueServiceWeight; } -impl assigner::Config for Test { - type ParachainsAssignmentProvider = ParachainsAssigner; - type OnDemandAssignmentProvider = OnDemandAssigner; -} - -impl assigner_parachains::Config for Test {} - parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); } +impl assigner_parachains::Config for Test {} + impl assigner_on_demand::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; @@ -359,6 +366,37 @@ impl assigner_on_demand::Config for Test { type WeightInfo = crate::assigner_on_demand::TestWeightInfo; } +impl assigner_coretime::Config for Test {} + +parameter_types! { + pub const BrokerId: u32 = 10u32; +} + +impl coretime::Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type Currency = pallet_balances::Pallet; + type BrokerId = BrokerId; + type WeightInfo = crate::coretime::TestWeightInfo; + type SendXcm = DummyXcmSender; +} + +pub struct DummyXcmSender; +impl SendXcm for DummyXcmSender { + type Ticket = (); + fn validate( + _: &mut Option, + _: &mut Option>, + ) -> SendResult { + Ok(((), MultiAssets::new())) + } + + /// Actually carry out the delivery operation for a previously validated message sending. + fn deliver(_ticket: Self::Ticket) -> Result { + Ok([0u8; 32]) + } +} + impl crate::inclusion::Config for Test { type WeightInfo = (); type RuntimeEvent = RuntimeEvent; @@ -389,6 +427,104 @@ impl ValidatorSetWithIdentification for MockValidatorSet { type IdentificationOf = FoolIdentificationOf; } +/// A mock assigner which acts as the scheduler's `AssignmentProvider` for tests. The mock +/// assigner provides bare minimum functionality to test scheduler internals. Since they +/// have no direct effect on scheduler state, AssignmentProvider functions such as +/// `push_back_assignment` can be left empty. 
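A rough sketch of how a scheduler test would lean on this mock, using `set_core_count`, `add_test_assignment` and `Assignment::Bulk` from the module defined just below; the exact imports, block-number handling and assertion are hypothetical and depend on the genesis configuration (e.g. a non-zero `scheduling_lookahead`):

use crate::mock::{new_test_ext, MockAssigner, MockGenesisConfig, Scheduler, System};
use crate::{scheduler::common::Assignment, ParaId};

#[test]
fn mock_assigner_feeds_the_scheduler() {
	new_test_ext(MockGenesisConfig::default()).execute_with(|| {
		// Pretend this session has two cores and queue one bulk assignment for para 1.
		MockAssigner::set_core_count(2);
		MockAssigner::add_test_assignment(Assignment::Bulk(ParaId::from(1u32)));

		// Filling the claim queue pops that assignment from the mock...
		Scheduler::free_cores_and_fill_claimqueue(Vec::new(), System::block_number());

		// ...and, given enough lookahead, exposes para 1 as scheduled on some core.
		assert!(Scheduler::scheduled_paras().any(|(_, para)| para == ParaId::from(1u32)));
	});
}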
+pub mod mock_assigner { + use crate::scheduler::common::Assignment; + + use super::*; + pub use pallet::*; + + #[frame_support::pallet] + pub mod pallet { + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + configuration::Config + paras::Config {} + + #[pallet::storage] + pub(super) type MockAssignmentQueue = + StorageValue<_, VecDeque, ValueQuery>; + + #[pallet::storage] + pub(super) type MockProviderConfig = + StorageValue<_, AssignmentProviderConfig, OptionQuery>; + + #[pallet::storage] + pub(super) type MockCoreCount = StorageValue<_, u32, OptionQuery>; + } + + impl Pallet { + /// Adds a claim to the `MockAssignmentQueue` this claim can later be popped by the + /// scheduler when filling the claim queue for tests. + pub fn add_test_assignment(assignment: Assignment) { + MockAssignmentQueue::::mutate(|queue| queue.push_back(assignment)); + } + + // This configuration needs to be customized to service `get_provider_config` in + // scheduler tests. + pub fn set_assignment_provider_config(config: AssignmentProviderConfig) { + MockProviderConfig::::set(Some(config)); + } + + // Allows for customized core count in scheduler tests, rather than a core count + // derived from on-demand config + parachain count. + pub fn set_core_count(count: u32) { + MockCoreCount::::set(Some(count)); + } + } + + impl AssignmentProvider for Pallet { + // With regards to popping_assignments, the scheduler just needs to be tested under + // the following two conditions: + // 1. An assignment is provided + // 2. No assignment is provided + // A simple assignment queue populated to fit each test fulfills these needs. + fn pop_assignment_for_core(_core_idx: CoreIndex) -> Option { + let mut queue: VecDeque = MockAssignmentQueue::::get(); + let front = queue.pop_front(); + // Write changes to storage. + MockAssignmentQueue::::set(queue); + front + } + + // We don't care about core affinity in the test assigner + fn report_processed(_assignment: Assignment) {} + + // The results of this are tested in assigner_on_demand tests. No need to represent it + // in the mock assigner. + fn push_back_assignment(_assignment: Assignment) {} + + // Gets the provider config we set earlier using `set_assignment_provider_config`, falling + // back to the on demand parachain configuration if none was set. + fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig { + match MockProviderConfig::::get() { + Some(config) => config, + None => AssignmentProviderConfig { + max_availability_timeouts: 1, + ttl: BlockNumber::from(5u32), + }, + } + } + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Assignment { + Assignment::Bulk(para_id) + } + + fn session_core_count() -> u32 { + MockCoreCount::::get().unwrap_or(5) + } + } +} + +impl mock_assigner::pallet::Config for Test {} + pub struct FoolIdentificationOf; impl sp_runtime::traits::Convert> for FoolIdentificationOf { fn convert(_: AccountId) -> Option<()> { diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index cd73d23bdadb398d8e852b71fce3c09f5b97d94e..e97df8e4a2b3aaeee4e4d69c3c39262c23780588 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -506,6 +506,21 @@ impl OnNewHead for Tuple { } } +/// Assign coretime to some parachain. 
+/// +/// This assigns coretime to a parachain without using the coretime chain. Thus, this should only be +/// used for testing purposes. +pub trait AssignCoretime { + /// ONLY USE FOR TESTING OR GENESIS. + fn assign_coretime(id: ParaId) -> DispatchResult; +} + +impl AssignCoretime for () { + fn assign_coretime(_: ParaId) -> DispatchResult { + Ok(()) + } +} + pub trait WeightInfo { fn force_set_current_code(c: u32) -> Weight; fn force_set_current_head(s: u32) -> Weight; @@ -605,6 +620,13 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// Runtime hook for assigning coretime for a given parachain. + /// + /// This is only used at genesis or by root. + /// + /// TODO: Remove once coretime is the standard accross all chains. + type AssignCoretime: AssignCoretime; } #[pallet::event] @@ -838,6 +860,8 @@ pub mod pallet { panic!("empty validation code is not allowed in genesis"); } Pallet::::initialize_para_now(&mut parachains, *id, genesis_args); + T::AssignCoretime::assign_coretime(*id) + .expect("Assigning coretime works at genesis; qed"); } // parachains are flushed on drop } @@ -2064,7 +2088,7 @@ impl Pallet { } /// Submits a given PVF check statement with corresponding signature as an unsigned transaction - /// into the memory pool. Ultimately, that disseminates the transaction accross the network. + /// into the memory pool. Ultimately, that disseminates the transaction across the network. /// /// This function expects an offchain context and cannot be callable from the on-chain logic. /// diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 3043127c3174660b72a55ce850bb9d05c2330ad2..0f6b23ae1b39213f8afbd37798f8f7f31a024cf9 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -18,7 +18,9 @@ use super::*; use crate::{inclusion, ParaId}; use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_system::RawOrigin; -use sp_std::collections::btree_map::BTreeMap; +use sp_std::{cmp::min, collections::btree_map::BTreeMap}; + +use primitives::v6::GroupIndex; use crate::builder::BenchBuilder; @@ -116,7 +118,9 @@ benchmarks! { // There is 1 backed, assert_eq!(benchmark.backed_candidates.len(), 1); // with `v` validity votes. - assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), v as usize); + // let votes = v as usize; + let votes = min(scheduler::Pallet::::group_validators(GroupIndex::from(0)).unwrap().len(), v as usize); + assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), votes); benchmark.bitfields.clear(); benchmark.disputes.clear(); @@ -132,13 +136,13 @@ benchmarks! 
{ // Ensure that the votes are for the correct session assert_eq!(vote.session, scenario._session); // Ensure that there are an expected number of candidates - let header = BenchBuilder::::header(scenario._block_number.clone()); + let header = BenchBuilder::::header(scenario._block_number); // Traverse candidates and assert descriptors are as expected for (para_id, backing_validators) in vote.backing_validators_per_candidate.iter().enumerate() { let descriptor = backing_validators.0.descriptor(); assert_eq!(ParaId::from(para_id), descriptor.para_id); assert_eq!(header.hash(), descriptor.relay_parent); - assert_eq!(backing_validators.1.len(), v as usize); + assert_eq!(backing_validators.1.len(), votes); } assert_eq!( @@ -167,11 +171,14 @@ benchmarks! { let mut benchmark = scenario.data.clone(); + // let votes = BenchBuilder::::fallback_min_validity_votes() as usize; + let votes = min(scheduler::Pallet::::group_validators(GroupIndex::from(0)).unwrap().len(), BenchBuilder::::fallback_min_validity_votes() as usize); + // There is 1 backed assert_eq!(benchmark.backed_candidates.len(), 1); assert_eq!( - benchmark.backed_candidates.get(0).unwrap().validity_votes.len() as u32, - BenchBuilder::::fallback_min_validity_votes() + benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), + votes, ); benchmark.bitfields.clear(); @@ -189,7 +196,7 @@ benchmarks! { // Ensure that the votes are for the correct session assert_eq!(vote.session, scenario._session); // Ensure that there are an expected number of candidates - let header = BenchBuilder::::header(scenario._block_number.clone()); + let header = BenchBuilder::::header(scenario._block_number); // Traverse candidates and assert descriptors are as expected for (para_id, backing_validators) in vote.backing_validators_per_candidate.iter().enumerate() { @@ -197,8 +204,8 @@ benchmarks! 
{ assert_eq!(ParaId::from(para_id), descriptor.para_id); assert_eq!(header.hash(), descriptor.relay_parent); assert_eq!( - backing_validators.1.len() as u32, - BenchBuilder::::fallback_min_validity_votes() + backing_validators.1.len(), + votes, ); } diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 8e918d35d5ff0d8af9ae77408b92fc53f9670853..8c33199c092371060aca56ad89d8d888310513c8 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -548,7 +548,7 @@ impl Pallet { let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.keys()); if !freed_disputed.is_empty() { - >::update_claimqueue(freed_disputed.clone(), now); + >::free_cores_and_fill_claimqueue(freed_disputed.clone(), now); } let bitfields = sanitize_bitfields::( @@ -580,7 +580,7 @@ impl Pallet { let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); - >::update_claimqueue(freed, now); + >::free_cores_and_fill_claimqueue(freed, now); let scheduled = >::scheduled_paras() .map(|(core_idx, para_id)| (para_id, core_idx)) .collect(); diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 4fc60792e34683d223873a6e0ee39d70d380709e..e62d1cb68ffea39ced5edb3c0034fa5451744ef5 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -25,7 +25,8 @@ mod enter { use super::*; use crate::{ builder::{Bench, BenchBuilder}, - mock::{new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, + mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, + scheduler::common::Assignment, }; use assert_matches::assert_matches; use frame_support::assert_ok; @@ -39,6 +40,7 @@ mod enter { backed_and_concluding: BTreeMap, num_validators_per_core: u32, code_upgrade: Option, + fill_claimqueue: bool, } fn make_inherent_data( @@ -48,6 +50,7 @@ mod enter { backed_and_concluding, num_validators_per_core, code_upgrade, + fill_claimqueue, }: TestConfig, ) -> Bench { let builder = BenchBuilder::::new() @@ -58,7 +61,15 @@ mod enter { .set_max_validators_per_core(num_validators_per_core) .set_dispute_statements(dispute_statements) .set_backed_and_concluding_cores(backed_and_concluding) - .set_dispute_sessions(&dispute_sessions[..]); + .set_dispute_sessions(&dispute_sessions[..]) + .set_fill_claimqueue(fill_claimqueue); + + // Setup some assignments as needed: + mock_assigner::Pallet::::set_core_count(builder.max_cores()); + for core_index in 0..builder.max_cores() { + // Core index == para_id in this case + mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk(core_index.into())); + } if let Some(code_size) = code_upgrade { builder.set_code_upgrade(code_size).build() @@ -88,6 +99,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, + fill_claimqueue: false, }); // We expect the scenario to have cores 0 & 1 with pending availability. 
The backed @@ -238,6 +250,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -308,6 +321,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 6, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -376,6 +390,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 4, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -460,6 +475,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -544,6 +560,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -627,6 +644,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -666,15 +684,9 @@ mod enter { // * 3 disputes. assert_eq!(limit_inherent_data.disputes.len(), 2); - assert_ok!(Pallet::::enter( - frame_system::RawOrigin::None.into(), - limit_inherent_data, - )); - - // TODO [now]: this assertion fails with async backing runtime. assert_eq!( - // The length of this vec is equal to the number of candidates, so we know our 2 - // backed candidates did not get filtered out + // The length of this vec is equal to the number of candidates, so we know 1 + // candidate got filtered out Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), 1 ); @@ -684,6 +696,11 @@ mod enter { Pallet::::on_chain_votes().unwrap().session, 2 ); + + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + limit_inherent_data, + )); }); } @@ -713,6 +730,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -778,6 +796,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -841,6 +860,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); @@ -905,6 +925,7 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, + fill_claimqueue: false, }); let expected_para_inherent_data = scenario.data.clone(); diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 4d0bbc6a8960fc1c1d3e70abff2e0175e7898dac..b3a060e1cb8a05439de7f48fdd6ff1a84c554496 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -62,7 +62,7 @@ pub fn availability_cores() -> Vec>::update_claimqueue(Vec::new(), now); + >::free_cores_and_fill_claimqueue(Vec::new(), now); let time_out_for = >::availability_timeout_predicate(); diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 24a076f3a44310990d61ad0076db39f4deb8f9e5..0da50f6a5373e16b8e4164440b7f4fa9deff52cb 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ 
b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -16,8 +16,11 @@
 //! Put implementations of functions from staging APIs here.
 
-use crate::shared;
-use primitives::ValidatorIndex;
+use crate::{configuration, initializer, shared};
+use primitives::{
+	vstaging::{ApprovalVotingParams, NodeFeatures},
+	ValidatorIndex,
+};
 use sp_std::{collections::btree_map::BTreeMap, prelude::Vec};
 
 /// Implementation for `DisabledValidators`
@@ -42,3 +45,14 @@ where
 		.filter_map(|v| reverse_index.get(v).cloned())
 		.collect()
 }
+
+/// Returns the current state of the node features.
+pub fn node_features() -> NodeFeatures {
+	>::config().node_features
+}
+
+/// Approval voting subsystem configuration parameters
+pub fn approval_voting_params() -> ApprovalVotingParams {
+	let config = >::config();
+	config.approval_voting_params
+}
diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs
index b81b68b5745ee57ce0736426abda5081aeb619f2..08ce656b2b284f3ecc5fa8ad0d451369ed22c453 100644
--- a/polkadot/runtime/parachains/src/scheduler.rs
+++ b/polkadot/runtime/parachains/src/scheduler.rs
@@ -65,7 +65,7 @@ pub mod migration;
 pub mod pallet {
 	use super::*;
 
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);
 
 	#[pallet::pallet]
 	#[pallet::without_storage_info]
@@ -99,15 +99,14 @@ pub mod pallet {
 	#[pallet::storage]
 	#[pallet::getter(fn availability_cores)]
 	pub(crate) type AvailabilityCores =
-		StorageValue<_, Vec>>, ValueQuery>;
+		StorageValue<_, Vec>, ValueQuery>;
 
 	/// Representation of a core in `AvailabilityCores`.
 	///
 	/// This is not to be confused with `CoreState` which is an enriched variant of this and exposed
 	/// to the node side. It also provides information about scheduled/upcoming assignments for
 	/// example and is computed on the fly in the `availability_cores` runtime call.
-	#[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)]
-	#[cfg_attr(feature = "std", derive(PartialEq))]
+	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)]
 	pub enum CoreOccupied {
 		/// No candidate is waiting availability on this core right now (the core is not occupied).
 		Free,
@@ -115,6 +114,9 @@ pub mod pallet {
 		Paras(ParasEntry),
 	}
 
+	/// Convenience type alias for `CoreOccupied`.
+	pub type CoreOccupiedType = CoreOccupied>;
+
 	impl CoreOccupied {
 		/// Is core free?
 		pub fn is_free(&self) -> bool {
@@ -149,16 +151,13 @@ pub mod pallet {
 	/// a block. Runtime APIs should be used to determine scheduled cores/ for the upcoming block.
 	#[pallet::storage]
 	#[pallet::getter(fn claimqueue)]
-	pub(crate) type ClaimQueue = StorageValue<
-		_,
-		BTreeMap>>>>,
-		ValueQuery,
-	>;
+	pub(crate) type ClaimQueue =
+		StorageValue<_, BTreeMap>>, ValueQuery>;
 
 	/// Assignments as tracked in the claim queue.
-	#[derive(Clone, Encode, Decode, TypeInfo, PartialEq, RuntimeDebug)]
-	pub struct ParasEntry {
-		/// The underlying `Assignment`
+	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq, Clone)]
+	pub struct ParasEntry {
+		/// The underlying [`Assignment`].
 		pub assignment: Assignment,
 		/// The number of times the entry has timed out in availability already.
 		pub availability_timeouts: u32,
@@ -169,37 +168,18 @@ pub mod pallet {
 		pub ttl: N,
 	}
 
-	impl ParasEntry {
-		/// Return `Id` from the underlying `Assignment`.
-		pub fn para_id(&self) -> ParaId {
-			self.assignment.para_id
-		}
+	/// Convenience type declaration for `ParasEntry`.
+ pub type ParasEntryType = ParasEntry>; + impl ParasEntry { /// Create a new `ParasEntry`. pub fn new(assignment: Assignment, now: N) -> Self { ParasEntry { assignment, availability_timeouts: 0, ttl: now } } - } - /// How a core is mapped to a backing group and a `ParaId` - #[derive(Clone, Encode, Decode, PartialEq, TypeInfo)] - #[cfg_attr(feature = "std", derive(Debug))] - pub struct CoreAssignment { - /// The core that is assigned. - pub core: CoreIndex, - /// The para id and accompanying information needed to collate and back a parablock. - pub paras_entry: ParasEntry, - } - - impl CoreAssignment { - /// Returns the [`ParaId`] of the assignment. + /// Return `Id` from the underlying `Assignment`. pub fn para_id(&self) -> ParaId { - self.paras_entry.para_id() - } - - /// Returns the inner [`ParasEntry`] of the assignment. - pub fn to_paras_entry(self) -> ParasEntry { - self.paras_entry + self.assignment.para_id() } } @@ -219,8 +199,6 @@ pub mod pallet { } type PositionInClaimqueue = u32; -type TimedoutParas = BTreeMap>>; -type ConcludedParas = BTreeMap; impl Pallet { /// Called by the initializer to initialize the scheduler pallet. @@ -253,7 +231,7 @@ impl Pallet { ); AvailabilityCores::::mutate(|cores| { - cores.resize(n_cores as _, CoreOccupied::Free); + cores.resize_with(n_cores as _, || CoreOccupied::Free); }); // shuffle validators into groups. @@ -298,9 +276,8 @@ impl Pallet { /// with the reason for them being freed. Returns a tuple of concluded and timedout paras. fn free_cores( just_freed_cores: impl IntoIterator, - ) -> (ConcludedParas, TimedoutParas) { - let mut timedout_paras: BTreeMap>> = - BTreeMap::new(); + ) -> (BTreeMap, BTreeMap>) { + let mut timedout_paras: BTreeMap> = BTreeMap::new(); let mut concluded_paras = BTreeMap::new(); AvailabilityCores::::mutate(|cores| { @@ -310,21 +287,22 @@ impl Pallet { .into_iter() .filter(|(freed_index, _)| (freed_index.0 as usize) < c_len) .for_each(|(freed_index, freed_reason)| { - match &cores[freed_index.0 as usize] { + match sp_std::mem::replace( + &mut cores[freed_index.0 as usize], + CoreOccupied::Free, + ) { CoreOccupied::Free => {}, CoreOccupied::Paras(entry) => { match freed_reason { FreedReason::Concluded => { - concluded_paras.insert(freed_index, entry.para_id()); + concluded_paras.insert(freed_index, entry.assignment); }, FreedReason::TimedOut => { - timedout_paras.insert(freed_index, entry.clone()); + timedout_paras.insert(freed_index, entry); }, }; }, }; - - cores[freed_index.0 as usize] = CoreOccupied::Free; }) }); @@ -379,30 +357,36 @@ impl Pallet { for (idx, _) in (0u32..).zip(availability_cores) { let core_idx = CoreIndex(idx); if let Some(core_claimqueue) = cq.get_mut(&core_idx) { - let mut dropped_claims: Vec> = vec![]; - core_claimqueue.retain(|maybe_entry| { - if let Some(entry) = maybe_entry { + let mut i = 0; + let mut num_dropped = 0; + while i < core_claimqueue.len() { + let maybe_dropped = if let Some(entry) = core_claimqueue.get(i) { if entry.ttl < now { - dropped_claims.push(Some(entry.para_id())); - return false + core_claimqueue.remove(i) + } else { + None } + } else { + None + }; + + if let Some(dropped) = maybe_dropped { + num_dropped += 1; + T::AssignmentProvider::report_processed(dropped.assignment); + } else { + i += 1; } - true - }); - - // For all claims dropped due to TTL, attempt to pop a new entry to - // the back of the claimqueue. 
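The `while`/`for` rewrite above makes the claim-queue TTL handling explicit: every entry whose `ttl` lies in the past is removed and reported back via `report_processed`, and for each drop one replacement assignment is popped to the back of the queue. A stripped-down model of that drop-and-refill behaviour with toy types (no pallet storage; `report_processed` and `pop_next` are just closures here):

use std::collections::VecDeque;

#[derive(Clone, Debug, PartialEq)]
struct Entry {
	para_id: u32,
	ttl: u32,
}

fn prune_and_refill(
	queue: &mut VecDeque<Entry>,
	now: u32,
	ttl: u32,
	mut report_processed: impl FnMut(Entry),
	mut pop_next: impl FnMut() -> Option<u32>,
) {
	let before = queue.len();
	// Drop every claim that missed its deadline and hand it back to the provider.
	let mut i = 0;
	while i < queue.len() {
		if queue[i].ttl < now {
			let dropped = queue.remove(i).expect("index is in bounds; qed");
			report_processed(dropped);
		} else {
			i += 1;
		}
	}
	// For each dropped claim, try to pop a fresh assignment to the back of the queue.
	for _ in queue.len()..before {
		if let Some(para_id) = pop_next() {
			queue.push_back(Entry { para_id, ttl: now + ttl });
		}
	}
}

fn main() {
	let mut queue: VecDeque<Entry> =
		vec![Entry { para_id: 1, ttl: 4 }, Entry { para_id: 2, ttl: 10 }].into();
	let mut reported = Vec::new();
	prune_and_refill(&mut queue, 5, 5, |e| reported.push(e.para_id), || Some(3));
	// Para 1 timed out and was reported; para 3 took the freed slot behind para 2.
	assert_eq!(reported, vec![1]);
	assert_eq!(queue.iter().map(|e| e.para_id).collect::<Vec<_>>(), vec![2, 3]);
}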
- for drop in dropped_claims { - match T::AssignmentProvider::pop_assignment_for_core(core_idx, drop) { - Some(assignment) => { - let AssignmentProviderConfig { ttl, .. } = - T::AssignmentProvider::get_provider_config(core_idx); - core_claimqueue.push_back(Some(ParasEntry::new( - assignment.clone(), - now + ttl, - ))); - }, - None => (), + } + + for _ in 0..num_dropped { + // For all claims dropped due to TTL, attempt to pop a new entry to + // the back of the claimqueue. + if let Some(assignment) = + T::AssignmentProvider::pop_assignment_for_core(core_idx) + { + let AssignmentProviderConfig { ttl, .. } = + T::AssignmentProvider::get_provider_config(core_idx); + core_claimqueue.push_back(ParasEntry::new(assignment, now + ttl)); } } } @@ -446,7 +430,7 @@ impl Pallet { } let rotations_since_session_start: BlockNumberFor = - (at - session_start_block) / config.group_rotation_frequency.into(); + (at - session_start_block) / config.group_rotation_frequency; let rotations_since_session_start = as TryInto>::try_into(rotations_since_session_start) @@ -514,14 +498,12 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it became available. pub(crate) fn next_up_on_available(core: CoreIndex) -> Option { - ClaimQueue::::get().get(&core).and_then(|a| { - a.iter() - .find_map(|e| e.as_ref()) - .map(|pe| Self::paras_entry_to_scheduled_core(pe)) - }) + ClaimQueue::::get() + .get(&core) + .and_then(|a| a.front().map(|pe| Self::paras_entry_to_scheduled_core(pe))) } - fn paras_entry_to_scheduled_core(pe: &ParasEntry>) -> ScheduledCore { + fn paras_entry_to_scheduled_core(pe: &ParasEntryType) -> ScheduledCore { ScheduledCore { para_id: pe.para_id(), collator: None } } @@ -552,35 +534,33 @@ impl Pallet { /// Pushes occupied cores to the assignment provider. fn push_occupied_cores_to_assignment_provider() { AvailabilityCores::::mutate(|cores| { - for (core_idx, core) in cores.iter_mut().enumerate() { - match core { + for core in cores.iter_mut() { + match sp_std::mem::replace(core, CoreOccupied::Free) { CoreOccupied::Free => continue, CoreOccupied::Paras(entry) => { - let core_idx = CoreIndex::from(core_idx as u32); - Self::maybe_push_assignment(core_idx, entry.clone()); + Self::maybe_push_assignment(entry); }, } - *core = CoreOccupied::Free; } }); } // on new session fn push_claimqueue_items_to_assignment_provider() { - for (core_idx, core_claimqueue) in ClaimQueue::::take() { + for (_, claim_queue) in ClaimQueue::::take() { // Push back in reverse order so that when we pop from the provider again, // the entries in the claimqueue are in the same order as they are right now. - for para_entry in core_claimqueue.into_iter().flatten().rev() { - Self::maybe_push_assignment(core_idx, para_entry); + for para_entry in claim_queue.into_iter().rev() { + Self::maybe_push_assignment(para_entry); } } } /// Push assignments back to the provider on session change unless the paras /// timed out on availability before. - fn maybe_push_assignment(core_idx: CoreIndex, pe: ParasEntry>) { + fn maybe_push_assignment(pe: ParasEntryType) { if pe.availability_timeouts == 0 { - T::AssignmentProvider::push_assignment_for_core(core_idx, pe.assignment); + T::AssignmentProvider::push_back_assignment(pe.assignment); } } @@ -591,31 +571,8 @@ impl Pallet { >::config().scheduling_lookahead } - /// Updates the claimqueue by moving it to the next paras and filling empty spots with new - /// paras. 
- pub(crate) fn update_claimqueue( - just_freed_cores: impl IntoIterator, - now: BlockNumberFor, - ) { - Self::move_claimqueue_forward(); - Self::free_cores_and_fill_claimqueue(just_freed_cores, now) - } - - /// Moves all elements in the claimqueue forward. - fn move_claimqueue_forward() { - let mut cq = ClaimQueue::::get(); - for core_queue in cq.values_mut() { - // First pop the finished claims from the front. - if let Some(None) = core_queue.front() { - core_queue.pop_front(); - } - } - - ClaimQueue::::set(cq); - } - /// Frees cores and fills the free claimqueue spots by popping from the `AssignmentProvider`. - fn free_cores_and_fill_claimqueue( + pub fn free_cores_and_fill_claimqueue( just_freed_cores: impl IntoIterator, now: BlockNumberFor, ) { @@ -651,19 +608,19 @@ impl Pallet { } else { // Consider timed out assignments for on demand parachains as concluded for // the assignment provider - let ret = concluded_paras.insert(core_idx, entry.para_id()); + let ret = concluded_paras.insert(core_idx, entry.assignment); debug_assert!(ret.is_none()); } } - // We consider occupied cores to be part of the claimqueue + if let Some(concluded_para) = concluded_paras.remove(&core_idx) { + T::AssignmentProvider::report_processed(concluded_para); + } + // We consider occupied cores to be part of the claimqueue let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32) + if Self::is_core_occupied(core_idx) { 1 } else { 0 }; for _ in n_lookahead_used..n_lookahead { - let concluded_para = concluded_paras.remove(&core_idx); - if let Some(assignment) = - T::AssignmentProvider::pop_assignment_for_core(core_idx, concluded_para) - { + if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { Self::add_to_claimqueue(core_idx, ParasEntry::new(assignment, now + ttl)); } } @@ -680,9 +637,9 @@ impl Pallet { } } - fn add_to_claimqueue(core_idx: CoreIndex, pe: ParasEntry>) { + fn add_to_claimqueue(core_idx: CoreIndex, pe: ParasEntryType) { ClaimQueue::::mutate(|la| { - la.entry(core_idx).or_default().push_back(Some(pe)); + la.entry(core_idx).or_default().push_back(pe); }); } @@ -690,19 +647,16 @@ impl Pallet { fn remove_from_claimqueue( core_idx: CoreIndex, para_id: ParaId, - ) -> Result<(PositionInClaimqueue, ParasEntry>), &'static str> { + ) -> Result<(PositionInClaimqueue, ParasEntryType), &'static str> { ClaimQueue::::mutate(|cq| { let core_claims = cq.get_mut(&core_idx).ok_or("core_idx not found in lookahead")?; let pos = core_claims .iter() - .position(|a| a.as_ref().map_or(false, |pe| pe.para_id() == para_id)) + .position(|pe| pe.para_id() == para_id) .ok_or("para id not found at core_idx lookahead")?; - let pe = core_claims - .remove(pos) - .ok_or("remove returned None")? - .ok_or("Element in Claimqueue was None.")?; + let pe = core_claims.remove(pos).ok_or("remove returned None")?; Ok((pos as u32, pe)) }) @@ -710,16 +664,10 @@ impl Pallet { /// Paras scheduled next in the claim queue. pub(crate) fn scheduled_paras() -> impl Iterator { - Self::scheduled_entries().map(|(core_idx, e)| (core_idx, e.assignment.para_id)) - } - - /// Internal access to entries at the top of the claim queue. 
- fn scheduled_entries() -> impl Iterator>)> { let claimqueue = ClaimQueue::::get(); - claimqueue .into_iter() - .filter_map(|(core_idx, v)| v.front().cloned().flatten().map(|e| (core_idx, e))) + .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.assignment.para_id()))) } #[cfg(any(feature = "runtime-benchmarks", test))] diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 316e8e3b760cc6a73c022f693e12ca537bf3443b..2eb73385803c6e62a88fc55526e3ba18218cf06d 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -16,29 +16,39 @@ //! Common traits and types used by the scheduler and assignment providers. -use frame_support::pallet_prelude::*; -use primitives::{CoreIndex, Id as ParaId}; use scale_info::TypeInfo; -use sp_std::prelude::*; +use sp_runtime::{ + codec::{Decode, Encode}, + RuntimeDebug, +}; -// Only used to link to configuration documentation. -#[allow(unused)] -use crate::configuration::HostConfiguration; +use primitives::{CoreIndex, Id as ParaId}; -/// An assignment for a parachain scheduled to be backed and included in a relay chain block. -#[derive(Clone, Encode, Decode, PartialEq, TypeInfo, RuntimeDebug)] -pub struct Assignment { - /// Assignment's ParaId - pub para_id: ParaId, +/// Assignment (ParaId -> CoreIndex). +#[derive(Encode, Decode, TypeInfo, RuntimeDebug, Clone, PartialEq)] +pub enum Assignment { + /// A pool assignment. + Pool { + /// The assigned para id. + para_id: ParaId, + /// The core index the para got assigned to. + core_index: CoreIndex, + }, + /// A bulk assignment. + Bulk(ParaId), } impl Assignment { - /// Create a new `Assignment`. - pub fn new(para_id: ParaId) -> Self { - Self { para_id } + /// Returns the [`ParaId`] this assignment is associated to. + pub fn para_id(&self) -> ParaId { + match self { + Self::Pool { para_id, .. } => *para_id, + Self::Bulk(para_id) => *para_id, + } } } +#[derive(Encode, Decode, TypeInfo)] /// A set of variables required by the scheduler in order to operate. pub struct AssignmentProviderConfig { /// How many times a collation can time out on availability. @@ -51,22 +61,42 @@ pub struct AssignmentProviderConfig { } pub trait AssignmentProvider { - /// How many cores are allocated to this provider. - fn session_core_count() -> u32; - /// Pops an [`Assignment`] from the provider for a specified [`CoreIndex`]. - /// The `concluded_para` field makes the caller report back to the provider - /// which [`ParaId`] it processed last on the supplied [`CoreIndex`]. - fn pop_assignment_for_core( - core_idx: CoreIndex, - concluded_para: Option, - ) -> Option; - - /// Push back an already popped assignment. Intended for provider implementations - /// that need to be able to keep track of assignments over session boundaries, - /// such as the on demand assignment provider. - fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment); + /// + /// This is where assignments come into existance. + fn pop_assignment_for_core(core_idx: CoreIndex) -> Option; + + /// A previously popped `Assignment` has been fully processed. + /// + /// Report back to the assignment provider that an assignment is done and no longer present in + /// the scheduler. + /// + /// This is one way of the life of an assignment coming to an end. + fn report_processed(assignment: Assignment); + + /// Push back a previously popped assignment. 
+	///
+	/// If the assignment could not be processed within the current session, it can be pushed back
+	/// to the assignment provider in order to be popped again later.
+	///
+	/// This is the second way the life of an assignment can come to an end.
+	fn push_back_assignment(assignment: Assignment);
 
 	/// Returns a set of variables needed by the scheduler
 	fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig;
+
+	/// Push some assignment for mocking/benchmarks purposes.
+	///
+	/// Useful for benchmarks and testing. The returned assignment is "valid" and can, if need be,
+	/// be passed into `report_processed` for example.
+	#[cfg(any(feature = "runtime-benchmarks", test))]
+	fn get_mock_assignment(core_idx: CoreIndex, para_id: ParaId) -> Assignment;
+
+	/// How many cores are allocated to this provider.
+	///
+	/// As the name suggests the core count has to be session buffered:
+	///
+	/// - Core count has to be predetermined for the next session in the current session.
+	/// - Core count must not change during a session.
+	fn session_core_count() -> u32;
 }
diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs
index bb9a647e955ca7e6bf7b7e7a78abd41e084550d0..4c0a07d73674205dbc4fc80090f1331a8858633e 100644
--- a/polkadot/runtime/parachains/src/scheduler/migration.rs
+++ b/polkadot/runtime/parachains/src/scheduler/migration.rs
@@ -22,9 +22,18 @@ use frame_support::{
 	traits::OnRuntimeUpgrade, weights::Weight,
 };
 
+/// Old/legacy assignment representation (v0).
+///
+/// `Assignment` used to be a concrete type with the same layout as `V0Assignment`, identical on
+/// all assignment providers. This can be removed once storage has been migrated.
+#[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Clone)]
+struct V0Assignment {
+	pub para_id: ParaId,
+}
+
+/// Old scheduler with explicit parathreads and `Scheduled` storage instead of `ClaimQueue`.
 mod v0 {
 	use super::*;
-
 	use primitives::{CollatorId, Id};
 
 	#[storage_alias]
@@ -90,29 +99,123 @@ mod v0 {
 	}
 }
 
-pub mod v1 {
+// `ClaimQueue` got introduced.
+//
+// - Items are `Option` for some weird reason.
+// - Assignments only consist of `ParaId`, `Assignment` is a concrete type (same as `V0Assignment`).
+mod v1 {
+	use frame_support::{
+		pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight,
+	};
+	use frame_system::pallet_prelude::BlockNumberFor;
+
 	use super::*;
 	use crate::scheduler;
 
-	#[allow(deprecated)]
-	pub type MigrateToV1 = VersionedMigration<
-		0,
-		1,
-		UncheckedMigrateToV1,
+	#[storage_alias]
+	pub(super) type ClaimQueue = StorageValue<
 		Pallet,
-		::DbWeight,
+		BTreeMap>>>>,
+		ValueQuery,
 	>;
 
-	#[deprecated(note = "Use MigrateToV1 instead")]
+	#[storage_alias]
+	pub(super) type AvailabilityCores =
+		StorageValue, Vec>>, ValueQuery>;
+
+	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)]
+	pub(super) enum CoreOccupied {
+		/// No candidate is waiting availability on this core right now (the core is not occupied).
+		Free,
+		/// A para is currently waiting for availability/inclusion on this core.
+		Paras(ParasEntry),
+	}
+
+	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)]
+	pub(super) struct ParasEntry {
+		/// The underlying `Assignment`
+		pub(super) assignment: V0Assignment,
+		/// The number of times the entry has timed out in availability already.
+		pub(super) availability_timeouts: u32,
+		/// The block height until this entry needs to be backed.
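To make the life cycle described by `AssignmentProvider` above concrete: an assignment is born in `pop_assignment_for_core` and ends either in `report_processed` (it was fully processed) or stays alive by being handed back through `push_back_assignment`. A toy, in-memory provider, not the on-demand or coretime implementation:

use std::collections::VecDeque;

type ParaId = u32;

#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq)]
enum Assignment {
	Pool { para_id: ParaId, core_index: u32 },
	Bulk(ParaId),
}

#[derive(Default)]
struct ToyProvider {
	queue: VecDeque<Assignment>,
}

impl ToyProvider {
	fn pop_assignment_for_core(&mut self, _core: u32) -> Option<Assignment> {
		self.queue.pop_front()
	}
	// The assignment ran to completion; this toy has nothing left to track.
	fn report_processed(&mut self, _assignment: Assignment) {}
	// The assignment did not make it this session; offer it again later.
	fn push_back_assignment(&mut self, assignment: Assignment) {
		self.queue.push_back(assignment);
	}
}

fn main() {
	let mut provider = ToyProvider::default();
	provider.queue.push_back(Assignment::Bulk(7));

	// A session change interrupts the first attempt: hand the assignment back.
	let a = provider.pop_assignment_for_core(0).expect("queued above");
	provider.push_back_assignment(a);

	// Next time around the para gets backed and included, so the assignment is done.
	let a = provider.pop_assignment_for_core(0).expect("pushed back above");
	provider.report_processed(a);
	assert!(provider.queue.is_empty());
}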
+ /// + /// If missed the entry will be removed from the claim queue without ever having occupied + /// the core. + pub(super) ttl: N, + } + + impl ParasEntry { + /// Create a new `ParasEntry`. + pub(super) fn new(assignment: V0Assignment, now: N) -> Self { + ParasEntry { assignment, availability_timeouts: 0, ttl: now } + } + + /// Return `Id` from the underlying `Assignment`. + pub(super) fn para_id(&self) -> ParaId { + self.assignment.para_id + } + } + + fn add_to_claimqueue(core_idx: CoreIndex, pe: ParasEntry>) { + ClaimQueue::::mutate(|la| { + la.entry(core_idx).or_default().push_back(Some(pe)); + }); + } + + /// Migration to V1 pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); - #[allow(deprecated)] impl OnRuntimeUpgrade for UncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { - let weight_consumed = migrate_to_v1::(); + let mut weight: Weight = Weight::zero(); + + v0::ParathreadQueue::::kill(); + v0::ParathreadClaimIndex::::kill(); + + let now = >::block_number(); + let scheduled = v0::Scheduled::::take(); + let sched_len = scheduled.len() as u64; + for core_assignment in scheduled { + let core_idx = core_assignment.core; + let assignment = V0Assignment { para_id: core_assignment.para_id }; + let pe = v1::ParasEntry::new(assignment, now); + v1::add_to_claimqueue::(core_idx, pe); + } + + let parachains = paras::Pallet::::parachains(); + let availability_cores = v0::AvailabilityCores::::take(); + let mut new_availability_cores = Vec::new(); + + for (core_index, core) in availability_cores.into_iter().enumerate() { + let new_core = if let Some(core) = core { + match core { + v0::CoreOccupied::Parachain => + v1::CoreOccupied::Paras(v1::ParasEntry::new( + V0Assignment { para_id: parachains[core_index] }, + now, + )), + v0::CoreOccupied::Parathread(entry) => v1::CoreOccupied::Paras( + v1::ParasEntry::new(V0Assignment { para_id: entry.claim.0 }, now), + ), + } + } else { + v1::CoreOccupied::Free + }; + + new_availability_cores.push(new_core); + } + + v1::AvailabilityCores::::set(new_availability_cores); - log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v1"); + // 2x as once for Scheduled and once for Claimqueue + weight.saturating_accrue(T::DbWeight::get().reads_writes(2 * sched_len, 2 * sched_len)); + // reading parachains + availability_cores, writing AvailabilityCores + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 1)); + // 2x kill + weight.saturating_accrue(T::DbWeight::get().writes(2)); - weight_consumed + log::info!(target: scheduler::LOG_TARGET, "Migrated para scheduler storage to v1"); + + weight } #[cfg(feature = "try-runtime")] @@ -138,9 +241,9 @@ pub mod v1 { ); let expected_len = u32::decode(&mut &state[..]).unwrap(); - let availability_cores_waiting = super::AvailabilityCores::::get() - .iter() - .filter(|c| !matches!(c, CoreOccupied::Free)) + let availability_cores_waiting = v1::AvailabilityCores::::get() + .into_iter() + .filter(|c| !matches!(c, v1::CoreOccupied::Free)) .count(); ensure!( @@ -154,51 +257,150 @@ pub mod v1 { } } -pub fn migrate_to_v1() -> Weight { - let mut weight: Weight = Weight::zero(); +/// Migrate `V0` to `V1` of the storage format. 
+pub type MigrateV0ToV1 = VersionedMigration< + 0, + 1, + v1::UncheckedMigrateToV1, + Pallet, + ::DbWeight, +>; - v0::ParathreadQueue::::kill(); - v0::ParathreadClaimIndex::::kill(); +mod v2 { + use super::*; + use crate::scheduler; - let now = >::block_number(); - let scheduled = v0::Scheduled::::take(); - let sched_len = scheduled.len() as u64; - for core_assignment in scheduled { - let core_idx = core_assignment.core; - let assignment = Assignment::new(core_assignment.para_id); - let pe = ParasEntry::new(assignment, now); - Pallet::::add_to_claimqueue(core_idx, pe); + #[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)] + pub(crate) enum CoreOccupied { + Free, + Paras(ParasEntry), } - let parachains = paras::Pallet::::parachains(); - let availability_cores = v0::AvailabilityCores::::take(); - let mut new_availability_cores = Vec::new(); - - for (core_index, core) in availability_cores.into_iter().enumerate() { - let new_core = if let Some(core) = core { - match core { - v0::CoreOccupied::Parachain => CoreOccupied::Paras(ParasEntry::new( - Assignment::new(parachains[core_index]), - now, - )), - v0::CoreOccupied::Parathread(entry) => - CoreOccupied::Paras(ParasEntry::new(Assignment::new(entry.claim.0), now)), - } - } else { - CoreOccupied::Free - }; + #[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)] + pub(crate) struct ParasEntry { + pub assignment: Assignment, + pub availability_timeouts: u32, + pub ttl: N, + } - new_availability_cores.push(new_core); + // V2 (no Option wrapper) and new [`Assignment`]. + #[storage_alias] + pub(crate) type ClaimQueue = StorageValue< + Pallet, + BTreeMap>>>, + ValueQuery, + >; + + #[storage_alias] + pub(crate) type AvailabilityCores = + StorageValue, Vec>>, ValueQuery>; + + fn is_bulk(core_index: CoreIndex) -> bool { + core_index.0 < paras::Parachains::::decode_len().unwrap_or(0) as u32 } - super::AvailabilityCores::::set(new_availability_cores); + /// Migration to V2 + pub struct UncheckedMigrateToV2(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for UncheckedMigrateToV2 { + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + + let old = v1::ClaimQueue::::take(); + let new = old + .into_iter() + .map(|(k, v)| { + ( + k, + v.into_iter() + .flatten() + .map(|p| { + let assignment = if is_bulk::(k) { + Assignment::Bulk(p.para_id()) + } else { + Assignment::Pool { para_id: p.para_id(), core_index: k } + }; + + ParasEntry { + assignment, + availability_timeouts: p.availability_timeouts, + ttl: p.ttl, + } + }) + .collect::>(), + ) + }) + .collect::>>>>(); + + ClaimQueue::::put(new); + + let old = v1::AvailabilityCores::::get(); + + let new = old + .into_iter() + .enumerate() + .map(|(k, a)| match a { + v1::CoreOccupied::Free => CoreOccupied::Free, + v1::CoreOccupied::Paras(paras) => { + let assignment = if is_bulk::((k as u32).into()) { + Assignment::Bulk(paras.para_id()) + } else { + Assignment::Pool { + para_id: paras.para_id(), + core_index: (k as u32).into(), + } + }; + + CoreOccupied::Paras(ParasEntry { + assignment, + availability_timeouts: paras.availability_timeouts, + ttl: paras.ttl, + }) + }, + }) + .collect::>(); + AvailabilityCores::::put(new); + + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v2"); + + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::DispatchError> { + log::trace!( + target: crate::scheduler::LOG_TARGET, + "ClaimQueue before migration: 
{}", + v1::ClaimQueue::::get().len() + ); + + let bytes = u32::to_be_bytes(v1::ClaimQueue::::get().len() as u32); - // 2x as once for Scheduled and once for Claimqueue - weight = weight.saturating_add(T::DbWeight::get().reads_writes(2 * sched_len, 2 * sched_len)); - // reading parachains + availability_cores, writing AvailabilityCores - weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1)); - // 2x kill - weight = weight.saturating_add(T::DbWeight::get().writes(2)); + Ok(bytes.to_vec()) + } - weight + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { + log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); + + let old_len = u32::from_be_bytes(state.try_into().unwrap()); + ensure!( + v2::ClaimQueue::::get().len() as u32 == old_len, + "Old ClaimQueue completely moved to new ClaimQueue after migration" + ); + + Ok(()) + } + } } + +/// Migrate `V1` to `V2` of the storage format. +pub type MigrateV1ToV2 = VersionedMigration< + 1, + 2, + v2::UncheckedMigrateToV2, + Pallet, + ::DbWeight, +>; diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 108f365d6b5c39567b57dc21243a30d0548a89bf..9af23ce64bd67ab0901dd1a03e849d51cfffe342 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -22,24 +22,24 @@ use primitives::{BlockNumber, SessionIndex, ValidationCode, ValidatorId}; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ - assigner_on_demand::QueuePushDirection, configuration::HostConfiguration, initializer::SessionChangeNotification, mock::{ - new_test_ext, MockGenesisConfig, OnDemandAssigner, Paras, ParasShared, RuntimeOrigin, + new_test_ext, MockAssigner, MockGenesisConfig, Paras, ParasShared, RuntimeOrigin, Scheduler, System, Test, }, paras::{ParaGenesisArgs, ParaKind}, + scheduler::{common::Assignment, ClaimQueue}, }; -fn schedule_blank_para(id: ParaId, parakind: ParaKind) { +fn schedule_blank_para(id: ParaId) { let validation_code: ValidationCode = vec![1, 2, 3].into(); assert_ok!(Paras::schedule_para_initialize( id, ParaGenesisArgs { genesis_head: Vec::new().into(), validation_code: validation_code.clone(), - para_kind: parakind, + para_kind: ParaKind::Parathread, // This most closely mimics our test assigner } )); @@ -78,7 +78,7 @@ fn run_to_block( Scheduler::initializer_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::update_claimqueue(BTreeMap::new(), b + 1); + Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); } } @@ -103,11 +103,10 @@ fn run_to_end_of_block( fn default_config() -> HostConfiguration { HostConfiguration { - on_demand_cores: 3, + coretime_cores: 3, group_rotation_frequency: 10, paras_availability_period: 3, scheduling_lookahead: 2, - on_demand_retries: 1, // This field does not affect anything that scheduler does. However, `HostConfiguration` // is still a subject to consistency test. 
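// The v1 -> v2 migration above does two things: it drops the `Option` wrapper from claim queue
// entries and re-expresses each entry with the new `Assignment` enum, choosing `Bulk` for cores
// below the lease-holding parachain count and `Pool` otherwise. A standalone sketch of that
// classification; `DemoAssignment` and `bulk_core_count` (standing in for the parachains length)
// are simplified placeholders.
use std::collections::{BTreeMap, VecDeque};

#[derive(Clone, Debug, PartialEq)]
enum DemoAssignment {
    Bulk(u32),
    Pool { para_id: u32, core_index: u32 },
}

fn migrate_core_entries(
    old: BTreeMap<u32, VecDeque<Option<u32>>>, // core index -> queued para ids (v1 shape)
    bulk_core_count: u32,
) -> BTreeMap<u32, VecDeque<DemoAssignment>> {
    old.into_iter()
        .map(|(core, entries)| {
            let new_entries = entries
                .into_iter()
                // Dropping the `Option` wrapper: `None` placeholders simply disappear.
                .flatten()
                .map(|para_id| {
                    if core < bulk_core_count {
                        DemoAssignment::Bulk(para_id)
                    } else {
                        DemoAssignment::Pool { para_id, core_index: core }
                    }
                })
                .collect();
            (core, new_entries)
        })
        .collect()
}
// The pallet's try-runtime hooks additionally check that the number of queued entries is
// preserved across the migration, as the pre_upgrade/post_upgrade pair above does.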
It requires that // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and @@ -124,29 +123,16 @@ fn genesis_config(config: &HostConfiguration) -> MockGenesisConfig } } -pub(crate) fn claimqueue_contains_only_none() -> bool { - let mut cq = Scheduler::claimqueue(); - for (_, v) in cq.iter_mut() { - v.retain(|e| e.is_some()); - } - - cq.values().map(|v| v.len()).sum::() == 0 -} - -pub(crate) fn claimqueue_contains_para_ids(pids: Vec) -> bool { +fn claimqueue_contains_para_ids(pids: Vec) -> bool { let set: BTreeSet = ClaimQueue::::get() .into_iter() - .flat_map(|(_, assignments)| { - assignments - .into_iter() - .filter_map(|assignment| assignment.and_then(|pe| Some(pe.para_id()))) - }) + .flat_map(|(_, paras_entries)| paras_entries.into_iter().map(|pe| pe.assignment.para_id())) .collect(); pids.into_iter().all(|pid| set.contains(&pid)) } -pub(crate) fn availability_cores_contains_para_ids(pids: Vec) -> bool { +fn availability_cores_contains_para_ids(pids: Vec) -> bool { let set: BTreeSet = AvailabilityCores::::get() .into_iter() .filter_map(|core| match core { @@ -158,6 +144,14 @@ pub(crate) fn availability_cores_contains_para_ids(pids: Vec) pids.into_iter().all(|pid| set.contains(&pid)) } +/// Internal access to entries at the top of the claim queue. +fn scheduled_entries() -> impl Iterator>)> { + let claimqueue = ClaimQueue::::get(); + claimqueue + .into_iter() + .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.clone()))) +} + #[test] fn claimqueue_ttl_drop_fn_works() { let mut config = default_config(); @@ -169,13 +163,14 @@ fn claimqueue_ttl_drop_fn_works() { let mut now = 10; new_test_ext(genesis_config).execute_with(|| { - assert!(default_config().on_demand_ttl == 5); + let assignment_provider_ttl = MockAssigner::get_provider_config(CoreIndex::from(0)).ttl; + assert!(assignment_provider_ttl == 5); // Register and run to a blockheight where the para is in a valid state. - schedule_blank_para(para_id, ParaKind::Parathread); - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + schedule_blank_para(para_id); + run_to_block(now, |n| if n == now { Some(Default::default()) } else { None }); // Add a claim on core 0 with a ttl in the past. - let paras_entry = ParasEntry::new(Assignment::new(para_id), now - 5); + let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now - 5 as u32); Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); // Claim is in queue prior to call. @@ -186,7 +181,7 @@ fn claimqueue_ttl_drop_fn_works() { assert!(!claimqueue_contains_para_ids::(vec![para_id])); // Add a claim on core 0 with a ttl in the future (15). - let paras_entry = ParasEntry::new(Assignment::new(para_id), now + 5); + let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now + 5); Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); // Claim is in queue post call. @@ -201,7 +196,7 @@ fn claimqueue_ttl_drop_fn_works() { assert!(!claimqueue_contains_para_ids::(vec![para_id])); // Add a claim on core 0 with a ttl == now (16) - let paras_entry = ParasEntry::new(Assignment::new(para_id), now); + let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now); Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); // Claim is in queue post call. 
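// The TTL test above exercises dropping claims whose TTL has passed. A minimal standalone model
// of that behaviour with a simplified entry type; the drop condition `ttl < now` mirrors what the
// test observes (entries whose TTL equals the current block are still kept), not necessarily the
// pallet's exact boundary handling.
use std::collections::VecDeque;

#[derive(Clone, Debug, PartialEq)]
struct DemoEntry {
    para_id: u32,
    ttl: u32,
}

fn drop_expired(queue: &mut VecDeque<DemoEntry>, now: u32) {
    // Keep the relative order of the surviving claims.
    queue.retain(|entry| entry.ttl >= now);
}

#[test]
fn expired_claims_are_dropped() {
    // Mirrors the ttls = [17, 15, 17] situation from the test above, checked at block 16.
    let mut queue: VecDeque<DemoEntry> = vec![
        DemoEntry { para_id: 1, ttl: 17 },
        DemoEntry { para_id: 1, ttl: 15 },
        DemoEntry { para_id: 1, ttl: 17 },
    ]
    .into_iter()
    .collect();
    drop_expired(&mut queue, 16);
    // Only the entry whose TTL already lies in the past is removed.
    assert_eq!(queue.len(), 2);
}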
@@ -215,8 +210,8 @@ fn claimqueue_ttl_drop_fn_works() { Scheduler::drop_expired_claims_from_claimqueue(); // Add a claim on core 0 with a ttl == now (17) - let paras_entry_non_expired = ParasEntry::new(Assignment::new(para_id), now); - let paras_entry_expired = ParasEntry::new(Assignment::new(para_id), now - 2); + let paras_entry_non_expired = ParasEntry::new(Assignment::Bulk(para_id), now); + let paras_entry_expired = ParasEntry::new(Assignment::Bulk(para_id), now - 2); // ttls = [17, 15, 17] Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); Scheduler::add_to_claimqueue(core_idx, paras_entry_expired.clone()); @@ -224,18 +219,10 @@ fn claimqueue_ttl_drop_fn_works() { let cq = Scheduler::claimqueue(); assert!(cq.get(&core_idx).unwrap().len() == 3); - // Add claims to on demand assignment provider. - let assignment = Assignment::new(para_id); + // Add a claim to the test assignment provider. + let assignment = Assignment::Bulk(para_id); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment.clone(), - QueuePushDirection::Back - )); - - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment, - QueuePushDirection::Back - )); + MockAssigner::add_test_assignment(assignment.clone()); // Drop expired claim. Scheduler::drop_expired_claims_from_claimqueue(); @@ -248,58 +235,25 @@ fn claimqueue_ttl_drop_fn_works() { // The first 2 claims in the queue should have a ttl of 17, // being the ones set up prior in this test as claims 1 and 3. // The third claim is popped from the assignment provider and - // has a new ttl set by the scheduler of now + config.on_demand_ttl. - // ttls = [17, 17, 22] + // has a new ttl set by the scheduler of now + + // assignment_provider_ttl. ttls = [17, 17, 22] assert!(cqc.iter().enumerate().all(|(index, entry)| { match index { - 0 | 1 => return entry.clone().unwrap().ttl == 17, - 2 => return entry.clone().unwrap().ttl == 22, - _ => return false, + 0 | 1 => entry.clone().ttl == 17, + 2 => entry.clone().ttl == 22, + _ => false, } })) }); } -// Pretty useless here. Should be on parathread assigner... if at all -#[test] -fn add_parathread_claim_works() { - let genesis_config = genesis_config(&default_config()); - - let thread_id = ParaId::from(10); - let core_index = CoreIndex::from(0); - let entry_ttl = 10_000; - - new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(thread_id, ParaKind::Parathread); - - assert!(!Paras::is_parathread(thread_id)); - - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - - assert!(Paras::is_parathread(thread_id)); - - let pe = ParasEntry::new(Assignment::new(thread_id), entry_ttl); - Scheduler::add_to_claimqueue(core_index, pe.clone()); - - let cq = Scheduler::claimqueue(); - assert_eq!(Scheduler::claimqueue_len(), 1); - assert_eq!(*(cq.get(&core_index).unwrap().front().unwrap()), Some(pe)); - }) -} - #[test] fn session_change_shuffles_validators() { let genesis_config = genesis_config(&default_config()); - assert_eq!(default_config().on_demand_cores, 3); new_test_ext(genesis_config).execute_with(|| { - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - - // ensure that we have 5 groups by registering 2 parachains. 
- schedule_blank_para(chain_a, ParaKind::Parachain); - schedule_blank_para(chain_b, ParaKind::Parachain); - + // Need five cores for this test + MockAssigner::set_core_count(5); run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: default_config(), @@ -336,7 +290,6 @@ fn session_change_shuffles_validators() { fn session_change_takes_only_max_per_core() { let config = { let mut config = default_config(); - config.on_demand_cores = 0; config.max_validators_per_core = Some(1); config }; @@ -344,14 +297,8 @@ fn session_change_takes_only_max_per_core() { let genesis_config = genesis_config(&config); new_test_ext(genesis_config).execute_with(|| { - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - let chain_c = ParaId::from(3_u32); - - // ensure that we have 5 groups by registering 2 parachains. - schedule_blank_para(chain_a, ParaKind::Parachain); - schedule_blank_para(chain_b, ParaKind::Parachain); - schedule_blank_para(chain_c, ParaKind::Parathread); + // Simulate 2 cores between all usage types + MockAssigner::set_core_count(2); run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { @@ -374,7 +321,7 @@ fn session_change_takes_only_max_per_core() { let groups = ValidatorGroups::::get(); assert_eq!(groups.len(), 7); - // Every validator gets its own group, even though there are 2 paras. + // Every validator gets its own group, even though there are 2 cores. for i in 0..7 { assert_eq!(groups[i].len(), 1); } @@ -385,31 +332,25 @@ fn session_change_takes_only_max_per_core() { fn fill_claimqueue_fills() { let genesis_config = genesis_config(&default_config()); - let lookahead = genesis_config.configuration.config.scheduling_lookahead as usize; - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - - let thread_a = ParaId::from(3_u32); - let thread_b = ParaId::from(4_u32); - let thread_c = ParaId::from(5_u32); + let para_a = ParaId::from(3_u32); + let para_b = ParaId::from(4_u32); + let para_c = ParaId::from(5_u32); - let assignment_a = Assignment { para_id: thread_a }; - let assignment_b = Assignment { para_id: thread_b }; - let assignment_c = Assignment { para_id: thread_c }; + let assignment_a = Assignment::Bulk(para_a); + let assignment_b = Assignment::Bulk(para_b); + let assignment_c = Assignment::Bulk(para_c); new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().on_demand_cores, 3); + MockAssigner::set_core_count(2); + let AssignmentProviderConfig { ttl: config_ttl, .. } = + MockAssigner::get_provider_config(CoreIndex(0)); - // register 2 lease holding parachains - schedule_blank_para(chain_a, ParaKind::Parachain); - schedule_blank_para(chain_b, ParaKind::Parachain); + // Add 3 paras + schedule_blank_para(para_a); + schedule_blank_para(para_b); + schedule_blank_para(para_c); - // and 3 parathreads (on-demand parachains) - schedule_blank_para(thread_a, ParaKind::Parathread); - schedule_blank_para(thread_b, ParaKind::Parathread); - schedule_blank_para(thread_c, ParaKind::Parathread); - - // start a new session to activate, 5 validators for 5 cores. + // start a new session to activate, 3 validators for 3 cores. 
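// `session_change_takes_only_max_per_core` below relies on validators being split into per-core
// groups whose size is capped by `max_validators_per_core`. This standalone sketch illustrates
// only the property the test asserts (7 validators, cap 1, 7 groups of 1); it is not the
// scheduler's actual group-assignment algorithm.
fn make_groups(n_validators: usize, max_per_core: usize) -> Vec<Vec<usize>> {
    assert!(max_per_core > 0);
    (0..n_validators)
        .collect::<Vec<_>>()
        .chunks(max_per_core)
        .map(|chunk| chunk.to_vec())
        .collect()
}

#[test]
fn one_validator_per_group_when_capped_at_one() {
    let groups = make_groups(7, 1);
    // With a cap of one validator per core, every validator ends up in its own group.
    assert_eq!(groups.len(), 7);
    assert!(groups.iter().all(|g| g.len() == 1));
}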
run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: default_config(), @@ -417,107 +358,47 @@ fn fill_claimqueue_fills() { ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), ], ..Default::default() }), _ => None, }); - { - assert_eq!(Scheduler::claimqueue_len(), 2 * lookahead); - let scheduled: BTreeMap<_, _> = Scheduler::scheduled_entries().collect(); - - // Cannot assert on indices anymore as they depend on the assignment providers - assert!(claimqueue_contains_para_ids::(vec![chain_a, chain_b])); - - assert_eq!( - scheduled.get(&CoreIndex(0)).unwrap(), - &ParasEntry { - assignment: Assignment { para_id: chain_a }, - availability_timeouts: 0, - ttl: 6 - }, - ); - - assert_eq!( - scheduled.get(&CoreIndex(1)).unwrap(), - &ParasEntry { - assignment: Assignment { para_id: chain_b }, - availability_timeouts: 0, - ttl: 6 - }, - ); - } - - // add a couple of parathread assignments. - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_a, - QueuePushDirection::Back - )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_b, - QueuePushDirection::Back - )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_c, - QueuePushDirection::Back - )); + // add some para assignments. + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_c.clone()); run_to_block(2, |_| None); - // cores 0 and 1 should be occupied. mark them as such. - Scheduler::occupied( - vec![(CoreIndex(0), chain_a), (CoreIndex(1), chain_b)].into_iter().collect(), - ); - - run_to_block(3, |_| None); { - assert_eq!(Scheduler::claimqueue_len(), 5); - let scheduled: BTreeMap<_, _> = Scheduler::scheduled_entries().collect(); - - assert_eq!( - scheduled.get(&CoreIndex(0)).unwrap(), - &ParasEntry { - assignment: Assignment { para_id: chain_a }, - availability_timeouts: 0, - ttl: 6 - }, - ); - assert_eq!( - scheduled.get(&CoreIndex(1)).unwrap(), - &ParasEntry { - assignment: Assignment { para_id: chain_b }, - availability_timeouts: 0, - ttl: 6 - }, - ); + assert_eq!(Scheduler::claimqueue_len(), 3); + let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); // Was added a block later, note the TTL. 
assert_eq!( - scheduled.get(&CoreIndex(2)).unwrap(), + scheduled.get(&CoreIndex(0)).unwrap(), &ParasEntry { - assignment: Assignment { para_id: thread_a }, + assignment: assignment_a.clone(), availability_timeouts: 0, - ttl: 7 + ttl: 2 + config_ttl }, ); - // Sits on the same core as `thread_a` + // Sits on the same core as `para_a` assert_eq!( - Scheduler::claimqueue().get(&CoreIndex(2)).unwrap()[1], - Some(ParasEntry { - assignment: Assignment { para_id: thread_b }, + Scheduler::claimqueue().get(&CoreIndex(0)).unwrap()[1], + ParasEntry { + assignment: assignment_b.clone(), availability_timeouts: 0, - ttl: 7 - }) + ttl: 2 + config_ttl + } ); assert_eq!( - scheduled.get(&CoreIndex(3)).unwrap(), + scheduled.get(&CoreIndex(1)).unwrap(), &ParasEntry { - assignment: Assignment { para_id: thread_c }, + assignment: assignment_c.clone(), availability_timeouts: 0, - ttl: 7 + ttl: 2 + config_ttl }, ); } @@ -532,36 +413,29 @@ fn schedule_schedules_including_just_freed() { config.scheduling_lookahead = 1; let genesis_config = genesis_config(&config); - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - - let thread_a = ParaId::from(3_u32); - let thread_b = ParaId::from(4_u32); - let thread_c = ParaId::from(5_u32); - let thread_d = ParaId::from(6_u32); - let thread_e = ParaId::from(7_u32); + let para_a = ParaId::from(3_u32); + let para_b = ParaId::from(4_u32); + let para_c = ParaId::from(5_u32); + let para_d = ParaId::from(6_u32); + let para_e = ParaId::from(7_u32); - let assignment_a = Assignment { para_id: thread_a }; - let assignment_b = Assignment { para_id: thread_b }; - let assignment_c = Assignment { para_id: thread_c }; - let assignment_d = Assignment { para_id: thread_d }; - let assignment_e = Assignment { para_id: thread_e }; + let assignment_a = Assignment::Bulk(para_a); + let assignment_b = Assignment::Bulk(para_b); + let assignment_c = Assignment::Bulk(para_c); + let assignment_d = Assignment::Bulk(para_d); + let assignment_e = Assignment::Bulk(para_e); new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().on_demand_cores, 3); + MockAssigner::set_core_count(3); - // register 2 lease holding parachains - schedule_blank_para(chain_a, ParaKind::Parachain); - schedule_blank_para(chain_b, ParaKind::Parachain); + // add 5 paras + schedule_blank_para(para_a); + schedule_blank_para(para_b); + schedule_blank_para(para_c); + schedule_blank_para(para_d); + schedule_blank_para(para_e); - // and 5 parathreads (on-demand parachains) - schedule_blank_para(thread_a, ParaKind::Parathread); - schedule_blank_para(thread_b, ParaKind::Parathread); - schedule_blank_para(thread_c, ParaKind::Parathread); - schedule_blank_para(thread_d, ParaKind::Parathread); - schedule_blank_para(thread_e, ParaKind::Parathread); - - // start a new session to activate, 5 validators for 5 cores. + // start a new session to activate, 3 validators for 3 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: default_config(), @@ -569,153 +443,113 @@ fn schedule_schedules_including_just_freed() { ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), ], ..Default::default() }), _ => None, }); - // add a couple of parathread claims now that the parathreads are live. 
- assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_a, - QueuePushDirection::Back - )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_c, - QueuePushDirection::Back - )); + // add a couple of para claims now that paras are live + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_c.clone()); let mut now = 2; run_to_block(now, |_| None); - assert_eq!(Scheduler::scheduled_paras().collect::>().len(), 4); + assert_eq!(Scheduler::scheduled_paras().collect::>().len(), 2); - // cores 0, 1, 2, and 3 should be occupied. mark them as such. + // cores 0, 1 should be occupied. mark them as such. let mut occupied_map: BTreeMap = BTreeMap::new(); - occupied_map.insert(CoreIndex(0), chain_a); - occupied_map.insert(CoreIndex(1), chain_b); - occupied_map.insert(CoreIndex(2), thread_a); - occupied_map.insert(CoreIndex(3), thread_c); + occupied_map.insert(CoreIndex(0), para_a); + occupied_map.insert(CoreIndex(1), para_c); Scheduler::occupied(occupied_map); { let cores = AvailabilityCores::::get(); - // cores 0, 1, 2, and 3 are all `CoreOccupied::Paras(ParasEntry...)` + // cores 0, 1 are `CoreOccupied::Paras(ParasEntry...)` assert!(cores[0] != CoreOccupied::Free); assert!(cores[1] != CoreOccupied::Free); - assert!(cores[2] != CoreOccupied::Free); - assert!(cores[3] != CoreOccupied::Free); - // core 4 is free - assert!(cores[4] == CoreOccupied::Free); + // core 2 is free + assert!(cores[2] == CoreOccupied::Free); assert!(Scheduler::scheduled_paras().collect::>().is_empty()); - // All core index entries in the claimqueue should have `None` in them. - Scheduler::claimqueue().iter().for_each(|(_core_idx, core_queue)| { - assert!(core_queue.iter().all(|claim| claim.is_none())) - }) + // All `core_queue`s should be empty + Scheduler::claimqueue() + .iter() + .for_each(|(_core_idx, core_queue)| assert!(core_queue.len() == 0)) } - // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core - // (4) and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` - // then will go for core `3`. - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_b, - QueuePushDirection::Back - )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_d, - QueuePushDirection::Back - )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_e.clone(), - QueuePushDirection::Back - )); + // add a couple more para claims - the claim on `b` will go to the 3rd core + // (2) and the claim on `d` will go back to the 1st para core (0). The claim on `e` + // then will go for core `1`. + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_d.clone()); + MockAssigner::add_test_assignment(assignment_e.clone()); now = 3; run_to_block(now, |_| None); { - let scheduled: BTreeMap<_, _> = Scheduler::scheduled_entries().collect(); + let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); - // cores 0 and 1 are occupied by lease holding parachains. cores 2 and 3 are occupied by - // on-demand parachain claims. core 4 was free. + // cores 0 and 1 are occupied by claims. core 2 was free. assert_eq!(scheduled.len(), 1); assert_eq!( - scheduled.get(&CoreIndex(4)).unwrap(), + scheduled.get(&CoreIndex(2)).unwrap(), &ParasEntry { - assignment: Assignment { para_id: thread_b }, + assignment: Assignment::Bulk(para_b), availability_timeouts: 0, ttl: 8 }, ); } - // now note that cores 0, 2, and 3 were freed. 
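// `fill_claimqueue_fills` checks that assignments popped from the provider are queued per core,
// up to the scheduling lookahead, and stamped with `ttl = now + provider_ttl` (the `2 + config_ttl`
// values asserted above). A standalone sketch of such a filling loop with toy types; the real
// pallet pulls from its assignment provider and also accounts for occupied cores.
use std::collections::{BTreeMap, VecDeque};

#[derive(Clone, Debug, PartialEq)]
struct DemoEntry {
    para_id: u32,
    availability_timeouts: u32,
    ttl: u32,
}

fn fill_claim_queue(
    claim_queue: &mut BTreeMap<u32, VecDeque<DemoEntry>>,
    pending: &mut VecDeque<u32>, // para ids waiting at the provider
    core_count: u32,
    lookahead: usize,
    now: u32,
    provider_ttl: u32,
) {
    for core in 0..core_count {
        let queue = claim_queue.entry(core).or_default();
        // Top each core's queue up to the lookahead depth, in provider order.
        while queue.len() < lookahead {
            match pending.pop_front() {
                Some(para_id) => queue.push_back(DemoEntry {
                    para_id,
                    availability_timeouts: 0,
                    ttl: now + provider_ttl,
                }),
                None => break,
            }
        }
    }
}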
+ // now note that cores 0 and 1 were freed. let just_updated: BTreeMap = vec![ (CoreIndex(0), FreedReason::Concluded), - (CoreIndex(2), FreedReason::Concluded), - (CoreIndex(3), FreedReason::TimedOut), // should go back on queue. + (CoreIndex(1), FreedReason::TimedOut), // should go back on queue. ] .into_iter() .collect(); - Scheduler::update_claimqueue(just_updated, now); + Scheduler::free_cores_and_fill_claimqueue(just_updated, now); { - let scheduled: BTreeMap<_, _> = Scheduler::scheduled_entries().collect(); + let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); - // 1 thing scheduled before, + 3 cores freed. - assert_eq!(scheduled.len(), 4); + // 1 thing scheduled before, + 2 cores freed. + assert_eq!(scheduled.len(), 3); assert_eq!( scheduled.get(&CoreIndex(0)).unwrap(), &ParasEntry { - assignment: Assignment { para_id: chain_a }, - availability_timeouts: 0, - ttl: 8 - }, - ); - assert_eq!( - scheduled.get(&CoreIndex(2)).unwrap(), - &ParasEntry { - assignment: Assignment { para_id: thread_d }, + assignment: Assignment::Bulk(para_d), availability_timeouts: 0, ttl: 8 }, ); - // Although C was descheduled, the core `4` was occupied so C goes back to the queue. + // Although C was descheduled, the core `2` was occupied so C goes back to the queue. assert_eq!( - scheduled.get(&CoreIndex(3)).unwrap(), + scheduled.get(&CoreIndex(1)).unwrap(), &ParasEntry { - assignment: Assignment { para_id: thread_c }, + assignment: Assignment::Bulk(para_c), availability_timeouts: 1, ttl: 8 }, ); assert_eq!( - scheduled.get(&CoreIndex(4)).unwrap(), + scheduled.get(&CoreIndex(2)).unwrap(), &ParasEntry { - assignment: Assignment { para_id: thread_b }, + assignment: Assignment::Bulk(para_b), availability_timeouts: 0, ttl: 8 }, ); - // The only assignment yet to be popped on to the claim queue is `thread_e`. - // This is due to `thread_c` timing out. - let order_queue = OnDemandAssigner::get_queue(); - assert!(order_queue.len() == 1); - assert!(order_queue[0] == assignment_e); - - // Chain B's core was not marked concluded or timed out, it should be on an - // availability core - assert!(availability_cores_contains_para_ids::(vec![chain_b])); - // Thread A claim should have been wiped, but thread C claim should remain. - assert!(!claimqueue_contains_para_ids::(vec![thread_a])); - assert!(claimqueue_contains_para_ids::(vec![thread_c])); - assert!(!availability_cores_contains_para_ids::(vec![thread_a, thread_c])); + // Para A claim should have been wiped, but para C claim should remain. 
+ assert!(!claimqueue_contains_para_ids::(vec![para_a])); + assert!(claimqueue_contains_para_ids::(vec![para_c])); + assert!(!availability_cores_contains_para_ids::(vec![para_a, para_c])); } }); } @@ -726,28 +560,35 @@ fn schedule_clears_availability_cores() { config.scheduling_lookahead = 1; let genesis_config = genesis_config(&config); - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - let chain_c = ParaId::from(3_u32); + let para_a = ParaId::from(1_u32); + let para_b = ParaId::from(2_u32); + let para_c = ParaId::from(3_u32); + + let assignment_a = Assignment::Bulk(para_a); + let assignment_b = Assignment::Bulk(para_b); + let assignment_c = Assignment::Bulk(para_c); new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().on_demand_cores, 3); + MockAssigner::set_core_count(3); + + // register 3 paras + schedule_blank_para(para_a); + schedule_blank_para(para_b); + schedule_blank_para(para_c); - // register 3 parachains - schedule_blank_para(chain_a, ParaKind::Parachain); - schedule_blank_para(chain_b, ParaKind::Parachain); - schedule_blank_para(chain_c, ParaKind::Parachain); + // Adding assignments then running block to populate claim queue + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_c.clone()); - // start a new session to activate, 5 validators for 5 cores. + // start a new session to activate, 3 validators for 3 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), ], ..Default::default() }), @@ -760,7 +601,7 @@ fn schedule_clears_availability_cores() { // cores 0, 1, and 2 should be occupied. mark them as such. Scheduler::occupied( - vec![(CoreIndex(0), chain_a), (CoreIndex(1), chain_b), (CoreIndex(2), chain_c)] + vec![(CoreIndex(0), para_a), (CoreIndex(1), para_b), (CoreIndex(2), para_c)] .into_iter() .collect(), ); @@ -772,9 +613,16 @@ fn schedule_clears_availability_cores() { assert_eq!(cores[1].is_free(), false); assert_eq!(cores[2].is_free(), false); - assert!(claimqueue_contains_only_none()); + // All `core_queue`s should be empty + Scheduler::claimqueue() + .iter() + .for_each(|(_core_idx, core_queue)| assert!(core_queue.len() == 0)) } + // Add more assignments + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_c.clone()); + run_to_block(3, |_| None); // now note that cores 0 and 2 were freed. 
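// When cores are freed, the reason matters: `Concluded` ends the assignment's life, while
// `TimedOut` returns the entry to its core's claim queue with `availability_timeouts` bumped,
// as the `availability_timeouts: 1` expectation above shows. A standalone sketch of that
// decision with simplified types; the real scheduler also informs the assignment provider and
// enforces the provider's `max_availability_timeouts` budget, which this models with a parameter.
use std::collections::VecDeque;

#[derive(Clone, Debug, PartialEq)]
enum FreedReason {
    Concluded,
    TimedOut,
}

#[derive(Clone, Debug, PartialEq)]
struct DemoEntry {
    para_id: u32,
    availability_timeouts: u32,
    ttl: u32,
}

fn free_core(
    queue: &mut VecDeque<DemoEntry>,
    mut occupying: DemoEntry,
    reason: FreedReason,
    max_availability_timeouts: u32,
) {
    match reason {
        // The candidate was made available: nothing returns to the queue.
        FreedReason::Concluded => {},
        FreedReason::TimedOut => {
            occupying.availability_timeouts += 1;
            // Only retry while the timeout budget allows it; queue position is illustrative.
            if occupying.availability_timeouts <= max_availability_timeouts {
                queue.push_front(occupying);
            }
        },
    }
}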
@@ -786,20 +634,18 @@ fn schedule_clears_availability_cores() { ); { - let claimqueue = Scheduler::claimqueue(); + let claimqueue = ClaimQueue::::get(); let claimqueue_0 = claimqueue.get(&CoreIndex(0)).unwrap().clone(); let claimqueue_2 = claimqueue.get(&CoreIndex(2)).unwrap().clone(); let entry_ttl = 8; assert_eq!(claimqueue_0.len(), 1); assert_eq!(claimqueue_2.len(), 1); - assert_eq!( - claimqueue_0, - vec![Some(ParasEntry::new(Assignment::new(chain_a), entry_ttl))], - ); - assert_eq!( - claimqueue_2, - vec![Some(ParasEntry::new(Assignment::new(chain_c), entry_ttl))], - ); + let queue_0_expectation: VecDeque> = + vec![ParasEntry::new(assignment_a, entry_ttl as u32)].into_iter().collect(); + let queue_2_expectation: VecDeque> = + vec![ParasEntry::new(assignment_c, entry_ttl as u32)].into_iter().collect(); + assert_eq!(claimqueue_0, queue_0_expectation); + assert_eq!(claimqueue_2, queue_2_expectation); // The freed cores should be `Free` in `AvailabilityCores`. let cores = AvailabilityCores::::get(); @@ -813,32 +659,28 @@ fn schedule_clears_availability_cores() { fn schedule_rotates_groups() { let config = { let mut config = default_config(); - - // make sure on demand requests don't retry-out - config.on_demand_retries = config.group_rotation_frequency * 3; - config.on_demand_cores = 2; config.scheduling_lookahead = 1; config }; let rotation_frequency = config.group_rotation_frequency; - let on_demand_cores = config.on_demand_cores; + let on_demand_cores = 2; let genesis_config = genesis_config(&config); - let thread_a = ParaId::from(1_u32); - let thread_b = ParaId::from(2_u32); + let para_a = ParaId::from(1_u32); + let para_b = ParaId::from(2_u32); - let assignment_a = Assignment { para_id: thread_a }; - let assignment_b = Assignment { para_id: thread_b }; + let assignment_a = Assignment::Bulk(para_a); + let assignment_b = Assignment::Bulk(para_b); new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().on_demand_cores, 3); + MockAssigner::set_core_count(on_demand_cores); - schedule_blank_para(thread_a, ParaKind::Parathread); - schedule_blank_para(thread_b, ParaKind::Parathread); + schedule_blank_para(para_a); + schedule_blank_para(para_b); - // start a new session to activate, 5 validators for 5 cores. + // start a new session to activate, 2 validators for 2 cores. 
run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: config.clone(), @@ -854,14 +696,8 @@ fn schedule_rotates_groups() { let session_start_block = Scheduler::session_start_block(); assert_eq!(session_start_block, 1); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_a, - QueuePushDirection::Back - )); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_b, - QueuePushDirection::Back - )); + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); let mut now = 2; run_to_block(now, |_| None); @@ -909,16 +745,20 @@ fn on_demand_claims_are_pruned_after_timing_out() { let max_retries = 20; let mut config = default_config(); config.scheduling_lookahead = 1; - config.on_demand_cores = 2; - config.on_demand_retries = max_retries; let genesis_config = genesis_config(&config); - let thread_a = ParaId::from(1_u32); + let para_a = ParaId::from(1_u32); - let assignment_a = Assignment { para_id: thread_a }; + let assignment_a = Assignment::Bulk(para_a); new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(thread_a, ParaKind::Parathread); + MockAssigner::set_core_count(2); + // Need more timeouts for this test + MockAssigner::set_assignment_provider_config(AssignmentProviderConfig { + max_availability_timeouts: max_retries, + ttl: BlockNumber::from(5u32), + }); + schedule_blank_para(para_a); // #1 let mut now = 1; @@ -934,23 +774,20 @@ fn on_demand_claims_are_pruned_after_timing_out() { _ => None, }); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_a.clone(), - QueuePushDirection::Back - )); + MockAssigner::add_test_assignment(assignment_a.clone()); // #2 now += 1; run_to_block(now, |_| None); assert_eq!(Scheduler::claimqueue().len(), 1); // ParaId a is in the claimqueue. - assert!(claimqueue_contains_para_ids::(vec![thread_a])); + assert!(claimqueue_contains_para_ids::(vec![para_a])); - Scheduler::occupied(vec![(CoreIndex(0), thread_a)].into_iter().collect()); + Scheduler::occupied(vec![(CoreIndex(0), para_a)].into_iter().collect()); // ParaId a is no longer in the claimqueue. - assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + assert!(!claimqueue_contains_para_ids::(vec![para_a])); // It is in availability cores. - assert!(availability_cores_contains_para_ids::(vec![thread_a])); + assert!(availability_cores_contains_para_ids::(vec![para_a])); // #3 now += 1; @@ -966,36 +803,32 @@ fn on_demand_claims_are_pruned_after_timing_out() { ] .into_iter() .collect(); - Scheduler::update_claimqueue(just_updated, now); + Scheduler::free_cores_and_fill_claimqueue(just_updated, now); // ParaId a exists in the claim queue until max_retries is reached. if n < max_retries + now { - assert!(claimqueue_contains_para_ids::(vec![thread_a])); + assert!(claimqueue_contains_para_ids::(vec![para_a])); } else { - assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + assert!(!claimqueue_contains_para_ids::(vec![para_a])); } let core_assignments = Scheduler::scheduled_paras().collect(); - // Occupy the cores based on the result of update_claimqueue. Scheduler::occupied(core_assignments); } // ParaId a does not exist in the claimqueue/availability_cores after // threshold has been reached. 
- assert!(!claimqueue_contains_para_ids::(vec![thread_a])); - assert!(!availability_cores_contains_para_ids::(vec![thread_a])); + assert!(!claimqueue_contains_para_ids::(vec![para_a])); + assert!(!availability_cores_contains_para_ids::(vec![para_a])); // #25 now += max_retries + 2; // Add assignment back to the mix. - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_a.clone(), - QueuePushDirection::Back - )); + MockAssigner::add_test_assignment(assignment_a.clone()); run_to_block(now, |_| None); - assert!(claimqueue_contains_para_ids::(vec![thread_a])); + assert!(claimqueue_contains_para_ids::(vec![para_a])); // #26 now += 1; @@ -1017,24 +850,23 @@ fn on_demand_claims_are_pruned_after_timing_out() { } } - Scheduler::update_claimqueue(just_updated, now); + Scheduler::free_cores_and_fill_claimqueue(just_updated, now); // ParaId a exists in the claim queue until groups are rotated. if n < 31 { - assert!(claimqueue_contains_para_ids::(vec![thread_a])); + assert!(claimqueue_contains_para_ids::(vec![para_a])); } else { - assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + assert!(!claimqueue_contains_para_ids::(vec![para_a])); } let core_assignments = Scheduler::scheduled_paras().collect(); - // Occupy the cores based on the result of update_claimqueue. Scheduler::occupied(core_assignments); } // ParaId a does not exist in the claimqueue/availability_cores after // being concluded - assert!(!claimqueue_contains_para_ids::(vec![thread_a])); - assert!(!availability_cores_contains_para_ids::(vec![thread_a])); + assert!(!claimqueue_contains_para_ids::(vec![para_a])); + assert!(!availability_cores_contains_para_ids::(vec![para_a])); }); } @@ -1047,40 +879,7 @@ fn availability_predicate_works() { assert!(paras_availability_period < group_rotation_frequency); - let chain_a = ParaId::from(1_u32); - let thread_a = ParaId::from(2_u32); - new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(chain_a, ParaKind::Parachain); - schedule_blank_para(thread_a, ParaKind::Parathread); - - // start a new session with our chain registered. - run_to_block(1, |number| match number { - 1 => Some(SessionChangeNotification { - new_config: default_config(), - validators: vec![ - ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ], - ..Default::default() - }), - _ => None, - }); - - // assign some availability cores. - { - let entry_ttl = 10_000; - AvailabilityCores::::mutate(|cores| { - cores[0] = - CoreOccupied::Paras(ParasEntry::new(Assignment::new(chain_a), entry_ttl)); - cores[1] = - CoreOccupied::Paras(ParasEntry::new(Assignment::new(thread_a), entry_ttl)); - }); - } - run_to_block(1 + paras_availability_period, |_| None); assert!(!Scheduler::availability_timeout_check_required()); @@ -1103,29 +902,25 @@ fn availability_predicate_works() { // check the threshold is exact. 
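// `availability_predicate_works` probes the exact block at which a candidate pending availability
// is considered timed out. The pallet's actual check also involves the validator group rotation
// schedule; this standalone sketch only models the `paras_availability_period` comparison that
// the "threshold is exact" assertion is about.
fn availability_timed_out(pending_since: u32, now: u32, availability_period: u32) -> bool {
    now.saturating_sub(pending_since) >= availability_period
}

#[test]
fn threshold_is_exact() {
    let availability_period = 3;
    let pending_since = 10;
    // One block too early: not yet timed out.
    assert!(!availability_timed_out(pending_since, 12, availability_period));
    // Exactly at the threshold: timed out.
    assert!(availability_timed_out(pending_since, 13, availability_period));
}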
assert!(!pred(would_be_timed_out + 1).timed_out); } - - run_to_block(1 + group_rotation_frequency + paras_availability_period, |_| None); }); } #[test] -fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { - let mut config = default_config(); - config.on_demand_cores = 1; - - let genesis_config = genesis_config(&config); +fn next_up_on_available_uses_next_scheduled_or_none() { + let genesis_config = genesis_config(&default_config()); - let thread_a = ParaId::from(1_u32); - let thread_b = ParaId::from(2_u32); + let para_a = ParaId::from(1_u32); + let para_b = ParaId::from(2_u32); new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(thread_a, ParaKind::Parathread); - schedule_blank_para(thread_b, ParaKind::Parathread); + MockAssigner::set_core_count(1); + schedule_blank_para(para_a); + schedule_blank_para(para_b); - // start a new session to activate, 5 validators for 5 cores. + // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: config.clone(), + new_config: default_config(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Eve.public()), @@ -1135,18 +930,18 @@ fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { _ => None, }); - let thread_entry_a = ParasEntry { - assignment: Assignment { para_id: thread_a }, - availability_timeouts: 0, - ttl: 5, + let entry_a = ParasEntry { + assignment: Assignment::Bulk(para_a), + availability_timeouts: 0 as u32, + ttl: 5 as u32, }; - let thread_entry_b = ParasEntry { - assignment: Assignment { para_id: thread_b }, - availability_timeouts: 0, - ttl: 5, + let entry_b = ParasEntry { + assignment: Assignment::Bulk(para_b), + availability_timeouts: 0 as u32, + ttl: 5 as u32, }; - Scheduler::add_to_claimqueue(CoreIndex(0), thread_entry_a.clone()); + Scheduler::add_to_claimqueue(CoreIndex(0), entry_a.clone()); run_to_block(2, |_| None); @@ -1155,22 +950,22 @@ fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { assert_eq!(Scheduler::availability_cores().len(), 1); let mut map = BTreeMap::new(); - map.insert(CoreIndex(0), thread_a); + map.insert(CoreIndex(0), para_a); Scheduler::occupied(map); let cores = Scheduler::availability_cores(); match &cores[0] { - CoreOccupied::Paras(entry) => assert_eq!(entry, &thread_entry_a), - _ => panic!("with no chains, only core should be a thread core"), + CoreOccupied::Paras(entry) => assert_eq!(entry, &entry_a), + _ => panic!("There should only be one test assigner core"), } assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); - Scheduler::add_to_claimqueue(CoreIndex(0), thread_entry_b); + Scheduler::add_to_claimqueue(CoreIndex(0), entry_b); assert_eq!( Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: thread_b, collator: None } + ScheduledCore { para_id: para_b, collator: None } ); } }); @@ -1178,25 +973,23 @@ fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { #[test] fn next_up_on_time_out_reuses_claim_if_nothing_queued() { - let mut config = default_config(); - config.on_demand_cores = 1; - - let genesis_config = genesis_config(&config); + let genesis_config = genesis_config(&default_config()); - let thread_a = ParaId::from(1_u32); - let thread_b = ParaId::from(2_u32); + let para_a = ParaId::from(1_u32); + let para_b = ParaId::from(2_u32); - let assignment_a = Assignment { para_id: thread_a }; - let assignment_b = Assignment { para_id: thread_b }; + 
let assignment_a = Assignment::Bulk(para_a); + let assignment_b = Assignment::Bulk(para_b); new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(thread_a, ParaKind::Parathread); - schedule_blank_para(thread_b, ParaKind::Parathread); + MockAssigner::set_core_count(1); + schedule_blank_para(para_a); + schedule_blank_para(para_b); - // start a new session to activate, 5 validators for 5 cores. + // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: config.clone(), + new_config: default_config(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Eve.public()), @@ -1206,10 +999,7 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { _ => None, }); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_a.clone(), - QueuePushDirection::Back - )); + MockAssigner::add_test_assignment(assignment_a.clone()); run_to_block(2, |_| None); @@ -1218,150 +1008,62 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { assert_eq!(Scheduler::availability_cores().len(), 1); let mut map = BTreeMap::new(); - map.insert(CoreIndex(0), thread_a); + map.insert(CoreIndex(0), para_a); Scheduler::occupied(map); let cores = Scheduler::availability_cores(); match cores.get(0).unwrap() { - CoreOccupied::Paras(entry) => assert_eq!(entry.assignment, assignment_a.clone()), - _ => panic!("with no chains, only core should be a thread core"), + CoreOccupied::Paras(entry) => { + assert_eq!(entry.assignment, assignment_a.clone()); + }, + _ => panic!("There should only be a single test assigner core"), } // There's nothing more to pop for core 0 from the assignment provider. - assert!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(thread_a)).is_none() - ); + assert!(MockAssigner::pop_assignment_for_core(CoreIndex(0)).is_none()); assert_eq!( Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: thread_a, collator: None } + ScheduledCore { para_id: para_a, collator: None } ); - assert_ok!(OnDemandAssigner::add_on_demand_assignment( - assignment_b.clone(), - QueuePushDirection::Back - )); + MockAssigner::add_test_assignment(assignment_b.clone()); // Pop assignment_b into the claimqueue - Scheduler::update_claimqueue(BTreeMap::new(), 2); + Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), 2); //// Now that there is an earlier next-up, we use that. assert_eq!( Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: thread_b, collator: None } + ScheduledCore { para_id: para_b, collator: None } ); } }); } #[test] -fn next_up_on_available_is_parachain_always() { +fn session_change_requires_reschedule_dropping_removed_paras() { let mut config = default_config(); - config.on_demand_cores = 0; + config.scheduling_lookahead = 1; let genesis_config = genesis_config(&config); - let chain_a = ParaId::from(1_u32); - - new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(chain_a, ParaKind::Parachain); - - // start a new session to activate, 5 validators for 5 cores. 
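// `next_up_on_time_out_reuses_claim_if_nothing_queued` relies on a simple rule: if a core times
// out and nothing newer waits in its claim queue, the currently occupying claim is offered again;
// if something is queued, that takes precedence. A standalone sketch of that selection with a
// simplified `DemoScheduledCore`; it is a model of the observed behaviour, not the pallet's code.
use std::collections::VecDeque;

#[derive(Clone, Debug, PartialEq)]
struct DemoScheduledCore {
    para_id: u32,
}

fn next_up_on_time_out(
    queue: &VecDeque<u32>,  // para ids waiting in this core's claim queue
    occupying: Option<u32>, // para id currently occupying the core, if any
) -> Option<DemoScheduledCore> {
    queue
        .front()
        .copied()
        // Fall back to re-offering the current occupant when the queue is empty.
        .or(occupying)
        .map(|para_id| DemoScheduledCore { para_id })
}

#[test]
fn falls_back_to_current_occupant() {
    let empty: VecDeque<u32> = VecDeque::new();
    assert_eq!(next_up_on_time_out(&empty, Some(1)), Some(DemoScheduledCore { para_id: 1 }));
    let queued: VecDeque<u32> = vec![2].into_iter().collect();
    assert_eq!(next_up_on_time_out(&queued, Some(1)), Some(DemoScheduledCore { para_id: 2 }));
}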
- run_to_block(1, |number| match number { - 1 => Some(SessionChangeNotification { - new_config: config.clone(), - validators: vec![ - ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ], - ..Default::default() - }), - _ => None, - }); - - run_to_block(2, |_| None); - - { - assert_eq!(Scheduler::claimqueue().len(), 1); - assert_eq!(Scheduler::availability_cores().len(), 1); - - Scheduler::occupied(vec![(CoreIndex(0), chain_a)].into_iter().collect()); - - let cores = Scheduler::availability_cores(); - match &cores[0] { - CoreOccupied::Paras(pe) if pe.para_id() == chain_a => {}, - _ => panic!("with no threads, only core should be a chain core"), - } - // Now that there is an earlier next-up, we use that. - assert_eq!( - Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: chain_a, collator: None } - ); - } - }); -} + let para_a = ParaId::from(1_u32); + let para_b = ParaId::from(2_u32); -#[test] -fn next_up_on_time_out_is_parachain_always() { - let mut config = default_config(); - config.on_demand_cores = 0; - - let genesis_config = genesis_config(&config); - - let chain_a = ParaId::from(1_u32); + let assignment_a = Assignment::Bulk(para_a); + let assignment_b = Assignment::Bulk(para_b); new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(chain_a, ParaKind::Parachain); - - // start a new session to activate, 5 validators for 5 cores. - run_to_block(1, |number| match number { - 1 => Some(SessionChangeNotification { - new_config: config.clone(), - validators: vec![ - ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ], - ..Default::default() - }), - _ => None, - }); - - run_to_block(2, |_| None); + // Setting explicit core count + MockAssigner::set_core_count(5); + let assignment_provider_ttl = MockAssigner::get_provider_config(CoreIndex::from(0)).ttl; - { - assert_eq!(Scheduler::claimqueue().len(), 1); - assert_eq!(Scheduler::availability_cores().len(), 1); - - Scheduler::occupied(vec![(CoreIndex(0), chain_a)].into_iter().collect()); - - let cores = Scheduler::availability_cores(); - match &cores[0] { - CoreOccupied::Paras(pe) if pe.para_id() == chain_a => {}, - _ => panic!("Core should be occupied by chain_a ParaId"), - } - - // Now that there is an earlier next-up, we use that. - assert_eq!( - Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: chain_a, collator: None } - ); - } - }); -} + schedule_blank_para(para_a); + schedule_blank_para(para_b); -#[test] -fn session_change_requires_reschedule_dropping_removed_paras() { - let mut config = default_config(); - config.scheduling_lookahead = 1; - let genesis_config = genesis_config(&config); - - assert_eq!(default_config().on_demand_cores, 3); - new_test_ext(genesis_config).execute_with(|| { - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - - // ensure that we have 5 groups by registering 2 parachains. 
- schedule_blank_para(chain_a, ParaKind::Parachain); - schedule_blank_para(chain_b, ParaKind::Parachain); + // Add assignments + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { @@ -1386,7 +1088,11 @@ fn session_change_requires_reschedule_dropping_removed_paras() { let groups = ValidatorGroups::::get(); assert_eq!(groups.len(), 5); - assert_ok!(Paras::schedule_para_cleanup(chain_b)); + assert_ok!(Paras::schedule_para_cleanup(para_b)); + + // Add assignment + MockAssigner::add_test_assignment(assignment_a.clone()); + run_to_end_of_block(2, |number| match number { 2 => Some(SessionChangeNotification { new_config: default_config(), @@ -1405,17 +1111,17 @@ fn session_change_requires_reschedule_dropping_removed_paras() { _ => None, }); - Scheduler::update_claimqueue(BTreeMap::new(), 3); + Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), 3); assert_eq!( Scheduler::claimqueue(), vec![( CoreIndex(0), - vec![Some(ParasEntry::new( - Assignment::new(chain_a), + vec![ParasEntry::new( + Assignment::Bulk(para_a), // At end of block 2 - config.on_demand_ttl + 2 - ))] + assignment_provider_ttl + 2 + )] .into_iter() .collect() )] @@ -1423,8 +1129,12 @@ fn session_change_requires_reschedule_dropping_removed_paras() { .collect() ); - // Add parachain back - schedule_blank_para(chain_b, ParaKind::Parachain); + // Add para back + schedule_blank_para(para_b); + + // Add assignments + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); run_to_block(3, |number| match number { 3 => Some(SessionChangeNotification { @@ -1449,28 +1159,28 @@ fn session_change_requires_reschedule_dropping_removed_paras() { let groups = ValidatorGroups::::get(); assert_eq!(groups.len(), 5); - Scheduler::update_claimqueue(BTreeMap::new(), 4); + Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), 4); assert_eq!( Scheduler::claimqueue(), vec![ ( CoreIndex(0), - vec![Some(ParasEntry::new( - Assignment::new(chain_a), + vec![ParasEntry::new( + Assignment::Bulk(para_a), // At block 3 - config.on_demand_ttl + 3 - ))] + assignment_provider_ttl + 3 + )] .into_iter() .collect() ), ( CoreIndex(1), - vec![Some(ParasEntry::new( - Assignment::new(chain_b), + vec![ParasEntry::new( + Assignment::Bulk(para_b), // At block 3 - config.on_demand_ttl + 3 - ))] + assignment_provider_ttl + 3 + )] .into_iter() .collect() ), diff --git a/polkadot/runtime/parachains/src/session_info/tests.rs b/polkadot/runtime/parachains/src/session_info/tests.rs index 727b7c79fbaeae18be2c2a40f4f38f562d1481dd..92a50575deda8413abb18f892680d693c148d0cb 100644 --- a/polkadot/runtime/parachains/src/session_info/tests.rs +++ b/polkadot/runtime/parachains/src/session_info/tests.rs @@ -62,7 +62,7 @@ fn run_to_block( fn default_config() -> HostConfiguration { HostConfiguration { - on_demand_cores: 1, + coretime_cores: 1, dispute_period: 2, needed_approvals: 3, ..Default::default() diff --git a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index def608882050ab57a4a95fe8c5b14a4852fbebb4..426993ffa65a73b83d4077ee31b64217c41fb168 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -523,21 +523,21 @@ fn overweight_queue_works() { assert_last_events( [ pallet_message_queue::Event::::Processed { - id: hash_1, + id: hash_1.into(), origin: Ump(UmpQueueId::Para(para_a)), 
weight_used: Weight::from_parts(301, 301), success: true, } .into(), pallet_message_queue::Event::::OverweightEnqueued { - id: hash_2, + id: hash_2.into(), origin: Ump(UmpQueueId::Para(para_a)), page_index: 0, message_index: 1, } .into(), pallet_message_queue::Event::::OverweightEnqueued { - id: hash_3, + id: hash_3.into(), origin: Ump(UmpQueueId::Para(para_a)), page_index: 0, message_index: 2, @@ -565,7 +565,7 @@ fn overweight_queue_works() { )); assert_last_event( pallet_message_queue::Event::::Processed { - id: hash_3, + id: hash_3.into(), origin: Ump(UmpQueueId::Para(para_a)), weight_used: Weight::from_parts(501, 501), success: true, diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 9693d351cf407afb326c4fa7583c13224c8364eb..c7236572ed7dc566f921541b6c73aecfc5c1597c 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -7,11 +7,14 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.188", default-features = false } +serde = { version = "1.0.193", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" smallvec = "1.8.0" @@ -53,7 +56,7 @@ pallet-collective = { path = "../../../substrate/frame/collective", default-feat pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false } pallet-elections-phragmen = { path = "../../../substrate/frame/elections-phragmen", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } +pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } @@ -105,7 +108,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", [dev-dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } -remote-externalities = { package = "frame-remote-externalities" , path = "../../../substrate/utils/frame/remote-externalities" } +remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } sp-trie = { path = "../../../substrate/primitives/trie" } separator = "0.4.1" serde_json = "1.0.108" @@ -116,7 +119,7 @@ tokio = { version = "1.24.2", features = ["macros"] } substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] no_std = [] std = [ "authority-discovery-primitives/std", @@ -313,11 +316,11 @@ try-runtime = [ ] # Set timing constants (e.g. session period) to faster versions to speed up testing. 
-fast-runtime = [ "rococo-runtime-constants/fast-runtime" ] +fast-runtime = ["rococo-runtime-constants/fast-runtime"] -runtime-metrics = [ "runtime-parachains/runtime-metrics", "sp-io/with-tracing" ] +runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 8ff6d57ea5b5f1e399928ad2f3c956e3d984eab0..1e6b0a5f903c7880b2e69665a700ee1c2dcaeb5b 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] smallvec = "1.8.0" @@ -17,9 +20,10 @@ sp-weights = { path = "../../../../substrate/primitives/weights", default-featur sp-core = { path = "../../../../substrate/primitives/core", default-features = false } xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../xcm/xcm-builder", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "primitives/std", @@ -27,6 +31,7 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-weights/std", + "xcm-builder/std", "xcm/std", ] diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 2f641d60fc8b098274f39a5e72ef2e42ede50c27..9209045364c28bc585c548d6d2b30176bd52bb20 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -103,7 +103,8 @@ pub mod fee { /// System Parachains. pub mod system_parachain { - use xcm::latest::prelude::*; + use primitives::Id; + use xcm_builder::IsChildSystemParachain; /// Network's Asset Hub parachain ID. pub const ASSET_HUB_ID: u32 = 1000; @@ -111,14 +112,15 @@ pub mod system_parachain { pub const CONTRACTS_ID: u32 = 1002; /// Encointer parachain ID. pub const ENCOINTER_ID: u32 = 1003; + /// People parachain ID. + pub const PEOPLE_ID: u32 = 1004; /// BridgeHub parachain ID. pub const BRIDGE_HUB_ID: u32 = 1013; + /// Brokerage parachain ID. + pub const BROKER_ID: u32 = 1005; - frame_support::match_types! { - pub type SystemParachains: impl Contains = { - MultiLocation { parents: 0, interior: X1(Parachain(ASSET_HUB_ID | CONTRACTS_ID | ENCOINTER_ID | BRIDGE_HUB_ID)) } - }; - } + /// All system parachains of Rococo. + pub type SystemParachains = IsChildSystemParachain; } /// Rococo Treasury pallet instance. diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..eddbfacc3b1da0cf4917e8e7133abb9bac57a915 --- /dev/null +++ b/polkadot/runtime/rococo/src/impls.rs @@ -0,0 +1,180 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::xcm_config; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; +use primitives::Balance; +use rococo_runtime_constants::currency::*; +use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use sp_std::{marker::PhantomData, prelude::*}; +use xcm::{latest::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. +#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. +#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. +pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/rococo.rs#L29 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +// Note / Warning: This implementation should only be used in a transactional context. If not, then +// an error could result in assets being burned. 
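// Editor's worked example, not part of the patch: the shape of the arithmetic in
// `calculate_remote_deposit` above, using made-up unit costs so the composition is easy
// to follow (the real values come from `deposit(items, bytes) / 100` and
// `EXISTENTIAL_DEPOSIT / 10` as shown).
type DemoBalance = u128;

fn remote_deposit_demo(bytes: u32, subs: u32) -> DemoBalance {
    // Hypothetical per-unit parachain costs, NOT the real Rococo numbers.
    let basic: DemoBalance = 1_000; // stands in for deposit(1, 17) / 100
    let per_byte: DemoBalance = 10; // stands in for deposit(0, 1) / 100
    let per_sub: DemoBalance = 2_000; // stands in for deposit(1, 53) / 100
    let para_ed: DemoBalance = 330; // stands in for EXISTENTIAL_DEPOSIT / 10

    let id_deposit = basic + per_byte * bytes as DemoBalance;
    let subs_deposit = per_sub * subs as DemoBalance;
    // identity info + sub accounts + 2x ED (account existence plus room to transact)
    id_deposit + subs_deposit + 2 * para_ed
}
// e.g. 100 bytes of identity data and 2 subs:
//   (1_000 + 10 * 100) + (2_000 * 2) + 2 * 330 = 2_000 + 4_000 + 660 = 6_660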
+impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let roc = MultiAsset { id: Concrete(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: MultiLocation = MultiLocation::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. + + // withdraw the asset from `who` + let who_origin = + Junction::AccountId32 { network: None, id: who.clone().into() }.into_location(); + let _withdrawn = xcm_config::LocalAssetTransactor::withdraw_asset(&roc, &who_origin, None) + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "withdraw_asset(what: {:?}, who_origin: {:?}) error: {:?}", + roc, who_origin, err + ); + pallet_xcm::Error::::LowBalance + })?; + + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &roc, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "can_check_out(destination: {:?}, asset: {:?}, _) error: {:?}", + destination, roc, err + ); + pallet_xcm::Error::::CannotCheckOutTeleport + })?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &roc, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let roc_reanchored: MultiAssets = vec![MultiAsset { + id: Concrete(MultiLocation::new(1, Here)), + fun: Fungible(total_to_send), + }] + .into(); + + let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(roc_reanchored), + // Deposit into the user's account. + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. 
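// Editor's sketch, not part of the patch: why the `Transact` call data below works with
// nothing more than `poke.encode()`. SCALE writes the `#[codec(index = ...)]` byte for
// each enum level and then the field bytes, so the blob starts with the remote pallet
// index (248) followed by the call index (1) and the raw 32-byte account id. The demo
// types stand in for `PeopleRuntimePallets` / `IdentityMigratorCalls` above.
use parity_scale_codec::Encode as DemoEncode;

#[derive(parity_scale_codec::Encode)]
enum DemoPallets {
    #[codec(index = 248)]
    IdentityMigrator(DemoCalls),
}

#[derive(parity_scale_codec::Encode)]
enum DemoCalls {
    #[codec(index = 1)]
    PokeDeposit([u8; 32]),
}

fn demo_call_encoding() {
    let who = [7u8; 32];
    let bytes = DemoPallets::IdentityMigrator(DemoCalls::PokeDeposit(who)).encode();
    assert_eq!(&bytes[..2], &[248, 1]); // pallet index, then call index
    assert_eq!(bytes.len(), 2 + 32); // followed by the raw account bytes
}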
+ Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: remote_weight_limit, + call: poke.encode().into(), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedMultiLocation::V3(destination)), + Box::new(VersionedXcm::V3(program)), + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 697d22c311ae7b4d74c3d5717073c2a646e3afb1..a15911c21f65e58b90e22eb3af207545605a3d94 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -23,14 +23,17 @@ use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, + slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, + AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; +use rococo_runtime_constants::system_parachain::BROKER_ID; use runtime_common::{ - assigned_slots, auctions, claims, crowdloan, impl_runtime_weights, + assigned_slots, auctions, claims, crowdloan, identity_migrator, impl_runtime_weights, impls::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter, }, @@ -41,9 +44,10 @@ use scale_info::TypeInfo; use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*}; use runtime_parachains::{ - assigner as parachains_assigner, assigner_on_demand as parachains_assigner_on_demand, + assigner_coretime as parachains_assigner_coretime, + assigner_on_demand as parachains_assigner_on_demand, assigner_parachains as parachains_assigner_parachains, - configuration as parachains_configuration, disputes as parachains_disputes, + configuration as parachains_configuration, coretime, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, inclusion::{AggregateMessageOrigin, UmpQueueId}, @@ -63,13 +67,13 @@ use beefy_primitives::{ }; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, EitherOf, EitherOfDiverse, Everything, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, - StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, Contains, EitherOf, EitherOfDiverse, EverythingBut, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, + ProcessMessageError, StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -77,19 +81,18 @@ use frame_support::{ use frame_system::EnsureRoot; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use sp_core::{ConstU128, OpaqueMetadata, H256}; use sp_runtime::{ create_runtime_str, generic, 
impl_opaque_keys, traits::{ - AccountIdLookup, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, - Extrinsic as ExtrinsicT, IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, - Verify, + BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, Extrinsic as ExtrinsicT, + IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, + ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, + RuntimeAppPublic, RuntimeDebug, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] @@ -113,6 +116,10 @@ mod weights; // XCM configurations. pub mod xcm_config; +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + // Governance and configurations. pub mod governance; use governance::{ @@ -145,10 +152,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 10020, + spec_version: 1_005_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 22, + transaction_version: 24, state_version: 1, }; @@ -165,34 +172,37 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + parameter_types! 
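// Editor's sketch, not part of the patch: the effect of pairing `IsIdentityCall` with
// `EverythingBut` as the `BaseCallFilter` in the system config below. Every call is
// allowed except those dispatched to the Identity pallet, which freezes identity state
// ahead of the migration to the People Chain. Assumes `RuntimeCall` and `IsIdentityCall`
// are in scope as in this runtime.
use frame_support::traits::{Contains, EverythingBut};

fn call_is_allowed(call: &RuntimeCall) -> bool {
    // `true` = dispatchable, `false` = filtered out by the system pallet.
    <EverythingBut<IsIdentityCall> as Contains<RuntimeCall>>::contains(call)
}
// e.g. any `RuntimeCall::Identity(..)` returns `false`; `RuntimeCall::Balances(..)` returns `true`.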
{ pub const Version: RuntimeVersion = VERSION; pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::RelayChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; + type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type AccountId = AccountId; - type Lookup = AccountIdLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = SS58Prefix; - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -269,8 +279,7 @@ impl pallet_babe::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; type MaxNominators = ConstU32<0>; - type KeyOwnerProof = - >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_babe::EquivocationReportSystem; } @@ -338,14 +347,53 @@ impl pallet_timestamp::Config for Runtime { impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = ImOnline; + type EventHandler = (); +} + +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] +pub struct OldSessionKeys { + pub grandpa: ::Public, + pub babe: ::Public, + pub im_online: pallet_im_online::sr25519::AuthorityId, + pub para_validator: ::Public, + pub para_assignment: ::Public, + pub authority_discovery: ::Public, + pub beefy: ::Public, +} + +impl OpaqueKeys for OldSessionKeys { + type KeyTypeIdProviders = (); + fn key_ids() -> &'static [KeyTypeId] { + &[ + <::Public>::ID, + <::Public>::ID, + sp_core::crypto::key_types::IM_ONLINE, + <::Public>::ID, + <::Public>::ID, + <::Public>::ID, + <::Public>::ID, + ] + } + fn get_raw(&self, i: KeyTypeId) -> &[u8] { + match i { + <::Public>::ID => self.grandpa.as_ref(), + <::Public>::ID => self.babe.as_ref(), + sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), + <::Public>::ID => self.para_validator.as_ref(), + <::Public>::ID => + self.para_assignment.as_ref(), + <::Public>::ID => + self.authority_discovery.as_ref(), + <::Public>::ID => self.beefy.as_ref(), + _ => &[], + } + } } impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, pub babe: Babe, - pub im_online: ImOnline, pub para_validator: Initializer, pub para_assignment: ParaSessionInfo, pub authority_discovery: AuthorityDiscovery, @@ -353,6 +401,18 @@ impl_opaque_keys! { } } +// remove this when removing `OldSessionKeys` +fn transform_session_keys(_val: AccountId, old: OldSessionKeys) -> SessionKeys { + SessionKeys { + grandpa: old.grandpa, + babe: old.babe, + para_validator: old.para_validator, + para_assignment: old.para_assignment, + authority_discovery: old.authority_discovery, + beefy: old.beefy, + } +} + /// Special `ValidatorIdOf` implementation that is just returning the input as result. pub struct ValidatorIdOf; impl sp_runtime::traits::Convert> for ValidatorIdOf { @@ -497,22 +557,6 @@ impl pallet_authority_discovery::Config for Runtime { type MaxAuthorities = MaxAuthorities; } -parameter_types! 
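// Editor's sketch, not part of the patch: the invariant the session-key migration relies
// on. `OldSessionKeys` still advertises seven key types (including ImOnline) so the
// queued on-chain keys can be decoded, while the new `SessionKeys` advertises six, and
// `transform_session_keys` above simply carries the surviving six across unchanged.
fn demo_session_key_ids() {
    use sp_runtime::traits::OpaqueKeys;

    let old_ids = OldSessionKeys::key_ids(); // 7 ids, includes IM_ONLINE
    let new_ids = SessionKeys::key_ids(); // 6 ids, IM_ONLINE dropped
    assert_eq!(old_ids.len(), new_ids.len() + 1);
    assert!(!new_ids.contains(&sp_core::crypto::key_types::IM_ONLINE));
}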
{ - pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); -} - -impl pallet_im_online::Config for Runtime { - type AuthorityId = ImOnlineId; - type RuntimeEvent = RuntimeEvent; - type ValidatorSet = Historical; - type NextSessionRotation = Babe; - type ReportUnresponsiveness = Offences; - type UnsignedPriority = ImOnlineUnsignedPriority; - type WeightInfo = weights::pallet_im_online::WeightInfo; - type MaxKeys = MaxKeys; - type MaxPeerInHeartbeats = MaxPeerInHeartbeats; -} - parameter_types! { pub const MaxSetIdSessionEntries: u32 = BondingDuration::get() * SessionsPerEra::get(); } @@ -523,7 +567,7 @@ impl pallet_grandpa::Config for Runtime { type MaxAuthorities = MaxAuthorities; type MaxNominators = ConstU32<0>; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_grandpa::EquivocationReportSystem; } @@ -703,6 +747,7 @@ impl pallet_vesting::Config for Runtime { type MinVestedTransfer = MinVestedTransfer; type WeightInfo = weights::pallet_vesting::WeightInfo; type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; const MAX_VESTING_SCHEDULES: u32 = 28; } @@ -762,7 +807,6 @@ impl InstanceFilter for ProxyType { // Specifically omitting the entire Balances pallet RuntimeCall::Session(..) | RuntimeCall::Grandpa(..) | - RuntimeCall::ImOnline(..) | RuntimeCall::Treasury(..) | RuntimeCall::Bounties(..) | RuntimeCall::ChildBounties(..) | @@ -893,6 +937,7 @@ impl parachains_paras::Config for Runtime { type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; type OnNewHead = Registrar; + type AssignCoretime = CoretimeAssignmentProvider; } parameter_types! { @@ -959,7 +1004,22 @@ impl parachains_paras_inherent::Config for Runtime { } impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = ParaAssignmentProvider; + // If you change this, make sure the `Assignment` type of the new provider is binary compatible, + // otherwise provide a migration. + type AssignmentProvider = CoretimeAssignmentProvider; +} + +parameter_types! { + pub const BrokerId: u32 = BROKER_ID; +} + +impl coretime::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type BrokerId = BrokerId; + type WeightInfo = weights::runtime_parachains_coretime::WeightInfo; + type SendXcm = crate::xcm_config::XcmRouter; } parameter_types! { @@ -975,15 +1035,13 @@ impl parachains_assigner_on_demand::Config for Runtime { impl parachains_assigner_parachains::Config for Runtime {} -impl parachains_assigner::Config for Runtime { - type OnDemandAssignmentProvider = OnDemandAssignmentProvider; - type ParachainsAssignmentProvider = ParachainsAssignmentProvider; -} +impl parachains_assigner_coretime::Config for Runtime {} impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = EnsureRoot; type WeightInfo = weights::runtime_parachains_initializer::WeightInfo; + type CoretimeOnNewSession = Coretime; } impl parachains_disputes::Config for Runtime { @@ -1078,6 +1136,14 @@ impl auctions::Config for Runtime { type WeightInfo = weights::runtime_common_auctions::WeightInfo; } +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // To be changed to `EnsureSigned` once there is a People Chain to migrate to. 
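// Editor's note, not part of the patch: `BrokerId` wires the coretime pallet to the
// broker system chain (BROKER_ID = 1005) by parachain id alone; on the relay side that
// id corresponds to the child location sketched below, which is what messages routed
// through `SendXcm = XcmRouter` ultimately address. How the pallet assembles those
// messages internally is not shown here; this only illustrates the id-to-location step.
use frame_support::traits::Get;
use xcm::latest::prelude::*;

fn broker_location() -> MultiLocation {
    Parachain(BrokerId::get()).into_location()
}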
+ type Reaper = EnsureRoot; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo; +} + type NisCounterpartInstance = pallet_balances::Instance2; impl pallet_balances::Config for Runtime { type Balance = Balance; @@ -1268,8 +1334,7 @@ construct_runtime! { TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 33, // Consensus support. - // Authorship must be before session in order to note author in the correct session and era - // for im-online. + // Authorship must be before session in order to note author in the correct session and era. Authorship: pallet_authorship::{Pallet, Storage} = 5, Offences: pallet_offences::{Pallet, Storage, Event} = 7, Historical: session_historical::{Pallet} = 34, @@ -1283,7 +1348,6 @@ construct_runtime! { Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 8, Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned} = 10, - ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config} = 11, AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config} = 12, // Governance stuff; uncallable initially. @@ -1339,7 +1403,7 @@ construct_runtime! { // NIS pallet. Nis: pallet_nis::{Pallet, Call, Storage, Event, HoldReason} = 38, -// pub type NisCounterpartInstance = pallet_balances::Instance2; + // pub type NisCounterpartInstance = pallet_balances::Instance2; NisCounterpartBalances: pallet_balances:: = 45, // Parachains pallets. Start indices at 50 to leave room. @@ -1357,19 +1421,23 @@ construct_runtime! { ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 62, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 63, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 64, - ParaAssignmentProvider: parachains_assigner::{Pallet, Storage} = 65, OnDemandAssignmentProvider: parachains_assigner_on_demand::{Pallet, Call, Storage, Event} = 66, ParachainsAssignmentProvider: parachains_assigner_parachains::{Pallet} = 67, + CoretimeAssignmentProvider: parachains_assigner_coretime::{Pallet, Storage} = 68, // Parachain Onboarding Pallets. Start indices at 70 to leave room. Registrar: paras_registrar::{Pallet, Call, Storage, Event, Config} = 70, Slots: slots::{Pallet, Call, Storage, Event} = 71, Auctions: auctions::{Pallet, Call, Storage, Event} = 72, Crowdloan: crowdloan::{Pallet, Call, Storage, Event} = 73, + Coretime: coretime::{Pallet, Call, Event} = 74, // Pallet for sending XCM. XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, + // Pallet for migrating Identity to a parachain. To be removed post-migration. 
+ IdentityMigrator: identity_migrator::{Pallet, Call, Event} = 248, + ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call} = 250, AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event, Config} = 251, @@ -1426,6 +1494,38 @@ pub mod migrations { use frame_support::traits::LockIdentifier; use frame_system::pallet_prelude::BlockNumberFor; + use sp_arithmetic::traits::Zero; + #[cfg(feature = "try-runtime")] + use sp_core::crypto::ByteArray; + + pub struct GetLegacyLeaseImpl; + impl coretime::migration::GetLegacyLease for GetLegacyLeaseImpl { + fn get_parachain_lease_in_blocks(para: ParaId) -> Option { + let now = frame_system::Pallet::::block_number(); + let mut leases = slots::Pallet::::lease(para).into_iter(); + let initial_sum = if let Some(Some(_)) = leases.next() { + let (_, progress) = + slots::Pallet::::lease_period_index_plus_progress(now)?; + LeasePeriod::get().saturating_sub(progress) + } else { + // The parachain lease did not yet start + Zero::zero() + }; + log::trace!( + target: "coretime-migration", + "Getting lease info for para {:?}:\n LEASE_PERIOD: {:?}, initial_sum: {:?}, number of leases: {:?}", + para, + LeasePeriod::get(), + initial_sum, + slots::Pallet::::lease(para).len(), + ); + + Some(leases.into_iter().fold(initial_sum, |sum, lease| { + // If the parachain lease did not yet start, we ignore them by multiplying by `0`. + sum + LeasePeriod::get() * lease.map_or(0, |_| 1) + })) + } + } parameter_types! { pub const DemocracyPalletName: &'static str = "Democracy"; @@ -1434,6 +1534,7 @@ pub mod migrations { pub const PhragmenElectionPalletName: &'static str = "PhragmenElection"; pub const TechnicalMembershipPalletName: &'static str = "TechnicalMembership"; pub const TipsPalletName: &'static str = "Tips"; + pub const ImOnlinePalletName: &'static str = "ImOnline"; pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; } @@ -1470,13 +1571,85 @@ pub mod migrations { type PalletName = TipsPalletName; } + /// Upgrade Session keys to exclude `ImOnline` key. + /// When this is removed, should also remove `OldSessionKeys`. + pub struct UpgradeSessionKeys; + const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; + + impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); + return Ok(Vec::new()) + } + + log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); + let key_ids = SessionKeys::key_ids(); + frame_support::ensure!( + key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, + "New session keys contain the ImOnline key that should have been removed", + ); + let storage_key = pallet_session::QueuedKeys::::hashed_key(); + let mut state: Vec = Vec::new(); + frame_support::storage::unhashed::get::>( + &storage_key, + ) + .ok_or::("Queued keys are not available".into())? 
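// Editor's worked example, not part of the patch: the arithmetic performed by
// `get_parachain_lease_in_blocks` above, with made-up numbers. Say LeasePeriod = 100
// blocks, the current period is 30 blocks in (progress = 30), and the para holds the
// ongoing lease plus two further `Some(..)` entries:
//
//   initial_sum = 100 - 30 = 70      (remainder of the ongoing period)
//   total       = 70 + 2 * 100 = 270 (one full period per further `Some(..)` entry)
//
// `None` entries contribute 0, and if the first entry is `None` (lease not yet started)
// the running total starts from 0 instead of the remainder.
fn remaining_lease_demo(lease_period: u64, progress: u64, further_leases: u64) -> u64 {
    (lease_period - progress) + further_leases * lease_period
}
// remaining_lease_demo(100, 30, 2) == 270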
+ .into_iter() + .for_each(|(id, keys)| { + state.extend_from_slice(id.as_slice()); + for key_id in key_ids { + state.extend_from_slice(keys.get_raw(*key_id)); + } + }); + frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); + Ok(state) + } + + fn on_runtime_upgrade() -> Weight { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::info!("Skipping session keys upgrade: already applied"); + return ::DbWeight::get().reads(1) + } + log::trace!("Upgrading session keys"); + Session::upgrade_keys::(transform_session_keys); + Perbill::from_percent(50) * BlockWeights::get().max_block + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade( + old_state: sp_std::vec::Vec, + ) -> Result<(), sp_runtime::TryRuntimeError> { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); + return Ok(()) + } + + let key_ids = SessionKeys::key_ids(); + let mut new_state: Vec = Vec::new(); + pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { + new_state.extend_from_slice(id.as_slice()); + for key_id in key_ids { + new_state.extend_from_slice(keys.get_raw(*key_id)); + } + }); + frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); + frame_support::ensure!( + old_state == new_state, + "Pre-upgrade and post-upgrade keys do not match!" + ); + log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); + Ok(()) + } + } + /// Unreleased migrations. Add new ones here: pub type Unreleased = ( pallet_society::migrations::MigrateToV2, - pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, assigned_slots::migration::v1::MigrateToV1, - parachains_scheduler::migration::v1::MigrateToV1, + parachains_scheduler::migration::MigrateV1ToV2, parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::MigrateToV1, @@ -1497,6 +1670,18 @@ pub mod migrations { frame_support::migrations::RemovePallet::DbWeight>, frame_support::migrations::RemovePallet::DbWeight>, frame_support::migrations::RemovePallet::DbWeight>, + + pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, + + // Upgrade `SessionKeys` to exclude `ImOnline` + UpgradeSessionKeys, + + // Remove `im-online` pallet on-chain storage + frame_support::migrations::RemovePallet::DbWeight>, + parachains_configuration::migration::v11::MigrateToV11, + // This needs to come after the `parachains_configuration` above as we are reading the configuration. + coretime::migration::MigrateToCoretime, ); } @@ -1545,8 +1730,10 @@ mod benches { // the that path resolves correctly in the generated file. 
[runtime_common::assigned_slots, AssignedSlots] [runtime_common::auctions, Auctions] + [runtime_common::coretime, Coretime] [runtime_common::crowdloan, Crowdloan] [runtime_common::claims, Claims] + [runtime_common::identity_migrator, IdentityMigrator] [runtime_common::slots, Slots] [runtime_common::paras_registrar, Registrar] [runtime_parachains::configuration, Configuration] @@ -1566,7 +1753,6 @@ mod benches { [pallet_conviction_voting, ConvictionVoting] [pallet_nis, Nis] [pallet_identity, Identity] - [pallet_im_online, ImOnline] [pallet_indices, Indices] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1586,7 +1772,7 @@ mod benches { [pallet_asset_rate, AssetRate] [pallet_whitelist, Whitelist] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] [pallet_xcm_benchmarks::generic, pallet_xcm_benchmarks::generic::Pallet::] ); @@ -1658,8 +1844,8 @@ sp_api::impl_runtime_apis! { } } - #[api_version(8)] - impl primitives::runtime_api::ParachainHost for Runtime { + #[api_version(10)] + impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() } @@ -1802,10 +1988,17 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::async_backing_params::() } + fn approval_voting_params() -> ApprovalVotingParams { + parachains_staging_runtime_api_impl::approval_voting_params::() + } + fn disabled_validators() -> Vec { parachains_staging_runtime_api_impl::disabled_validators::() } + fn node_features() -> NodeFeatures { + parachains_staging_runtime_api_impl::node_features::() + } } #[api_version(3)] @@ -2062,6 +2255,8 @@ sp_api::impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -2079,6 +2274,7 @@ sp_api::impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ @@ -2095,6 +2291,47 @@ sp_api::impl_runtime_apis! { impl frame_system_benchmarking::Config for Runtime {} impl frame_benchmarking::baseline::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + Parachain(43211234).into(), + )) + } + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + // Relay supports only native token, either reserve transfer it to non-system parachains, + // or teleport it to system parachain. Use the teleport case for benchmarking as it's + // slightly heavier. 
+ // Relay/native token can be teleported to/from AH. + let native_location = Here.into(); + let dest = crate::xcm_config::AssetHub::get(); + pallet_xcm::benchmarking::helpers::native_teleport_as_asset_transfer::( + native_location, + dest + ) + } + } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; type AccountIdConverter = LocationConverter; diff --git a/polkadot/runtime/rococo/src/weights/frame_system.rs b/polkadot/runtime/rococo/src/weights/frame_system.rs index 7765d669a577cc110722b4453dfa33b9dbe6bd35..2e49483dcc62728f3554bb1364efd740f8b03fd2 100644 --- a/polkadot/runtime/rococo/src/weights/frame_system.rs +++ b/polkadot/runtime/rococo/src/weights/frame_system.rs @@ -141,4 +141,31 @@ impl frame_system::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 9c563a67d98b721b08cb3d84c4f5562682749b1d..3613fb4305ba0f7a35190a7ef788979c2f241207 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -46,10 +46,12 @@ pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_claims; pub mod runtime_common_crowdloan; +pub mod runtime_common_identity_migrator; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; pub mod runtime_parachains_assigner_on_demand; pub mod runtime_parachains_configuration; +pub mod runtime_parachains_coretime; pub mod runtime_parachains_disputes; pub mod runtime_parachains_hrmp; pub mod runtime_parachains_inclusion; diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index aafded3f7319fd3717f8e9a04ca4af80bd34547a..177407ef7088b5c9bdcc460f36cb7d6485743a71 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -48,6 +48,10 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + fn transfer_assets() -> Weight { + // TODO: run benchmarks + Weight::zero() + } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs new file mode 100644 index 0000000000000000000000000000000000000000..cec357453b67be400c0191ac7d5c12e6961a4bee --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `runtime_common::identity_migrator` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=2 +// --repeat=1 +// --pallet=runtime_common::identity_migrator +// --extrinsic=* +// --output=./migrator-release.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::identity_migrator`. 
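// Editor's suggestion, not part of the patch: `transfer_assets` above currently returns
// `Weight::zero()` pending a benchmark run, which under-charges callers of the new
// extrinsic. One hedged interim placeholder, reusing weights already benchmarked on this
// same `pallet_xcm::WeightInfo` impl, could look like:
//
//   fn transfer_assets() -> Weight {
//       // take the heavier of the two existing transfer paths as a stop-gap bound
//       Self::reserve_transfer_assets().max(Self::teleport_assets())
//   }
//
// Whether that bound is adequate still needs to be confirmed by real benchmarks.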
+pub struct WeightInfo(PhantomData); +impl runtime_common::identity_migrator::WeightInfo for WeightInfo { + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 20]`. + /// The range of component `s` is `[0, 100]`. + fn reap_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` + // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` + // Minimum execution time: 163_756_000 picoseconds. + Weight::from_parts(158_982_500, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 1_143_629 + .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) + // Standard Error: 228_725 + .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `7229` + // Estimated: `11003` + // Minimum execution time: 137_570_000 picoseconds. 
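// Editor's worked example, not part of the patch: evaluating the `reap_identity`
// component formula above at its maxima, r = 20 registrars and s = 100 sub-accounts,
// before the DbWeight terms (8 reads, 5 + s writes) are added on top:
//
//   ref_time   = 158_982_500 + 238_675 * 20 + 1_529_645 * 100
//              = 158_982_500 + 4_773_500 + 152_964_500 = 316_720_500 ps
//   proof_size = 11_003 + 8 * 20 + 33 * 100 = 14_463 bytes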
+ Weight::from_parts(137_570_000, 0) + .saturating_add(Weight::from_parts(0, 11003)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index 29f387657786afd1e07c207d733b7cfddfd4d83a..34541b83597e6284a401a171e703b200366114a0 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::configuration // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,11 +56,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_051_000 picoseconds. - Weight::from_parts(9_496_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_793_000 picoseconds. + Weight::from_parts(8_192_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -72,11 +72,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_104_000 picoseconds. - Weight::from_parts(9_403_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_819_000 picoseconds. 
+ Weight::from_parts(8_004_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -88,11 +88,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_112_000 picoseconds. - Weight::from_parts(9_495_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_760_000 picoseconds. + Weight::from_parts(8_174_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,11 +114,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_011_000 picoseconds. - Weight::from_parts(9_460_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_814_000 picoseconds. + Weight::from_parts(8_098_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,11 +130,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_940_000 picoseconds. - Weight::from_parts(10_288_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_028_000 picoseconds. + Weight::from_parts(10_386_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -146,11 +146,27 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_perbill() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_192_000 picoseconds. - Weight::from_parts(9_595_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_867_000 picoseconds. 
+ Weight::from_parts(8_191_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_158_000 picoseconds. + Weight::from_parts(10_430_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs new file mode 100644 index 0000000000000000000000000000000000000000..d9f2d45207b923e3afe661a6021629cb8441970e --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs @@ -0,0 +1,73 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_parachains::coretime` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=runtime_common::coretime +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +use runtime_parachains::configuration::{self, WeightInfo as ConfigWeightInfo}; + +/// Weight functions for `runtime_common::coretime`. 
+pub struct WeightInfo(PhantomData); +impl runtime_parachains::coretime::WeightInfo for WeightInfo { + fn request_core_count() -> Weight { + ::WeightInfo::set_config_with_u32() + } + /// Storage: `CoreTimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoreTimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoreTimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoreTimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 100]`. + fn assign_core(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3541` + // Minimum execution time: 6_275_000 picoseconds. + Weight::from_parts(6_883_543, 0) + .saturating_add(Weight::from_parts(0, 3541)) + // Standard Error: 202 + .saturating_add(Weight::from_parts(15_028, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 0814b77414f2bb83ee3f70c4df451789d3f5e081..4f9ab0d661117fe398277784a86609a1a0c6d911 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -36,15 +36,16 @@ use runtime_common::{ }; use sp_core::ConstU32; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter as XcmCurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, - DescribeFamily, FixedWeightBounds, HashedDescription, IsChildSystemParachain, IsConcrete, - MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, - XcmFeeToAccount, + ChildParachainConvertsVia, DescribeBodyTerminal, DescribeFamily, FixedWeightBounds, + HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, OriginToPluralityVoice, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -70,6 +71,7 @@ pub type LocationConverter = ( /// point of view of XCM-only concepts like `MultiLocation` and `MultiAsset`. /// /// Ours is only aware of the Balances pallet, which is mapped to `RocLocation`. +#[allow(deprecated)] pub type LocalAssetTransactor = XcmCurrencyAdapter< // Use this currency: Balances, @@ -118,6 +120,7 @@ parameter_types! { pub const Contracts: MultiLocation = Parachain(CONTRACTS_ID).into_location(); pub const Encointer: MultiLocation = Parachain(ENCOINTER_ID).into_location(); pub const BridgeHub: MultiLocation = Parachain(BRIDGE_HUB_ID).into_location(); + pub const Broker: MultiLocation = Parachain(BROKER_ID).into_location(); pub const Tick: MultiLocation = Parachain(100).into_location(); pub const Trick: MultiLocation = Parachain(110).into_location(); pub const Track: MultiLocation = Parachain(120).into_location(); @@ -128,6 +131,7 @@ parameter_types! 
{ pub const RocForContracts: (MultiAssetFilter, MultiLocation) = (Roc::get(), Contracts::get()); pub const RocForEncointer: (MultiAssetFilter, MultiLocation) = (Roc::get(), Encointer::get()); pub const RocForBridgeHub: (MultiAssetFilter, MultiLocation) = (Roc::get(), BridgeHub::get()); + pub const RocForBroker: (MultiAssetFilter, MultiLocation) = (Roc::get(), Broker::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; } @@ -139,6 +143,7 @@ pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, + xcm_builder::Case, ); match_types! { @@ -211,11 +216,6 @@ parameter_types! { pub const FellowsBodyId: BodyId = BodyId::Technical; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); -} - /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior /// location of this chain. pub type LocalOriginToLocation = ( @@ -269,7 +269,5 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 29debad7b53b6a3875bbb92bd841cf32ae54b315..585f16ac86f6e5427aed300ff328721768cd4f2a 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -7,13 +7,16 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false } +serde = { version = "1.0.193", default-features = false } serde_derive = { version = "1.0.117", optional = true } smallvec = "1.8.0" @@ -77,7 +80,7 @@ serde_json = "1.0.108" substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] no_std = [] only-staking = [] runtime-metrics = [ diff --git a/polkadot/runtime/test-runtime/constants/Cargo.toml b/polkadot/runtime/test-runtime/constants/Cargo.toml index d83e92a6ce8863cc529262b3e9c0edb666e2f618..2b387bbd3072a91bc00afa7326d93f66a644f39e 100644 --- a/polkadot/runtime/test-runtime/constants/Cargo.toml +++ b/polkadot/runtime/test-runtime/constants/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] smallvec = "1.8.0" @@ -17,7 +20,7 @@ sp-weights = { path = "../../../../substrate/primitives/weights", default-featur sp-core = { path = "../../../../substrate/primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "primitives/std", diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 596e65eca0680b3ee1b987e0e1f387b501762fb7..f472b619ba759cca80ebaf51056cb5a20f6eb165 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ 
b/polkadot/runtime/test-runtime/src/lib.rs @@ -42,10 +42,10 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{Everything, KeyOwnerProofSystem, WithdrawReasons}, + traits::{KeyOwnerProofSystem, WithdrawReasons}, }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_session::historical as session_historical; @@ -74,7 +74,7 @@ use sp_runtime::{ SaturatedConversion, StaticLookup, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, KeyTypeId, Perbill, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] @@ -139,29 +139,19 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::RelayChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; type BlockWeights = BlockWeights; type BlockLength = BlockLength; - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; type Hash = HashT; - type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = Indices; type Block = Block; - type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); type SS58Prefix = SS58Prefix; - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -366,6 +356,7 @@ impl pallet_staking::Config for Runtime { type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<5900>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; type EventListeners = (); @@ -473,6 +464,7 @@ impl pallet_vesting::Config for Runtime { type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; const MAX_VESTING_SCHEDULES: u32 = 28; } @@ -528,6 +520,7 @@ impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = frame_system::EnsureRoot; type WeightInfo = (); + type CoretimeOnNewSession = (); } impl parachains_session_info::Config for Runtime { @@ -545,6 +538,15 @@ impl parachains_paras::Config for Runtime { type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; type OnNewHead = (); + type AssignCoretime = (); +} + +parameter_types! { + pub const BrokerId: u32 = 10u32; +} + +parameter_types! { + pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); } impl parachains_dmp::Config for Runtime {} @@ -826,7 +828,7 @@ sp_api::impl_runtime_apis! 
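// Editor's note, not part of the patch: the `#[derive_impl(.. RelayChainDefaultConfig as
// frame_system::DefaultConfig)]` attribute used for `frame_system::Config` here and in
// the Rococo runtime above is why associated types such as `RuntimeOrigin`,
// `RuntimeCall`, `Hashing`, `PalletInfo`, `OnSetCode` and the account hooks could be
// deleted from the impl blocks: the macro fills any omitted item from the prelude's
// defaults (resolving the runtime-aggregate types to this runtime's generated ones).
// Illustrative shape only:
//
//   #[derive_impl(frame_system::config_preludes::RelayChainDefaultConfig as frame_system::DefaultConfig)]
//   impl frame_system::Config for Runtime {
//       type Block = Block;          // explicit: overrides the default
//       type AccountId = AccountId;  // explicit: overrides the default
//       // everything omitted here comes from the prelude via the macro
//   }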
{ } } - impl primitives::runtime_api::ParachainHost for Runtime { + impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { runtime_impl::validators::() } diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index 400658b13863645c6c5bc7a99ce9401013fc3cf8..ae4faecf70013d8b093e87e71a7acbdde3ee577d 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -127,11 +127,6 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(xcm::latest::Junctions::Here.into()); -} - impl pallet_xcm::Config for crate::Runtime { // The config types here are entirely configurable, since the only one that is sorely needed // is `XcmExecutor`, which will be used in unit tests located in xcm-executor. @@ -157,7 +152,5 @@ impl pallet_xcm::Config for crate::Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index eaebf01e3a7647ef277d20c0dc749323a1591af6..335ac14d47a94d64e40c3fcf53d555283981832c 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -7,13 +7,16 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } -serde = { version = "1.0.188", default-features = false } +serde = { version = "1.0.193", default-features = false } serde_derive = { version = "1.0.117", optional = true } smallvec = "1.8.0" @@ -46,7 +49,7 @@ frame-support = { path = "../../../substrate/frame/support", default-features = frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } westend-runtime-constants = { package = "westend-runtime-constants", path = "constants", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } +pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } @@ -117,7 +120,7 @@ hex-literal = "0.4.1" tiny-keccak = { version = "2.0.2", features = ["keccak"] } keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } serde_json = "1.0.108" -remote-externalities = { package = "frame-remote-externalities" , path = "../../../substrate/utils/frame/remote-externalities" } +remote-externalities = { package = 
"frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } tokio = { version = "1.24.2", features = ["macros"] } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } @@ -125,7 +128,7 @@ sp-tracing = { path = "../../../substrate/primitives/tracing", default-features substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] no_std = [] only-staking = [] std = [ @@ -339,9 +342,9 @@ try-runtime = [ # Set timing constants (e.g. session period) to faster versions to speed up testing. fast-runtime = [] -runtime-metrics = [ "runtime-parachains/runtime-metrics", "sp-io/with-tracing" ] +runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index 2243210975b1df6bc0193fc60b9003e2199a5121..d2e86970e509389d491b29298b7f66f790696d45 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] smallvec = "1.8.0" @@ -17,9 +20,10 @@ sp-weights = { path = "../../../../substrate/primitives/weights", default-featur sp-core = { path = "../../../../substrate/primitives/core", default-features = false } xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../xcm/xcm-builder", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "primitives/std", @@ -27,5 +31,6 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-weights/std", + "xcm-builder/std", "xcm/std", ] diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index a06b3ba602a365c0c9d2fe006ae27ffc7003ae97..3b44684c203bf3d2bc89f4ad724fb262b2a44b9d 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -98,7 +98,8 @@ pub mod fee { /// System Parachains. pub mod system_parachain { - use xcm::latest::prelude::*; + use primitives::Id; + use xcm_builder::IsChildSystemParachain; /// Network's Asset Hub parachain ID. pub const ASSET_HUB_ID: u32 = 1000; @@ -106,12 +107,11 @@ pub mod system_parachain { pub const COLLECTIVES_ID: u32 = 1001; /// BridgeHub parachain ID. pub const BRIDGE_HUB_ID: u32 = 1002; + /// Brokerage parachain ID. + pub const BROKER_ID: u32 = 1005; - frame_support::match_types! { - pub type SystemParachains: impl Contains = { - MultiLocation { parents: 0, interior: X1(Parachain(ASSET_HUB_ID | COLLECTIVES_ID | BRIDGE_HUB_ID ))} - }; - } + /// All system parachains of Westend. + pub type SystemParachains = IsChildSystemParachain; } /// Westend Treasury pallet instance. @@ -126,6 +126,7 @@ pub mod xcm { const ROOT_INDEX: u32 = 0; // The bodies corresponding to the Polkadot OpenGov Origins. 
pub const FELLOWSHIP_ADMIN_INDEX: u32 = 1; + pub const TREASURER_INDEX: u32 = 2; } } diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..5f23bd373b13fcd5377e5f3721f1ba9bc6e260d7 --- /dev/null +++ b/polkadot/runtime/westend/src/impls.rs @@ -0,0 +1,180 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::xcm_config; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; +use primitives::Balance; +use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use sp_std::{marker::PhantomData, prelude::*}; +use westend_runtime_constants::currency::*; +use xcm::{latest::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. +#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. +#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. +pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. 
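// Hedged sketch (not part of the diff): the total described above is
// `id_deposit + subs_deposit + 2 * existential_deposit`. The constants here are
// hypothetical placeholders standing in for the People Chain values
// (`deposit(..) / 100` and `EXISTENTIAL_DEPOSIT / 10`); only the shape of the
// arithmetic is meant to match `calculate_remote_deposit`.
fn remote_deposit_sketch(bytes: u32, subs: u32) -> u128 {
    const BASIC: u128 = 1_000; // placeholder for deposit(1, 17) / 100
    const PER_BYTE: u128 = 10; // placeholder for deposit(0, 1) / 100
    const PER_SUB: u128 = 2_000; // placeholder for deposit(1, 53) / 100
    const ED: u128 = 100; // placeholder for EXISTENTIAL_DEPOSIT / 10

    let id_deposit = BASIC.saturating_add(PER_BYTE.saturating_mul(bytes as u128));
    let subs_deposit = PER_SUB.saturating_mul(subs as u128);
    // One existential deposit keeps the remote account alive; the second leaves the
    // user able to transact after the teleport.
    id_deposit.saturating_add(subs_deposit).saturating_add(ED.saturating_mul(2))
}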
Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/westend.rs#L28 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +// Note / Warning: This implementation should only be used in a transactional context. If not, then +// an error could result in assets being burned. +impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let wnd = MultiAsset { id: Concrete(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: MultiLocation = MultiLocation::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. + + // withdraw the asset from `who` + let who_origin = + Junction::AccountId32 { network: None, id: who.clone().into() }.into_location(); + let _withdrawn = xcm_config::LocalAssetTransactor::withdraw_asset(&wnd, &who_origin, None) + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "withdraw_asset(what: {:?}, who_origin: {:?}) error: {:?}", + wnd, who_origin, err + ); + pallet_xcm::Error::::LowBalance + })?; + + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "can_check_out(destination: {:?}, asset: {:?}, _) error: {:?}", + destination, wnd, err + ); + pallet_xcm::Error::::CannotCheckOutTeleport + })?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let wnd_reanchored: MultiAssets = vec![MultiAsset { + id: Concrete(MultiLocation::new(1, Here)), + fun: Fungible(total_to_send), + }] + .into(); + + let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. 
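// A hedged note on the remote call built in `on_reap_identity`: because
// `IdentityMigrator` carries `#[codec(index = 248)]` and `PokeDeposit` carries
// `#[codec(index = 1)]`, the SCALE bytes handed to `Transact` start with `[248, 1]`
// followed by the encoded account, which is exactly the layout of pallet index 248 /
// call index 1 on the destination runtime. A self-contained illustration with
// hypothetical mirror enums (`DemoPallets`, `DemoCalls`):
use parity_scale_codec::Encode;

#[derive(Encode)]
enum DemoPallets {
    #[codec(index = 248)]
    IdentityMigrator(DemoCalls),
}

#[derive(Encode)]
enum DemoCalls {
    #[codec(index = 1)]
    PokeDeposit([u8; 32]),
}

fn main() {
    let call = DemoPallets::IdentityMigrator(DemoCalls::PokeDeposit([7u8; 32]));
    let bytes = call.encode();
    assert_eq!(&bytes[..2], &[248u8, 1u8]); // pallet index, then call index
    assert_eq!(bytes.len(), 2 + 32); // followed by the 32-byte account id
}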
The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(wnd_reanchored), + // Deposit into the user's account. + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. + Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: remote_weight_limit, + call: poke.encode().into(), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedMultiLocation::V3(destination)), + Box::new(VersionedXcm::V3(program)), + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index fe9ed22f4375e2ac2bc8b76ccf19434c39e0d92d..fb54bec509b3c9f066b3eff3425e3b9c87c3f5ee 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -27,11 +27,11 @@ use beefy_primitives::{ }; use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, EitherOf, EitherOfDiverse, Everything, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, @@ -41,12 +41,13 @@ use frame_support::{ use frame_system::EnsureRoot; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, + slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, + AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, @@ -56,7 +57,7 @@ use primitives::{ use runtime_common::{ assigned_slots, auctions, crowdloan, elections::OnChainAccuracy, - impl_runtime_weights, + identity_migrator, impl_runtime_weights, impls::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter, }, @@ -84,11 +85,12 @@ use sp_runtime::{ curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - AccountIdLookup, BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, - IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, + BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, IdentityLookup, + Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, + ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, 
Perbill, Percent, Permill, + RuntimeAppPublic, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -113,12 +115,16 @@ use sp_runtime::traits::Get; pub use sp_runtime::BuildStorage; /// Constant values used within the runtime. -use westend_runtime_constants::{currency::*, fee::*, time::*}; +use westend_runtime_constants::{currency::*, fee::*, system_parachain::BROKER_ID, time::*}; mod bag_thresholds; mod weights; pub mod xcm_config; +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + // Governance and configurations. pub mod governance; use governance::{ @@ -141,10 +147,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 10020, + spec_version: 1_005_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 22, + transaction_version: 24, state_version: 1, }; @@ -161,34 +167,37 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::RelayChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; + type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type AccountId = AccountId; - type Lookup = AccountIdLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = SS58Prefix; - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -257,8 +266,7 @@ impl pallet_babe::Config for Runtime { type MaxAuthorities = MaxAuthorities; type MaxNominators = MaxNominators; - type KeyOwnerProof = - >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_babe::EquivocationReportSystem; @@ -310,7 +318,7 @@ impl pallet_beefy::Config for Runtime { type MaxSetIdSessionEntries = BeefySetIdSessionEntries; type OnNewValidatorSet = BeefyMmrLeaf; type WeightInfo = (); - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_beefy::EquivocationReportSystem; } @@ -402,7 +410,7 @@ impl pallet_timestamp::Config for Runtime { impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (Staking, ImOnline); + type EventHandler = Staking; } parameter_types! 
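// Hedged sketch (not part of the diff): with `BaseCallFilter = EverythingBut<IsIdentityCall>`
// above, a call passes the filter exactly when `IsIdentityCall::contains(&call)` is false,
// so every `RuntimeCall::Identity(..)` extrinsic is rejected before dispatch while all other
// calls are unaffected. Conceptually the filter reduces to:
fn call_is_allowed(call: &RuntimeCall) -> bool {
    // `EverythingBut<X>` is the complement of `X`'s `Contains` implementation.
    !matches!(call, RuntimeCall::Identity(_))
}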
{ @@ -410,14 +418,43 @@ parameter_types! { pub const Offset: BlockNumber = 0; } -impl_opaque_keys! { - pub struct OldSessionKeys { - pub grandpa: Grandpa, - pub babe: Babe, - pub im_online: ImOnline, - pub para_validator: Initializer, - pub para_assignment: ParaSessionInfo, - pub authority_discovery: AuthorityDiscovery, +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] +pub struct OldSessionKeys { + pub grandpa: ::Public, + pub babe: ::Public, + pub im_online: pallet_im_online::sr25519::AuthorityId, + pub para_validator: ::Public, + pub para_assignment: ::Public, + pub authority_discovery: ::Public, + pub beefy: ::Public, +} + +impl OpaqueKeys for OldSessionKeys { + type KeyTypeIdProviders = (); + fn key_ids() -> &'static [KeyTypeId] { + &[ + <::Public>::ID, + <::Public>::ID, + sp_core::crypto::key_types::IM_ONLINE, + <::Public>::ID, + <::Public>::ID, + <::Public>::ID, + <::Public>::ID, + ] + } + fn get_raw(&self, i: KeyTypeId) -> &[u8] { + match i { + <::Public>::ID => self.grandpa.as_ref(), + <::Public>::ID => self.babe.as_ref(), + sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), + <::Public>::ID => self.para_validator.as_ref(), + <::Public>::ID => + self.para_assignment.as_ref(), + <::Public>::ID => + self.authority_discovery.as_ref(), + <::Public>::ID => self.beefy.as_ref(), + _ => &[], + } } } @@ -425,7 +462,6 @@ impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, pub babe: Babe, - pub im_online: ImOnline, pub para_validator: Initializer, pub para_assignment: ParaSessionInfo, pub authority_discovery: AuthorityDiscovery, @@ -434,28 +470,14 @@ impl_opaque_keys! { } // remove this when removing `OldSessionKeys` -fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys { +fn transform_session_keys(_v: AccountId, old: OldSessionKeys) -> SessionKeys { SessionKeys { grandpa: old.grandpa, babe: old.babe, - im_online: old.im_online, para_validator: old.para_validator, para_assignment: old.para_assignment, authority_discovery: old.authority_discovery, - beefy: { - // From Session::upgrade_keys(): - // - // Care should be taken that the raw versions of the - // added keys are unique for every `ValidatorId, KeyTypeId` combination. - // This is an invariant that the session pallet typically maintains internally. - // - // So, produce a dummy value that's unique for the `ValidatorId, KeyTypeId` combination. - let mut id: BeefyId = sp_application_crypto::ecdsa::Public::from_raw([0u8; 33]).into(); - let id_raw: &mut [u8] = id.as_mut(); - id_raw[1..33].copy_from_slice(v.as_ref()); - id_raw[0..4].copy_from_slice(b"beef"); - id - }, + beefy: old.beefy, } } @@ -509,7 +531,6 @@ parameter_types! { pub const SignedDepositByte: Balance = deposit(0, 10) / 1024; // Each good submission will get 1 WND as reward pub SignedRewardBase: Balance = 1 * UNITS; - pub BetterUnsignedThreshold: Perbill = Perbill::from_rational(5u32, 10_000); // 1 hour session, 15 minutes unsigned phase, 4 offchain executions. pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 4; @@ -586,7 +607,6 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerConfig = Self; type SlashHandler = (); // burn slashes type RewardHandler = (); // nothing to do upon rewards - type BetterUnsignedThreshold = BetterUnsignedThreshold; type BetterSignedThreshold = (); type OffchainRepeat = OffchainRepeat; type MinerTxPriority = NposSolutionPriority; @@ -652,6 +672,7 @@ parameter_types! 
{ pub const MaxNominators: u32 = 64; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxNominations: u32 = ::LIMIT as u32; + pub const MaxControllersInDeprecationBatch: u32 = 751; } impl pallet_staking::Config for Runtime { @@ -666,7 +687,7 @@ impl pallet_staking::Config for Runtime { type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; - type AdminOrigin = EnsureRoot; + type AdminOrigin = EitherOf, StakingAdmin>; type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = MaxExposurePageSize; @@ -679,6 +700,7 @@ impl pallet_staking::Config for Runtime { type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; + type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; type EventListeners = NominationPools; type WeightInfo = weights::pallet_staking::WeightInfo; @@ -766,19 +788,6 @@ impl pallet_authority_discovery::Config for Runtime { parameter_types! { pub const NposSolutionPriority: TransactionPriority = TransactionPriority::max_value() / 2; - pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); -} - -impl pallet_im_online::Config for Runtime { - type AuthorityId = ImOnlineId; - type RuntimeEvent = RuntimeEvent; - type ValidatorSet = Historical; - type NextSessionRotation = Babe; - type ReportUnresponsiveness = Offences; - type UnsignedPriority = ImOnlineUnsignedPriority; - type WeightInfo = weights::pallet_im_online::WeightInfo; - type MaxKeys = MaxKeys; - type MaxPeerInHeartbeats = MaxPeerInHeartbeats; } parameter_types! { @@ -793,7 +802,7 @@ impl pallet_grandpa::Config for Runtime { type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_grandpa::EquivocationReportSystem; @@ -941,6 +950,7 @@ impl pallet_vesting::Config for Runtime { type MinVestedTransfer = MinVestedTransfer; type WeightInfo = weights::pallet_vesting::WeightInfo; type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; const MAX_VESTING_SCHEDULES: u32 = 28; } @@ -1008,7 +1018,6 @@ impl InstanceFilter for ProxyType { RuntimeCall::Staking(..) | RuntimeCall::Session(..) | RuntimeCall::Grandpa(..) | - RuntimeCall::ImOnline(..) | RuntimeCall::Utility(..) | RuntimeCall::Identity(..) | RuntimeCall::ConvictionVoting(..) | @@ -1140,6 +1149,7 @@ impl parachains_paras::Config for Runtime { type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; type OnNewHead = (); + type AssignCoretime = (); } parameter_types! { @@ -1206,7 +1216,13 @@ impl parachains_paras_inherent::Config for Runtime { } impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = ParaAssignmentProvider; + // If you change this, make sure the `Assignment` type of the new provider is binary compatible, + // otherwise provide a migration. + type AssignmentProvider = ParachainsAssignmentProvider; +} + +parameter_types! 
{ + pub const BrokerId: u32 = BROKER_ID; } impl parachains_assigner_parachains::Config for Runtime {} @@ -1215,6 +1231,7 @@ impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = EnsureRoot; type WeightInfo = weights::runtime_parachains_initializer::WeightInfo; + type CoretimeOnNewSession = (); } impl paras_sudo_wrapper::Config for Runtime {} @@ -1328,6 +1345,14 @@ impl auctions::Config for Runtime { type WeightInfo = weights::runtime_common_auctions::WeightInfo; } +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // To be changed to `EnsureSigned` once there is a People Chain to migrate to. + type Reaper = EnsureRoot; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo; +} + parameter_types! { pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); pub const MaxPointsToBalance: u8 = 10; @@ -1388,8 +1413,7 @@ construct_runtime! { TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 26, // Consensus support. - // Authorship must be before session in order to note author in the correct session and era - // for im-online and staking. + // Authorship must be before session in order to note author in the correct session and era. Authorship: pallet_authorship::{Pallet, Storage} = 5, Staking: pallet_staking::{Pallet, Call, Storage, Config, Event} = 6, Offences: pallet_offences::{Pallet, Storage, Event} = 7, @@ -1404,7 +1428,6 @@ construct_runtime! { Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 8, Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned} = 10, - ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config} = 11, AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config} = 12, // Utility module. @@ -1470,7 +1493,7 @@ construct_runtime! { ParaSessionInfo: parachains_session_info::{Pallet, Storage} = 52, ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 53, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 54, - ParaAssignmentProvider: parachains_assigner_parachains::{Pallet, Storage} = 55, + ParachainsAssignmentProvider: parachains_assigner_parachains::{Pallet} = 55, // Parachain Onboarding Pallets. Start indices at 60 to leave room. Registrar: paras_registrar::{Pallet, Call, Storage, Event, Config} = 60, @@ -1491,6 +1514,9 @@ construct_runtime! { // Root testing pallet. RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event} = 102, + + // Pallet for migrating Identity to a parachain. To be removed post-migration. + IdentityMigrator: identity_migrator::{Pallet, Call, Event} = 248, } } @@ -1533,31 +1559,105 @@ pub type Migrations = migrations::Unreleased; #[allow(deprecated, missing_docs)] pub mod migrations { use super::*; + #[cfg(feature = "try-runtime")] + use sp_core::crypto::ByteArray; + + parameter_types! { + pub const ImOnlinePalletName: &'static str = "ImOnline"; + } - /// Upgrade Session keys to include BEEFY key. + /// Upgrade Session keys to exclude `ImOnline` key. /// When this is removed, should also remove `OldSessionKeys`. 
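// Hedged sketch (not part of the diff): `UpgradeSessionKeys` below is a one-shot migration
// that may remain in the `Unreleased` tuple across releases, so it gates itself on the spec
// version recorded at the last runtime upgrade. The guard it applies reduces to:
fn should_run_session_keys_upgrade() -> bool {
    // `UPGRADE_SESSION_KEYS_FROM_SPEC` is the const defined just below; once the chain has
    // upgraded past it, the migration becomes a cheap no-op.
    System::last_runtime_upgrade_spec_version() <= UPGRADE_SESSION_KEYS_FROM_SPEC
}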
pub struct UpgradeSessionKeys; + const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; + impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); + return Ok(Vec::new()) + } + + log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); + let key_ids = SessionKeys::key_ids(); + frame_support::ensure!( + key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, + "New session keys contain the ImOnline key that should have been removed", + ); + let storage_key = pallet_session::QueuedKeys::::hashed_key(); + let mut state: Vec = Vec::new(); + frame_support::storage::unhashed::get::>( + &storage_key, + ) + .ok_or::("Queued keys are not available".into())? + .into_iter() + .for_each(|(id, keys)| { + state.extend_from_slice(id.as_slice()); + for key_id in key_ids { + state.extend_from_slice(keys.get_raw(*key_id)); + } + }); + frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); + Ok(state) + } + fn on_runtime_upgrade() -> Weight { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!("Skipping session keys upgrade: already applied"); + return ::DbWeight::get().reads(1) + } + log::info!("Upgrading session keys"); Session::upgrade_keys::(transform_session_keys); Perbill::from_percent(50) * BlockWeights::get().max_block } + + #[cfg(feature = "try-runtime")] + fn post_upgrade( + old_state: sp_std::vec::Vec, + ) -> Result<(), sp_runtime::TryRuntimeError> { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); + return Ok(()) + } + + let key_ids = SessionKeys::key_ids(); + let mut new_state: Vec = Vec::new(); + pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { + new_state.extend_from_slice(id.as_slice()); + for key_id in key_ids { + new_state.extend_from_slice(keys.get_raw(*key_id)); + } + }); + frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); + frame_support::ensure!( + old_state == new_state, + "Pre-upgrade and post-upgrade keys do not match!" + ); + log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); + Ok(()) + } } /// Unreleased migrations. 
Add new ones here: pub type Unreleased = ( - pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, pallet_staking::migrations::v14::MigrateToV14, assigned_slots::migration::v1::MigrateToV1, - parachains_scheduler::migration::v1::MigrateToV1, + parachains_scheduler::migration::MigrateV1ToV2, parachains_configuration::migration::v8::MigrateToV8, - UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::MigrateToV1, - pallet_nomination_pools::migration::versioned_migrations::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, - pallet_nomination_pools::migration::versioned_migrations::V6ToV7, + pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, + pallet_nomination_pools::migration::versioned::V7ToV8, + UpgradeSessionKeys, + frame_support::migrations::RemovePallet< + ImOnlinePalletName, + ::DbWeight, + >, + parachains_configuration::migration::v11::MigrateToV11, ); } @@ -1585,6 +1685,7 @@ mod benches { [runtime_common::assigned_slots, AssignedSlots] [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] + [runtime_common::identity_migrator, IdentityMigrator] [runtime_common::paras_registrar, Registrar] [runtime_common::slots, Slots] [runtime_parachains::configuration, Configuration] @@ -1603,7 +1704,6 @@ mod benches { [frame_election_provider_support, ElectionProviderBench::] [pallet_fast_unstake, FastUnstake] [pallet_identity, Identity] - [pallet_im_online, ImOnline] [pallet_indices, Indices] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1625,7 +1725,7 @@ mod benches { [pallet_whitelist, Whitelist] [pallet_asset_rate, AssetRate] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1698,8 +1798,8 @@ sp_api::impl_runtime_apis! { } } - #[api_version(8)] - impl primitives::runtime_api::ParachainHost for Runtime { + #[api_version(10)] + impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() } @@ -1842,9 +1942,17 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::async_backing_params::() } + fn approval_voting_params() -> ApprovalVotingParams { + parachains_staging_runtime_api_impl::approval_voting_params::() + } + fn disabled_validators() -> Vec { parachains_staging_runtime_api_impl::disabled_validators::() } + + fn node_features() -> NodeFeatures { + parachains_staging_runtime_api_impl::node_features::() + } } impl beefy_primitives::BeefyApi for Runtime { @@ -2144,6 +2252,7 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2171,12 +2280,52 @@ sp_api::impl_runtime_apis! 
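// Hedged sketch (not part of the diff): `frame_support::migrations::RemovePallet`, as used in
// the `Unreleased` tuple above, clears every storage key under the named pallet's prefix once
// the pallet itself has been dropped from `construct_runtime`. Its effect is roughly:
fn remove_im_online_storage() {
    let prefix = sp_io::hashing::twox_128(b"ImOnline");
    // Clears all keys starting with `twox_128("ImOnline")`; the real migration also charges
    // weight for the removal via its `DbWeight` type parameter.
    let _ = frame_support::storage::unhashed::clear_prefix(&prefix, None, None);
}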
{ use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} impl pallet_election_provider_support_benchmarking::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + crate::Junction::Parachain(43211234).into(), + )) + } + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + // Relay supports only native token, either reserve transfer it to non-system parachains, + // or teleport it to system parachain. Use the teleport case for benchmarking as it's + // slightly heavier. + + // Relay/native token can be teleported to/from AH. + let native_location = Here.into(); + let dest = crate::xcm_config::AssetHub::get(); + pallet_xcm::benchmarking::helpers::native_teleport_as_asset_transfer::( + native_location, + dest + ) + } + } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} impl runtime_parachains::disputes::slashing::benchmarking::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/weights/frame_system.rs b/polkadot/runtime/westend/src/weights/frame_system.rs index deef0959363c6431081ed154980c99f9f9c49e56..f679be5171517affeea382f530b28e72e540be5e 100644 --- a/polkadot/runtime/westend/src/weights/frame_system.rs +++ b/polkadot/runtime/westend/src/weights/frame_system.rs @@ -144,4 +144,31 @@ impl frame_system::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. 
+ Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 9ae6798d70b6e234c7ffbbfd99f2f97c56fb30a4..d8a2ae5d2da6fab158898e6cb6548f5f56aa612c 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -46,9 +46,12 @@ pub mod pallet_xcm; pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_crowdloan; +pub mod runtime_common_identity_migrator; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; +pub mod runtime_parachains_assigner_on_demand; pub mod runtime_parachains_configuration; +pub mod runtime_parachains_coretime; pub mod runtime_parachains_disputes; pub mod runtime_parachains_disputes_slashing; pub mod runtime_parachains_hrmp; diff --git a/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs b/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs index 49bc687a3e4fa2afba34567769bb3b68c2d55d89..6aa5ddd1ec8fb5251ac6bbd5059938e4bad93b22 100644 --- a/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs +++ b/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_nomination_pools` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -53,7 +53,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -78,20 +78,22 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3318` + // Measured: `3355` // Estimated: `8877` - // Minimum execution time: 187_795_000 picoseconds. - Weight::from_parts(193_857_000, 0) + // Minimum execution time: 173_707_000 picoseconds. + Weight::from_parts(179_920_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(19)) - .saturating_add(T::DbWeight::get().writes(12)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(13)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -110,22 +112,24 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3328` + // Measured: `3365` // Estimated: `8877` - // Minimum execution time: 186_245_000 picoseconds. - Weight::from_parts(190_916_000, 0) + // Minimum execution time: 174_414_000 picoseconds. 
+ Weight::from_parts(178_068_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(16)) - .saturating_add(T::DbWeight::get().writes(12)) + .saturating_add(T::DbWeight::get().reads(17)) + .saturating_add(T::DbWeight::get().writes(13)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -144,22 +148,24 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3274` + // Measured: `3312` // Estimated: `8799` - // Minimum execution time: 217_918_000 picoseconds. - Weight::from_parts(224_772_000, 0) + // Minimum execution time: 198_864_000 picoseconds. + Weight::from_parts(203_783_000, 0) .saturating_add(Weight::from_parts(0, 8799)) - .saturating_add(T::DbWeight::get().reads(16)) - .saturating_add(T::DbWeight::get().writes(12)) + .saturating_add(T::DbWeight::get().reads(17)) + .saturating_add(T::DbWeight::get().writes(13)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -168,10 +174,10 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_payout() -> Weight { // Proof Size summary in bytes: - // Measured: `1137` + // Measured: `1138` // Estimated: `4182` - // Minimum execution time: 76_958_000 picoseconds. 
- Weight::from_parts(78_278_000, 0) + // Minimum execution time: 70_250_000 picoseconds. + Weight::from_parts(72_231_000, 0) .saturating_add(Weight::from_parts(0, 4182)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -179,7 +185,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -210,16 +216,16 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3597` + // Measured: `3545` // Estimated: `8877` - // Minimum execution time: 170_992_000 picoseconds. - Weight::from_parts(179_987_000, 0) + // Minimum execution time: 155_853_000 picoseconds. + Weight::from_parts(161_032_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(20)) .saturating_add(T::DbWeight::get().writes(13)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -230,25 +236,27 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1670` + // Measured: `1744` // Estimated: `4764` - // Minimum execution time: 60_740_000 picoseconds. - Weight::from_parts(64_502_831, 0) + // Minimum execution time: 62_933_000 picoseconds. 
+ Weight::from_parts(65_847_171, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_724 - .saturating_add(Weight::from_parts(37_725, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Standard Error: 1_476 + .saturating_add(Weight::from_parts(59_648, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -261,6 +269,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) @@ -268,22 +278,22 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2098` + // Measured: `2134` // Estimated: `4764` - // Minimum execution time: 127_322_000 picoseconds. - Weight::from_parts(132_064_603, 0) + // Minimum execution time: 123_641_000 picoseconds. 
+ Weight::from_parts(127_222_589, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 3_424 - .saturating_add(Weight::from_parts(64_590, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(8)) + // Standard Error: 2_493 + .saturating_add(Weight::from_parts(83_361, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(9)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -292,16 +302,18 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Validators` (r:1 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:1 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) @@ -323,17 +335,15 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) /// Proof: 
`NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(s: u32, ) -> Weight { + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2454` + // Measured: `2453` // Estimated: `8538` - // Minimum execution time: 236_510_000 picoseconds. - Weight::from_parts(243_943_334, 0) + // Minimum execution time: 219_469_000 picoseconds. + Weight::from_parts(227_526_000, 0) .saturating_add(Weight::from_parts(0, 8538)) - // Standard Error: 4_864 - .saturating_add(Weight::from_parts(14_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(23)) - .saturating_add(T::DbWeight::get().writes(19)) + .saturating_add(T::DbWeight::get().reads(24)) + .saturating_add(T::DbWeight::get().writes(20)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -359,14 +369,12 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) @@ -376,21 +384,23 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1222` + // Measured: `1102` // Estimated: `8538` - // Minimum execution time: 197_883_000 picoseconds. 
- Weight::from_parts(201_750_000, 0) + // Minimum execution time: 166_466_000 picoseconds. + Weight::from_parts(171_425_000, 0) .saturating_add(Weight::from_parts(0, 8538)) - .saturating_add(T::DbWeight::get().reads(24)) - .saturating_add(T::DbWeight::get().writes(16)) + .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().writes(17)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -416,36 +426,36 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1779` + // Measured: `1738` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 65_505_000 picoseconds. - Weight::from_parts(67_148_657, 0) + // Minimum execution time: 59_650_000 picoseconds. + Weight::from_parts(60_620_077, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 9_115 - .saturating_add(Weight::from_parts(1_421_198, 0).saturating_mul(n.into())) + // Standard Error: 7_316 + .saturating_add(Weight::from_parts(1_467_406, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1367` + // Measured: `1363` // Estimated: `4556` - // Minimum execution time: 34_157_000 picoseconds. - Weight::from_parts(35_557_000, 0) + // Minimum execution time: 31_170_000 picoseconds. + Weight::from_parts(32_217_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::Metadata` (r:1 w:1) /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForMetadata` (r:1 w:1) @@ -453,13 +463,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `n` is `[1, 256]`. 
fn set_metadata(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `497` + // Measured: `498` // Estimated: `3735` - // Minimum execution time: 13_806_000 picoseconds. - Weight::from_parts(14_540_018, 0) + // Minimum execution time: 12_603_000 picoseconds. + Weight::from_parts(13_241_702, 0) .saturating_add(Weight::from_parts(0, 3735)) - // Standard Error: 123 - .saturating_add(Weight::from_parts(644, 0).saturating_mul(n.into())) + // Standard Error: 116 + .saturating_add(Weight::from_parts(1_428, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -479,25 +489,25 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_870_000 picoseconds. - Weight::from_parts(6_253_000, 0) + // Minimum execution time: 3_608_000 picoseconds. + Weight::from_parts(3_801_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn update_roles() -> Weight { // Proof Size summary in bytes: - // Measured: `497` - // Estimated: `3685` - // Minimum execution time: 18_290_000 picoseconds. - Weight::from_parts(18_961_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 16_053_000 picoseconds. + Weight::from_parts(16_473_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -516,16 +526,16 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1942` + // Measured: `1901` // Estimated: `4556` - // Minimum execution time: 63_708_000 picoseconds. - Weight::from_parts(65_570_000, 0) + // Minimum execution time: 57_251_000 picoseconds. 
+ Weight::from_parts(59_390_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -534,37 +544,49 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn set_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `736` - // Estimated: `3685` - // Minimum execution time: 34_291_000 picoseconds. - Weight::from_parts(34_767_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `770` + // Estimated: `3719` + // Minimum execution time: 29_888_000 picoseconds. + Weight::from_parts(31_056_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_commission_max() -> Weight { // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `3685` - // Minimum execution time: 18_406_000 picoseconds. - Weight::from_parts(18_999_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `538` + // Estimated: `3719` + // Minimum execution time: 15_769_000 picoseconds. + Weight::from_parts(16_579_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn set_commission_change_rate() -> Weight { // Proof Size summary in bytes: - // Measured: `497` - // Estimated: `3685` - // Minimum execution time: 18_440_000 picoseconds. - Weight::from_parts(19_230_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 15_385_000 picoseconds. 
+ Weight::from_parts(16_402_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn set_commission_claim_permission() -> Weight { + // Proof Size summary in bytes: + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 14_965_000 picoseconds. + Weight::from_parts(15_548_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -576,14 +598,14 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `508` // Estimated: `4182` - // Minimum execution time: 14_310_000 picoseconds. - Weight::from_parts(14_681_000, 0) + // Minimum execution time: 13_549_000 picoseconds. + Weight::from_parts(14_307_000, 0) .saturating_add(Weight::from_parts(0, 4182)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -592,16 +614,16 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `934` - // Estimated: `3685` - // Minimum execution time: 64_526_000 picoseconds. - Weight::from_parts(66_800_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `968` + // Estimated: `3719` + // Minimum execution time: 60_153_000 picoseconds. + Weight::from_parts(61_369_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -610,10 +632,10 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) fn adjust_pool_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `866` + // Measured: `867` // Estimated: `4764` - // Minimum execution time: 73_472_000 picoseconds. - Weight::from_parts(74_698_000, 0) + // Minimum execution time: 64_985_000 picoseconds. 
+ Weight::from_parts(66_616_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs index 3c4542c6d6fef6769c16a2d6ada2584b7a1e0798..1ecd44747ef5140b0f83f5226d8368a9231085d0 100644 --- a/polkadot/runtime/westend/src/weights/pallet_staking.rs +++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `894` // Estimated: `4764` - // Minimum execution time: 39_950_000 picoseconds. - Weight::from_parts(41_107_000, 0) + // Minimum execution time: 38_316_000 picoseconds. + Weight::from_parts(40_022_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(4)) @@ -84,8 +84,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1921` // Estimated: `8877` - // Minimum execution time: 83_828_000 picoseconds. - Weight::from_parts(85_733_000, 0) + // Minimum execution time: 81_027_000 picoseconds. + Weight::from_parts(83_964_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) @@ -112,8 +112,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2128` // Estimated: `8877` - // Minimum execution time: 89_002_000 picoseconds. - Weight::from_parts(91_556_000, 0) + // Minimum execution time: 85_585_000 picoseconds. + Weight::from_parts(87_256_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -133,11 +133,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1075` // Estimated: `4764` - // Minimum execution time: 40_839_000 picoseconds. - Weight::from_parts(42_122_428, 0) + // Minimum execution time: 39_520_000 picoseconds. + Weight::from_parts(41_551_548, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 884 - .saturating_add(Weight::from_parts(46_036, 0).saturating_mul(s.into())) + // Standard Error: 1_094 + .saturating_add(Weight::from_parts(50_426, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -174,11 +174,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_244_000 picoseconds. - Weight::from_parts(91_199_964, 0) + // Minimum execution time: 82_915_000 picoseconds. 
+ Weight::from_parts(89_597_160, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 3_381 - .saturating_add(Weight::from_parts(1_327_289, 0).saturating_mul(s.into())) + // Standard Error: 3_146 + .saturating_add(Weight::from_parts(1_228_061, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -210,8 +210,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1301` // Estimated: `4556` - // Minimum execution time: 49_693_000 picoseconds. - Weight::from_parts(50_814_000, 0) + // Minimum execution time: 48_070_000 picoseconds. + Weight::from_parts(49_226_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,10 +226,10 @@ impl pallet_staking::WeightInfo for WeightInfo { // Measured: `1243 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` // Minimum execution time: 29_140_000 picoseconds. - Weight::from_parts(28_309_627, 0) + Weight::from_parts(30_225_579, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 5_780 - .saturating_add(Weight::from_parts(6_509_869, 0).saturating_mul(k.into())) + // Standard Error: 5_394 + .saturating_add(Weight::from_parts(6_401_367, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -262,11 +262,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1797 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 61_377_000 picoseconds. - Weight::from_parts(58_805_232, 0) + // Minimum execution time: 59_287_000 picoseconds. + Weight::from_parts(58_285_052, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 14_197 - .saturating_add(Weight::from_parts(4_090_197, 0).saturating_mul(n.into())) + // Standard Error: 14_556 + .saturating_add(Weight::from_parts(3_863_008, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -290,8 +290,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1581` // Estimated: `6248` - // Minimum execution time: 52_736_000 picoseconds. - Weight::from_parts(54_573_000, 0) + // Minimum execution time: 51_035_000 picoseconds. + Weight::from_parts(52_163_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) @@ -306,12 +306,28 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `865` // Estimated: `4556` - // Minimum execution time: 16_496_000 picoseconds. - Weight::from_parts(17_045_000, 0) + // Minimum execution time: 15_809_000 picoseconds. 
+ Weight::from_parts(16_451_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `932` + // Estimated: `4556` + // Minimum execution time: 21_695_000 picoseconds. + Weight::from_parts(22_351_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:2) @@ -320,8 +336,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `865` // Estimated: `4556` - // Minimum execution time: 19_339_000 picoseconds. - Weight::from_parts(20_187_000, 0) + // Minimum execution time: 18_548_000 picoseconds. + Weight::from_parts(19_205_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) @@ -332,8 +348,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_340_000 picoseconds. - Weight::from_parts(2_551_000, 0) + // Minimum execution time: 2_193_000 picoseconds. + Weight::from_parts(2_408_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -343,8 +359,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_483_000 picoseconds. - Weight::from_parts(8_101_000, 0) + // Minimum execution time: 7_475_000 picoseconds. + Weight::from_parts(7_874_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -354,8 +370,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_773_000 picoseconds. - Weight::from_parts(8_610_000, 0) + // Minimum execution time: 7_393_000 picoseconds. + Weight::from_parts(7_643_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -365,8 +381,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_577_000 picoseconds. - Weight::from_parts(7_937_000, 0) + // Minimum execution time: 7_474_000 picoseconds. + Weight::from_parts(7_814_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -377,13 +393,33 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_522_000 picoseconds. - Weight::from_parts(2_735_307, 0) + // Minimum execution time: 2_358_000 picoseconds. 
+ Weight::from_parts(2_589_423, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 38 - .saturating_add(Weight::from_parts(10_553, 0).saturating_mul(v.into())) + // Standard Error: 81 + .saturating_add(Weight::from_parts(13_612, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Staking::Ledger` (r:751 w:1502) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:751 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:0 w:751) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 751]`. + fn deprecate_controller_batch(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `668 + i * (148 ±0)` + // Estimated: `990 + i * (3566 ±0)` + // Minimum execution time: 1_934_000 picoseconds. + Weight::from_parts(2_070_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 19_129 + .saturating_add(Weight::from_parts(13_231_580, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -417,11 +453,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 82_547_000 picoseconds. - Weight::from_parts(89_373_781, 0) + // Minimum execution time: 80_290_000 picoseconds. + Weight::from_parts(87_901_664, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 3_589 - .saturating_add(Weight::from_parts(1_258_878, 0).saturating_mul(s.into())) + // Standard Error: 2_960 + .saturating_add(Weight::from_parts(1_195_050, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -434,56 +470,14 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `66639` // Estimated: `70104` - // Minimum execution time: 134_619_000 picoseconds. - Weight::from_parts(1_194_949_665, 0) + // Minimum execution time: 132_682_000 picoseconds. 
+ Weight::from_parts(932_504_297, 0) .saturating_add(Weight::from_parts(0, 70104)) - // Standard Error: 76_719 - .saturating_add(Weight::from_parts(6_455_953, 0).saturating_mul(s.into())) + // Standard Error: 57_593 + .saturating_add(Weight::from_parts(4_829_705, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) - /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) - /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:65 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:66 w:66) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::ClaimedRewards` (r:1 w:1) - /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) - /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) - /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) - /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:65 w:0) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 64]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `6895 + n * (156 ±0)` - // Estimated: `9802 + n * (2603 ±0)` - // Minimum execution time: 114_338_000 picoseconds. 
- Weight::from_parts(138_518_124, 0) - .saturating_add(Weight::from_parts(0, 9802)) - // Standard Error: 53_621 - .saturating_add(Weight::from_parts(25_676_781, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(5)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } /// Storage: `Staking::Bonded` (r:65 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:65 w:65) @@ -517,11 +511,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `8249 + n * (396 ±0)` // Estimated: `10779 + n * (3774 ±3)` - // Minimum execution time: 132_719_000 picoseconds. - Weight::from_parts(170_505_880, 0) + // Minimum execution time: 129_091_000 picoseconds. + Weight::from_parts(166_186_167, 0) .saturating_add(Weight::from_parts(0, 10779)) - // Standard Error: 32_527 - .saturating_add(Weight::from_parts(42_453_136, 0).saturating_mul(n.into())) + // Standard Error: 36_242 + .saturating_add(Weight::from_parts(40_467_481, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4)) @@ -545,11 +539,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1922 + l * (5 ±0)` // Estimated: `8877` - // Minimum execution time: 78_438_000 picoseconds. - Weight::from_parts(81_774_734, 0) + // Minimum execution time: 77_461_000 picoseconds. + Weight::from_parts(80_118_021, 0) .saturating_add(Weight::from_parts(0, 8877)) - // Standard Error: 3_706 - .saturating_add(Weight::from_parts(51_358, 0).saturating_mul(l.into())) + // Standard Error: 4_343 + .saturating_add(Weight::from_parts(59_113, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -584,11 +578,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_129_000 picoseconds. - Weight::from_parts(94_137_611, 0) + // Minimum execution time: 89_366_000 picoseconds. + Weight::from_parts(91_964_557, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 4_141 - .saturating_add(Weight::from_parts(1_283_823, 0).saturating_mul(s.into())) + // Standard Error: 2_799 + .saturating_add(Weight::from_parts(1_206_123, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -633,14 +627,14 @@ impl pallet_staking::WeightInfo for WeightInfo { fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (716 ±0) + v * (3594 ±0)` - // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 527_896_000 picoseconds. - Weight::from_parts(533_325_000, 0) + // Estimated: `456136 + n * (3566 ±4) + v * (3566 ±40)` + // Minimum execution time: 520_430_000 picoseconds. 
+ Weight::from_parts(527_125_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 2_064_813 - .saturating_add(Weight::from_parts(68_484_503, 0).saturating_mul(v.into())) - // Standard Error: 205_747 - .saturating_add(Weight::from_parts(18_833_735, 0).saturating_mul(n.into())) + // Standard Error: 1_974_092 + .saturating_add(Weight::from_parts(64_885_491, 0).saturating_mul(v.into())) + // Standard Error: 196_707 + .saturating_add(Weight::from_parts(18_100_326, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(184)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -671,13 +665,13 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `3108 + n * (907 ±0) + v * (391 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 35_302_472_000 picoseconds. - Weight::from_parts(35_651_169_000, 0) + // Minimum execution time: 33_917_323_000 picoseconds. + Weight::from_parts(34_173_565_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 412_098 - .saturating_add(Weight::from_parts(5_172_265, 0).saturating_mul(v.into())) - // Standard Error: 412_098 - .saturating_add(Weight::from_parts(4_142_772, 0).saturating_mul(n.into())) + // Standard Error: 367_135 + .saturating_add(Weight::from_parts(4_696_840, 0).saturating_mul(v.into())) + // Standard Error: 367_135 + .saturating_add(Weight::from_parts(3_889_075, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(179)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -694,11 +688,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `946 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_522_650_000 picoseconds. - Weight::from_parts(97_022_833, 0) + // Minimum execution time: 2_447_197_000 picoseconds. + Weight::from_parts(13_003_614, 0) .saturating_add(Weight::from_parts(0, 3510)) - // Standard Error: 6_751 - .saturating_add(Weight::from_parts(4_990_018, 0).saturating_mul(v.into())) + // Standard Error: 9_738 + .saturating_add(Weight::from_parts(4_953_442, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -719,8 +713,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_833_000 picoseconds. - Weight::from_parts(4_108_000, 0) + // Minimum execution time: 3_714_000 picoseconds. + Weight::from_parts(3_956_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -740,11 +734,13 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_520_000 picoseconds. - Weight::from_parts(3_686_000, 0) + // Minimum execution time: 3_361_000 picoseconds. 
+ Weight::from_parts(3_632_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(6))
 }
+ /// Storage: `Staking::Bonded` (r:1 w:0)
+ /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 /// Storage: `Staking::Ledger` (r:1 w:0)
 /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 /// Storage: `Staking::Nominators` (r:1 w:1)
 /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
@@ -767,12 +763,12 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 fn chill_other() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1704`
+ // Measured: `1870`
 // Estimated: `6248`
- // Minimum execution time: 63_983_000 picoseconds.
- Weight::from_parts(66_140_000, 0)
+ // Minimum execution time: 65_329_000 picoseconds.
+ Weight::from_parts(67_247_000, 0)
 .saturating_add(Weight::from_parts(0, 6248))
- .saturating_add(T::DbWeight::get().reads(11))
+ .saturating_add(T::DbWeight::get().reads(12))
 .saturating_add(T::DbWeight::get().writes(6))
 }
 /// Storage: `Staking::MinCommission` (r:1 w:0)
@@ -783,8 +779,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `658`
 // Estimated: `3510`
- // Minimum execution time: 11_830_000 picoseconds.
- Weight::from_parts(12_210_000, 0)
+ // Minimum execution time: 11_760_000 picoseconds.
+ Weight::from_parts(12_095_000, 0)
 .saturating_add(Weight::from_parts(0, 3510))
 .saturating_add(T::DbWeight::get().reads(2))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -795,8 +791,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 2_364_000 picoseconds.
- Weight::from_parts(2_555_000, 0)
+ // Minimum execution time: 2_256_000 picoseconds.
+ Weight::from_parts(2_378_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs
index cca4bdbd91e309f29d5d09c730c1a95c4e34138d..493acd0f9e7bdfbfd1e716b7e82a474643f1050b 100644
--- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs
@@ -50,6 +50,10 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_xcm`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
+ fn transfer_assets() -> Weight {
+ // TODO: run benchmarks
+ Weight::zero()
+ }
 /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
 /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
 /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
diff --git a/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs
new file mode 100644
index 0000000000000000000000000000000000000000..cec357453b67be400c0191ac7d5c12e6961a4bee
--- /dev/null
+++ b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs
@@ -0,0 +1,97 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `runtime_common::identity_migrator`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/release/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=2
+// --repeat=1
+// --pallet=runtime_common::identity_migrator
+// --extrinsic=*
+// --output=./migrator-release.rs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `runtime_common::identity_migrator`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> runtime_common::identity_migrator::WeightInfo for WeightInfo<T> {
+ /// Storage: `Identity::IdentityOf` (r:1 w:1)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SubsOf` (r:1 w:1)
+ /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:2 w:2)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
+ /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Identity::SuperOf` (r:0 w:100)
+ /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`)
+ /// The range of component `r` is `[0, 20]`.
+ /// The range of component `s` is `[0, 100]`.
+ fn reap_identity(r: u32, s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `7292 + r * (8 ±0) + s * (32 ±0)`
+ // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)`
+ // Minimum execution time: 163_756_000 picoseconds.
+ Weight::from_parts(158_982_500, 0)
+ .saturating_add(Weight::from_parts(0, 11003))
+ // Standard Error: 1_143_629
+ .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into()))
+ // Standard Error: 228_725
+ .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(8))
+ .saturating_add(T::DbWeight::get().writes(5))
+ .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
+ .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into()))
+ .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into()))
+ }
+ /// Storage: `Identity::IdentityOf` (r:1 w:1)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SubsOf` (r:1 w:1)
+ /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`)
+ fn poke_deposit() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `7229`
+ // Estimated: `11003`
+ // Minimum execution time: 137_570_000 picoseconds.
+ Weight::from_parts(137_570_000, 0)
+ .saturating_add(Weight::from_parts(0, 11003))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(3))
+ }
+}
diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ac0f05301b486dbdbb8c0ca004e195ab47171ff3
--- /dev/null
+++ b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs
@@ -0,0 +1,91 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `runtime_parachains::assigner_on_demand`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --pallet=runtime_parachains::assigner_on_demand
+// --chain=rococo-dev
+// --header=./file_header.txt
+// --output=./runtime/rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `runtime_parachains::assigner_on_demand`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo<T> {
+ /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0)
+ /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// The range of component `s` is `[1, 9999]`.
+ fn place_order_keep_alive(s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `297 + s * (4 ±0)`
+ // Estimated: `3762 + s * (4 ±0)`
+ // Minimum execution time: 33_522_000 picoseconds.
+ Weight::from_parts(35_436_835, 0)
+ .saturating_add(Weight::from_parts(0, 3762))
+ // Standard Error: 129
+ .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(1))
+ .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
+ }
+ /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0)
+ /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// The range of component `s` is `[1, 9999]`.
+ fn place_order_allow_death(s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `297 + s * (4 ±0)`
+ // Estimated: `3762 + s * (4 ±0)`
+ // Minimum execution time: 33_488_000 picoseconds.
+ Weight::from_parts(34_848_934, 0)
+ .saturating_add(Weight::from_parts(0, 3762))
+ // Standard Error: 143
+ .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(1))
+ .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
+ }
+}
diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs
index 585dc9058f21ec29b15040470d4a4c663c18d303..3a4813b667c68ea6400c9ad58cea8fd1bfece5d0 100644
--- a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs
+++ b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs
@@ -17,9 +17,9 @@ //!
Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::configuration // --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,11 +56,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_616_000 picoseconds. - Weight::from_parts(9_961_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 8_065_000 picoseconds. + Weight::from_parts(8_389_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -72,11 +72,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_587_000 picoseconds. - Weight::from_parts(9_964_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 8_038_000 picoseconds. + Weight::from_parts(8_463_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -88,11 +88,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_650_000 picoseconds. - Weight::from_parts(9_960_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_843_000 picoseconds. 
+ Weight::from_parts(8_216_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,11 +114,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_545_000 picoseconds. - Weight::from_parts(9_845_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_969_000 picoseconds. + Weight::from_parts(8_362_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,11 +130,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 10_258_000 picoseconds. - Weight::from_parts(10_607_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_084_000 picoseconds. + Weight::from_parts(10_451_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -146,11 +146,27 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_perbill() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_502_000 picoseconds. - Weight::from_parts(9_902_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_948_000 picoseconds. + Weight::from_parts(8_268_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_257_000 picoseconds. 
+ Weight::from_parts(10_584_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs new file mode 100644 index 0000000000000000000000000000000000000000..d9f2d45207b923e3afe661a6021629cb8441970e --- /dev/null +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs @@ -0,0 +1,73 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_parachains::coretime` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=runtime_common::coretime +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +use runtime_parachains::configuration::{self, WeightInfo as ConfigWeightInfo}; + +/// Weight functions for `runtime_common::coretime`. +pub struct WeightInfo(PhantomData); +impl runtime_parachains::coretime::WeightInfo for WeightInfo { + fn request_core_count() -> Weight { + ::WeightInfo::set_config_with_u32() + } + /// Storage: `CoreTimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoreTimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoreTimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoreTimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 100]`. + fn assign_core(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3541` + // Minimum execution time: 6_275_000 picoseconds. 
+ Weight::from_parts(6_883_543, 0) + .saturating_add(Weight::from_parts(0, 3541)) + // Standard Error: 202 + .saturating_add(Weight::from_parts(15_028, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 64e07317fc749d8e088c5d5bd810cc3d41aee08c..506df3025fdb5406b6c91866ed33e2cb44eced50 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -21,7 +21,7 @@ use super::{ GeneralAdmin, ParaId, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, StakingAdmin, TransactionByteFee, Treasury, WeightToFee, XcmPallet, }; - +use crate::governance::pallet_custom_origins::Treasurer; use frame_support::{ match_types, parameter_types, traits::{Everything, Nothing}, @@ -34,17 +34,21 @@ use runtime_common::{ }; use sp_core::ConstU32; use westend_runtime_constants::{ - currency::CENTS, system_parachain::*, xcm::body::FELLOWSHIP_ADMIN_INDEX, + currency::CENTS, + system_parachain::*, + xcm::body::{FELLOWSHIP_ADMIN_INDEX, TREASURER_INDEX}, }; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter as XcmCurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, - DescribeFamily, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + ChildParachainConvertsVia, DescribeBodyTerminal, DescribeFamily, HashedDescription, IsConcrete, + MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -70,6 +74,7 @@ pub type LocationConverter = ( HashedDescription>, ); +#[allow(deprecated)] pub type LocalAssetTransactor = XcmCurrencyAdapter< // Use this currency: Balances, @@ -119,11 +124,6 @@ parameter_types! { pub const MaxAssetsIntoHolding: u32 = 64; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); -} - pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, @@ -203,6 +203,8 @@ parameter_types! { pub const StakingAdminBodyId: BodyId = BodyId::Defense; // FellowshipAdmin pluralistic body. pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); + // `Treasurer` pluralistic body. + pub const TreasurerBodyId: BodyId = BodyId::Index(TREASURER_INDEX); } /// Type to convert the `GeneralAdmin` origin to a Plurality `MultiLocation` value. @@ -225,6 +227,9 @@ pub type StakingAdminToPlurality = pub type FellowshipAdminToPlurality = OriginToPluralityVoice; +/// Type to convert the `Treasurer` origin to a Plurality `MultiLocation` value. 
+pub type TreasurerToPlurality = OriginToPluralityVoice; + /// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an /// interior location of this chain for a destination chain. pub type LocalPalletOriginToLocation = ( @@ -234,6 +239,8 @@ pub type LocalPalletOriginToLocation = ( StakingAdminToPlurality, // FellowshipAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. FellowshipAdminToPlurality, + // `Treasurer` origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + TreasurerToPlurality, ); impl pallet_xcm::Config for Runtime { @@ -265,7 +272,5 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/scripts/list-syscalls/execute-worker-syscalls b/polkadot/scripts/list-syscalls/execute-worker-syscalls index 4a7a66181299a6033035e596cb935cd397af80c5..349af783cf1a1340d9467187fde21e1829ec6248 100644 --- a/polkadot/scripts/list-syscalls/execute-worker-syscalls +++ b/polkadot/scripts/list-syscalls/execute-worker-syscalls @@ -16,6 +16,7 @@ 16 (ioctl) 19 (readv) 20 (writev) +22 (pipe) 24 (sched_yield) 25 (mremap) 28 (madvise) @@ -25,7 +26,9 @@ 45 (recvfrom) 46 (sendmsg) 56 (clone) +57 (fork) 60 (exit) +61 (wait4) 62 (kill) 72 (fcntl) 79 (getcwd) @@ -36,6 +39,7 @@ 89 (readlink) 96 (gettimeofday) 97 (getrlimit) +98 (getrusage) 99 (sysinfo) 102 (getuid) 110 (getppid) @@ -47,6 +51,7 @@ 158 (arch_prctl) 165 (mount) 166 (umount2) +186 (gettid) 200 (tkill) 202 (futex) 204 (sched_getaffinity) @@ -60,6 +65,7 @@ 263 (unlinkat) 272 (unshare) 273 (set_robust_list) +293 (pipe2) 302 (prlimit64) 318 (getrandom) 319 (memfd_create) diff --git a/polkadot/scripts/list-syscalls/prepare-worker-syscalls b/polkadot/scripts/list-syscalls/prepare-worker-syscalls index cab58e06692bbd46d48d36ce1ef9525f9b2e792c..05281b61591a7f9e45efa7d7bb6514fbe118f130 100644 --- a/polkadot/scripts/list-syscalls/prepare-worker-syscalls +++ b/polkadot/scripts/list-syscalls/prepare-worker-syscalls @@ -16,6 +16,7 @@ 16 (ioctl) 19 (readv) 20 (writev) +22 (pipe) 24 (sched_yield) 25 (mremap) 28 (madvise) @@ -25,7 +26,9 @@ 45 (recvfrom) 46 (sendmsg) 56 (clone) +57 (fork) 60 (exit) +61 (wait4) 62 (kill) 72 (fcntl) 79 (getcwd) @@ -48,6 +51,7 @@ 158 (arch_prctl) 165 (mount) 166 (umount2) +186 (gettid) 200 (tkill) 202 (futex) 203 (sched_setaffinity) @@ -62,6 +66,7 @@ 263 (unlinkat) 272 (unshare) 273 (set_robust_list) +293 (pipe2) 302 (prlimit64) 309 (getcpu) 318 (getrandom) diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index d2518591d26c8062b87f4b51ab20a5400012c544..9a313882da71ca2c4dc9a7c3f171bc7bab1f05bf 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true description = "Stores messages other authorities issue about candidates in Polkadot." 
+[lints] +workspace = true + [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-core = { path = "../../substrate/primitives/core" } diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index ed29001aa4f47e98522e45b2bb435f1c34097081..97f15f02e358f2732254d099b4030c4643a1d4f3 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -6,8 +6,11 @@ edition.workspace = true license.workspace = true description = "CLI to generate voter bags for Polkadot runtimes" +[lints] +workspace = true + [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } generate-bags = { path = "../../../substrate/utils/frame/generate-bags" } sp-io = { path = "../../../substrate/primitives/io" } diff --git a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml index e305edc039b5ab68c91bf587f322678c91e02cc1..6b8c4be38a13e7e2435060932b48e45412345849 100644 --- a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] westend-runtime = { path = "../../../runtime/westend" } westend-runtime-constants = { path = "../../../runtime/westend/constants" } @@ -15,6 +18,6 @@ sp-tracing = { path = "../../../../substrate/primitives/tracing" } frame-system = { path = "../../../../substrate/frame/system" } sp-core = { path = "../../../../substrate/primitives/core" } -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } log = "0.4.17" tokio = { version = "1.24.2", features = ["macros"] } diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 60c27f7fcfc34cd54d69b16007e5d535d5911b50..235a4b204c9d80c0c3128e01fb377cc6c1ede323 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -6,15 +6,19 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] -bounded-collections = { version = "0.1.8", default-features = false, features = ["serde"] } -derivative = { version = "2.2.0", default-features = false, features = [ "use_core" ] } +bounded-collections = { version = "0.1.9", default-features = false, features = ["serde"] } +derivative = { version = "2.2.0", default-features = false, features = ["use_core"] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len" ] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } -serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } +schemars = { version = "0.8.13", default-features = true, optional = true } xcm-procedural = { path = "procedural" } environmental = { version = "1.1.4", default-features = false } @@ -24,7 +28,7 @@ hex = "0.4.3" hex-literal = "0.4.1" [features] -default = [ "std" 
] +default = ["std"] wasm-api = [] std = [ "bounded-collections/std", @@ -35,3 +39,4 @@ std = [ "serde/std", "sp-weights/std", ] +json-schema = ["bounded-collections/json-schema", "dep:schemars", "sp-weights/json-schema"] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 5be0bbe4ae5ad407aa465191402e4c570f40bc71..d9cc7e34c06c22e5a03a994466622a38449e73a7 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -6,19 +6,22 @@ license.workspace = true version = "1.0.0" description = "Benchmarks for the XCM pallet" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } log = "0.4.17" @@ -36,7 +39,7 @@ polkadot-runtime-common = { path = "../../runtime/common" } polkadot-primitives = { path = "../../primitives" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index 9adc706fc18ae9ecd1540a820f79085573e52d1b..43892c31c7cd8745e2cf178a6ce7a1c74ebc8ec0 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -45,6 +45,8 @@ parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); @@ -101,6 +103,7 @@ impl xcm_executor::traits::MatchesFungible for MatchAnyFungible { } // Use balances as the asset transactor. 
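// `xcm_builder::CurrencyAdapter` has been deprecated upstream, so the mock adds
// `#[allow(deprecated)]` below while continuing to use it for now.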
+#[allow(deprecated)] pub type AssetTransactor = xcm_builder::CurrencyAdapter< Balances, MatchAnyFungible, diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 4a997666027f07cfc25469ee9150d318c85446ff..50a7fe45e23122b6f5d68d5f55094c0d2ce2a9d2 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -175,7 +175,7 @@ benchmarks! { descend_origin { let mut executor = new_executor::(Default::default()); let who = X2(OnlyChild, OnlyChild); - let instruction = Instruction::DescendOrigin(who.clone()); + let instruction = Instruction::DescendOrigin(who); let xcm = Xcm(vec![instruction]); } : { executor.bench_process(xcm)?; @@ -242,7 +242,7 @@ benchmarks! { &origin, assets.clone().into(), &XcmContext { - origin: Some(origin.clone()), + origin: Some(origin), message_id: [0; 32], topic: None, }, @@ -279,7 +279,7 @@ benchmarks! { let origin = T::subscribe_origin()?; let query_id = Default::default(); let max_response_weight = Default::default(); - let mut executor = new_executor::(origin.clone()); + let mut executor = new_executor::(origin); let instruction = Instruction::SubscribeVersion { query_id, max_response_weight }; let xcm = Xcm(vec![instruction]); } : { @@ -299,14 +299,14 @@ benchmarks! { query_id, max_response_weight, &XcmContext { - origin: Some(origin.clone()), + origin: Some(origin), message_id: [0; 32], topic: None, }, ).map_err(|_| "Could not start subscription")?; assert!(::SubscriptionService::is_subscribed(&origin)); - let mut executor = new_executor::(origin.clone()); + let mut executor = new_executor::(origin); let instruction = Instruction::UnsubscribeVersion; let xcm = Xcm(vec![instruction]); } : { @@ -413,8 +413,9 @@ benchmarks! { executor.set_holding(expected_assets_in_holding.into()); } + let valid_pallet = T::valid_pallet(); let instruction = Instruction::QueryPallet { - module_name: b"frame_system".to_vec(), + module_name: valid_pallet.module_name.as_bytes().to_vec(), response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); @@ -428,13 +429,13 @@ benchmarks! { expect_pallet { let mut executor = new_executor::(Default::default()); - + let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { - index: 0, - name: b"System".to_vec(), - module_name: b"frame_system".to_vec(), - crate_major: 4, - min_crate_minor: 0, + index: valid_pallet.index as u32, + name: valid_pallet.name.as_bytes().to_vec(), + module_name: valid_pallet.module_name.as_bytes().to_vec(), + crate_major: valid_pallet.crate_version.major.into(), + min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); }: { @@ -537,7 +538,7 @@ benchmarks! { let mut executor = new_executor::(origin); - let instruction = Instruction::UniversalOrigin(alias.clone()); + let instruction = Instruction::UniversalOrigin(alias); let xcm = Xcm(vec![instruction]); }: { executor.bench_process(xcm)?; @@ -631,13 +632,13 @@ benchmarks! { let (unlocker, owner, asset) = T::unlockable_asset()?; - let mut executor = new_executor::(unlocker.clone()); + let mut executor = new_executor::(unlocker); // We first place the asset in lock first... ::AssetLocker::prepare_lock( unlocker, asset.clone(), - owner.clone(), + owner, ) .map_err(|_| BenchmarkError::Skip)? .enact() @@ -657,13 +658,13 @@ benchmarks! 
{ let (unlocker, owner, asset) = T::unlockable_asset()?; - let mut executor = new_executor::(unlocker.clone()); + let mut executor = new_executor::(unlocker); // We first place the asset in lock first... ::AssetLocker::prepare_lock( unlocker, asset.clone(), - owner.clone(), + owner, ) .map_err(|_| BenchmarkError::Skip)? .enact() @@ -685,9 +686,9 @@ benchmarks! { // We first place the asset in lock first... ::AssetLocker::prepare_lock( - locker.clone(), + locker, asset.clone(), - owner.clone(), + owner, ) .map_err(|_| BenchmarkError::Skip)? .enact() @@ -738,7 +739,7 @@ benchmarks! { let mut executor = new_executor::(origin); - let instruction = Instruction::AliasOrigin(target.clone()); + let instruction = Instruction::AliasOrigin(target); let xcm = Xcm(vec![instruction]); }: { executor.bench_process(xcm)?; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index 710ff0d801927854a35a9de27a7e51a6803b809f..6efd2304e281f04563a47b096a53723174bb32a3 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -51,6 +51,7 @@ parameter_types! { frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index cbdfa8d0112cbe7fac6da7160c199f5f4eeda3d6..11f7bba19a9873ec5d21eebec237097958bc461e 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -91,6 +91,18 @@ pub mod pallet { /// /// If set to `Err`, benchmarks which rely on a universal alias will be skipped. fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError>; + + /// Returns a valid pallet info for `ExpectPallet` or `QueryPallet` benchmark. + /// + /// By default returns `frame_system::Pallet` info with expected pallet index `0`. 
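/// As an illustrative example (not part of this change), a runtime that wants the
/// `ExpectPallet`/`QueryPallet` benchmarks to target its `Balances` pallet instead could
/// override the default roughly as follows:
///
/// fn valid_pallet() -> frame_support::traits::PalletInfoData {
///     frame_support::traits::PalletInfoData {
///         index: <Balances as frame_support::traits::PalletInfoAccess>::index(),
///         name: <Balances as frame_support::traits::PalletInfoAccess>::name(),
///         module_name: <Balances as frame_support::traits::PalletInfoAccess>::module_name(),
///         crate_version: <Balances as frame_support::traits::PalletInfoAccess>::crate_version(),
///     }
/// }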
+ fn valid_pallet() -> frame_support::traits::PalletInfoData { + frame_support::traits::PalletInfoData { + index: as frame_support::traits::PalletInfoAccess>::index(), + name: as frame_support::traits::PalletInfoAccess>::name(), + module_name: as frame_support::traits::PalletInfoAccess>::module_name(), + crate_version: as frame_support::traits::PalletInfoAccess>::crate_version(), + } + } } #[pallet::pallet] diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 6b5d5e75de82412eaa5c7f6669894c8acba4abb7..220aad013982b10e01d366031dc980029c03cb97 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -6,32 +6,38 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] bounded-collections = { version = "0.1.8", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +serde = { version = "1.0.193", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } +# marked optional, used in benchmarking +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false, optional = true } + [dev-dependencies] -pallet-balances = { path = "../../../substrate/frame/balances" } +pallet-assets = { path = "../../../substrate/frame/assets" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } [features] -default = [ "std" ] +default = ["std"] std = [ "bounded-collections/std", "codec/std", @@ -39,6 +45,7 @@ std = [ "frame-support/std", "frame-system/std", "log/std", + "pallet-balances/std", "scale-info/std", "serde", "sp-core/std", @@ -53,6 +60,7 @@ runtime-benchmarks = [ 
"frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", @@ -63,6 +71,7 @@ runtime-benchmarks = [ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 3eecbfec5180ba3405dc9918f9bb3c78889a7701..28a198f40a052bc06ca161cbf05c91cecba194bd 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -16,15 +16,73 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; -use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; -use frame_support::weights::Weight; +use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; +use frame_support::{traits::Currency, weights::Weight}; use frame_system::RawOrigin; use sp_std::prelude::*; use xcm::{latest::prelude::*, v2}; type RuntimeOrigin = ::RuntimeOrigin; +// existential deposit multiplier +const ED_MULTIPLIER: u32 = 100; + +/// Pallet we're benchmarking here. +pub struct Pallet(crate::Pallet); + +/// Trait that must be implemented by runtime to be able to benchmark pallet properly. +pub trait Config: crate::Config { + /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. + /// + /// If `None`, the benchmarks that depend on a reachable destination will be skipped. + fn reachable_dest() -> Option { + None + } + + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// teleported to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will default to `Weight::MAX`. + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + None + } + + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// reserve-transferred to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will default to `Weight::MAX`. + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + None + } + + /// Sets up a complex transfer (usually consisting of a teleport and reserve-based transfer), so + /// that runtime can properly benchmark `transfer_assets()` extrinsic. Should return a tuple + /// `(MultiAsset, u32, MultiLocation, dyn FnOnce())` representing the assets to transfer, the + /// `u32` index of the asset to be used for fees, the destination chain for the transfer, and a + /// `verify()` closure to verify the intended transfer side-effects. + /// + /// Implementation should make sure the provided assets can be transacted by the runtime, there + /// are enough balances in the involved accounts, and that `dest` is reachable/connected. + /// + /// Used only in benchmarks. + /// + /// If `None`, the benchmarks that depend on this will default to `Weight::MAX`. + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + None + } +} + benchmarks! 
{ + where_clause { + where + T: pallet_balances::Config, + ::Balance: From + Into, + } send { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -32,7 +90,7 @@ benchmarks! { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let msg = Xcm(vec![ClearOrigin]); - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_dest: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); @@ -40,44 +98,99 @@ benchmarks! { }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) teleport_assets { - let asset: MultiAsset = (Here, 10).into(); - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let (asset, destination) = T::teleportable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + let assets: MultiAssets = asset.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location, vec![asset.clone()])) { + if !T::XcmTeleportFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? 
- .into(); + let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = asset.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } reserve_transfer_assets { - let asset: MultiAsset = (Here, 10).into(); - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + let assets: MultiAssets = asset.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&(origin_location, vec![asset.clone()])) { + if !T::XcmReserveTransferFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_dest: VersionedMultiLocation = destination.into(); + let versioned_beneficiary: VersionedMultiLocation = + AccountId32 { network: None, id: recipient.into() }.into(); + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } + + transfer_assets { + let (assets, fee_index, destination, verify) = T::set_up_complex_asset_transfer().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? 
- .into(); + )?; + let caller: T::AccountId = whitelisted_caller(); + let send_origin = RawOrigin::Signed(caller.clone()); + let recipient = [0u8; 32]; + let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = asset.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0, WeightLimit::Unlimited) + verify { + // run provided verification function + verify(); + } execute { let execute_origin = @@ -89,10 +202,10 @@ benchmarks! { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let versioned_msg = VersionedXcm::from(msg); - }: _>(execute_origin, Box::new(versioned_msg), Weight::zero()) + }: _>(execute_origin, Box::new(versioned_msg), Weight::MAX) force_xcm_version { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let xcm_version = 2; @@ -101,18 +214,18 @@ benchmarks! { force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) force_subscribe_version_notify { - let versioned_loc: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_loc: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_unsubscribe_version_notify { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let versioned_loc: VersionedMultiLocation = loc.into(); - let _ = Pallet::::request_version_notify(loc); + let _ = crate::Pallet::::request_version_notify(loc); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_suspension {}: _(RawOrigin::Root, true) @@ -122,7 +235,7 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); SupportedVersion::::insert(old_version, loc, old_version); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); } migrate_version_notifiers { @@ -130,22 +243,22 @@ benchmarks! 
{ let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifiers::::insert(old_version, loc, 0); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); } already_notified_target { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), )?; let loc = VersionedMultiLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_current_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); @@ -153,7 +266,7 @@ benchmarks! { let old_version = current_version - 1; VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_target_migration_fail { @@ -167,7 +280,7 @@ benchmarks! { let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, bad_loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_version_notify_targets { @@ -176,18 +289,18 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_and_notify_old_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); let old_version = T::AdvertisedXcmVersion::get() - 1; VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } new_query { @@ -195,14 +308,14 @@ benchmarks! 
{ let timeout = 1u32.into(); let match_querier = MultiLocation::from(Here); }: { - Pallet::::new_query(responder, timeout, match_querier); + crate::Pallet::::new_query(responder, timeout, match_querier); } take_response { let responder = MultiLocation::from(Parent); let timeout = 1u32.into(); let match_querier = MultiLocation::from(Here); - let query_id = Pallet::::new_query(responder, timeout, match_querier); + let query_id = crate::Pallet::::new_query(responder, timeout, match_querier); let infos = (0 .. xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( u32::MAX, (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), @@ -211,10 +324,10 @@ benchmarks! { u32::MAX, u32::MAX, ).unwrap()).collect::>(); - Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); + crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); }: { - as QueryHandler>::take_response(query_id); + as QueryHandler>::take_response(query_id); } impl_benchmark_test_suite!( @@ -223,3 +336,36 @@ benchmarks! { crate::mock::Test ); } + +pub mod helpers { + use super::*; + pub fn native_teleport_as_asset_transfer( + native_asset_location: MultiLocation, + destination: MultiLocation, + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> + where + T: Config + pallet_balances::Config, + u128: From<::Balance>, + { + // Relay/native token can be teleported to/from AH. + let amount = T::ExistentialDeposit::get() * 100u32.into(); + let assets: MultiAssets = + MultiAsset { fun: Fungible(amount.into()), id: Concrete(native_asset_location) }.into(); + let fee_index = 0u32; + + // Give some multiple of transferred amount + let balance = amount * 10u32.into(); + let who = whitelisted_caller(); + let _ = + as frame_support::traits::Currency<_>>::make_free_balance_be(&who, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&who), balance); + + // verify transferred successfully + let verify = Box::new(move || { + // verify balance after transfer, decreased by transferred amount (and delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&who) <= balance - amount); + }); + Some((assets, fee_index, destination, verify)) + } +} diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 2d969fb870c95004c7054f6f59efdcdd18aca25e..2848527f1502f9017db2e2d69be5ab523bb31e0d 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +pub mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] @@ -55,9 +55,9 @@ use xcm_builder::{ }; use xcm_executor::{ traits::{ - CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, DropAssets, MatchesFungible, - OnResponse, Properties, QueryHandler, QueryResponseStatus, VersionChangeNotifier, - WeightBounds, + AssetTransferError, CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, + DropAssets, MatchesFungible, OnResponse, Properties, QueryHandler, QueryResponseStatus, + TransactAsset, TransferType, VersionChangeNotifier, WeightBounds, XcmAssetTransfers, }, Assets, }; @@ -66,6 +66,7 @@ pub trait WeightInfo { fn send() -> Weight; fn teleport_assets() -> Weight; fn reserve_transfer_assets() -> Weight; + fn transfer_assets() -> Weight; fn execute() -> Weight; fn force_xcm_version() -> Weight; fn force_default_xcm_version() -> Weight; @@ -98,6 +99,10 @@ impl WeightInfo for 
TestWeightInfo { Weight::from_parts(100_000_000, 0) } + fn transfer_assets() -> Weight { + Weight::from_parts(100_000_000, 0) + } + fn execute() -> Weight { Weight::from_parts(100_000_000, 0) } @@ -222,7 +227,7 @@ pub mod pallet { type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall>; + type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; @@ -275,12 +280,6 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - - /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. - /// - /// If `None`, the benchmarks that depend on a reachable destination will be skipped. - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest: Get>; } impl ExecuteControllerWeightInfo for Pallet { @@ -531,8 +530,8 @@ pub mod pallet { NoSubscription, /// The location is invalid since it already has a subscription from us. AlreadySubscribed, - /// Invalid asset for the operation. - InvalidAsset, + /// Could not check-out the assets for teleportation to the destination chain. + CannotCheckOutTeleport, /// The owner does not own (all) of the asset that they wish to do the operation on. LowBalance, /// The asset owner has too many locks on the asset. @@ -545,6 +544,16 @@ pub mod pallet { LockNotFound, /// The unlock operation cannot succeed because there are still consumers of the lock. InUse, + /// Invalid non-concrete asset. + InvalidAssetNotConcrete, + /// Invalid asset, reserve chain could not be determined for it. + InvalidAssetUnknownReserve, + /// Invalid asset, do not support remote asset reserves with different fees reserves. + InvalidAssetUnsupportedReserve, + /// Too many assets with different reserve locations have been attempted for transfer. + TooManyReserves, + /// Local XCM execution incomplete. + LocalExecutionIncomplete, } impl From for Error { @@ -557,6 +566,15 @@ pub mod pallet { } } + impl From for Error { + fn from(e: AssetTransferError) -> Self { + match e { + AssetTransferError::NotConcrete => Error::::InvalidAssetNotConcrete, + AssetTransferError::UnknownReserve => Error::::InvalidAssetUnknownReserve, + } + } + } + /// The status of a query. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum QueryStatus { @@ -892,8 +910,8 @@ pub mod pallet { /// from relay to parachain. /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will /// generally be an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to - /// pay the fee on the `dest` side. May not be empty. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the + /// fee on the `dest` chain. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. 
#[pallet::call_index(1)] @@ -907,11 +925,7 @@ pub mod pallet { let mut message = Xcm(vec![ WithdrawAsset(assets), SetFeesMode { jit_withdraw: true }, - InitiateTeleport { - assets: Wild(AllCounted(count)), - dest, - xcm: Xcm(vec![]), - }, + InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, ]); T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) } @@ -928,8 +942,19 @@ pub mod pallet { Self::do_teleport_assets(origin, dest, beneficiary, assets, fee_asset_item, Unlimited) } - /// Transfer some assets from the local chain to the sovereign account of a destination - /// chain and forward a notification XCM. + /// Transfer some assets from the local chain to the destination chain through their local, + /// destination or remote reserve. + /// + /// `assets` must have same reserve location and may not be teleportable to `dest`. + /// - `assets` have local reserve: transfer assets to sovereign account of destination + /// chain and forward a notification XCM to `dest` to mint and deposit reserve-based + /// assets to `beneficiary`. + /// - `assets` have destination reserve: burn local assets and forward a notification to + /// `dest` chain to withdraw the reserve assets from this chain's sovereign account and + /// deposit them to `beneficiary`. + /// - `assets` have remote reserve: burn local assets, forward XCM to reserve chain to move + /// reserves from this chain's SA to `dest` chain's SA, and forward another XCM to `dest` + /// to mint and deposit reserve-based assets to `beneficiary`. /// /// **This function is deprecated: Use `limited_reserve_transfer_assets` instead.** /// @@ -944,7 +969,7 @@ pub mod pallet { /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will /// generally be an `AccountId32` value. /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the - /// fee on the `dest` side. + /// fee on the `dest` (and possibly reserve) chains. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. #[pallet::call_index(2)] @@ -954,6 +979,8 @@ pub mod pallet { match (maybe_assets, maybe_dest) { (Ok(assets), Ok(dest)) => { use sp_std::vec; + // heaviest version of locally executed XCM program: equivalent in weight to + // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM let mut message = Xcm(vec![ SetFeesMode { jit_withdraw: true }, TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } @@ -998,8 +1025,14 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { + log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); let outcome = >::execute(origin, message, max_weight)?; - Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()) + let weight_used = outcome.weight_used(); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); + Error::::LocalExecutionIncomplete + })?; + Ok(Some(weight_used.saturating_add(T::WeightInfo::execute())).into()) } /// Extoll that a particular destination can be communicated with through a particular @@ -1088,8 +1121,19 @@ pub mod pallet { }) } - /// Transfer some assets from the local chain to the sovereign account of a destination - /// chain and forward a notification XCM. 
+ /// Transfer some assets from the local chain to the destination chain through their local, + /// destination or remote reserve. + /// + /// `assets` must have same reserve location and may not be teleportable to `dest`. + /// - `assets` have local reserve: transfer assets to sovereign account of destination + /// chain and forward a notification XCM to `dest` to mint and deposit reserve-based + /// assets to `beneficiary`. + /// - `assets` have destination reserve: burn local assets and forward a notification to + /// `dest` chain to withdraw the reserve assets from this chain's sovereign account and + /// deposit them to `beneficiary`. + /// - `assets` have remote reserve: burn local assets, forward XCM to reserve chain to move + /// reserves from this chain's SA to `dest` chain's SA, and forward another XCM to `dest` + /// to mint and deposit reserve-based assets to `beneficiary`. /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight @@ -1103,7 +1147,7 @@ pub mod pallet { /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will /// generally be an `AccountId32` value. /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the - /// fee on the `dest` side. + /// fee on the `dest` (and possibly reserve) chains. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. @@ -1114,6 +1158,8 @@ pub mod pallet { match (maybe_assets, maybe_dest) { (Ok(assets), Ok(dest)) => { use sp_std::vec; + // heaviest version of locally executed XCM program: equivalent in weight to + // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM let mut message = Xcm(vec![ SetFeesMode { jit_withdraw: true }, TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } @@ -1154,8 +1200,8 @@ pub mod pallet { /// from relay to parachain. /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will /// generally be an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to - /// pay the fee on the `dest` side. May not be empty. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the + /// fee on the `dest` chain. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. @@ -1206,12 +1252,162 @@ pub mod pallet { XcmExecutionSuspended::::set(suspended); Ok(()) } + + /// Transfer some assets from the local chain to the destination chain through their local, + /// destination or remote reserve, or through teleports. + /// + /// Fee payment on the destination side is made from the asset in the `assets` vector of + /// index `fee_asset_item` (hence referred to as `fees`), up to enough to pay for + /// `weight_limit` of weight. If more weight is needed than `weight_limit`, then the + /// operation will fail and the assets sent may be at risk. + /// + /// `assets` (excluding `fees`) must have same reserve location or otherwise be teleportable + /// to `dest`, no limitations imposed on `fees`. 
+ /// - for local reserve: transfer assets to sovereign account of destination chain and + /// forward a notification XCM to `dest` to mint and deposit reserve-based assets to + /// `beneficiary`. + /// - for destination reserve: burn local assets and forward a notification to `dest` chain + /// to withdraw the reserve assets from this chain's sovereign account and deposit them + /// to `beneficiary`. + /// - for remote reserve: burn local assets, forward XCM to reserve chain to move reserves + /// from this chain's SA to `dest` chain's SA, and forward another XCM to `dest` to mint + /// and deposit reserve-based assets to `beneficiary`. + /// - for teleports: burn local assets and forward XCM to `dest` chain to mint/teleport + /// assets and deposit them to `beneficiary`. + /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, + /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send + /// from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the + /// fee on the `dest` (and possibly reserve) chains. + /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay + /// fees. + /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. + #[pallet::call_index(11)] + #[pallet::weight({ + let maybe_assets: Result = (*assets.clone()).try_into(); + let maybe_dest: Result = (*dest.clone()).try_into(); + match (maybe_assets, maybe_dest) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + // heaviest version of locally executed XCM program: equivalent in weight to withdrawing fees, + // burning them, transferring rest of assets to SA, reanchoring them, extending XCM program, + // and sending onward XCM + let mut message = Xcm(vec![ + SetFeesMode { jit_withdraw: true }, + WithdrawAsset(assets.clone()), + BurnAsset(assets.clone()), + TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::transfer_assets().saturating_add(w)) + } + _ => Weight::MAX, + } + })] + pub fn transfer_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + weight_limit: WeightLimit, + ) -> DispatchResult { + let origin = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; + let beneficiary: MultiLocation = + (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; + let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + log::debug!( + target: "xcm::pallet_xcm::transfer_assets", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}, weight_limit {:?}", + origin, dest, beneficiary, assets, fee_asset_item, weight_limit, + ); + + ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); + let mut assets = assets.into_inner(); + let fee_asset_item = fee_asset_item as usize; + let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); + // Find transfer types for fee and non-fee assets. 
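// For example, with `assets == [asset, fee]` and `fee_asset_item == 1`: if `asset`
// resolves to `TransferType::DestinationReserve` while `fee` resolves to
// `TransferType::Teleport`, this returns `(Teleport, DestinationReserve)` and the fee is
// handled through `FeesHandling::Separate` below; if both resolve to the same transfer
// type, the fee stays batched with the assets (`FeesHandling::Batched`).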
+ let (fees_transfer_type, assets_transfer_type) = + Self::find_fee_and_assets_transfer_types(&assets, fee_asset_item, &dest)?; + + // local and remote XCM programs to potentially handle fees separately + let fees = if fees_transfer_type == assets_transfer_type { + // no need for custom fees instructions, fees are batched with assets + FeesHandling::Batched { fees } + } else { + // Disallow _remote reserves_ unless assets & fees have same remote reserve (covered + // by branch above). The reason for this is that we'd need to send XCMs to separate + // chains with no guarantee of delivery order on final destination; therefore we + // cannot guarantee to have fees in place on final destination chain to pay for + // assets transfer. + ensure!( + !matches!(assets_transfer_type, TransferType::RemoteReserve(_)), + Error::::InvalidAssetUnsupportedReserve + ); + let weight_limit = weight_limit.clone(); + // remove `fees` from `assets` and build separate fees transfer instructions to be + // added to assets transfers XCM programs + let fees = assets.remove(fee_asset_item); + let (local_xcm, remote_xcm) = match fees_transfer_type { + TransferType::LocalReserve => + Self::local_reserve_fees_instructions(origin, dest, fees, weight_limit)?, + TransferType::DestinationReserve => + Self::destination_reserve_fees_instructions( + origin, + dest, + fees, + weight_limit, + )?, + TransferType::Teleport => + Self::teleport_fees_instructions(origin, dest, fees, weight_limit)?, + TransferType::RemoteReserve(_) => + return Err(Error::::InvalidAssetUnsupportedReserve.into()), + }; + FeesHandling::Separate { local_xcm, remote_xcm } + }; + + Self::build_and_execute_xcm_transfer_type( + origin, + dest, + beneficiary, + assets, + assets_transfer_type, + fees, + weight_limit, + ) + } } } /// The maximum number of distinct assets allowed to be transferred in a single helper extrinsic. const MAX_ASSETS_FOR_TRANSFER: usize = 2; +/// Specify how assets used for fees are handled during asset transfers. +#[derive(Clone, PartialEq)] +enum FeesHandling { + /// `fees` asset can be batch-transferred with rest of assets using same XCM instructions. + Batched { fees: MultiAsset }, + /// fees cannot be batched, they are handled separately using XCM programs here. + Separate { local_xcm: Xcm<::RuntimeCall>, remote_xcm: Xcm<()> }, +} + +impl sp_std::fmt::Debug for FeesHandling { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + match self { + Self::Batched { fees } => write!(f, "FeesHandling::Batched({:?})", fees), + Self::Separate { local_xcm, remote_xcm } => write!( + f, + "FeesHandling::Separate(local: {:?}, remote: {:?})", + local_xcm, remote_xcm + ), + } + } +} + impl QueryHandler for Pallet { type QueryId = u64; type BlockNumber = BlockNumberFor; @@ -1273,6 +1469,47 @@ impl QueryHandler for Pallet { } impl Pallet { + /// Find `TransferType`s for `assets` and fee identified through `fee_asset_item`, when + /// transferring to `dest`. + /// + /// Validate `assets` to all have same `TransferType`. + fn find_fee_and_assets_transfer_types( + assets: &[MultiAsset], + fee_asset_item: usize, + dest: &MultiLocation, + ) -> Result<(TransferType, TransferType), Error> { + let mut fees_transfer_type = None; + let mut assets_transfer_type = None; + for (idx, asset) in assets.iter().enumerate() { + if let Fungible(x) = asset.fun { + // If fungible asset, ensure non-zero amount. 
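[Editor's note] The fee-routing decision made in `transfer_assets` above can be reduced to a standalone sketch. The types below are simplified stand-ins, not the pallet's `TransferType`/`FeesHandling`: fees are batched whenever they share the assets' transfer type, a differing fee path gets its own instructions, and any remote-reserve leg in such a mixed transfer is rejected because the two XCM programs could reach `dest` out of order.

#[derive(Clone, PartialEq, Debug)]
enum PathKind { LocalReserve, DestinationReserve, RemoteReserve, Teleport }

#[derive(Debug, PartialEq)]
enum FeeRouting { Batched, Separate }

fn route_fees(assets: PathKind, fees: PathKind) -> Result<FeeRouting, &'static str> {
    if assets == fees {
        // Same path for fees and assets: one set of instructions covers both.
        return Ok(FeeRouting::Batched)
    }
    // Mixed paths: a remote reserve on either side is disallowed, mirroring the
    // `InvalidAssetUnsupportedReserve` checks above.
    if assets == PathKind::RemoteReserve || fees == PathKind::RemoteReserve {
        return Err("InvalidAssetUnsupportedReserve")
    }
    Ok(FeeRouting::Separate)
}

fn main() {
    use PathKind::*;
    assert_eq!(route_fees(LocalReserve, LocalReserve), Ok(FeeRouting::Batched));
    assert_eq!(route_fees(DestinationReserve, Teleport), Ok(FeeRouting::Separate));
    assert!(route_fees(RemoteReserve, LocalReserve).is_err());
}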
+ ensure!(!x.is_zero(), Error::::Empty); + } + let transfer_type = + T::XcmExecutor::determine_for(&asset, dest).map_err(Error::::from)?; + if idx == fee_asset_item { + fees_transfer_type = Some(transfer_type); + } else { + if let Some(existing) = assets_transfer_type.as_ref() { + // Ensure transfer for multiple assets uses same transfer type (only fee may + // have different transfer type/path) + ensure!(existing == &transfer_type, Error::::TooManyReserves); + } else { + // asset reserve identified + assets_transfer_type = Some(transfer_type); + } + } + } + // single asset also marked as fee item + if assets.len() == 1 { + assets_transfer_type = fees_transfer_type + } + Ok(( + fees_transfer_type.ok_or(Error::::Empty)?, + assets_transfer_type.ok_or(Error::::Empty)?, + )) + } + fn do_reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1286,35 +1523,37 @@ impl Pallet { let beneficiary: MultiLocation = (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + log::debug!( + target: "xcm::pallet_xcm::do_reserve_transfer_assets", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}", + origin_location, dest, beneficiary, assets, fee_asset_item, + ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); let value = (origin_location, assets.into_inner()); ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); - let (origin_location, assets) = value; - let context = T::UniversalLocation::get(); - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? - .clone() - .reanchored(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; - let assets: MultiAssets = assets.into(); - let xcm = Xcm(vec![ - BuyExecution { fees, weight_limit }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm }, - ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome }); - Ok(()) + let (origin, assets) = value; + + let fee_asset_item = fee_asset_item as usize; + let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); + + // Find transfer types for fee and non-fee assets. + let (fees_transfer_type, assets_transfer_type) = + Self::find_fee_and_assets_transfer_types(&assets, fee_asset_item, &dest)?; + // Ensure assets (and fees according to check below) are not teleportable to `dest`. + ensure!(assets_transfer_type != TransferType::Teleport, Error::::Filtered); + // Ensure all assets (including fees) have same reserve location. 
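[Editor's note] The grouping rule enforced by `find_fee_and_assets_transfer_types` can be shown on its own: every non-fee asset must resolve to one transfer type, only the fee item may differ, and a single asset doubles as the fee. A simplified generic sketch, not the pallet's implementation; plain strings stand in for transfer types:

fn split_types<T: PartialEq + Clone>(
    per_asset: &[T],
    fee_index: usize,
) -> Result<(T, T), &'static str> {
    let fee_type = per_asset.get(fee_index).cloned().ok_or("Empty")?;
    let mut assets_type: Option<T> = None;
    for (idx, ty) in per_asset.iter().enumerate() {
        if idx == fee_index {
            continue
        }
        if let Some(existing) = &assets_type {
            // Only the fee item may take a different transfer type/path.
            if existing != ty {
                return Err("TooManyReserves")
            }
        } else {
            assets_type = Some(ty.clone());
        }
    }
    // With a single asset, the fee item is also the transferred asset.
    Ok((fee_type.clone(), assets_type.unwrap_or(fee_type)))
}

fn main() {
    // Teleported fee plus two assets sharing one reserve: accepted.
    assert!(split_types(&["teleport", "local", "local"], 0).is_ok());
    // Non-fee assets resolving to different reserves: rejected.
    assert_eq!(split_types(&["local", "local", "dest"], 0), Err("TooManyReserves"));
    // A single asset doubles as the fee item.
    assert_eq!(split_types(&["teleport"], 0), Ok(("teleport", "teleport")));
}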
+ ensure!(assets_transfer_type == fees_transfer_type, Error::::TooManyReserves); + + Self::build_and_execute_xcm_transfer_type( + origin, + dest, + beneficiary, + assets, + assets_transfer_type, + FeesHandling::Batched { fees }, + weight_limit, + ) } fn do_teleport_assets( @@ -1330,36 +1569,496 @@ impl Pallet { let beneficiary: MultiLocation = (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + log::debug!( + target: "xcm::pallet_xcm::do_teleport_assets", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}, weight_limit {:?}", + origin_location, dest, beneficiary, assets, fee_asset_item, weight_limit, + ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); let value = (origin_location, assets.into_inner()); ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); let (origin_location, assets) = value; + for asset in assets.iter() { + let transfer_type = + T::XcmExecutor::determine_for(asset, &dest).map_err(Error::::from)?; + ensure!(transfer_type == TransferType::Teleport, Error::::Filtered); + } + let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); + + Self::build_and_execute_xcm_transfer_type( + origin_location, + dest, + beneficiary, + assets, + TransferType::Teleport, + FeesHandling::Batched { fees }, + weight_limit, + ) + } + + fn build_and_execute_xcm_transfer_type( + origin: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + transfer_type: TransferType, + fees: FeesHandling, + weight_limit: WeightLimit, + ) -> DispatchResult { + log::debug!( + target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, transfer_type {:?}, \ + fees_handling {:?}, weight_limit: {:?}", + origin, dest, beneficiary, assets, transfer_type, fees, weight_limit, + ); + let (mut local_xcm, remote_xcm) = match transfer_type { + TransferType::LocalReserve => { + let (local, remote) = Self::local_reserve_transfer_programs( + origin, + dest, + beneficiary, + assets, + fees, + weight_limit, + )?; + (local, Some(remote)) + }, + TransferType::DestinationReserve => { + let (local, remote) = Self::destination_reserve_transfer_programs( + origin, + dest, + beneficiary, + assets, + fees, + weight_limit, + )?; + (local, Some(remote)) + }, + TransferType::RemoteReserve(reserve) => { + let fees = match fees { + FeesHandling::Batched { fees } => fees, + _ => return Err(Error::::InvalidAssetUnsupportedReserve.into()), + }; + let local = Self::remote_reserve_transfer_program( + origin, + reserve, + dest, + beneficiary, + assets, + fees, + weight_limit, + )?; + (local, None) + }, + TransferType::Teleport => { + let (local, remote) = Self::teleport_assets_program( + origin, + dest, + beneficiary, + assets, + fees, + weight_limit, + )?; + (local, Some(remote)) + }, + }; + let weight = + T::Weigher::weight(&mut local_xcm).map_err(|()| Error::::UnweighableMessage)?; + let hash = local_xcm.using_encoded(sp_io::hashing::blake2_256); + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin, local_xcm, hash, weight, weight); + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + outcome.ensure_complete().map_err(|error| { + log::error!( + target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + "XCM execution failed with error {:?}", error + ); + Error::::LocalExecutionIncomplete + })?; + + if let Some(remote_xcm) = remote_xcm { + let 
(ticket, price) = validate_send::(dest, remote_xcm.clone()) + .map_err(Error::::from)?; + if origin != Here.into_location() { + Self::charge_fees(origin, price).map_err(|error| { + log::error!( + target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + "Unable to charge fee with error {:?}", error + ); + Error::::FeesNotMet + })?; + } + let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; + + let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; + Self::deposit_event(e); + } + Ok(()) + } + + fn add_fees_to_xcm( + dest: MultiLocation, + fees: FeesHandling, + weight_limit: WeightLimit, + local: &mut Xcm<::RuntimeCall>, + remote: &mut Xcm<()>, + ) -> Result<(), Error> { + match fees { + FeesHandling::Batched { fees } => { + let context = T::UniversalLocation::get(); + // no custom fees instructions, they are batched together with `assets` transfer; + // BuyExecution happens after receiving all `assets` + let reanchored_fees = + fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // buy execution using `fees` batched together with above `reanchored_assets` + remote.inner_mut().push(BuyExecution { fees: reanchored_fees, weight_limit }); + }, + FeesHandling::Separate { local_xcm: mut local_fees, remote_xcm: mut remote_fees } => { + // fees are handled by separate XCM instructions, prepend fees instructions (for + // remote XCM they have to be prepended instead of appended to pass barriers). + sp_std::mem::swap(local, &mut local_fees); + sp_std::mem::swap(remote, &mut remote_fees); + // these are now swapped so fees actually go first + local.inner_mut().append(&mut local_fees.into_inner()); + remote.inner_mut().append(&mut remote_fees.into_inner()); + }, + } + Ok(()) + } + + fn local_reserve_fees_instructions( + origin: MultiLocation, + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = (origin, vec![fees.clone()]); + ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); + let context = T::UniversalLocation::get(); - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? + let reanchored_fees = fees .clone() .reanchored(&dest, context) .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; + + let local_execute_xcm = Xcm(vec![ + // move `fees` to `dest`s local sovereign account + TransferAsset { assets: fees.into(), beneficiary: dest }, + ]); + let xcm_on_dest = Xcm(vec![ + // let (dest) chain know `fees` are in its SA on reserve + ReserveAssetDeposited(reanchored_fees.clone().into()), + // buy exec using `fees` in holding deposited in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn local_reserve_transfer_programs( + origin: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: FeesHandling, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = (origin, assets); + ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); + let (_, assets) = value; + + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + if matches!(&fees, FeesHandling::Batched { .. 
}) { 0 } else { 1 }; let assets: MultiAssets = assets.into(); - let xcm = Xcm(vec![ - BuyExecution { fees, weight_limit }, + let context = T::UniversalLocation::get(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // XCM instructions to be executed on local chain + let mut local_execute_xcm = Xcm(vec![ + // locally move `assets` to `dest`s local sovereign account + TransferAsset { assets, beneficiary: dest }, + ]); + // XCM instructions to be executed on destination chain + let mut xcm_on_dest = Xcm(vec![ + // let (dest) chain know assets are in its SA on reserve + ReserveAssetDeposited(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + // handle fees + Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest + .inner_mut() + .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn destination_reserve_fees_instructions( + origin: MultiLocation, + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = (origin, vec![fees.clone()]); + ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); + + let context = T::UniversalLocation::get(); + let reanchored_fees = fees + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + let fees: MultiAssets = fees.into(); + + let local_execute_xcm = Xcm(vec![ + // withdraw reserve-based fees (derivatives) + WithdrawAsset(fees.clone()), + // burn derivatives + BurnAsset(fees), + ]); + let xcm_on_dest = Xcm(vec![ + // withdraw `fees` from origin chain's sovereign account + WithdrawAsset(reanchored_fees.clone().into()), + // buy exec using `fees` in holding withdrawn in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn destination_reserve_transfer_programs( + origin: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: FeesHandling, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = (origin, assets); + ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); + let (_, assets) = value; + + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + if matches!(&fees, FeesHandling::Batched { .. 
}) { 0 } else { 1 }; + let assets: MultiAssets = assets.into(); + let context = T::UniversalLocation::get(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // XCM instructions to be executed on local chain + let mut local_execute_xcm = Xcm(vec![ + // withdraw reserve-based assets + WithdrawAsset(assets.clone()), + // burn reserve-based assets + BurnAsset(assets), + ]); + // XCM instructions to be executed on destination chain + let mut xcm_on_dest = Xcm(vec![ + // withdraw `assets` from origin chain's sovereign account + WithdrawAsset(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + // handle fees + Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest + .inner_mut() + .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((local_execute_xcm, xcm_on_dest)) + } + + // function assumes fees and assets have the same remote reserve + fn remote_reserve_transfer_program( + origin: MultiLocation, + reserve: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result::RuntimeCall>, Error> { + let value = (origin, assets); + ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); + let (_, assets) = value; + + let max_assets = assets.len() as u32; + let context = T::UniversalLocation::get(); + // we spend up to half of fees for execution on reserve and other half for execution on + // destination + let (fees_half_1, fees_half_2) = Self::halve_fees(fees)?; + // identifies fee item as seen by `reserve` - to be used at reserve chain + let reserve_fees = fees_half_1 + .reanchored(&reserve, context) + .map_err(|_| Error::::CannotReanchor)?; + // identifies fee item as seen by `dest` - to be used at destination chain + let dest_fees = + fees_half_2.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // identifies `dest` as seen by `reserve` + let dest = dest.reanchored(&reserve, context).map_err(|_| Error::::CannotReanchor)?; + // xcm to be executed at dest + let xcm_on_dest = Xcm(vec![ + BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }, DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, ]); - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(max_assets)), dest, xcm }, + // xcm to be executed on reserve + let xcm_on_reserve = Xcm(vec![ + BuyExecution { fees: reserve_fees, weight_limit }, + DepositReserveAsset { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome }); - Ok(()) + Ok(Xcm(vec![ + WithdrawAsset(assets.into()), + InitiateReserveWithdraw { + assets: Wild(AllCounted(max_assets)), + reserve, + xcm: xcm_on_reserve, + }, + ])) + } + + fn teleport_fees_instructions( + origin: MultiLocation, + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = 
(origin, vec![fees.clone()]); + ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); + + let context = T::UniversalLocation::get(); + let reanchored_fees = fees + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // XcmContext irrelevant in teleports checks + let dummy_context = + XcmContext { origin: None, message_id: Default::default(), topic: None }; + // We should check that the asset can actually be teleported out (for this to + // be in error, there would need to be an accounting violation by ourselves, + // so it's unlikely, but we don't want to allow that kind of bug to leak into + // a trusted chain. + ::AssetTransactor::can_check_out( + &dest, + &fees, + &dummy_context, + ) + .map_err(|_| Error::::CannotCheckOutTeleport)?; + // safe to do this here, we're in a transactional call that will be reverted on any + // errors down the line + ::AssetTransactor::check_out( + &dest, + &fees, + &dummy_context, + ); + + let fees: MultiAssets = fees.into(); + let local_execute_xcm = Xcm(vec![ + // withdraw fees + WithdrawAsset(fees.clone()), + // burn fees + BurnAsset(fees), + ]); + let xcm_on_dest = Xcm(vec![ + // (dest) chain receive teleported assets burned on origin chain + ReceiveTeleportedAsset(reanchored_fees.clone().into()), + // buy exec using `fees` in holding received in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn teleport_assets_program( + origin: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: FeesHandling, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = (origin, assets); + ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); + let (_, assets) = value; + + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + if matches!(&fees, FeesHandling::Batched { .. }) { 0 } else { 1 }; + let context = T::UniversalLocation::get(); + let assets: MultiAssets = assets.into(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // XcmContext irrelevant in teleports checks + let dummy_context = + XcmContext { origin: None, message_id: Default::default(), topic: None }; + for asset in assets.inner() { + // We should check that the asset can actually be teleported out (for this to + // be in error, there would need to be an accounting violation by ourselves, + // so it's unlikely, but we don't want to allow that kind of bug to leak into + // a trusted chain. 
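[Editor's note] The `can_check_out` / `check_out` sequence used for teleports (for the fee asset here, and per transferred asset just below) follows a check-then-commit pattern: validate everything before mutating any state, with the surrounding transactional call as the final safety net. A generic, illustrative reduction of that pattern (hypothetical helper, not pallet code):

fn check_then_commit<T, E>(
    items: &[T],
    check: impl Fn(&T) -> Result<(), E>,
    mut commit: impl FnMut(&T),
) -> Result<(), E> {
    // First pass: fail early, before any state has been touched.
    for item in items {
        check(item)?;
    }
    // Second pass: apply side effects only once every item is known to be acceptable
    // (the pallet additionally relies on the surrounding transactional call to roll back
    // anything that still fails further down the line).
    for item in items {
        commit(item);
    }
    Ok(())
}

fn main() {
    let mut checked_out = Vec::new();
    let result: Result<(), &str> = check_then_commit(
        &[1u32, 2, 3],
        |n| if *n == 0 { Err("CannotCheckOutTeleport") } else { Ok(()) },
        |n| checked_out.push(*n),
    );
    assert!(result.is_ok());
    assert_eq!(checked_out, vec![1, 2, 3]);
}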
+ ::AssetTransactor::can_check_out( + &dest, + asset, + &dummy_context, + ) + .map_err(|_| Error::::CannotCheckOutTeleport)?; + } + for asset in assets.inner() { + // safe to do this here, we're in a transactional call that will be reverted on any + // errors down the line + ::AssetTransactor::check_out( + &dest, + asset, + &dummy_context, + ); + } + + // XCM instructions to be executed on local chain + let mut local_execute_xcm = Xcm(vec![ + // withdraw assets to be teleported + WithdrawAsset(assets.clone()), + // burn assets on local chain + BurnAsset(assets), + ]); + // XCM instructions to be executed on destination chain + let mut xcm_on_dest = Xcm(vec![ + // teleport `assets` in from origin chain + ReceiveTeleportedAsset(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + // handle fees + Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest + .inner_mut() + .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((local_execute_xcm, xcm_on_dest)) + } + + /// Halve `fees` fungible amount. + pub(crate) fn halve_fees(fees: MultiAsset) -> Result<(MultiAsset, MultiAsset), Error> { + match fees.fun { + Fungible(amount) => { + let fee1 = amount.saturating_div(2); + let fee2 = amount.saturating_sub(fee1); + ensure!(fee1 > 0, Error::::FeesNotMet); + ensure!(fee2 > 0, Error::::FeesNotMet); + Ok((MultiAsset::from((fees.id, fee1)), MultiAsset::from((fees.id, fee2)))) + }, + NonFungible(_) => Err(Error::::FeesNotMet), + } } /// Will always make progress, and will do its best not to use much more than `weight_cutoff` @@ -1892,7 +2591,7 @@ impl WrapVersion for Pallet { dest: &MultiLocation, xcm: impl Into>, ) -> Result, ()> { - SupportedVersion::::get(XCM_VERSION, LatestVersionedMultiLocation(dest)) + Self::get_version_for(dest) .or_else(|| { Self::note_unknown_version(dest); SafeXcmVersion::::get() @@ -1909,6 +2608,12 @@ impl WrapVersion for Pallet { } } +impl GetVersion for Pallet { + fn get_version_for(dest: &MultiLocation) -> Option { + SupportedVersion::::get(XCM_VERSION, LatestVersionedMultiLocation(dest)) + } +} + impl VersionChangeNotifier for Pallet { /// Start notifying `location` should the XCM version of this chain change. 
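[Editor's note] A worked example of the `halve_fees` helper added above, which backs the remote-reserve path's "half the fees on the reserve chain, half on the destination" split: integer division gives the reserve-side half, the destination-side half absorbs any rounding remainder, and an amount that cannot produce two non-zero halves is rejected. Plain integers stand in for `MultiAsset` here:

fn halve(amount: u128) -> Result<(u128, u128), &'static str> {
    let fee1 = amount / 2; // `saturating_div(2)` in the pallet
    let fee2 = amount.saturating_sub(fee1);
    if fee1 == 0 || fee2 == 0 {
        return Err("FeesNotMet")
    }
    Ok((fee1, fee2))
}

fn main() {
    assert_eq!(halve(10), Ok((5, 5)));
    // Odd amounts round the extra unit into the destination-side half.
    assert_eq!(halve(11), Ok((5, 6)));
    // An amount of 1 cannot be split into two non-zero halves.
    assert_eq!(halve(1), Err("FeesNotMet"));
}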
/// diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 3b41ad90ec99d77c2f961ec1308464847f8089a4..0ac4205ed949fc2308d4c02839e9f37d118f4408 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -16,8 +16,11 @@ use codec::Encode; use frame_support::{ - construct_runtime, match_types, parameter_types, - traits::{ConstU32, Everything, EverythingBut, Nothing}, + construct_runtime, derive_impl, match_types, parameter_types, + traits::{ + AsEnsureOriginWithArg, ConstU128, ConstU32, Contains, Equals, Everything, EverythingBut, + Nothing, + }, weights::Weight, }; use frame_system::EnsureRoot; @@ -25,18 +28,22 @@ use polkadot_parachain_primitives::primitives::Id as ParaId; use polkadot_runtime_parachains::origin; use sp_core::H256; use sp_runtime::{traits::IdentityLookup, AccountId32, BuildStorage}; -pub use sp_std::{ - cell::RefCell, collections::btree_map::BTreeMap, fmt::Debug, marker::PhantomData, -}; +pub use sp_std::cell::RefCell; use xcm::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter as XcmCurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, - FixedWeightBounds, IsConcrete, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, XcmFeeManagerFromComponents, XcmFeeToAccount, + ChildSystemParachainAsSuperuser, DescribeAllTerminal, FixedRateOfFungible, FixedWeightBounds, + FungiblesAdapter, HashedDescription, IsConcrete, MatchedConvertedConcreteId, NoChecking, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + XcmFeeManagerFromComponents, XcmFeeToAccount, +}; +use xcm_executor::{ + traits::{Identity, JustTry}, + XcmExecutor, }; -use xcm_executor::XcmExecutor; use crate::{self as pallet_xcm, TestWeightInfo}; @@ -137,6 +144,7 @@ construct_runtime!( { System: frame_system::{Pallet, Call, Storage, Config, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Config, Event}, ParasOrigin: origin::{Pallet, Origin}, XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config}, TestNotifier: pallet_test_notifier::{Pallet, Call, Event}, @@ -145,6 +153,7 @@ construct_runtime!( thread_local! { pub static SENT_XCM: RefCell)>> = RefCell::new(Vec::new()); + pub static FAIL_SEND_XCM: RefCell = RefCell::new(false); } pub(crate) fn sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { SENT_XCM.with(|q| (*q.borrow()).clone()) @@ -156,6 +165,9 @@ pub(crate) fn take_sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { r }) } +pub(crate) fn set_send_xcm_artificial_failure(should_fail: bool) { + FAIL_SEND_XCM.with(|q| *q.borrow_mut() = should_fail); +} /// Sender that never returns error. 
pub struct TestSendXcm; impl SendXcm for TestSendXcm { @@ -164,6 +176,9 @@ impl SendXcm for TestSendXcm { dest: &mut Option, msg: &mut Option>, ) -> SendResult<(MultiLocation, Xcm<()>)> { + if FAIL_SEND_XCM.with(|q| *q.borrow()) { + return Err(SendError::Transport("Intentional send failure used in tests")) + } let pair = (dest.take().unwrap(), msg.take().unwrap()); Ok((pair, MultiAssets::new())) } @@ -179,13 +194,13 @@ impl SendXcm for TestSendXcmErrX8 { type Ticket = (MultiLocation, Xcm<()>); fn validate( dest: &mut Option, - msg: &mut Option>, + _: &mut Option>, ) -> SendResult<(MultiLocation, Xcm<()>)> { - let (dest, msg) = (dest.take().unwrap(), msg.take().unwrap()); - if dest.len() == 8 { + if dest.as_ref().unwrap().len() == 8 { + dest.take(); Err(SendError::Transport("Destination location full")) } else { - Ok(((dest, msg), MultiAssets::new())) + Err(SendError::NotApplicable) } } fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { @@ -231,6 +246,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; @@ -280,18 +296,150 @@ impl pallet_balances::Config for Test { type MaxFreezes = ConstU32<0>; } +#[cfg(feature = "runtime-benchmarks")] +/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. +pub struct XcmBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> MultiLocation { + MultiLocation { parents: 1, interior: X1(Parachain(id)) } + } +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = MultiLocation; + type AssetIdParameter = MultiLocation; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = ConstU128<1>; + type AssetAccountDeposit = ConstU128<10>; + type MetadataDepositBase = ConstU128<1>; + type MetadataDepositPerByte = ConstU128<1>; + type ApprovalDeposit = ConstU128<1>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type WeightInfo = (); + type CallbackHandle = (); + type Extra = (); + type RemoveItemsLimit = ConstU32<5>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = XcmBenchmarkHelper; +} + +// This child parachain is a system parachain trusted to teleport native token. +pub const SOME_SYSTEM_PARA: u32 = 1001; + +// This child parachain acts as trusted reserve for its assets in tests. +// USDT allowed to teleport to/from here. +pub const FOREIGN_ASSET_RESERVE_PARA_ID: u32 = 2001; +// Inner junction of reserve asset on `FOREIGN_ASSET_RESERVE_PARA_ID`. +pub const FOREIGN_ASSET_INNER_JUNCTION: Junction = GeneralIndex(1234567); + +// This child parachain acts as trusted reserve for say.. USDC that can be used for fees. +pub const USDC_RESERVE_PARA_ID: u32 = 2002; +// Inner junction of reserve asset on `USDC_RESERVE_PARA_ID`. +pub const USDC_INNER_JUNCTION: Junction = PalletInstance(42); + +// This child parachain is a trusted teleporter for say.. USDT (T from Teleport :)). +// We'll use USDT in tests that teleport fees. +pub const USDT_PARA_ID: u32 = 2003; + +// This child parachain is not configured as trusted reserve or teleport location for any assets. +pub const OTHER_PARA_ID: u32 = 2009; + +// This child parachain is used for filtered/disallowed assets. 
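[Editor's note] On the `TestSendXcmErrX8` change above: returning `SendError::NotApplicable` (rather than claiming the message) lets the tuple `XcmRouter` fall through to the next sender for ordinary destinations, while the 8-junction case still fails hard with a transport error. A simplified model of that fallthrough behaviour (toy types, not the real `SendXcm` trait):

#[derive(Debug, PartialEq)]
enum RouteError { NotApplicable, Transport(&'static str) }

fn route(
    senders: &[(&'static str, fn(u8) -> Result<(), RouteError>)],
    dest_len: u8,
) -> Result<&'static str, RouteError> {
    for &(name, validate) in senders {
        match validate(dest_len) {
            // This sender takes the message.
            Ok(()) => return Ok(name),
            // "Not my destination": fall through to the next sender in the tuple.
            Err(RouteError::NotApplicable) => continue,
            // Any other error aborts routing.
            Err(e) => return Err(e),
        }
    }
    Err(RouteError::NotApplicable)
}

fn err_x8(dest_len: u8) -> Result<(), RouteError> {
    if dest_len == 8 {
        Err(RouteError::Transport("Destination location full"))
    } else {
        Err(RouteError::NotApplicable)
    }
}

fn always_ok(_dest_len: u8) -> Result<(), RouteError> {
    Ok(())
}

fn main() {
    let senders: &[(&'static str, fn(u8) -> Result<(), RouteError>)] =
        &[("TestSendXcmErrX8", err_x8), ("TestSendXcm", always_ok)];
    // Ordinary destinations are skipped by `TestSendXcmErrX8` and land on `TestSendXcm`.
    assert_eq!(route(senders, 2), Ok("TestSendXcm"));
    // An 8-junction destination is rejected outright.
    assert_eq!(route(senders, 8), Err(RouteError::Transport("Destination location full")));
}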
+pub const FILTERED_PARA_ID: u32 = 2010; + parameter_types! { pub const RelayLocation: MultiLocation = Here.into_location(); + pub const NativeAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(Here.into_location()), + }; + pub const SystemParachainLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(SOME_SYSTEM_PARA)) + }; + pub const ForeignReserveLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)) + }; + pub const ForeignAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X2(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION), + }), + }; + pub const UsdcReserveLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(USDC_RESERVE_PARA_ID)) + }; + pub const Usdc: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X2(Parachain(USDC_RESERVE_PARA_ID), USDC_INNER_JUNCTION), + }), + }; + pub const UsdtTeleportLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(USDT_PARA_ID)) + }; + pub const Usdt: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X1(Parachain(USDT_PARA_ID)), + }), + }; + pub const FilteredTeleportLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(FILTERED_PARA_ID)) + }; + pub const FilteredTeleportAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X1(Parachain(FILTERED_PARA_ID)), + }), + }; pub const AnyNetwork: Option = None; pub UniversalLocation: InteriorMultiLocation = Here; pub UnitWeightCost: u64 = 1_000; + pub CheckingAccount: AccountId = XcmPallet::check_account(); } -pub type SovereignAccountOf = - (ChildParachainConvertsVia, AccountId32Aliases); +pub type SovereignAccountOf = ( + ChildParachainConvertsVia, + AccountId32Aliases, + HashedDescription, +); -pub type LocalAssetTransactor = - XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; +pub type ForeignAssetsConvertedConcreteId = MatchedConvertedConcreteId< + MultiLocation, + Balance, + // Excludes relay/parent chain currency + EverythingBut<(Equals,)>, + Identity, + JustTry, +>; + +#[allow(deprecated)] +pub type AssetTransactors = ( + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>, + FungiblesAdapter< + Assets, + ForeignAssetsConvertedConcreteId, + SovereignAccountOf, + AccountId, + NoChecking, + CheckingAccount, + >, +); type LocalOriginConverter = ( SovereignSignedViaLocation, @@ -303,7 +451,13 @@ type LocalOriginConverter = ( parameter_types! 
{ pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (Concrete(RelayLocation::get()), 1, 1); - pub TrustedAssets: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub TrustedLocal: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub TrustedSystemPara: (MultiAssetFilter, MultiLocation) = (NativeAsset::get().into(), SystemParachainLocation::get()); + pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), UsdtTeleportLocation::get()); + pub TrustedFilteredTeleport: (MultiAssetFilter, MultiLocation) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get()); + pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), ForeignReserveLocation::get()); + pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); + pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (Usdc::get().into(), UsdcReserveLocation::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; pub XcmFeesTargetAccount: AccountId = AccountId::new([167u8; 32]); @@ -323,14 +477,22 @@ pub type Barrier = ( AllowSubscriptionsFrom, ); +pub type XcmRouter = (TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = (TestPaidForPara3000SendXcm, TestSendXcm); - type AssetTransactor = LocalAssetTransactor; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; type OriginConverter = LocalOriginConverter; - type IsReserve = (); - type IsTeleporter = Case; + type IsReserve = (Case, Case); + type IsTeleporter = ( + Case, + Case, + Case, + Case, + Case, + ); type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = FixedWeightBounds; @@ -360,19 +522,22 @@ parameter_types! { pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1000).into()); +pub struct XcmTeleportFiltered; +impl Contains<(MultiLocation, Vec)> for XcmTeleportFiltered { + fn contains(t: &(MultiLocation, Vec)) -> bool { + let filtered = FilteredTeleportAsset::get(); + t.1.iter().any(|asset| asset == &filtered) + } } impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; - type XcmRouter = (TestSendXcmErrX8, TestPaidForPara3000SendXcm, TestSendXcm); + type XcmRouter = XcmRouter; type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; + type XcmTeleportFilter = EverythingBut; type XcmReserveTransferFilter = Everything; type Weigher = FixedWeightBounds; type UniversalLocation = UniversalLocation; @@ -380,6 +545,7 @@ impl pallet_xcm::Config for Test { type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; type AdvertisedXcmVersion = AdvertisedXcmVersion; + type AdminOrigin = EnsureRoot; type TrustedLockers = (); type SovereignAccountOf = AccountId32Aliases<(), AccountId32>; type Currency = Balances; @@ -388,9 +554,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - type AdminOrigin = EnsureRoot; } impl origin::Config for Test {} @@ -401,6 +564,75 @@ impl pallet_test_notifier::Config for Test { type RuntimeCall = RuntimeCall; } +#[cfg(feature = "runtime-benchmarks")] +impl super::benchmarking::Config for Test { + fn reachable_dest() -> Option { + Some(Parachain(1000).into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some((NativeAsset::get(), SystemParachainLocation::get())) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some(( + MultiAsset { fun: Fungible(10), id: Concrete(Here.into_location()) }, + Parachain(OTHER_PARA_ID).into(), + )) + } + + fn set_up_complex_asset_transfer( + ) -> Option<(MultiAssets, u32, MultiLocation, Box)> { + use crate::tests::assets_transfer::{into_multiassets_checked, set_up_foreign_asset}; + // Transfer native asset (local reserve) to `USDT_PARA_ID`. Using teleport-trusted USDT for + // fees. 
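[Editor's note] How the new `XcmTeleportFilter = EverythingBut<XcmTeleportFiltered>` wiring above behaves: `XcmTeleportFiltered` matches any transfer containing the filtered asset, and `EverythingBut` negates it, so exactly those transfers are rejected as `Filtered`. A self-contained model with simplified stand-ins for `Contains`/`EverythingBut` (the real traits live in `frame_support`):

trait SimpleContains<T> {
    fn contains(t: &T) -> bool;
}

struct XcmTeleportFilteredModel;
impl SimpleContains<Vec<&'static str>> for XcmTeleportFilteredModel {
    fn contains(assets: &Vec<&'static str>) -> bool {
        // Matches any transfer that includes the filtered asset.
        assets.iter().any(|asset| *asset == "FilteredTeleportAsset")
    }
}

struct EverythingBut<Inner>(core::marker::PhantomData<Inner>);
impl<T, Inner: SimpleContains<T>> SimpleContains<T> for EverythingBut<Inner> {
    fn contains(t: &T) -> bool {
        !Inner::contains(t)
    }
}

fn main() {
    type TeleportFilter = EverythingBut<XcmTeleportFilteredModel>;
    // Regular teleports pass the filter...
    assert!(TeleportFilter::contains(&vec!["NativeAsset"]));
    // ...while anything containing the filtered asset is rejected (surfaces as `Filtered`).
    assert!(!TeleportFilter::contains(&vec!["FilteredTeleportAsset", "NativeAsset"]));
}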
+ + let asset_amount = 10u128; + let fee_amount = 2u128; + + let existential_deposit = ExistentialDeposit::get(); + let caller = frame_benchmarking::whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = asset_amount + existential_deposit * 1000; + let _ = >::make_free_balance_be( + &caller, balance, + ); + // create sufficient foreign asset USDT + let usdt_initial_local_amount = fee_amount * 10; + let (usdt_chain, _, usdt_id_multilocation) = set_up_foreign_asset( + USDT_PARA_ID, + None, + caller.clone(), + usdt_initial_local_amount, + true, + ); + + // native assets transfer destination is USDT chain (teleport trust only for USDT) + let dest = usdt_chain; + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, fee_amount).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), asset_amount).into(), + ); + // verify initial balances + assert_eq!(Balances::free_balance(&caller), balance); + assert_eq!(Assets::balance(usdt_id_multilocation, &caller), usdt_initial_local_amount); + + // verify transferred successfully + let verify = Box::new(move || { + // verify balances after transfer, decreased by transferred amounts + assert_eq!(Balances::free_balance(&caller), balance - asset_amount); + assert_eq!( + Assets::balance(usdt_id_multilocation, &caller), + usdt_initial_local_amount - fee_amount + ); + }); + Some((assets, fee_index as u32, dest, verify)) + } +} + pub(crate) fn last_event() -> RuntimeEvent { System::events().pop().expect("RuntimeEvent expected").event } @@ -416,14 +648,25 @@ pub(crate) fn buy_execution(fees: impl Into) -> Instruction { pub(crate) fn buy_limited_execution( fees: impl Into, - weight: Weight, + weight_limit: WeightLimit, ) -> Instruction { use xcm::latest::prelude::*; - BuyExecution { fees: fees.into(), weight_limit: Limited(weight) } + BuyExecution { fees: fees.into(), weight_limit } } pub(crate) fn new_test_ext_with_balances( balances: Vec<(AccountId, Balance)>, +) -> sp_io::TestExternalities { + new_test_ext_with_balances_and_xcm_version( + balances, + // By default set actual latest XCM version + Some(XCM_VERSION), + ) +} + +pub(crate) fn new_test_ext_with_balances_and_xcm_version( + balances: Vec<(AccountId, Balance)>, + safe_xcm_version: Option, ) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); @@ -431,7 +674,7 @@ pub(crate) fn new_test_ext_with_balances( .assimilate_storage(&mut t) .unwrap(); - pallet_xcm::GenesisConfig:: { safe_xcm_version: Some(2), ..Default::default() } + pallet_xcm::GenesisConfig:: { safe_xcm_version, ..Default::default() } .assimilate_storage(&mut t) .unwrap(); diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs new file mode 100644 index 0000000000000000000000000000000000000000..6893bae2b6c17d6d76abe28aa44d8b89a74a627c --- /dev/null +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -0,0 +1,2421 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +#![cfg(test)] + +use crate::{ + mock::*, + tests::{ALICE, BOB, FEE_AMOUNT, INITIAL_BALANCE, SEND_AMOUNT}, + DispatchResult, OriginFor, +}; +use frame_support::{ + assert_ok, + traits::{tokens::fungibles::Inspect, Currency}, + weights::Weight, +}; +use polkadot_parachain_primitives::primitives::Id as ParaId; +use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; +use xcm::prelude::*; +use xcm_executor::traits::ConvertLocation; + +// Helper function to deduplicate testing different teleport types. +fn do_test_and_verify_teleport_assets( + origin_location: MultiLocation, + expected_beneficiary: MultiLocation, + call: Call, + expected_weight_limit: WeightLimit, +) { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), + ]; + let dest = RelayLocation::get().into(); + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 2; + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + // call extrinsic + call(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Here, SEND_AMOUNT), expected_weight_limit), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: expected_beneficiary + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: origin_location, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn teleport_assets_works() { + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + do_test_and_verify_teleport_assets( + origin_location, + beneficiary, + || { + assert_ok!(XcmPallet::teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + }, + Unlimited, + ); +} + +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. 
+#[test] +fn limited_teleport_assets_works() { + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + do_test_and_verify_teleport_assets( + origin_location, + beneficiary, + || { + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); + }, + expected_weight_limit, + ); +} + +/// `limited_teleport_assets` should fail for filtered assets +#[test] +fn limited_teleport_filtered_assets_disallowed() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { + let result = XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(FilteredTeleportLocation::get().into()), + Box::new(beneficiary.into()), + Box::new(FilteredTeleportAsset::get().into()), + 0, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + }); +} + +/// Test `reserve_transfer_assets_with_paid_router_works` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +/// Verifies that XCM router fees (`SendXcm::validate` -> `MultiAssets`) are withdrawn from correct +/// user account and deposited to a correct target account (`XcmFeesTargetAccount`). 
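[Editor's note] On the error pinned by `limited_teleport_filtered_assets_disallowed` above: `index: 4` is `XcmPallet`'s position in the mock `construct_runtime!` (System, Balances, Assets, ParasOrigin, XcmPallet, ...), and the first byte of `error` is the discriminant of the pallet `Error` variant, as the `message` field confirms. A hypothetical helper that such tests could share (sketch only, assuming the same `sp_runtime` imports used by this file):

use sp_runtime::{DispatchError, ModuleError};

fn filtered_error() -> DispatchError {
    DispatchError::Module(ModuleError {
        index: 4,            // pallet index of `XcmPallet` in the mock runtime
        error: [2, 0, 0, 0], // first byte: discriminant of the `Filtered` error variant
        message: Some("Filtered"),
    })
}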
+#[test] +fn reserve_transfer_assets_with_paid_router_works() { + let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT); + let paid_para_id = Para3000::get(); + let balances = vec![ + (user_account.clone(), INITIAL_BALANCE), + (ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE), + (XcmFeesTargetAccount::get(), INITIAL_BALANCE), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let xcm_router_fee_amount = Para3000PaymentAmount::get(); + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = + AccountId32 { network: None, id: user_account.clone().into() }.into(); + assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE); + assert_ok!(XcmPallet::reserve_transfer_assets( + RuntimeOrigin::signed(user_account.clone()), + Box::new(Parachain(paid_para_id).into()), + Box::new(dest.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + + // XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount + assert_eq!( + Balances::free_balance(user_account), + INITIAL_BALANCE - SEND_AMOUNT - xcm_router_fee_amount + ); + + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(paid_para_id).into_account_truncating(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + + // XcmFeesTargetAccount where should lend xcm_router_fee_amount + assert_eq!( + Balances::free_balance(XcmFeesTargetAccount::get()), + INITIAL_BALANCE + xcm_router_fee_amount + ); + + let dest_para: MultiLocation = Parachain(paid_para_id).into(); + assert_eq!( + sent_xcm(), + vec![( + dest_para, + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Parent, SEND_AMOUNT)), + DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, + ]), + )] + ); + let mut last_events = last_events(5).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + // balances events + last_events.next().unwrap(); + last_events.next().unwrap(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: dest, + fees: Para3000PaymentMultiAssets::get(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +pub(crate) fn set_up_foreign_asset( + reserve_para_id: u32, + inner_junction: Option, + benficiary: AccountId, + initial_amount: u128, + is_sufficient: bool, +) -> (MultiLocation, AccountId, MultiLocation) { + let reserve_location = + RelayLocation::get().pushed_with_interior(Parachain(reserve_para_id)).unwrap(); + let reserve_sovereign_account = + SovereignAccountOf::convert_location(&reserve_location).unwrap(); + + let foreign_asset_id_multilocation = if let Some(junction) = inner_junction { + reserve_location.pushed_with_interior(junction).unwrap() + } else { + reserve_location + }; + + // create sufficient (to be used as fees as well) foreign asset + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + foreign_asset_id_multilocation, + BOB, + is_sufficient, + 1 + )); + // this asset should have been teleported/reserve-transferred in, but for this test we just + // mint it locally. 
+ assert_ok!(Assets::mint( + RuntimeOrigin::signed(BOB), + foreign_asset_id_multilocation, + benficiary, + initial_amount + )); + + (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) +} + +// Helper function that provides correct `fee_index` after `sort()` done by +// `vec![MultiAsset, MultiAsset].into()`. +pub(crate) fn into_multiassets_checked( + fee_asset: MultiAsset, + transfer_asset: MultiAsset, +) -> (MultiAssets, usize, MultiAsset, MultiAsset) { + let assets: MultiAssets = vec![fee_asset.clone(), transfer_asset.clone()].into(); + let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; + (assets, fee_index, fee_asset, transfer_asset) +} + +/// Test `tested_call` with local asset reserve and local fee reserve. +/// +/// Transferring native asset (local reserve) to some `OTHER_PARA_ID` (no teleport trust). +/// Using native asset for fees as well. +/// +/// Verifies `expected_result` +fn local_asset_reserve_and_local_fee_reserve_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![ + (ALICE, INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), + ]; + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + let expected_beneficiary = beneficiary; + let dest: MultiLocation = Parachain(OTHER_PARA_ID).into(); + + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + // call extrinsic + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + ); + assert_eq!(result, expected_result); + if expected_result.is_err() { + // short-circuit here for tests where we expect failure + return + } + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(OTHER_PARA_ID).into_account_truncating(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), expected_weight_limit), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: expected_beneficiary + }, + ]), + )] + ); + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: origin_location, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `transfer_assets` with local asset reserve and local fee reserve works. 
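[Editor's note] Why `into_multiassets_checked` above has to recompute the fee index: building a `MultiAssets` from `vec![fee_asset, transfer_asset]` sorts the inner vec, so the fee can end up at index 0 or 1 depending on how the two assets order. A standalone illustration with strings standing in for assets:

fn into_assets_checked(fee: &str, transfer: &str) -> (Vec<String>, usize) {
    let mut assets = vec![fee.to_string(), transfer.to_string()];
    // `MultiAssets` keeps its inner vec sorted, so the original ordering is not preserved.
    assets.sort();
    let fee_index = assets.iter().position(|a| a == fee).expect("fee asset is present");
    (assets, fee_index)
}

fn main() {
    // Depending on how the two assets sort, the fee lands at a different index.
    assert_eq!(into_assets_checked("usdt-fee", "native").1, 1);
    assert_eq!(into_assets_checked("native-fee", "usdt").1, 0);
}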
+#[test] +fn transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works() { + let expected_result = Ok(()); + local_asset_reserve_and_local_fee_reserve_call(XcmPallet::transfer_assets, expected_result); +} + +/// Test `limited_reserve_transfer_assets` with local asset reserve and local fee reserve works. +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works() { + let expected_result = Ok(()); + local_asset_reserve_and_local_fee_reserve_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with local asset reserve and local fee reserve disallowed. +#[test] +fn teleport_assets_with_local_asset_reserve_and_local_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + local_asset_reserve_and_local_fee_reserve_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with destination asset reserve and local fee reserve. +/// +/// Transferring foreign asset (`FOREIGN_ASSET_RESERVE_PARA_ID` reserve) to +/// `FOREIGN_ASSET_RESERVE_PARA_ID` (no teleport trust). +/// Using native asset (local reserve) for fees. +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +/// +/// Verifies `expected_result`. +fn destination_asset_reserve_and_local_fee_reserve_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let weight = BaseXcmWeight::get() * 3; + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create non-sufficient foreign asset BLA + let foreign_initial_amount = 142; + let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + ALICE, + foreign_initial_amount, + false, + ); + + // transfer destination is reserve location (no teleport trust) + let dest = reserve_location; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // native asset for fee - local reserve + (MultiLocation::here(), FEE_AMOUNT).into(), + // foreign asset to transfer - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + if expected_result.is_err() { + // short-circuit here for tests where we expect failure + return + } + + let mut last_events = last_events(3).into_iter(); + assert_eq!( + 
last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Alice used native asset for fees + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - FEE_AMOUNT); + // Destination account (parachain account) added native reserve used as fee to balances + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), FEE_AMOUNT); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + // `fees` are being sent through local-reserve transfer because fee reserve is + // local chain; `assets` are burned on source and withdrawn from SA here + Xcm(vec![ + ReserveAssetDeposited((Parent, FEE_AMOUNT).into()), + buy_limited_execution(expected_fee, Unlimited), + WithdrawAsset(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: origin_location, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `transfer_assets` with destination asset reserve and local fee reserve. +#[test] +fn transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_works() { + let expected_result = Ok(()); + destination_asset_reserve_and_local_fee_reserve_call( + XcmPallet::transfer_assets, + expected_result, + ); +} + +/// Test `limited_reserve_transfer_assets` with destination asset reserve and local fee reserve +/// disallowed. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + destination_asset_reserve_and_local_fee_reserve_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with destination asset reserve and local fee reserve +/// disallowed. +#[test] +fn teleport_assets_with_destination_asset_reserve_and_local_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + destination_asset_reserve_and_local_fee_reserve_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with remote asset reserve and local fee reserve is disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `OTHER_PARA_ID`. +/// Using native (local reserve) as fee should be disallowed. 
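+///
+/// The offending combination, sketched with the helpers this file already uses (illustrative
+/// only; the concrete setup lives in the function body below):
+///
+/// ```nocompile
+/// let (assets, fee_index, ..) = into_multiassets_checked(
+///     (MultiLocation::here(), FEE_AMOUNT).into(),           // fee: local reserve
+///     (foreign_asset_id_multilocation, SEND_AMOUNT).into(), // asset: remote reserve
+/// );
+/// // fee and asset resolve to different transfer types, so the call is rejected with
+/// // `InvalidAssetUnsupportedReserve` (`transfer_assets`) or `TooManyReserves`
+/// // (`limited_reserve_transfer_assets`), as asserted by the tests below.
+/// ```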
+fn remote_asset_reserve_and_local_fee_reserve_call_disallowed( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create non-sufficient foreign asset BLA + let foreign_initial_amount = 142; + let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + ALICE, + foreign_initial_amount, + false, + ); + + // transfer destination is OTHER_PARA_ID (foreign asset needs to go through its reserve + // chain) + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let (assets, fee_index, _, _) = into_multiassets_checked( + // native asset for fee - local reserve + (MultiLocation::here(), FEE_AMOUNT).into(), + // foreign asset to transfer - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // try the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + + // Alice transferred nothing + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + // Alice spent native asset for fees + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Verify total and active issuance of foreign BLA asset have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + }); +} + +/// Test `transfer_assets` with remote asset reserve and local fee reserve is disallowed. +#[test] +fn transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve"), + })); + remote_asset_reserve_and_local_fee_reserve_call_disallowed( + XcmPallet::transfer_assets, + expected_result, + ); +} + +/// Test `limited_reserve_transfer_assets` with remote asset reserve and local fee reserve is +/// disallowed. +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + remote_asset_reserve_and_local_fee_reserve_call_disallowed( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with remote asset reserve and local fee reserve is disallowed. 
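+///
+/// Note: the expected `ModuleError { index: 4, error: [2, 0, 0, 0], .. }` is just the encoded
+/// form of `pallet_xcm::Error::<T>::Filtered` in this mock runtime (the pallet sits at index 4).
+/// An equivalent, variant-based sketch of the same assertion (`result` being the returned
+/// `DispatchResult`):
+///
+/// ```nocompile
+/// assert_eq!(result, Err(crate::Error::<Test>::Filtered.into()));
+/// ```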
+#[test] +fn teleport_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + remote_asset_reserve_and_local_fee_reserve_call_disallowed( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with local asset reserve and destination fee reserve. +/// +/// Transferring native asset (local reserve) to `USDC_RESERVE_PARA_ID` (no teleport trust). Using +/// foreign asset (`USDC_RESERVE_PARA_ID` reserve) for fees. +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +/// +/// Verifies `expected_result`. +fn local_asset_reserve_and_destination_fee_reserve_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC + let usdc_initial_local_amount = 142; + let (usdc_reserve_location, usdc_chain_sovereign_account, usdc_id_multilocation) = + set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + ALICE, + usdc_initial_local_amount, + true, + ); + + // native assets transfer to fee reserve location (no teleport trust) + let dest = usdc_reserve_location; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // usdc for fees (is sufficient on local chain too) - destination reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + if expected_result.is_err() { + // short-circuit here for tests where we expect failure + return + } + + let weight = BaseXcmWeight::get() * 3; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: origin_location, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + + // Alice spent (fees) amount + assert_eq!( + Assets::balance(usdc_id_multilocation, ALICE), + usdc_initial_local_amount - FEE_AMOUNT + ); + // Alice used native asset for transfer + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), SEND_AMOUNT); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) + let expected_issuance = usdc_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are being sent through destination-reserve transfer because fee reserve + // is destination chain + WithdrawAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // transfer is through local-reserve transfer because `assets` (native asset) + // have local reserve + ReserveAssetDeposited(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `transfer_assets` with local asset reserve and destination fee reserve. +#[test] +fn transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_works() { + let expected_result = Ok(()); + local_asset_reserve_and_destination_fee_reserve_call( + XcmPallet::transfer_assets, + expected_result, + ); +} + +/// Test `limited_reserve_transfer_assets` with local asset reserve and destination fee reserve +/// disallowed. +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + local_asset_reserve_and_destination_fee_reserve_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with local asset reserve and destination fee reserve disallowed. +#[test] +fn teleport_assets_with_local_asset_reserve_and_destination_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + local_asset_reserve_and_destination_fee_reserve_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with destination asset reserve and destination fee reserve. 
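+///
+/// Transferring foreign asset BLA back to its reserve (`FOREIGN_ASSET_RESERVE_PARA_ID`) and
+/// paying fees in that same asset, so a single destination-reserve flow covers both. Sketch of
+/// the remote program the helper expects (mirrors its `sent_xcm()` assertion; illustrative only):
+///
+/// ```nocompile
+/// Xcm(vec![
+///     WithdrawAsset(expected_assets.clone()),
+///     ClearOrigin,
+///     buy_limited_execution(expected_assets.get(0).unwrap().clone(), Unlimited),
+///     DepositAsset { assets: AllCounted(1).into(), beneficiary },
+/// ])
+/// ```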
+/// +/// Verifies `expected_result` +fn destination_asset_reserve_and_destination_fee_reserve_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // we'll send just this foreign asset back to its reserve location and use it for fees as + // well + let foreign_initial_amount = 142; + let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + ALICE, + foreign_initial_amount, + true, + ); + + // transfer destination is reserve location + let dest = reserve_location; + let assets: MultiAssets = vec![(foreign_asset_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // reanchor according to test-case + let mut expected_assets = assets.clone(); + expected_assets.reanchor(&dest, UniversalLocation::get()).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index, + Unlimited, + ); + assert_eq!(result, expected_result); + if expected_result.is_err() { + // short-circuit here for tests where we expect failure + return + } + + let weight = BaseXcmWeight::get() * 2; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: origin_location, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Alice's native asset balance is untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Reserve sovereign account has same balances + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + Parachain(FOREIGN_ASSET_RESERVE_PARA_ID).into(), + Xcm(vec![ + WithdrawAsset(expected_assets.clone()), + ClearOrigin, + buy_limited_execution(expected_assets.get(0).unwrap().clone(), Unlimited), + DepositAsset { assets: AllCounted(1).into(), beneficiary }, + ]), + )] + ); + }); +} + +/// Test `transfer_assets` with destination asset reserve and destination fee reserve. +#[test] +fn transfer_assets_with_destination_asset_reserve_and_destination_fee_reserve_works() { + let expected_result = Ok(()); + destination_asset_reserve_and_destination_fee_reserve_call( + XcmPallet::transfer_assets, + expected_result, + ); +} + +/// Test `limited_reserve_transfer_assets` with destination asset reserve and destination fee +/// reserve. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_destination_fee_reserve_works() { + let expected_result = Ok(()); + destination_asset_reserve_and_destination_fee_reserve_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with destination asset reserve and destination fee reserve +/// disallowed. +#[test] +fn teleport_assets_with_destination_asset_reserve_and_destination_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + destination_asset_reserve_and_destination_fee_reserve_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `transfer_assets` with remote asset reserve and destination fee reserve is disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to +/// `USDC_RESERVE_PARA_ID`. Using USDC (destination reserve) as fee. 
+fn remote_asset_reserve_and_destination_fee_reserve_call_disallowed( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC + let usdc_initial_local_amount = 42; + let (usdc_chain, _, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + ALICE, + usdc_initial_local_amount, + true, + ); + + // create non-sufficient foreign asset BLA + let foreign_initial_amount = 142; + let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + ALICE, + foreign_initial_amount, + false, + ); + + // transfer destination is USDC chain (foreign asset BLA needs to go through its separate + // reserve chain) + let dest = usdc_chain; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - destination reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `transfer_assets` with remote asset reserve and destination fee reserve is disallowed. +#[test] +fn transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve"), + })); + remote_asset_reserve_and_destination_fee_reserve_call_disallowed( + XcmPallet::transfer_assets, + expected_result, + ); +} + +/// Test `limited_reserve_transfer_assets` with remote asset reserve and destination fee reserve is +/// disallowed. 
+#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + remote_asset_reserve_and_destination_fee_reserve_call_disallowed( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with remote asset reserve and destination fee reserve is +/// disallowed. +#[test] +fn teleport_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + remote_asset_reserve_and_destination_fee_reserve_call_disallowed( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with local asset reserve and remote fee reserve is disallowed. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +fn local_asset_reserve_and_remote_fee_reserve_call_disallowed( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC + let usdc_initial_local_amount = 142; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + ALICE, + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain != fee reserve location (no teleport trust) + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + let dest_sovereign_account = SovereignAccountOf::convert_location(&dest).unwrap(); + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - remote reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Sovereign account of reserve parachain is unchanged + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(dest_sovereign_account), 0); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + }); +} + +/// Test `transfer_assets` with local asset reserve and remote 
fee reserve is disallowed. +#[test] +fn transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve"), + })); + local_asset_reserve_and_remote_fee_reserve_call_disallowed( + XcmPallet::transfer_assets, + expected_result, + ); +} + +/// Test `limited_reserve_transfer_assets` with local asset reserve and remote fee reserve is +/// disallowed. +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + local_asset_reserve_and_remote_fee_reserve_call_disallowed( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with local asset reserve and remote fee reserve is disallowed. +#[test] +fn teleport_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + local_asset_reserve_and_remote_fee_reserve_call_disallowed( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with destination asset reserve and remote fee reserve is disallowed. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +fn destination_asset_reserve_and_remote_fee_reserve_call_disallowed( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC + let usdc_initial_local_amount = 42; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + ALICE, + usdc_initial_local_amount, + true, + ); + + // create non-sufficient foreign asset BLA + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + ALICE, + foreign_initial_amount, + false, + ); + + // transfer destination is asset reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - remote reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + // Alice native asset untouched + 
assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `transfer_assets` with destination asset reserve and remote fee reserve is disallowed. +#[test] +fn transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve"), + })); + destination_asset_reserve_and_remote_fee_reserve_call_disallowed( + XcmPallet::transfer_assets, + expected_result, + ); +} + +/// Test `limited_reserve_transfer_assets` with destination asset reserve and remote fee reserve is +/// disallowed. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + destination_asset_reserve_and_remote_fee_reserve_call_disallowed( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with destination asset reserve and remote fee reserve is +/// disallowed. +#[test] +fn teleport_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + destination_asset_reserve_and_remote_fee_reserve_call_disallowed( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with remote asset reserve and (same) remote fee reserve. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +/// +/// ```nocompile +/// | chain `A` | chain `C` | chain `B` +/// | Here (source) | USDC_RESERVE_PARA_ID | OTHER_PARA_ID (destination) +/// | | `fees` reserve | +/// | | `assets` reserve | +/// | +/// | 1. 
`A` executes `InitiateReserveWithdraw(both)` dest `C` +/// | -----------------> `C` executes `DepositReserveAsset(both)` dest `B` +/// | --------------------------> `DepositAsset(both)` +/// ``` +/// +/// Verifies `expected_result` +fn remote_asset_reserve_and_remote_fee_reserve_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC + let usdc_initial_local_amount = 142; + let (usdc_chain, usdc_chain_sovereign_account, usdc_id_multilocation) = + set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + ALICE, + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let assets: MultiAssets = vec![(usdc_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0u32; + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_dest_on_reserve = dest.reanchored(&usdc_chain, context).unwrap(); + let fees = assets.get(fee_index as usize).unwrap().clone(); + let (fees_half_1, fees_half_2) = XcmPallet::halve_fees(fees).unwrap(); + let mut expected_assets_on_reserve = assets.clone(); + expected_assets_on_reserve.reanchor(&usdc_chain, context).unwrap(); + let expected_fee_on_reserve = fees_half_1.reanchored(&usdc_chain, context).unwrap(); + let expected_fee_on_dest = fees_half_2.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index, + Unlimited, + ); + assert_eq!(result, expected_result); + if expected_result.is_err() { + // short-circuit here for tests where we expect failure + return + } + + assert!(matches!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(_) }) + )); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(usdc_id_multilocation, ALICE), + usdc_initial_local_amount - SEND_AMOUNT + ); + // Alice's native asset balance is untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Destination account (parachain account) has expected (same) balances + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) + let expected_usdc_issuance = usdc_initial_local_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + // first message sent to reserve chain + usdc_chain, + Xcm(vec![ + WithdrawAsset(expected_assets_on_reserve), + ClearOrigin, + BuyExecution { fees: expected_fee_on_reserve, weight_limit: Unlimited }, + DepositReserveAsset { + 
assets: Wild(AllCounted(1)), + // final destination is `dest` as seen by `reserve` + dest: expected_dest_on_reserve, + // message sent onward to `dest` + xcm: Xcm(vec![ + buy_limited_execution(expected_fee_on_dest, Unlimited), + DepositAsset { assets: AllCounted(1).into(), beneficiary } + ]) + } + ]) + )], + ); + }); +} + +/// Test `transfer_assets` with remote asset reserve and (same) remote fee reserve. +#[test] +fn transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_works() { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_call(XcmPallet::transfer_assets, expected_result); +} + +/// Test `limited_reserve_transfer_assets` with remote asset reserve and (same) remote fee reserve. +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_works() { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with remote asset reserve and (same) remote fee reserve +/// disallowed. +#[test] +fn teleport_assets_with_remote_asset_reserve_and_remote_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + remote_asset_reserve_and_remote_fee_reserve_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with local asset reserve and teleported fee. +/// +/// Transferring native asset (local reserve) to `USDT_PARA_ID`. Using teleport-trusted USDT for +/// fees. +/// +/// Verifies `expected_result` +fn local_asset_reserve_and_teleported_fee_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, ALICE, usdt_initial_local_amount, true); + + // native assets transfer destination is USDT chain (teleport trust only for USDT) + let dest = usdt_chain; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + if expected_result.is_err() { + // short-circuit here for tests where we expect 
failure + return + } + + let weight = BaseXcmWeight::get() * 3; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: origin_location, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + // Alice spent (fees) amount + assert_eq!( + Assets::balance(usdt_id_multilocation, ALICE), + usdt_initial_local_amount - FEE_AMOUNT + ); + // Alice used native asset for transfer + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), SEND_AMOUNT); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify total and active issuance have decreased (teleported) + let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are teleported to destination chain + ReceiveTeleportedAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // transfer is through local-reserve transfer because `assets` (native + // asset) have local reserve + ReserveAssetDeposited(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `transfer_assets` with local asset reserve and teleported fee. +#[test] +fn transfer_assets_with_local_asset_reserve_and_teleported_fee_works() { + let expected_result = Ok(()); + local_asset_reserve_and_teleported_fee_call(XcmPallet::transfer_assets, expected_result); +} + +/// Test `limited_reserve_transfer_assets` with local asset reserve and teleported fee disallowed. +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + local_asset_reserve_and_teleported_fee_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with local asset reserve and teleported fee disallowed. +#[test] +fn teleport_assets_with_local_asset_reserve_and_teleported_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + local_asset_reserve_and_teleported_fee_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with destination asset reserve and teleported fee. +/// +/// Transferring foreign asset (destination reserve) to `FOREIGN_ASSET_RESERVE_PARA_ID`. Using +/// teleport-trusted USDT for fees. 
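+///
+/// Sketch of the remote program the helper expects at the destination (mirrors its `sent_xcm()`
+/// assertion; illustrative only: fees are teleported, while the transferred asset is withdrawn
+/// from the origin's sovereign account on the reserve):
+///
+/// ```nocompile
+/// Xcm(vec![
+///     ReceiveTeleportedAsset(expected_fee.clone().into()),
+///     buy_limited_execution(expected_fee, Unlimited),
+///     WithdrawAsset(expected_asset.into()),
+///     ClearOrigin,
+///     DepositAsset { assets: AllCounted(2).into(), beneficiary },
+/// ])
+/// ```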
+/// +/// Verifies `expected_result` +fn destination_asset_reserve_and_teleported_fee_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT + let usdt_initial_local_amount = 42; + let (_, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, ALICE, usdt_initial_local_amount, true); + + // create non-sufficient foreign asset BLA + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + ALICE, + foreign_initial_amount, + false, + ); + + // transfer destination is asset reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + if expected_result.is_err() { + // short-circuit here for tests where we expect failure + return + } + + let weight = BaseXcmWeight::get() * 4; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: origin_location, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Alice spent USDT for fees + assert_eq!( + Assets::balance(usdt_id_multilocation, ALICE), + usdt_initial_local_amount - FEE_AMOUNT + ); + // Alice transferred BLA + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Verify balances of USDT reserve parachain + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify balances of transferred-asset reserve parachain + assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); + // Verify total and active issuance of USDT have decreased (teleported) + let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + // Verify total and active issuance of foreign BLA asset have decreased (burned on + // reserve-withdraw) + let expected_bla_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are teleported to destination chain + ReceiveTeleportedAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // assets are withdrawn from origin's local SA + WithdrawAsset(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `transfer_assets` with destination asset reserve and teleported fee. +#[test] +fn transfer_assets_with_destination_asset_reserve_and_teleported_fee_works() { + let expected_result = Ok(()); + destination_asset_reserve_and_teleported_fee_call(XcmPallet::transfer_assets, expected_result); +} + +/// Test `limited_reserve_transfer_assets` with destination asset reserve and teleported fee +/// disallowed. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + destination_asset_reserve_and_teleported_fee_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with destination asset reserve and teleported fee disallowed. +#[test] +fn teleport_assets_with_destination_asset_reserve_and_teleported_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + destination_asset_reserve_and_teleported_fee_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with remote asset reserve and teleported fee is disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `USDT_PARA_ID`. +/// Using teleport-trusted USDT for fees. 
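+///
+/// BLA's reserve (`FOREIGN_ASSET_RESERVE_PARA_ID`) is neither the local chain nor the destination
+/// (`USDT_PARA_ID`), i.e. it is a remote reserve, while the fee follows a teleport flow. Mixing
+/// the two is rejected, as sketched below (illustrative only; concrete values are in the function
+/// body):
+///
+/// ```nocompile
+/// let (assets, fee_index, ..) = into_multiassets_checked(
+///     (usdt_id_multilocation, FEE_AMOUNT).into(),           // fee: teleported
+///     (foreign_asset_id_multilocation, SEND_AMOUNT).into(), // asset: remote reserve
+/// );
+/// // => `InvalidAssetUnsupportedReserve` for `transfer_assets`, `TooManyReserves` for
+/// //    `limited_reserve_transfer_assets`, `Filtered` for `limited_teleport_assets`.
+/// ```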
+fn remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, ALICE, usdt_initial_local_amount, true); + + // create non-sufficient foreign asset BLA + let foreign_initial_amount = 142; + let (_, reserve_sovereign_account, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + ALICE, + foreign_initial_amount, + false, + ); + + // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) + let dest = usdt_chain; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // try the transfer + let result = tested_call( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + let expected_usdt_issuance = usdt_initial_local_amount; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `transfer_assets` with remote asset reserve and teleported fee is disallowed. +#[test] +fn transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve"), + })); + remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( + XcmPallet::transfer_assets, + expected_result, + ); +} + +/// Test `limited_reserve_transfer_assets` with remote asset reserve and teleported fee is +/// disallowed. 
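+///
+/// `limited_reserve_transfer_assets` requires fees and transferred assets to resolve to one and
+/// the same reserve-based transfer type; a teleported fee plus a remote-reserve asset therefore
+/// fails with `TooManyReserves` (encoded below as `error: [23, 0, 0, 0]` at pallet index 4). A
+/// variant-based sketch of the same assertion:
+///
+/// ```nocompile
+/// assert_eq!(result, Err(crate::Error::<Test>::TooManyReserves.into()));
+/// ```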
+#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [23, 0, 0, 0], + message: Some("TooManyReserves"), + })); + remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with remote asset reserve and teleported fee is disallowed. +#[test] +fn teleport_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `reserve_transfer_assets` single asset which is teleportable - should fail. +/// +/// Attempting to reserve-transfer teleport-trusted USDT to `USDT_PARA_ID` should fail. +#[test] +fn reserve_transfer_assets_with_teleportable_asset_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, ALICE, usdt_initial_local_amount, true); + + // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) + let dest = usdt_chain; + let assets: MultiAssets = vec![(usdt_id_multilocation, FEE_AMOUNT).into()].into(); + let fee_index = 0; + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let res = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + res, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + // Alice native asset is still same + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Alice USDT balance is still same + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + // No USDT moved to sovereign account of reserve parachain + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify total and active issuance of USDT are still the same + assert_eq!(Assets::total_issuance(usdt_id_multilocation), usdt_initial_local_amount); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), usdt_initial_local_amount); + }); +} + +/// Test `transfer_assets` with teleportable fee that is filtered - should fail. 
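+///
+/// `FilteredTeleportAsset` / `FilteredTeleportLocation` come from this crate's mock and are set
+/// up so the teleport is rejected by the configured filter, hence the extrinsic fails with
+/// `Filtered` and nothing is transferred. Illustrative follow-up checks one could add (not part
+/// of the original test):
+///
+/// ```nocompile
+/// assert_eq!(sent_xcm(), vec![]);                             // nothing was sent
+/// assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); // nothing was spent
+/// ```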
+#[test] +fn transfer_assets_with_filtered_teleported_fee_disallowed() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { + let (assets, fee_index, _, _) = into_multiassets_checked( + // FilteredTeleportAsset for fees - teleportable but filtered + FilteredTeleportAsset::get().into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + let result = XcmPallet::transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(FilteredTeleportLocation::get().into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + }); +} + +/// Test failure to complete execution of local XCM instructions reverts intermediate side-effects. +/// +/// Extrinsic will execute XCM to withdraw & burn reserve-based assets, then fail sending XCM to +/// reserve chain for releasing reserve assets. Assert that the previous instructions (withdraw & +/// burn) effects are reverted. +#[test] +fn intermediary_error_reverts_side_effects() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC + let usdc_initial_local_amount = 142; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + ALICE, + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let assets: MultiAssets = vec![(usdc_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // introduce artificial error in sending outbound XCM + set_send_xcm_artificial_failure(true); + + // do the transfer - extrinsic should completely fail on xcm send failure + assert!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ) + .is_err()); + + // Alice no changes + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Destination account (parachain account) no changes + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC has not changed + assert_eq!(Assets::total_issuance(usdc_id_multilocation), usdc_initial_local_amount); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), usdc_initial_local_amount); + // Verify no XCM program sent + assert_eq!(sent_xcm(), vec![]); + }); +} + +/// Test `tested_call` with teleportable asset and local fee reserve. +/// +/// Transferring USDT to `USDT_PARA_ID` (teleport trust). Using native asset (local reserve) for +/// fees. 
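+///
+/// Sketch of the remote program the helper expects (mirrors its `sent_xcm()` assertion;
+/// illustrative only: the fee travels as a local-reserve deposit while the USDT is teleported):
+///
+/// ```nocompile
+/// Xcm(vec![
+///     ReserveAssetDeposited(expected_fee.clone().into()),
+///     buy_limited_execution(expected_fee, Unlimited),
+///     ReceiveTeleportedAsset(expected_asset.into()),
+///     ClearOrigin,
+///     DepositAsset { assets: AllCounted(2).into(), beneficiary },
+/// ])
+/// ```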
+///
+/// Verifies `expected_result`
+fn teleport_asset_using_local_fee_reserve_call<Call>(
+	tested_call: Call,
+	expected_result: DispatchResult,
+) where
+	Call: FnOnce(
+		OriginFor<Test>,
+		Box<VersionedMultiLocation>,
+		Box<VersionedMultiLocation>,
+		Box<VersionedMultiAssets>,
+		u32,
+		WeightLimit,
+	) -> DispatchResult,
+{
+	let weight = BaseXcmWeight::get() * 3;
+	let balances = vec![(ALICE, INITIAL_BALANCE)];
+	let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into();
+	let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into();
+	new_test_ext_with_balances(balances).execute_with(|| {
+		// create non-sufficient foreign asset USDT
+		let usdt_initial_local_amount = 42;
+		let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) =
+			set_up_foreign_asset(USDT_PARA_ID, None, ALICE, usdt_initial_local_amount, false);
+
+		// transfer destination is the USDT chain (trusted teleporter for USDT)
+		let dest = usdt_chain;
+
+		let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked(
+			// native asset for fee - local reserve
+			(MultiLocation::here(), FEE_AMOUNT).into(),
+			// USDT to transfer - teleported
+			(usdt_id_multilocation, SEND_AMOUNT).into(),
+		);
+
+		// reanchor according to test-case
+		let context = UniversalLocation::get();
+		let expected_fee = fee_asset.reanchored(&dest, context).unwrap();
+		let expected_asset = xfer_asset.reanchored(&dest, context).unwrap();
+
+		// balances checks before
+		assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount);
+		assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE);
+
+		// do the transfer
+		let result = tested_call(
+			RuntimeOrigin::signed(ALICE),
+			Box::new(dest.into()),
+			Box::new(beneficiary.into()),
+			Box::new(assets.into()),
+			fee_index as u32,
+			Unlimited,
+		);
+		assert_eq!(result, expected_result);
+		if expected_result.is_err() {
+			// short-circuit here for tests where we expect failure
+			return
+		}
+
+		let mut last_events = last_events(3).into_iter();
+		assert_eq!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) })
+		);
+
+		// Alice spent (transferred) amount
+		assert_eq!(
+			Assets::balance(usdt_id_multilocation, ALICE),
+			usdt_initial_local_amount - SEND_AMOUNT
+		);
+		// Alice used native asset for fees
+		assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - FEE_AMOUNT);
+		// Destination chain's sovereign account now holds the native fee amount (local reserve)
+		assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), FEE_AMOUNT);
+		assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0);
+		// Verify total and active issuance of USDT have decreased (burned locally on
+		// teleport)
+		let expected_issuance = usdt_initial_local_amount - SEND_AMOUNT;
+		assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_issuance);
+		assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_issuance);
+
+		// Verify sent XCM program
+		assert_eq!(
+			sent_xcm(),
+			vec![(
+				dest,
+				// `fees` are sent through local-reserve transfer because the fee reserve is the
+				// local chain; `assets` are teleported: burned on source, minted at destination
+				// via `ReceiveTeleportedAsset`
+				Xcm(vec![
+					ReserveAssetDeposited(expected_fee.clone().into()),
+					buy_limited_execution(expected_fee, Unlimited),
+					ReceiveTeleportedAsset(expected_asset.into()),
+					ClearOrigin,
+					DepositAsset { assets: AllCounted(2).into(), beneficiary },
+				])
+			)]
+		);
+		assert_eq!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::FeesPaid {
paying: origin_location, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `transfer_assets` with teleportable asset and local fee reserve. +#[test] +fn transfer_assets_with_teleportable_asset_and_local_fee_reserve_works() { + let expected_result = Ok(()); + teleport_asset_using_local_fee_reserve_call(XcmPallet::transfer_assets, expected_result); +} + +/// Test `limited_reserve_transfer_assets` with teleportable asset and local fee reserve disallowed. +#[test] +fn reserve_transfer_assets_with_teleportable_asset_and_local_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + teleport_asset_using_local_fee_reserve_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with teleportable asset and local fee reserve disallowed. +#[test] +fn teleport_assets_with_teleportable_asset_and_local_fee_reserve_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + teleport_asset_using_local_fee_reserve_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} + +/// Test `tested_call` with teleported asset reserve and destination fee. +/// +/// Transferring USDT to `FOREIGN_ASSET_RESERVE_PARA_ID` (teleport trust). Using foreign asset +/// (destination reserve) for fees. +/// +/// Verifies `expected_result` +fn teleported_asset_using_destination_reserve_fee_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let origin_location: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + let beneficiary: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset BLA to be used for fees + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + ALICE, + foreign_initial_amount, + true, + ); + + // create non-sufficient foreign asset USDT + let usdt_initial_local_amount = 42; + let (_, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, ALICE, usdt_initial_local_amount, false); + + // transfer destination is BLA reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // foreign asset BLA used for fees - destination reserve + (foreign_asset_id_multilocation, FEE_AMOUNT).into(), + // USDT to transfer - teleported + (usdt_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = 
tested_call(
+			RuntimeOrigin::signed(ALICE),
+			Box::new(dest.into()),
+			Box::new(beneficiary.into()),
+			Box::new(assets.into()),
+			fee_index as u32,
+			Unlimited,
+		);
+		assert_eq!(result, expected_result);
+		if expected_result.is_err() {
+			// short-circuit here for tests where we expect failure
+			return
+		}
+
+		let weight = BaseXcmWeight::get() * 4;
+		let mut last_events = last_events(3).into_iter();
+		assert_eq!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) })
+		);
+		assert_eq!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::FeesPaid {
+				paying: origin_location,
+				fees: MultiAssets::new(),
+			})
+		);
+		assert!(matches!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::Sent { .. })
+		));
+		// Alice native asset untouched
+		assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE);
+		// Alice's USDT decreased by the transferred (teleported) amount
+		assert_eq!(
+			Assets::balance(usdt_id_multilocation, ALICE),
+			usdt_initial_local_amount - SEND_AMOUNT
+		);
+		// Alice spent foreign BLA on fees
+		assert_eq!(
+			Assets::balance(foreign_asset_id_multilocation, ALICE),
+			foreign_initial_amount - FEE_AMOUNT
+		);
+		// Verify balances of USDT reserve parachain
+		assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0);
+		assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0);
+		// Verify balances of transferred-asset reserve parachain
+		assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0);
+		assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0);
+		// Verify total and active issuance of USDT have decreased (teleported)
+		let expected_usdt_issuance = usdt_initial_local_amount - SEND_AMOUNT;
+		assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance);
+		assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance);
+		// Verify total and active issuance of foreign BLA asset have decreased (burned on
+		// reserve-withdraw)
+		let expected_bla_issuance = foreign_initial_amount - FEE_AMOUNT;
+		assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance);
+		assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance);
+
+		// Verify sent XCM program
+		assert_eq!(
+			sent_xcm(),
+			vec![(
+				dest,
+				Xcm(vec![
+					// fees (BLA) are reserve-withdrawn here, from the origin chain's sovereign account
+					WithdrawAsset(expected_fee.clone().into()),
+					buy_limited_execution(expected_fee, Unlimited),
+					// assets are teleported to destination chain
+					ReceiveTeleportedAsset(expected_asset.into()),
+					ClearOrigin,
+					DepositAsset { assets: AllCounted(2).into(), beneficiary },
+				])
+			)]
+		);
+	});
+}
+
+/// Test `transfer_assets` with teleported asset reserve and destination fee.
+#[test]
+fn transfer_teleported_assets_using_destination_reserve_fee_works() {
+	let expected_result = Ok(());
+	teleported_asset_using_destination_reserve_fee_call(
+		XcmPallet::transfer_assets,
+		expected_result,
+	);
+}
+
+/// Test `limited_reserve_transfer_assets` with teleported asset reserve and destination fee
+/// disallowed.
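// All of the `*_call` helpers in this module share one shape: the extrinsic under test is
// handed in as a closure, so `transfer_assets` and the two legacy extrinsics can run the same
// scenario. A stripped-down sketch of that pattern, with the parameter types matching the
// call sites above (`run_transfer_scenario` itself is illustrative, not part of the patch):
fn run_transfer_scenario<Call>(tested_call: Call, expected: DispatchResult)
where
	Call: FnOnce(
		OriginFor<Test>,
		Box<VersionedMultiLocation>,
		Box<VersionedMultiLocation>,
		Box<VersionedMultiAssets>,
		u32,
		WeightLimit,
	) -> DispatchResult,
{
	// ...set up balances and foreign assets, then:
	// assert_eq!(tested_call(origin, dest, beneficiary, assets, fee_index, Unlimited), expected);
	let _ = (tested_call, expected);
}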
+#[test] +fn reserve_transfer_teleported_assets_using_destination_reserve_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + teleported_asset_using_destination_reserve_fee_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} + +/// Test `limited_teleport_assets` with teleported asset reserve and destination fee disallowed. +#[test] +fn teleport_assets_using_destination_reserve_fee_disallowed() { + let expected_result = Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered"), + })); + teleported_asset_using_destination_reserve_fee_call( + XcmPallet::limited_teleport_assets, + expected_result, + ); +} diff --git a/polkadot/xcm/pallet-xcm/src/tests.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs similarity index 69% rename from polkadot/xcm/pallet-xcm/src/tests.rs rename to polkadot/xcm/pallet-xcm/src/tests/mod.rs index d267eece2c0478a404af2dc729f8d7b083dc770d..e7a6fdc9dcede69c7ed520f396c2d53225706a85 100644 --- a/polkadot/xcm/pallet-xcm/src/tests.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +#![cfg(test)] + +pub(crate) mod assets_transfer; + use crate::{ mock::*, AssetTraps, CurrentMigration, Error, LatestVersionedMultiLocation, Queries, QueryStatus, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, @@ -35,15 +39,15 @@ use xcm_executor::{ const ALICE: AccountId = AccountId::new([0u8; 32]); const BOB: AccountId = AccountId::new([1u8; 32]); -const PARA_ID: u32 = 2000; const INITIAL_BALANCE: u128 = 100; const SEND_AMOUNT: u128 = 10; +const FEE_AMOUNT: u128 = 2; #[test] fn report_outcome_notify_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); let mut message = @@ -56,7 +60,7 @@ fn report_outcome_notify_works() { new_test_ext_with_balances(balances).execute_with(|| { XcmPallet::report_outcome_notify( &mut message, - Parachain(PARA_ID).into_location(), + Parachain(OTHER_PARA_ID).into_location(), notify, 100, ) @@ -74,8 +78,8 @@ fn report_outcome_notify_works() { ); let querier: MultiLocation = Here.into(); let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(PARA_ID)).into(), - maybe_notify: Some((4, 2)), + responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), + maybe_notify: Some((5, 2)), timeout: 100, maybe_match_querier: Some(querier.into()), }; @@ -89,7 +93,7 @@ fn report_outcome_notify_works() { }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( - Parachain(PARA_ID), + Parachain(OTHER_PARA_ID), message, hash, Weight::from_parts(1_000_000_000, 1_000_000_000), @@ -99,13 +103,13 @@ fn report_outcome_notify_works() { last_events(2), vec![ RuntimeEvent::TestNotifier(pallet_test_notifier::Event::ResponseReceived( - Parachain(PARA_ID).into(), + Parachain(OTHER_PARA_ID).into(), 0, Response::ExecutionResult(None), )), RuntimeEvent::XcmPallet(crate::Event::Notified { query_id: 0, - pallet_index: 4, + pallet_index: 5, call_index: 2 }), ] @@ -118,13 +122,14 @@ fn report_outcome_notify_works() { fn report_outcome_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - 
(ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); let mut message = Xcm(vec![TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }]); new_test_ext_with_balances(balances).execute_with(|| { - XcmPallet::report_outcome(&mut message, Parachain(PARA_ID).into_location(), 100).unwrap(); + XcmPallet::report_outcome(&mut message, Parachain(OTHER_PARA_ID).into_location(), 100) + .unwrap(); assert_eq!( message, Xcm(vec![ @@ -138,7 +143,7 @@ fn report_outcome_works() { ); let querier: MultiLocation = Here.into(); let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(PARA_ID)).into(), + responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), maybe_notify: None, timeout: 100, maybe_match_querier: Some(querier.into()), @@ -153,7 +158,7 @@ fn report_outcome_works() { }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( - Parachain(PARA_ID), + Parachain(OTHER_PARA_ID), message, hash, Weight::from_parts(1_000_000_000, 1_000_000_000), @@ -177,7 +182,7 @@ fn report_outcome_works() { fn custom_querier_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let querier: MultiLocation = @@ -281,7 +286,7 @@ fn custom_querier_works() { fn send_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); @@ -325,7 +330,7 @@ fn send_works() { fn send_fails_when_xcm_router_blocks() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: MultiLocation = @@ -346,344 +351,6 @@ fn send_fails_when_xcm_router_blocks() { }); } -/// Test `teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
-#[test] -fn teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn limited_teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Limited(Weight::from_parts(5000, 5000)), - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Here, SEND_AMOUNT), Weight::from_parts(5000, 5000)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_teleport_assets` with unlimited weight -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
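// `buy_execution` used in the expected programs above and below is a small helper from the
// test mock; a sketch of what it is assumed to expand to (the concrete definition lives in
// the mock, not in this hunk):
fn buy_execution_sketch<C>(fees: impl Into<MultiAsset>) -> Instruction<C> {
	BuyExecution { fees: fees.into(), weight_limit: Unlimited }
}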
-#[test] -fn unlimited_teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Unlimited, - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `reserve_transfer_assets` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. -#[test] -fn reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `reserve_transfer_assets_with_paid_router_works` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. -/// Verifies that XCM router fees (`SendXcm::validate` -> `MultiAssets`) are withdrawn from correct -/// user account and deposited to a correct target account (`XcmFeesTargetAccount`). 
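// The "paid router" referenced above charges a delivery fee by reporting it from
// `SendXcm::validate`; the runtime then withdraws that amount from the sender and credits
// `XcmFeesTargetAccount`. A bare-bones sketch of such a router (the flat fee value and the
// wrapper name are placeholders; the mock's real router is defined in `mock.rs`):
struct FixedFeeRouter<Inner>(core::marker::PhantomData<Inner>);
impl<Inner: SendXcm> SendXcm for FixedFeeRouter<Inner> {
	type Ticket = Inner::Ticket;
	fn validate(
		dest: &mut Option<MultiLocation>,
		msg: &mut Option<Xcm<()>>,
	) -> SendResult<Self::Ticket> {
		// Report the inner router's fees plus a hypothetical flat delivery fee.
		let (ticket, mut fees) = Inner::validate(dest, msg)?;
		fees.push((Here, 6u128).into());
		Ok((ticket, fees))
	}
	fn deliver(ticket: Self::Ticket) -> Result<XcmHash, SendError> {
		Inner::deliver(ticket)
	}
}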
-#[test] -fn reserve_transfer_assets_with_paid_router_works() { - let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT); - let paid_para_id = Para3000::get(); - let balances = vec![ - (user_account.clone(), INITIAL_BALANCE), - (ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE), - (XcmFeesTargetAccount::get(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let xcm_router_fee_amount = Para3000PaymentAmount::get(); - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = - Junction::AccountId32 { network: None, id: user_account.clone().into() }.into(); - assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( - RuntimeOrigin::signed(user_account.clone()), - Box::new(Parachain(paid_para_id).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - // check event - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - - // XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount - assert_eq!( - Balances::free_balance(user_account), - INITIAL_BALANCE - SEND_AMOUNT - xcm_router_fee_amount - ); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(paid_para_id).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - // XcmFeesTargetAccount where should lend xcm_router_fee_amount - assert_eq!( - Balances::free_balance(XcmFeesTargetAccount::get()), - INITIAL_BALANCE + xcm_router_fee_amount - ); - assert_eq!( - sent_xcm(), - vec![( - Parachain(paid_para_id).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_reserve_transfer_assets` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn limited_reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Limited(Weight::from_parts(5000, 5000)), - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Parent, SEND_AMOUNT), Weight::from_parts(5000, 5000)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_reserve_transfer_assets` with unlimited weight purchasing -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn unlimited_reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Unlimited, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - /// Test local execution of XCM /// /// Asserts that the sender's balance is decreased and the beneficiary's balance @@ -692,7 +359,7 @@ fn unlimited_reserve_transfer_assets_works() { fn execute_withdraw_to_deposit_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let weight = BaseXcmWeight::get() * 3; @@ -778,7 +445,7 @@ fn trapped_assets_can_be_claimed() { assert_eq!(AssetTraps::::iter().collect::>(), vec![]); let weight = BaseXcmWeight::get() * 3; - assert_ok!(XcmPallet::execute( + assert_ok!(>::execute( RuntimeOrigin::signed(ALICE), Box::new(VersionedXcm::from(Xcm(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, @@ -792,6 +459,52 @@ fn trapped_assets_can_be_claimed() { }); } +/// Test failure to complete execution reverts intermediate side-effects. +/// +/// XCM program will withdraw and deposit some assets, then fail execution of a further withdraw. +/// Assert that the previous instructions effects are reverted. 
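// For readability, the program exercised in the next test, re-expressed with the
// `Xcm::builder_unsafe()` API added later in this patch (a sketch only; the test constructs
// the instructions directly and uses the runtime's real balances):
fn incomplete_program_sketch() -> Xcm<()> {
	let assets: MultiAssets = (Here, 100u128).into();
	let beneficiary: MultiLocation = AccountId32 { network: None, id: [1u8; 32] }.into();
	Xcm::builder_unsafe()
		.withdraw_asset(assets.clone())
		.buy_execution(assets.inner()[0].clone(), Unlimited)
		.deposit_asset(assets.clone().into(), beneficiary)
		// the second withdraw fails (funds were already deposited), rolling everything back
		.withdraw_asset(assets)
		.build()
}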
+#[test] +fn incomplete_execute_reverts_side_effects() { + let balances = vec![(ALICE, INITIAL_BALANCE), (BOB, INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 4; + let dest: MultiLocation = Junction::AccountId32 { network: None, id: BOB.into() }.into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + let amount_to_send = INITIAL_BALANCE - ExistentialDeposit::get(); + let assets: MultiAssets = (Here, amount_to_send).into(); + let result = XcmPallet::execute( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedXcm::from(Xcm(vec![ + // Withdraw + BuyExec + Deposit should work + WithdrawAsset(assets.clone()), + buy_execution(assets.inner()[0].clone()), + DepositAsset { assets: assets.clone().into(), beneficiary: dest }, + // Withdrawing once more will fail because of InsufficientBalance, and we expect to + // revert the effects of the above instructions as well + WithdrawAsset(assets), + ]))), + weight, + ); + // all effects are reverted and balances unchanged for either sender or receiver + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_eq!(Balances::total_balance(&BOB), INITIAL_BALANCE); + assert_eq!( + result, + Err(sp_runtime::DispatchErrorWithPostInfo { + post_info: frame_support::dispatch::PostDispatchInfo { + actual_weight: None, + pays_fee: frame_support::dispatch::Pays::Yes, + }, + error: sp_runtime::DispatchError::Module(sp_runtime::ModuleError { + index: 4, + error: [24, 0, 0, 0,], + message: Some("LocalExecutionIncomplete") + }) + }) + ); + }); +} + #[test] fn fake_latest_versioned_multilocation_works() { use codec::Encode; @@ -1061,12 +774,13 @@ fn subscription_side_upgrades_work_without_notify() { #[test] fn subscriber_side_subscription_works() { - new_test_ext_with_balances(vec![]).execute_with(|| { + new_test_ext_with_balances_and_xcm_version(vec![], Some(XCM_VERSION)).execute_with(|| { let remote: MultiLocation = Parachain(1000).into(); assert_ok!(XcmPallet::force_subscribe_version_notify( RuntimeOrigin::root(), Box::new(remote.into()), )); + assert_eq!(XcmPallet::get_version_for(&remote), None); take_sent_xcm(); // Assume subscription target is working ok. @@ -1085,6 +799,7 @@ fn subscriber_side_subscription_works() { let r = XcmExecutor::::execute_xcm(remote, message, hash, weight); assert_eq!(r, Outcome::Complete(weight)); assert_eq!(take_sent_xcm(), vec![]); + assert_eq!(XcmPallet::get_version_for(&remote), Some(1)); // This message cannot be sent to a v2 remote. let v2_msg = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); @@ -1102,6 +817,8 @@ fn subscriber_side_subscription_works() { let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm(remote, message, hash, weight); assert_eq!(r, Outcome::Complete(weight)); + assert_eq!(take_sent_xcm(), vec![]); + assert_eq!(XcmPallet::get_version_for(&remote), Some(2)); // This message can now be sent to remote as it's v2. assert_eq!( @@ -1114,7 +831,7 @@ fn subscriber_side_subscription_works() { /// We should auto-subscribe when we don't know the remote's version. 
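// Version subscription is driven by a single instruction pair: the local chain sends
// `SubscribeVersion` and the remote answers with a `QueryResponse` carrying
// `Response::Version`. A sketch of the outbound message shape (the query id and weight here
// are placeholders, not the values the pallet actually allocates):
fn subscribe_version_sketch() -> Xcm<()> {
	Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }])
}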
#[test] fn auto_subscription_works() { - new_test_ext_with_balances(vec![]).execute_with(|| { + new_test_ext_with_balances_and_xcm_version(vec![], None).execute_with(|| { let remote_v2: MultiLocation = Parachain(1000).into(); let remote_v3: MultiLocation = Parachain(1001).into(); @@ -1282,3 +999,68 @@ fn subscription_side_upgrades_work_with_multistage_notify() { ); }); } + +#[test] +fn get_and_wrap_version_works() { + new_test_ext_with_balances_and_xcm_version(vec![], None).execute_with(|| { + let remote_a: MultiLocation = Parachain(1000).into(); + let remote_b: MultiLocation = Parachain(1001).into(); + let remote_c: MultiLocation = Parachain(1002).into(); + + // no `safe_xcm_version` version at `GenesisConfig` + assert_eq!(XcmPallet::get_version_for(&remote_a), None); + assert_eq!(XcmPallet::get_version_for(&remote_b), None); + assert_eq!(XcmPallet::get_version_for(&remote_c), None); + assert_eq!(VersionDiscoveryQueue::::get().into_inner(), vec![]); + + // set default XCM version (a.k.a. `safe_xcm_version`) + assert_ok!(XcmPallet::force_default_xcm_version(RuntimeOrigin::root(), Some(1))); + assert_eq!(XcmPallet::get_version_for(&remote_a), None); + assert_eq!(XcmPallet::get_version_for(&remote_b), None); + assert_eq!(XcmPallet::get_version_for(&remote_c), None); + assert_eq!(VersionDiscoveryQueue::::get().into_inner(), vec![]); + + // set XCM version only for `remote_a` + assert_ok!(XcmPallet::force_xcm_version( + RuntimeOrigin::root(), + Box::new(remote_a), + XCM_VERSION + )); + assert_eq!(XcmPallet::get_version_for(&remote_a), Some(XCM_VERSION)); + assert_eq!(XcmPallet::get_version_for(&remote_b), None); + assert_eq!(XcmPallet::get_version_for(&remote_c), None); + assert_eq!(VersionDiscoveryQueue::::get().into_inner(), vec![]); + + let xcm = Xcm::<()>::default(); + + // wrap version - works because remote_a has `XCM_VERSION` + assert_eq!( + XcmPallet::wrap_version(&remote_a, xcm.clone()), + Ok(VersionedXcm::from(xcm.clone())) + ); + // does not work because remote_b has unknown version and default is set to 1, and + // `XCM_VERSION` cannot be wrapped to the `1` + assert_eq!(XcmPallet::wrap_version(&remote_b, xcm.clone()), Err(())); + assert_eq!(VersionDiscoveryQueue::::get().into_inner(), vec![(remote_b.into(), 1)]); + + // set default to the `XCM_VERSION` + assert_ok!(XcmPallet::force_default_xcm_version(RuntimeOrigin::root(), Some(XCM_VERSION))); + assert_eq!(XcmPallet::get_version_for(&remote_b), None); + assert_eq!(XcmPallet::get_version_for(&remote_c), None); + + // now works, because default is `XCM_VERSION` + assert_eq!( + XcmPallet::wrap_version(&remote_b, xcm.clone()), + Ok(VersionedXcm::from(xcm.clone())) + ); + assert_eq!(VersionDiscoveryQueue::::get().into_inner(), vec![(remote_b.into(), 2)]); + + // change remote_c to `1` + assert_ok!(XcmPallet::force_xcm_version(RuntimeOrigin::root(), Box::new(remote_c), 1)); + + // does not work because remote_c has `1` and default is `XCM_VERSION` which cannot be + // wrapped to the `1` + assert_eq!(XcmPallet::wrap_version(&remote_c, xcm.clone()), Err(())); + assert_eq!(VersionDiscoveryQueue::::get().into_inner(), vec![(remote_b.into(), 2)]); + }) +} diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 33c2a94be0e46290ac8eb90b58cd8c6e8c17fae5..c5f837a001db860f875a181e061cd65eb8686ebc 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -7,14 +7,18 @@ license.workspace = true version = "1.0.0" publish = true +[lints] +workspace = true + [lib] proc-macro = true 
[dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.38" +syn = "2.0.41" Inflector = "0.11.4" [dev-dependencies] trybuild = { version = "1.0.74", features = ["diff"] } +xcm = { package = "staging-xcm", path = ".." } diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index ebad54e972b6b1a77861aa8819ebf7425a2a0e11..e58c51103497a23a0b97d7274deba1130bfaf28a 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -17,56 +17,83 @@ //! Derive macro for creating XCMs with a builder pattern use inflector::Inflector; -use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; use syn::{ - parse_macro_input, Data, DeriveInput, Error, Expr, ExprLit, Fields, Lit, Meta, MetaNameValue, + Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, Ident, Lit, Meta, MetaNameValue, + Result, Variant, }; -pub fn derive(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - let builder_impl = match &input.data { - Data::Enum(data_enum) => generate_methods_for_enum(input.ident, data_enum), - _ => - return Error::new_spanned(&input, "Expected the `Instruction` enum") - .to_compile_error() - .into(), +pub fn derive(input: DeriveInput) -> Result { + let data_enum = match &input.data { + Data::Enum(data_enum) => data_enum, + _ => return Err(Error::new_spanned(&input, "Expected the `Instruction` enum")), }; + let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum); + let builder_impl = generate_builder_impl(&input.ident, data_enum)?; + let builder_unpaid_impl = generate_builder_unpaid_impl(&input.ident, data_enum)?; let output = quote! { - pub struct XcmBuilder(Vec>); + /// A trait for types that track state inside the XcmBuilder + pub trait XcmBuilderState {} + + /// Access to all the instructions + pub enum AnythingGoes {} + /// You need to pay for execution + pub enum PaymentRequired {} + /// The holding register was loaded, now to buy execution + pub enum LoadedHolding {} + /// Need to explicitly state it won't pay for fees + pub enum ExplicitUnpaidRequired {} + + impl XcmBuilderState for AnythingGoes {} + impl XcmBuilderState for PaymentRequired {} + impl XcmBuilderState for LoadedHolding {} + impl XcmBuilderState for ExplicitUnpaidRequired {} + + /// Type used to build XCM programs + pub struct XcmBuilder { + pub(crate) instructions: Vec>, + pub state: core::marker::PhantomData, + } + impl Xcm { - pub fn builder() -> XcmBuilder { - XcmBuilder::(Vec::new()) + pub fn builder() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } + } + pub fn builder_unpaid() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } + } + pub fn builder_unsafe() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } } } #builder_impl + #builder_unpaid_impl + #builder_raw_impl }; - output.into() + Ok(output) } -fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> TokenStream2 { +fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> TokenStream2 { let methods = data_enum.variants.iter().map(|variant| { let variant_name = &variant.ident; let method_name_string = &variant_name.to_string().to_snake_case(); - let method_name = syn::Ident::new(&method_name_string, variant_name.span()); - let docs: Vec<_> = variant - .attrs - 
.iter() - .filter_map(|attr| match &attr.meta { - Meta::NameValue(MetaNameValue { - value: Expr::Lit(ExprLit { lit: Lit::Str(literal), .. }), - .. - }) if attr.path().is_ident("doc") => Some(literal.value()), - _ => None, - }) - .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) - .collect(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); let method = match &variant.fields { Fields::Unit => { quote! { pub fn #method_name(mut self) -> Self { - self.0.push(#name::::#variant_name); + self.instructions.push(#name::::#variant_name); self } } @@ -81,7 +108,7 @@ fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> Tok let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); quote! { pub fn #method_name(mut self, #(#arg_names: #arg_types),*) -> Self { - self.0.push(#name::::#variant_name(#(#arg_names),*)); + self.instructions.push(#name::::#variant_name(#(#arg_names),*)); self } } @@ -91,7 +118,7 @@ fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> Tok let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); quote! { pub fn #method_name(mut self, #(#arg_names: #arg_types),*) -> Self { - self.0.push(#name::::#variant_name { #(#arg_names),* }); + self.instructions.push(#name::::#variant_name { #(#arg_names),* }); self } } @@ -103,13 +130,207 @@ fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> Tok } }); let output = quote! { - impl XcmBuilder { + impl XcmBuilder { #(#methods)* pub fn build(self) -> Xcm { - Xcm(self.0) + Xcm(self.instructions) } } }; output } + +fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result { + // We first require an instruction that load the holding register + let load_holding_variants = data_enum + .variants + .iter() + .map(|variant| { + let maybe_builder_attr = variant.attrs.iter().find(|attr| match attr.meta { + Meta::List(ref list) => list.path.is_ident("builder"), + _ => false, + }); + let builder_attr = match maybe_builder_attr { + Some(builder) => builder.clone(), + None => return Ok(None), /* It's not going to be an instruction that loads the + * holding register */ + }; + let Meta::List(ref list) = builder_attr.meta else { unreachable!("We checked before") }; + let inner_ident: Ident = syn::parse2(list.tokens.clone()).map_err(|_| { + Error::new_spanned(&builder_attr, "Expected `builder(loads_holding)`") + })?; + let ident_to_match: Ident = syn::parse_quote!(loads_holding); + if inner_ident == ident_to_match { + Ok(Some(variant)) + } else { + Err(Error::new_spanned(&builder_attr, "Expected `builder(loads_holding)`")) + } + }) + .collect::>>()?; + + let load_holding_methods = load_holding_variants + .into_iter() + .flatten() + .map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + quote! 
{ + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name(#(#arg_names),*)); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + Fields::Named(fields) => { + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + _ => + return Err(Error::new_spanned( + variant, + "Instructions that load the holding register should take operands", + )), + }; + Ok(method) + }) + .collect::, _>>()?; + + let first_impl = quote! { + impl XcmBuilder { + #(#load_holding_methods)* + } + }; + + // Then we require fees to be paid + let buy_execution_method = data_enum + .variants + .iter() + .find(|variant| variant.ident == "BuyExecution") + .map_or( + Err(Error::new_spanned(&data_enum.variants, "No BuyExecution instruction")), + |variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let fields = match &variant.fields { + Fields::Named(fields) => { + let arg_names: Vec<_> = + fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = + fields.named.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + _ => + return Err(Error::new_spanned( + variant, + "BuyExecution should have named fields", + )), + }; + Ok(fields) + }, + )?; + + let second_impl = quote! { + impl XcmBuilder { + #buy_execution_method + } + }; + + let output = quote! { + #first_impl + #second_impl + }; + + Ok(output) +} + +fn generate_builder_unpaid_impl(name: &Ident, data_enum: &DataEnum) -> Result { + let unpaid_execution_variant = data_enum + .variants + .iter() + .find(|variant| variant.ident == "UnpaidExecution") + .ok_or(Error::new_spanned(&data_enum.variants, "No UnpaidExecution instruction"))?; + let unpaid_execution_ident = &unpaid_execution_variant.ident; + let unpaid_execution_method_name = Ident::new( + &unpaid_execution_ident.to_string().to_snake_case(), + unpaid_execution_ident.span(), + ); + let docs = get_doc_comments(unpaid_execution_variant); + let fields = match &unpaid_execution_variant.fields { + Fields::Named(fields) => fields, + _ => + return Err(Error::new_spanned( + unpaid_execution_variant, + "UnpaidExecution should have named fields", + )), + }; + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + Ok(quote! 
{ + impl XcmBuilder { + #(#docs)* + pub fn #unpaid_execution_method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#unpaid_execution_ident { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }) +} + +fn get_doc_comments(variant: &Variant) -> Vec { + variant + .attrs + .iter() + .filter_map(|attr| match &attr.meta { + Meta::NameValue(MetaNameValue { + value: Expr::Lit(ExprLit { lit: Lit::Str(literal), .. }), + .. + }) if attr.path().is_ident("doc") => Some(literal.value()), + _ => None, + }) + .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) + .collect() +} diff --git a/polkadot/xcm/procedural/src/lib.rs b/polkadot/xcm/procedural/src/lib.rs index 83cc6cdf98ff43aa2422fa146c2deae39bb2320a..7600e817d0e662e42ef560c291de6ac192c7ca53 100644 --- a/polkadot/xcm/procedural/src/lib.rs +++ b/polkadot/xcm/procedural/src/lib.rs @@ -17,6 +17,7 @@ //! Procedural macros used in XCM. use proc_macro::TokenStream; +use syn::{parse_macro_input, DeriveInput}; mod builder_pattern; mod v2; @@ -56,7 +57,10 @@ pub fn impl_conversion_functions_for_junctions_v3(input: TokenStream) -> TokenSt /// .buy_execution(fees, weight_limit) /// .deposit_asset(assets, beneficiary) /// .build(); -#[proc_macro_derive(Builder)] +#[proc_macro_derive(Builder, attributes(builder))] pub fn derive_builder(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); builder_pattern::derive(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() } diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs new file mode 100644 index 0000000000000000000000000000000000000000..eab9d67121f610a22166d9bd0d556f79e8770d1c --- /dev/null +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -0,0 +1,81 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test the methods generated by the Builder derive macro. +//! Tests directly on the actual Xcm struct and Instruction enum. 
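// The typestate trick used by the derive, reduced to a hand-written sketch (the names below
// are stand-ins, not the generated API): only the paid state exposes `build()`, so a program
// that never buys execution simply cannot be finished.
mod typestate_sketch {
	use core::marker::PhantomData;

	pub enum NeedsPayment {}
	pub enum Paid {}

	pub struct Builder<S> {
		steps: Vec<&'static str>,
		_state: PhantomData<S>,
	}

	impl Builder<NeedsPayment> {
		pub fn new() -> Self {
			Builder { steps: Vec::new(), _state: PhantomData }
		}
		// The only way out of `NeedsPayment` is to pay for execution.
		pub fn buy_execution(mut self) -> Builder<Paid> {
			self.steps.push("BuyExecution");
			Builder { steps: self.steps, _state: PhantomData }
		}
	}

	impl Builder<Paid> {
		pub fn deposit_asset(mut self) -> Self {
			self.steps.push("DepositAsset");
			self
		}
		pub fn build(self) -> Vec<&'static str> {
			self.steps
		}
	}
}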
+ +use xcm::latest::prelude::*; + +#[test] +fn builder_pattern_works() { + let asset: MultiAsset = (Here, 100u128).into(); + let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); + let message: Xcm<()> = Xcm::builder() + .receive_teleported_asset(asset.clone().into()) + .buy_execution(asset.clone(), Unlimited) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); + assert_eq!( + message, + Xcm(vec![ + ReceiveTeleportedAsset(asset.clone().into()), + BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, + DepositAsset { assets: asset.into(), beneficiary }, + ]) + ); +} + +#[test] +fn default_builder_requires_buy_execution() { + let asset: MultiAsset = (Here, 100u128).into(); + let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); + // This is invalid, since it doesn't pay for fees. + // This is enforced by the runtime, because the build() method doesn't exist + // on the resulting type. + // let message: Xcm<()> = Xcm::builder() + // .withdraw_asset(asset.clone().into()) + // .deposit_asset(asset.into(), beneficiary) + // .build(); + + // To be able to do that, we need to use the explicitly unpaid variant + let message: Xcm<()> = Xcm::builder_unpaid() + .unpaid_execution(Unlimited, None) + .withdraw_asset(asset.clone().into()) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); // This works + assert_eq!( + message, + Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + WithdrawAsset(asset.clone().into()), + DepositAsset { assets: asset.clone().into(), beneficiary }, + ]) + ); + + // The other option doesn't have any limits whatsoever, so it should + // only be used when you really know what you're doing. + let message: Xcm<()> = Xcm::builder_unsafe() + .withdraw_asset(asset.clone().into()) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); + assert_eq!( + message, + Xcm(vec![ + WithdrawAsset(asset.clone().into()), + DepositAsset { assets: asset.clone().into(), beneficiary }, + ]) + ); +} diff --git a/polkadot/xcm/procedural/tests/ui.rs b/polkadot/xcm/procedural/tests/ui.rs index a6ec35d0862af40ce9a6f9371adad7972cbb605a..b3469b520eb77cbe1329fc5428932e3467757991 100644 --- a/polkadot/xcm/procedural/tests/ui.rs +++ b/polkadot/xcm/procedural/tests/ui.rs @@ -21,12 +21,12 @@ fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/*.rs"); + t.compile_fail("tests/ui/**/*.rs"); } diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs new file mode 100644 index 0000000000000000000000000000000000000000..3a103f3ddc459dcb59a07a40931242aa1eab3c1c --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when using a badly formatted attribute. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(funds_holding = 2)] + WithdrawAsset(u128), + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr new file mode 100644 index 0000000000000000000000000000000000000000..978faf2e868d89ea47276bb5a7fed40c529e6336 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr @@ -0,0 +1,5 @@ +error: Expected `builder(loads_holding)` + --> tests/ui/builder_pattern/badly_formatted_attribute.rs:25:5 + | +25 | #[builder(funds_holding = 2)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs new file mode 100644 index 0000000000000000000000000000000000000000..dc5c679a96e72b92c0095e246ef487132dad4f69 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when the `BuyExecution` instruction doesn't take named fields. 
+ +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution(u128), + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr new file mode 100644 index 0000000000000000000000000000000000000000..dc8246770ba3e10ed0df45a714dd2d9eb337cc5e --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr @@ -0,0 +1,5 @@ +error: BuyExecution should have named fields + --> tests/ui/builder_pattern/buy_execution_named_fields.rs:25:5 + | +25 | BuyExecution(u128), + | ^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs new file mode 100644 index 0000000000000000000000000000000000000000..070f0be6bacc995aa38a341fa9d242d395c4e045 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when an instruction that loads the holding register doesn't take operands. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(loads_holding)] + WithdrawAsset, + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0358a35ad3dd7bb48ddd51b69e4f395642d44edf --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr @@ -0,0 +1,6 @@ +error: Instructions that load the holding register should take operands + --> tests/ui/builder_pattern/loads_holding_no_operands.rs:25:5 + | +25 | / #[builder(loads_holding)] +26 | | WithdrawAsset, + | |_________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs new file mode 100644 index 0000000000000000000000000000000000000000..1ed8dd38cbad5b32bb9ce1a38470fb579b3cb5c2 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when there's no `BuyExecution` instruction. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr new file mode 100644 index 0000000000000000000000000000000000000000..d8798c8223f18e74e8ec6f409923da482547c9ec --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr @@ -0,0 +1,6 @@ +error: No BuyExecution instruction + --> tests/ui/builder_pattern/no_buy_execution.rs:25:5 + | +25 | / UnpaidExecution { weight_limit: (u32, u32) }, +26 | | Transact { call: Call }, + | |____________________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs new file mode 100644 index 0000000000000000000000000000000000000000..d542102d2d35796736eb682377bc8015096186af --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when there's no `UnpaidExecution` instruction. 
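For contrast with the failing fixtures in this directory, the shape the `Builder` derive accepts can be summarised in one minimal sketch (using the same hypothetical `Call` parameter as the fixtures): holding-loading variants carry `#[builder(loads_holding)]` and take operands, and `BuyExecution`/`UnpaidExecution` are present with named fields.

```rust
use xcm_procedural::Builder;

struct Xcm<Call>(pub Vec<Instruction<Call>>);

// Accepted by the derive: the holding-loading variant is annotated and takes an
// operand, while `BuyExecution` and `UnpaidExecution` both exist with named fields.
#[derive(Builder)]
enum Instruction<Call> {
	#[builder(loads_holding)]
	WithdrawAsset(u128),
	BuyExecution { fees: u128 },
	UnpaidExecution { weight_limit: (u32, u32) },
	Transact { call: Call },
}

fn main() {}
```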
+ +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution { fees: u128 }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr new file mode 100644 index 0000000000000000000000000000000000000000..c8c0748da7220dd49fdcc196a4fa3ab353564e8c --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr @@ -0,0 +1,6 @@ +error: No UnpaidExecution instruction + --> tests/ui/builder_pattern/no_unpaid_execution.rs:25:5 + | +25 | / BuyExecution { fees: u128 }, +26 | | Transact { call: Call }, + | |____________________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs new file mode 100644 index 0000000000000000000000000000000000000000..5808ec571ce75f3d7e25ae8489137ac90ced5687 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when using wrong attribute. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(funds_holding)] + WithdrawAsset(u128), + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1ff9d18513686293bc56f438c2bd7fa543820c84 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr @@ -0,0 +1,5 @@ +error: Expected `builder(loads_holding)` + --> tests/ui/builder_pattern/unexpected_attribute.rs:25:5 + | +25 | #[builder(funds_holding)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs new file mode 100644 index 0000000000000000000000000000000000000000..bb98d603fd91567406063b72d6133ba87671b4c9 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when the `BuyExecution` instruction doesn't take named fields. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution { fees: u128 }, + UnpaidExecution(u32, u32), + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0a3c0a40a33b0a5224f6b940c1ed18552a15fdf3 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr @@ -0,0 +1,5 @@ +error: UnpaidExecution should have named fields + --> tests/ui/builder_pattern/unpaid_execution_named_fields.rs:26:5 + | +26 | UnpaidExecution(u32, u32), + | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.rs similarity index 100% rename from polkadot/xcm/procedural/tests/ui/builder_pattern.rs rename to polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.rs diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr similarity index 63% rename from polkadot/xcm/procedural/tests/ui/builder_pattern.stderr rename to polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr index 439b40f31cae0496d3200eb7c23ae8f76f56d0a9..007aa0b5ff3035761779a1220f364d980c30936b 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern.stderr +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr @@ -1,5 +1,5 @@ error: Expected the `Instruction` enum - --> tests/ui/builder_pattern.rs:23:1 + --> tests/ui/builder_pattern/wrong_target.rs:23:1 | 23 | struct SomeStruct; | ^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/src/double_encoded.rs b/polkadot/xcm/src/double_encoded.rs index 875b811da3f71c7f74762ec24acc05b8380e2110..45856f657d1a425636e479240681fa20acb6db74 100644 --- a/polkadot/xcm/src/double_encoded.rs +++ b/polkadot/xcm/src/double_encoded.rs @@ -25,6 +25,7 @@ use parity_scale_codec::{Decode, DecodeLimit, Encode}; #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(T))] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub struct DoubleEncoded { encoded: Vec, #[codec(skip)] diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index d804e4bf7351b3fffe5df2e4bfcba35d52b7704d..ddad0b5303beee8d318558d97485d1849f8ab462 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -373,6 +373,12 @@ pub trait WrapVersion { ) -> Result, ()>; } +/// Check and return the `Version` that should be used for the `Xcm` datum for the destination +/// `MultiLocation`, which will interpret it. +pub trait GetVersion { + fn get_version_for(dest: &latest::MultiLocation) -> Option; +} + /// `()` implementation does nothing with the XCM, just sending with whatever version it was /// authored as. 
impl WrapVersion for () { @@ -395,6 +401,11 @@ impl WrapVersion for AlwaysV2 { Ok(VersionedXcm::::V2(xcm.into().try_into()?)) } } +impl GetVersion for AlwaysV2 { + fn get_version_for(_dest: &latest::MultiLocation) -> Option { + Some(v2::VERSION) + } +} /// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before /// wrapping it. @@ -407,6 +418,11 @@ impl WrapVersion for AlwaysV3 { Ok(VersionedXcm::::V3(xcm.into().try_into()?)) } } +impl GetVersion for AlwaysV3 { + fn get_version_for(_dest: &latest::MultiLocation) -> Option { + Some(v3::VERSION) + } +} /// `WrapVersion` implementation which attempts to always convert the XCM to the latest version /// before wrapping it. @@ -418,8 +434,8 @@ pub type AlwaysLts = AlwaysV3; pub mod prelude { pub use super::{ - latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV2, AlwaysV3, IntoVersion, Unsupported, - Version as XcmVersion, VersionedAssetId, VersionedInteriorMultiLocation, + latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV2, AlwaysV3, GetVersion, IntoVersion, + Unsupported, Version as XcmVersion, VersionedAssetId, VersionedInteriorMultiLocation, VersionedMultiAsset, VersionedMultiAssets, VersionedMultiLocation, VersionedResponse, VersionedXcm, WrapVersion, }; diff --git a/polkadot/xcm/src/v2/mod.rs b/polkadot/xcm/src/v2/mod.rs index 7f654ebfd9e9b242c920c7e88d02c2ce69714874..188b7f0b5c9384b7395ed08b27c4c8378719be8f 100644 --- a/polkadot/xcm/src/v2/mod.rs +++ b/polkadot/xcm/src/v2/mod.rs @@ -82,6 +82,7 @@ pub use traits::{Error, ExecuteXcm, GetWeight, Outcome, Result, SendError, SendR /// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. #[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub enum OriginKind { /// Origin should just be the native dispatch origin representation for the sender in the /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index 47429a8c36e9a76c29b495feb0e2b765e8c7a21a..6ae339db2ae65aba72cb15ff438ffc8f577b875c 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -49,6 +49,7 @@ use serde::{Deserialize, Serialize}; Serialize, Deserialize, )] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum NetworkId { /// Network specified by the first 32 bytes of its genesis block. @@ -75,6 +76,8 @@ pub enum NetworkId { BitcoinCore, /// The Bitcoin network, including hard-forks supported by Bitcoin Cash developers. BitcoinCash, + /// The Polkadot Bulletin chain. + PolkadotBulletin, } impl From for Option { @@ -117,6 +120,7 @@ impl TryFrom for NetworkId { Serialize, Deserialize, )] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum BodyId { /// The only body in its context. @@ -188,6 +192,7 @@ impl TryFrom for BodyId { Serialize, Deserialize, )] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum BodyPart { /// The body's declaration, under whatever means it decides. 
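Stepping back to the `GetVersion` trait added in `polkadot/xcm/src/lib.rs` above: besides the fixed `AlwaysV2`/`AlwaysV3` implementations, a resolver can pick the version per destination. A hypothetical sketch (the parachain index and the fallback policy are illustrative only, not part of this PR):

```rust
use xcm::prelude::*;

/// Hypothetical resolver: advertise XCM v2 for one legacy destination and the
/// latest version for everyone else; `None` would mean "version unknown".
pub struct LenientVersion;
impl GetVersion for LenientVersion {
	fn get_version_for(dest: &MultiLocation) -> Option<XcmVersion> {
		match dest.interior.first() {
			// Parachain 1000 as a stand-in for a chain still stuck on XCM v2.
			Some(Parachain(1000)) => Some(2),
			_ => Some(xcm::latest::VERSION),
		}
	}
}
```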
@@ -264,6 +269,7 @@ impl TryFrom for BodyPart { Serialize, Deserialize, )] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Junction { /// An indexed parachain belonging to and operated by the context. diff --git a/polkadot/xcm/src/v3/junctions.rs b/polkadot/xcm/src/v3/junctions.rs index d1cbc2dbed42cddd4fae2eae398a120e2272679b..88da20cb1a11e2824a927fbdf7bc8aead4ee58cc 100644 --- a/polkadot/xcm/src/v3/junctions.rs +++ b/polkadot/xcm/src/v3/junctions.rs @@ -44,6 +44,7 @@ pub(crate) const MAX_JUNCTIONS: usize = 8; serde::Serialize, serde::Deserialize, )] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Junctions { /// The interpreting consensus system. diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index 4217528f2273647b0e7dbaf840695493d7c0ddb5..50b7a539122d66c02d5d47c2efe119097313d5d6 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -69,6 +69,7 @@ pub type QueryId = u64; #[codec(encode_bound())] #[scale_info(bounds(), skip_type_params(Call))] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub struct Xcm(pub Vec>); /// The maximal number of instructions in an XCM before decoding fails. @@ -232,15 +233,19 @@ pub mod prelude { } parameter_types! { + #[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub MaxPalletNameLen: u32 = 48; /// Maximum size of the encoded error code coming from a `Dispatch` result, used for /// `MaybeErrorCode`. This is not (yet) enforced, so it's just an indication of expectation. + #[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub MaxDispatchErrorLen: u32 = 128; + #[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub MaxPalletsInfo: u32 = 64; } #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub struct PalletInfo { #[codec(compact)] pub index: u32, @@ -272,6 +277,7 @@ impl PalletInfo { #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub enum MaybeErrorCode { Success, Error(BoundedVec), @@ -296,6 +302,7 @@ impl Default for MaybeErrorCode { /// Response data to a query. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub enum Response { /// No response. Serves as a neutral default. Null, @@ -320,6 +327,7 @@ impl Default for Response { /// Information regarding the composition of a query response. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub struct QueryResponseInfo { /// The destination to which the query response message should be send. pub destination: MultiLocation, @@ -333,6 +341,7 @@ pub struct QueryResponseInfo { /// An optional weight limit. 
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub enum WeightLimit { /// No weight limit imposed. Unlimited, @@ -417,6 +426,7 @@ impl XcmContext { #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(Call))] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub enum Instruction { /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place them into the Holding /// Register. @@ -426,6 +436,7 @@ pub enum Instruction { /// Kind: *Command*. /// /// Errors: + #[builder(loads_holding)] WithdrawAsset(MultiAssets), /// Asset(s) (`assets`) have been received into the ownership of this system on the `origin` @@ -439,6 +450,7 @@ pub enum Instruction { /// Kind: *Trusted Indication*. /// /// Errors: + #[builder(loads_holding)] ReserveAssetDeposited(MultiAssets), /// Asset(s) (`assets`) have been destroyed on the `origin` system and equivalent assets should @@ -452,6 +464,7 @@ pub enum Instruction { /// Kind: *Trusted Indication*. /// /// Errors: + #[builder(loads_holding)] ReceiveTeleportedAsset(MultiAssets), /// Respond with information that the local system is expecting. @@ -776,6 +789,7 @@ pub enum Instruction { /// Kind: *Command* /// /// Errors: + #[builder(loads_holding)] ClaimAsset { assets: MultiAssets, ticket: MultiLocation }, /// Always throws an error of type `Trap`. diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 454120a1a7b9c701423236004c36aea55a6425a7..c8801f5a461da249b44cf45746a2db72f80be5c2 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -47,6 +47,7 @@ use scale_info::TypeInfo; Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum AssetInstance { /// Undefined - used if the non-fungible asset class has only one instance. @@ -243,6 +244,7 @@ impl TryFrom for u128 { /// instance. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum Fungibility { /// A fungible asset; we record a number of units, as a `u128` in the inner item. @@ -313,6 +315,7 @@ impl TryFrom for Fungibility { Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum WildFungibility { /// The asset is fungible. @@ -337,6 +340,7 @@ impl TryFrom for WildFungibility { Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum AssetId { /// A specific location identifying an asset. 
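The `#[builder(loads_holding)]` annotations on the real `Instruction` enum above are what drive the generated XCM builder. The generated API itself is not part of this diff, so the following usage sketch only assumes the conventional shape (an `Xcm::<C>::builder()` entry point plus one snake_case method per instruction, with a holding-loading instruction expected before `buy_execution`):

```rust
use xcm::latest::prelude::*;

// Sketch only: the builder entry point and method names are assumed from the
// derive's conventions; verify against the generated code before relying on them.
fn example_program(beneficiary: MultiLocation) -> Xcm<()> {
	let assets: MultiAssets = (Here, 100u128).into();
	let fees: MultiAsset = (Here, 10u128).into();
	Xcm::<()>::builder()
		// A `loads_holding` instruction is expected first...
		.withdraw_asset(assets)
		// ...then fee payment...
		.buy_execution(fees, Unlimited)
		// ...then the rest of the program.
		.deposit_asset(Wild(All), beneficiary)
		.build()
}
```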
@@ -412,6 +416,7 @@ impl AssetId { /// Either an amount of a single fungible asset, or a single well-identified non-fungible asset. #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct MultiAsset { /// The overall asset identity (aka *class*, in the case of a non-fungible). @@ -510,6 +515,7 @@ impl TryFrom for MultiAsset { /// - The number of items should grow no larger than `MAX_ITEMS_IN_MULTIASSETS`. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo, Default)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct MultiAssets(Vec); @@ -710,6 +716,7 @@ impl MultiAssets { /// A wildcard representing a set of assets. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum WildMultiAsset { /// All assets in Holding. @@ -823,6 +830,7 @@ impl, B: Into> From<(A, B)> for WildMultiAsset /// `MultiAsset` collection, defined either by a number of `MultiAssets` or a single wildcard. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum MultiAssetFilter { /// Specify the filter as being everything contained by the given `MultiAssets` inner. diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index 8a1575d9bc950620e3f411349cdca6ef9db5deef..9649b1b3207341dcbaaaa33c7b5f7715c3ca323c 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -66,6 +66,7 @@ use scale_info::TypeInfo; serde::Serialize, serde::Deserialize, )] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub struct MultiLocation { /// The number of parent junctions at the beginning of this `MultiLocation`. pub parents: u8, @@ -444,6 +445,21 @@ impl MultiLocation { } } } + + /// Return the MultiLocation subsection identifying the chain that `self` points to. 
+ pub fn chain_location(&self) -> MultiLocation { + let mut clone = *self; + // start popping junctions until we reach chain identifier + while let Some(j) = clone.last() { + if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { + // return chain subsection + return clone + } else { + (clone, _) = clone.split_last_interior(); + } + } + MultiLocation::new(clone.parents, Junctions::Here) + } } impl TryFrom for MultiLocation { @@ -674,6 +690,57 @@ mod tests { assert_eq!(iter.next_back(), None); } + #[test] + fn chain_location_works() { + // Relay-chain or parachain context pointing to local resource, + let relay_to_local = MultiLocation::new(0, (PalletInstance(42), GeneralIndex(42))); + assert_eq!(relay_to_local.chain_location(), MultiLocation::here()); + + // Relay-chain context pointing to child parachain, + let relay_to_child = + MultiLocation::new(0, (Parachain(42), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(0, Parachain(42)); + assert_eq!(relay_to_child.chain_location(), expected); + + // Relay-chain context pointing to different consensus relay, + let relay_to_remote_relay = + MultiLocation::new(1, (GlobalConsensus(Kusama), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(1, GlobalConsensus(Kusama)); + assert_eq!(relay_to_remote_relay.chain_location(), expected); + + // Relay-chain context pointing to different consensus parachain, + let relay_to_remote_para = MultiLocation::new( + 1, + (GlobalConsensus(Kusama), Parachain(42), PalletInstance(42), GeneralIndex(42)), + ); + let expected = MultiLocation::new(1, (GlobalConsensus(Kusama), Parachain(42))); + assert_eq!(relay_to_remote_para.chain_location(), expected); + + // Parachain context pointing to relay chain, + let para_to_relay = MultiLocation::new(1, (PalletInstance(42), GeneralIndex(42))); + assert_eq!(para_to_relay.chain_location(), MultiLocation::parent()); + + // Parachain context pointing to sibling parachain, + let para_to_sibling = + MultiLocation::new(1, (Parachain(42), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(1, Parachain(42)); + assert_eq!(para_to_sibling.chain_location(), expected); + + // Parachain context pointing to different consensus relay, + let para_to_remote_relay = + MultiLocation::new(2, (GlobalConsensus(Kusama), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(2, GlobalConsensus(Kusama)); + assert_eq!(para_to_remote_relay.chain_location(), expected); + + // Parachain context pointing to different consensus parachain, + let para_to_remote_para = MultiLocation::new( + 2, + (GlobalConsensus(Kusama), Parachain(42), PalletInstance(42), GeneralIndex(42)), + ); + let expected = MultiLocation::new(2, (GlobalConsensus(Kusama), Parachain(42))); + assert_eq!(para_to_remote_para.chain_location(), expected); + } + #[test] fn conversion_from_other_types_works() { use crate::v2; diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index 1043d17b7106647e86f9290ff008e00f2ead4555..29bd40a6a2d8d1accf8c09e0bf7efded446a8ae1 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -30,6 +30,7 @@ use super::*; /// they will retain the same index over time. #[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] #[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub enum Error { // Errors that happen due to instructions being executed. 
These alone are defined in the // XCM specification. @@ -275,9 +276,9 @@ pub enum Outcome { } impl Outcome { - pub fn ensure_complete(self) -> Result { + pub fn ensure_complete(self) -> result::Result { match self { - Outcome::Complete(_) => Ok(()), + Outcome::Complete(weight) => Ok(weight), Outcome::Incomplete(_, e) => Err(e), Outcome::Error(e) => Err(e), } diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 7d6c40eb8417e55c3bf04b8cb8c85520312386ee..ff528d7d07522dc4a64120029609277c87bd1364 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true version = "1.0.0" +[lints] +workspace = true + [dependencies] impl-trait-for-tuples = "0.2.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -37,7 +40,7 @@ assert_matches = "1.5.0" polkadot-test-runtime = { path = "../../runtime/test-runtime" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 3b13cab2c1ea84ba2422809daa4a0a6d807eb50e..c2b62751c688f73a217b497c2f585c392ca72166 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -81,10 +81,15 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFro instructions[..end] .matcher() .match_next_inst(|inst| match inst { - ReceiveTeleportedAsset(..) | ReserveAssetDeposited(..) => Ok(()), - WithdrawAsset(ref assets) if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION => Ok(()), - ClaimAsset { ref assets, .. } if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION => - Ok(()), + ReceiveTeleportedAsset(ref assets) | + ReserveAssetDeposited(ref assets) | + WithdrawAsset(ref assets) | + ClaimAsset { ref assets, .. } => + if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION { + Ok(()) + } else { + Err(ProcessMessageError::BadFormat) + }, _ => Err(ProcessMessageError::BadFormat), })? .skip_inst_while(|inst| matches!(inst, ClearOrigin))? diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index 0ee638b73e1cebe4d977a6f5791ce35af68c442e..931d812eaaf192c49ed2bfed3fbd123fdd89e691 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -21,7 +21,7 @@ use frame_support::pallet_prelude::DispatchError; use sp_std::boxed::Box; use xcm::prelude::*; -use xcm_executor::traits::QueryHandler; +pub use xcm_executor::traits::QueryHandler; /// Umbrella trait for all Controller traits. pub trait Controller: diff --git a/polkadot/xcm/xcm-builder/src/currency_adapter.rs b/polkadot/xcm/xcm-builder/src/currency_adapter.rs index 8ecf1dee72db00695a0a48ca7b2f489ec6f8bf4c..68ca0111174f5e35d636c95174655e1b50003b19 100644 --- a/polkadot/xcm/xcm-builder/src/currency_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/currency_adapter.rs @@ -16,6 +16,8 @@ //! Adapters to work with `frame_support::traits::Currency` through XCM. 
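One behavioural note on the `traits.rs` hunk above: `Outcome::ensure_complete` now surfaces the weight consumed by a `Complete` outcome instead of discarding it. A minimal sketch of a caller taking advantage of that:

```rust
use xcm::latest::{Error as XcmError, Outcome, Weight};

// With this change the weight of a `Complete` outcome is returned to the caller,
// so it can feed into fee or weight accounting rather than being thrown away.
fn consumed_weight(outcome: Outcome) -> Result<Weight, XcmError> {
	outcome.ensure_complete()
}
```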
+#![allow(deprecated)] + use super::MintLocation; use frame_support::traits::{ExistenceRequirement::AllowDeath, Get, WithdrawReasons}; use sp_runtime::traits::CheckedSub; @@ -85,6 +87,7 @@ impl From for XcmError { /// CheckingAccount, /// >; /// ``` +#[deprecated = "Use `FungibleAdapter` instead"] pub struct CurrencyAdapter( PhantomData<(Currency, Matcher, AccountIdConverter, AccountId, CheckedAccount)>, ); @@ -113,7 +116,7 @@ impl< .map_err(|_| XcmError::NotWithdrawable) } fn accrue_checked(checked_account: AccountId, amount: Currency::Balance) { - Currency::deposit_creating(&checked_account, amount); + let _ = Currency::deposit_creating(&checked_account, amount); Currency::deactivate(amount); } fn reduce_checked(checked_account: AccountId, amount: Currency::Balance) { @@ -215,7 +218,7 @@ impl< let amount = Matcher::matches_fungible(what).ok_or(Error::AssetNotHandled)?; let who = AccountIdConverter::convert_location(who).ok_or(Error::AccountIdConversionFailed)?; - Currency::withdraw(&who, amount, WithdrawReasons::TRANSFER, AllowDeath) + let _ = Currency::withdraw(&who, amount, WithdrawReasons::TRANSFER, AllowDeath) .map_err(|e| XcmError::FailedToTransactAsset(e.into()))?; Ok(what.clone().into()) } diff --git a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs new file mode 100644 index 0000000000000000000000000000000000000000..90608faa44778c37e2792e196b5a77b993634a3a --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs @@ -0,0 +1,317 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Adapters to work with [`frame_support::traits::fungible`] through XCM. + +use super::MintLocation; +use frame_support::traits::{ + tokens::{ + fungible, Fortitude::Polite, Precision::Exact, Preservation::Preserve, Provenance::Minted, + }, + Get, +}; +use sp_std::{marker::PhantomData, prelude::*, result}; +use xcm::latest::prelude::*; +use xcm_executor::traits::{ConvertLocation, Error as MatchError, MatchesFungible, TransactAsset}; + +/// [`TransactAsset`] implementation that allows the use of a [`fungible`] implementation for +/// handling an asset in the XCM executor. +/// Only works for transfers. 
+pub struct FungibleTransferAdapter( + PhantomData<(Fungible, Matcher, AccountIdConverter, AccountId)>, +); +impl< + Fungible: fungible::Mutate, + Matcher: MatchesFungible, + AccountIdConverter: ConvertLocation, + AccountId: Eq + Clone, + > TransactAsset for FungibleTransferAdapter +{ + fn internal_transfer_asset( + what: &MultiAsset, + from: &MultiLocation, + to: &MultiLocation, + _context: &XcmContext, + ) -> result::Result { + log::trace!( + target: "xcm::fungible_adapter", + "internal_transfer_asset what: {:?}, from: {:?}, to: {:?}", + what, from, to + ); + // Check we handle the asset + let amount = Matcher::matches_fungible(what).ok_or(MatchError::AssetNotHandled)?; + let source = AccountIdConverter::convert_location(from) + .ok_or(MatchError::AccountIdConversionFailed)?; + let dest = AccountIdConverter::convert_location(to) + .ok_or(MatchError::AccountIdConversionFailed)?; + Fungible::transfer(&source, &dest, amount, Preserve) + .map_err(|error| XcmError::FailedToTransactAsset(error.into()))?; + Ok(what.clone().into()) + } +} + +/// [`TransactAsset`] implementation that allows the use of a [`fungible`] implementation for +/// handling an asset in the XCM executor. +/// Works for everything but transfers. +pub struct FungibleMutateAdapter( + PhantomData<(Fungible, Matcher, AccountIdConverter, AccountId, CheckingAccount)>, +); + +impl< + Fungible: fungible::Mutate, + Matcher: MatchesFungible, + AccountIdConverter: ConvertLocation, + AccountId: Eq + Clone, + CheckingAccount: Get>, + > FungibleMutateAdapter +{ + fn can_accrue_checked(checking_account: AccountId, amount: Fungible::Balance) -> XcmResult { + Fungible::can_deposit(&checking_account, amount, Minted) + .into_result() + .map_err(|_| XcmError::NotDepositable) + } + + fn can_reduce_checked(checking_account: AccountId, amount: Fungible::Balance) -> XcmResult { + Fungible::can_withdraw(&checking_account, amount) + .into_result(false) + .map_err(|_| XcmError::NotWithdrawable) + .map(|_| ()) + } + + fn accrue_checked(checking_account: AccountId, amount: Fungible::Balance) { + let ok = Fungible::mint_into(&checking_account, amount).is_ok(); + debug_assert!(ok, "`can_accrue_checked` must have returned `true` immediately prior; qed"); + } + + fn reduce_checked(checking_account: AccountId, amount: Fungible::Balance) { + let ok = Fungible::burn_from(&checking_account, amount, Exact, Polite).is_ok(); + debug_assert!(ok, "`can_reduce_checked` must have returned `true` immediately prior; qed"); + } +} + +impl< + Fungible: fungible::Mutate, + Matcher: MatchesFungible, + AccountIdConverter: ConvertLocation, + AccountId: Eq + Clone, + CheckingAccount: Get>, + > TransactAsset + for FungibleMutateAdapter +{ + fn can_check_in( + _origin: &MultiLocation, + what: &MultiAsset, + _context: &XcmContext, + ) -> XcmResult { + log::trace!( + target: "xcm::fungible_adapter", + "can_check_in origin: {:?}, what: {:?}", + _origin, what + ); + // Check we handle this asset + let amount = Matcher::matches_fungible(what).ok_or(MatchError::AssetNotHandled)?; + match CheckingAccount::get() { + Some((checking_account, MintLocation::Local)) => + Self::can_reduce_checked(checking_account, amount), + Some((checking_account, MintLocation::NonLocal)) => + Self::can_accrue_checked(checking_account, amount), + None => Ok(()), + } + } + + fn check_in(_origin: &MultiLocation, what: &MultiAsset, _context: &XcmContext) { + log::trace!( + target: "xcm::fungible_adapter", + "check_in origin: {:?}, what: {:?}", + _origin, what + ); + if let Some(amount) = 
Matcher::matches_fungible(what) { + match CheckingAccount::get() { + Some((checking_account, MintLocation::Local)) => + Self::reduce_checked(checking_account, amount), + Some((checking_account, MintLocation::NonLocal)) => + Self::accrue_checked(checking_account, amount), + None => (), + } + } + } + + fn can_check_out(_dest: &MultiLocation, what: &MultiAsset, _context: &XcmContext) -> XcmResult { + log::trace!( + target: "xcm::fungible_adapter", + "check_out dest: {:?}, what: {:?}", + _dest, + what + ); + let amount = Matcher::matches_fungible(what).ok_or(MatchError::AssetNotHandled)?; + match CheckingAccount::get() { + Some((checking_account, MintLocation::Local)) => + Self::can_accrue_checked(checking_account, amount), + Some((checking_account, MintLocation::NonLocal)) => + Self::can_reduce_checked(checking_account, amount), + None => Ok(()), + } + } + + fn check_out(_dest: &MultiLocation, what: &MultiAsset, _context: &XcmContext) { + log::trace!( + target: "xcm::fungible_adapter", + "check_out dest: {:?}, what: {:?}", + _dest, + what + ); + if let Some(amount) = Matcher::matches_fungible(what) { + match CheckingAccount::get() { + Some((checking_account, MintLocation::Local)) => + Self::accrue_checked(checking_account, amount), + Some((checking_account, MintLocation::NonLocal)) => + Self::reduce_checked(checking_account, amount), + None => (), + } + } + } + + fn deposit_asset( + what: &MultiAsset, + who: &MultiLocation, + _context: Option<&XcmContext>, + ) -> XcmResult { + log::trace!( + target: "xcm::fungible_adapter", + "deposit_asset what: {:?}, who: {:?}", + what, who, + ); + let amount = Matcher::matches_fungible(what).ok_or(MatchError::AssetNotHandled)?; + let who = AccountIdConverter::convert_location(who) + .ok_or(MatchError::AccountIdConversionFailed)?; + Fungible::mint_into(&who, amount) + .map_err(|error| XcmError::FailedToTransactAsset(error.into()))?; + Ok(()) + } + + fn withdraw_asset( + what: &MultiAsset, + who: &MultiLocation, + _context: Option<&XcmContext>, + ) -> result::Result { + log::trace!( + target: "xcm::fungible_adapter", + "deposit_asset what: {:?}, who: {:?}", + what, who, + ); + let amount = Matcher::matches_fungible(what).ok_or(MatchError::AssetNotHandled)?; + let who = AccountIdConverter::convert_location(who) + .ok_or(MatchError::AccountIdConversionFailed)?; + Fungible::burn_from(&who, amount, Exact, Polite) + .map_err(|error| XcmError::FailedToTransactAsset(error.into()))?; + Ok(what.clone().into()) + } +} + +/// [`TransactAsset`] implementation that allows the use of a [`fungible`] implementation for +/// handling an asset in the XCM executor. +/// Works for everything, transfers and teleport bookkeeping. 
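`FungibleAdapter` (defined just below) combines the transfer-only and mutate-only adapters above and is the intended replacement for the now-deprecated `CurrencyAdapter`. A wiring sketch in the style of the `CurrencyAdapter` doc example, where `Balances`, `TokenLocation`, `LocationToAccountId`, `AccountId` and `CheckingAccount` are placeholders for a runtime's own types:

```rust
use xcm_builder::{FungibleAdapter, IsConcrete};

/// Transactor for the native token via a `fungible::Mutate` implementation
/// (placeholder type names; substitute the concrete runtime's definitions).
pub type LocalAssetTransactor = FungibleAdapter<
	// The `fungible::Mutate` implementation, e.g. `pallet_balances`.
	Balances,
	// Match only the asset whose id is the local token's location.
	IsConcrete<TokenLocation>,
	// Convert a `MultiLocation` into a local `AccountId`.
	LocationToAccountId,
	AccountId,
	// A `Get<Option<(AccountId, MintLocation)>>` used for teleport bookkeeping.
	CheckingAccount,
>;
```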
+pub struct FungibleAdapter( + PhantomData<(Fungible, Matcher, AccountIdConverter, AccountId, CheckingAccount)>, +); +impl< + Fungible: fungible::Mutate, + Matcher: MatchesFungible, + AccountIdConverter: ConvertLocation, + AccountId: Eq + Clone, + CheckingAccount: Get>, + > TransactAsset + for FungibleAdapter +{ + fn can_check_in(origin: &MultiLocation, what: &MultiAsset, context: &XcmContext) -> XcmResult { + FungibleMutateAdapter::< + Fungible, + Matcher, + AccountIdConverter, + AccountId, + CheckingAccount, + >::can_check_in(origin, what, context) + } + + fn check_in(origin: &MultiLocation, what: &MultiAsset, context: &XcmContext) { + FungibleMutateAdapter::< + Fungible, + Matcher, + AccountIdConverter, + AccountId, + CheckingAccount, + >::check_in(origin, what, context) + } + + fn can_check_out(dest: &MultiLocation, what: &MultiAsset, context: &XcmContext) -> XcmResult { + FungibleMutateAdapter::< + Fungible, + Matcher, + AccountIdConverter, + AccountId, + CheckingAccount, + >::can_check_out(dest, what, context) + } + + fn check_out(dest: &MultiLocation, what: &MultiAsset, context: &XcmContext) { + FungibleMutateAdapter::< + Fungible, + Matcher, + AccountIdConverter, + AccountId, + CheckingAccount, + >::check_out(dest, what, context) + } + + fn deposit_asset( + what: &MultiAsset, + who: &MultiLocation, + context: Option<&XcmContext>, + ) -> XcmResult { + FungibleMutateAdapter::< + Fungible, + Matcher, + AccountIdConverter, + AccountId, + CheckingAccount, + >::deposit_asset(what, who, context) + } + + fn withdraw_asset( + what: &MultiAsset, + who: &MultiLocation, + maybe_context: Option<&XcmContext>, + ) -> result::Result { + FungibleMutateAdapter::< + Fungible, + Matcher, + AccountIdConverter, + AccountId, + CheckingAccount, + >::withdraw_asset(what, who, maybe_context) + } + + fn internal_transfer_asset( + what: &MultiAsset, + from: &MultiLocation, + to: &MultiLocation, + context: &XcmContext, + ) -> result::Result { + FungibleTransferAdapter::::internal_transfer_asset( + what, from, to, context + ) + } +} diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 35f95b85c89ca94674b8acd34b42849af861ace8..e7431ae0254533aa2acd8ae7e274cedb549c0564 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -65,6 +65,7 @@ mod process_xcm_message; pub use process_xcm_message::ProcessXcmMessage; mod currency_adapter; +#[allow(deprecated)] pub use currency_adapter::CurrencyAdapter; mod fee_handling; @@ -72,6 +73,9 @@ pub use fee_handling::{ deposit_or_burn_fee, HandleFee, XcmFeeManagerFromComponents, XcmFeeToAccount, }; +mod fungible_adapter; +pub use fungible_adapter::{FungibleAdapter, FungibleMutateAdapter, FungibleTransferAdapter}; + mod fungibles_adapter; pub use fungibles_adapter::{ AssetChecking, DualMint, FungiblesAdapter, FungiblesMutateAdapter, FungiblesTransferAdapter, @@ -119,5 +123,5 @@ pub use pay::{FixedLocation, LocatableAssetId, PayAccountId32OnChainOverXcm, Pay mod controller; pub use controller::{ Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, - QueryControllerWeightInfo, SendController, SendControllerWeightInfo, + QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo, }; diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs index 406843a0fe8ae69fe9b9754890c40e9138e45701..b1361cc85777e9e39fc337cc4b0fa0f619870654 100644 --- 
a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs @@ -23,11 +23,16 @@ use super::*; parameter_types! { pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1)); pub RemoteUniversalLocation: Junctions = X2(GlobalConsensus(Remote::get()), Parachain(1)); + pub RemoteNetwork: MultiLocation = AncestorThen(2, GlobalConsensus(Remote::get())).into(); } type TheBridge = TestBridge>; -type Router = - TestTopic, UniversalLocation>>; +type Router = TestTopic< + UnpaidLocalExporter< + HaulBlobExporter, + UniversalLocation, + >, +>; /// ```nocompile /// local | remote @@ -44,7 +49,7 @@ fn sending_to_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get(), Parachain(1)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); assert_eq!( take_received_remote_messages(), @@ -78,7 +83,7 @@ fn sending_to_parachain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get(), Parachain(1000)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( (Parent, Parachain(1000)).into(), @@ -110,7 +115,7 @@ fn sending_to_relay_chain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get()).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( Parent.into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs index 02c454bb2129184e14da7b1892de07b6aa6b7d73..5371abccf666b9d330343a6fd973e5c5fecc3186 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs @@ -23,11 +23,16 @@ use super::*; parameter_types! 
{ pub UniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub RemoteUniversalLocation: Junctions = X1(GlobalConsensus(Remote::get())); + pub RemoteNetwork: MultiLocation = AncestorThen(1, GlobalConsensus(Remote::get())).into(); } type TheBridge = TestBridge>; -type Router = - TestTopic, UniversalLocation>>; +type Router = TestTopic< + UnpaidLocalExporter< + HaulBlobExporter, + UniversalLocation, + >, +>; /// ```nocompile /// local | remote @@ -41,7 +46,7 @@ fn sending_to_bridged_chain_works() { let msg = Xcm(vec![Trap(1)]); assert_eq!( send_xcm::((Parent, Remote::get()).into(), msg).unwrap().1, - (Here, 100).into() + Price::get() ); assert_eq!(TheBridge::service(), 1); let expected = vec![( @@ -68,7 +73,7 @@ fn sending_to_parachain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Remote::get(), Parachain(1000)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( Parachain(1000).into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs index 45630dbfc2484c23c14a1cbc283c5403a9f56d62..0c749b66da61e2250c7c4bc23a5ee1abc38f9dd2 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs @@ -20,6 +20,7 @@ use super::mock::*; use crate::{universal_exports::*, WithTopicSource}; use frame_support::{parameter_types, traits::Get}; use std::{cell::RefCell, marker::PhantomData}; +use xcm::AlwaysLatest; use xcm_executor::{ traits::{export_xcm, validate_export}, XcmExecutor, diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs index 45dc2d4a3b9a44c075874e945b5f25453e344912..079eb0175d71f8fabcf9c94d3e5cd3098c3fc6f2 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs @@ -24,23 +24,25 @@ use super::*; parameter_types! { + // 100 to use the bridge (export) and 80 for the remote execution weight (4 instructions x (10 + + // 10) weight each). + pub SendOverBridgePrice: u128 = 180u128 + if UsingTopic::get() { 20 } else { 0 }; pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(100)); pub RelayUniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub RemoteUniversalLocation: Junctions = X1(GlobalConsensus(Remote::get())); + pub RemoteNetwork: MultiLocation = AncestorThen(1, GlobalConsensus(Remote::get())).into(); pub BridgeTable: Vec = vec![ NetworkExportTableItem::new( Remote::get(), None, MultiLocation::parent(), - Some((Parent, 200u128 + if UsingTopic::get() { 20 } else { 0 }).into()) + Some((Parent, SendOverBridgePrice::get()).into()) ) ]; - // ^^^ 100 to use the bridge (export) and 100 for the remote execution weight (5 instructions - // x (10 + 10) weight each). 
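For the record, the adjusted `SendOverBridgePrice` works out as: 100 for the export itself plus 4 remote instructions x (10 ref-time + 10 proof-size) = 100 + 80 = 180, with one extra 10 + 10 charge (20) when `UsingTopic` appends a topic instruction. The drop from the previous 200 reflects the `RefundSurplus` instruction removed from the generated program in the hunks below (5 instructions down to 4).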
} type TheBridge = TestBridge>; -type RelayExporter = HaulBlobExporter; +type RelayExporter = HaulBlobExporter; type LocalInnerRouter = ExecutingRouter; type LocalBridgeRouter = SovereignPaidRemoteExporter< NetworkExportTable, @@ -68,7 +70,7 @@ fn sending_to_bridged_chain_works() { clear_assets(Parachain(100)); add_asset(Parachain(100), (Here, 1000u128)); - let price = 200u128 + if UsingTopic::get() { 20 } else { 0 }; + let price = SendOverBridgePrice::get(); let msg = Xcm(vec![Trap(1)]); assert_eq!(send_xcm::(dest, msg).unwrap().1, (Parent, price).into()); @@ -86,7 +88,7 @@ fn sending_to_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // The export cost 40 ref time and 40 proof size weight units (and thus 80 units of // balance). assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); @@ -104,11 +106,10 @@ fn sending_to_bridged_chain_works() { destination: Here, xcm: xcm_with_topic([0; 32], vec![Trap(1)]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: Parachain(100).into() }, ], ), - outcome: Outcome::Complete(test_weight(5)), + outcome: Outcome::Complete(test_weight(4)), paid: true, }; assert_eq!(RoutingLog::take(), vec![entry]); @@ -143,7 +144,7 @@ fn sending_to_parachain_of_bridged_chain_works() { clear_assets(Parachain(100)); add_asset(Parachain(100), (Here, 1000u128)); - let price = 200u128 + if UsingTopic::get() { 20 } else { 0 }; + let price = SendOverBridgePrice::get(); let msg = Xcm(vec![Trap(1)]); assert_eq!(send_xcm::(dest, msg).unwrap().1, (Parent, price).into()); @@ -161,7 +162,7 @@ fn sending_to_parachain_of_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // The export cost 40 ref time and 40 proof size weight units (and thus 80 units of // balance). assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); @@ -179,11 +180,10 @@ fn sending_to_parachain_of_bridged_chain_works() { destination: Parachain(100).into(), xcm: xcm_with_topic([0; 32], vec![Trap(1)]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: Parachain(100).into() }, ], ), - outcome: Outcome::Complete(test_weight(5)), + outcome: Outcome::Complete(test_weight(4)), paid: true, }; assert_eq!(RoutingLog::take(), vec![entry]); diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para.rs index f11143ab9f6fc9095bc6bdcc220e5742e86a9bcc..fb6c5da3eb010df30a34c86a1c1d279f5fea8be3 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para.rs @@ -24,6 +24,7 @@ parameter_types! { pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1000)); pub ParaBridgeUniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1)); pub RemoteParaBridgeUniversalLocation: Junctions = X2(GlobalConsensus(Remote::get()), Parachain(1)); + pub RemoteNetwork: MultiLocation = AncestorThen(2, GlobalConsensus(Remote::get())).into(); pub BridgeTable: Vec = vec![ NetworkExportTableItem::new( Remote::get(), @@ -36,7 +37,7 @@ parameter_types! 
{ type TheBridge = TestBridge< BridgeBlobDispatcher, >; -type RelayExporter = HaulBlobExporter; +type RelayExporter = HaulBlobExporter; type LocalInnerRouter = UnpaidExecutingRouter; type LocalBridgingRouter = diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para_via_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para_via_relay.rs index 7218e0a04880fdb2a49ec470385e551f1ab884ac..0b6dc01e2bf1362b3729b8bbf800025221b56ab7 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para_via_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_para_para_via_relay.rs @@ -24,6 +24,7 @@ parameter_types! { pub UniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub ParaBridgeUniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1)); pub RemoteParaBridgeUniversalLocation: Junctions = X2(GlobalConsensus(Remote::get()), Parachain(1)); + pub RemoteNetwork: MultiLocation = AncestorThen(2, GlobalConsensus(Remote::get())).into(); pub BridgeTable: Vec = vec![ NetworkExportTableItem::new( Remote::get(), @@ -36,7 +37,7 @@ parameter_types! { type TheBridge = TestBridge< BridgeBlobDispatcher, >; -type RelayExporter = HaulBlobExporter; +type RelayExporter = HaulBlobExporter; type LocalInnerRouter = UnpaidExecutingRouter; type LocalBridgingRouter = diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_relay_relay.rs index 45b5efbc44c549b9a68fbcfc968a6589854d74df..e33c7b15b0af8383742d52e4c44af392ea3ba01f 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/remote_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/remote_relay_relay.rs @@ -24,6 +24,7 @@ parameter_types! { pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(1000)); pub RelayUniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub RemoteUniversalLocation: Junctions = X1(GlobalConsensus(Remote::get())); + pub RemoteNetwork: MultiLocation = AncestorThen(1, GlobalConsensus(Remote::get())).into(); pub BridgeTable: Vec = vec![ NetworkExportTableItem::new( Remote::get(), @@ -35,7 +36,7 @@ parameter_types! 
{ } type TheBridge = TestBridge>; -type RelayExporter = HaulBlobExporter; +type RelayExporter = HaulBlobExporter; type LocalInnerRouter = UnpaidExecutingRouter; type LocalBridgeRouter = diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 189274eb5f5b8034f5bf0aa5ccfc26700fb3aa95..843c39bbfeb5100a588bbaef488bfee66e0071e0 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -27,20 +27,16 @@ pub use crate::{ }; use frame_support::traits::{ContainsPair, Everything}; pub use frame_support::{ - dispatch::{ - DispatchInfo, DispatchResultWithPostInfo, GetDispatchInfo, Parameter, PostDispatchInfo, - }, + dispatch::{DispatchInfo, DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, ensure, match_types, parameter_types, sp_runtime::{traits::Dispatchable, DispatchError, DispatchErrorWithPostInfo}, - traits::{ConstU32, Contains, Get, IsInVec}, + traits::{Contains, Get, IsInVec}, }; pub use parity_scale_codec::{Decode, Encode}; -pub use sp_io::hashing::blake2_256; pub use sp_std::{ cell::{Cell, RefCell}, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, fmt::Debug, - marker::PhantomData, }; pub use xcm::latest::{prelude::*, Weight}; use xcm_executor::traits::{Properties, QueryHandler, QueryResponseStatus}; diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index e51bd952177ba442d97ee82c2b9b44e37217da25..78b9284c689fec49c1f35d193189badfb61fac26 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -246,11 +246,6 @@ type SovereignAccountOf = ( HashedDescription>, ); -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(1000).into()); -} - impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -274,8 +269,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index dbe9571d461ad66f604af593ec7bc66fc9bf3cb2..4aa6a0ef7a50fc9dadecd6e46bacf4957ea248e3 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -305,7 +305,6 @@ impl( - PhantomData<(Bridge, BridgedNetwork, Price)>, +pub struct HaulBlobExporter( + PhantomData<(Bridge, BridgedNetwork, DestinationVersion, Price)>, ); -impl, Price: Get> ExportXcm - for HaulBlobExporter +/// `ExportXcm` implementation for `HaulBlobExporter`. +/// +/// # Type Parameters +/// +/// ```text +/// - Bridge: Implements `HaulBlob`. +/// - BridgedNetwork: The relative location of the bridged consensus system with the expected `GlobalConsensus` junction. +/// - DestinationVersion: Implements `GetVersion` for retrieving XCM version for the destination. +/// - Price: potential fees for exporting. 
+/// ``` +impl< + Bridge: HaulBlob, + BridgedNetwork: Get, + DestinationVersion: GetVersion, + Price: Get, + > ExportXcm for HaulBlobExporter { type Ticket = (Vec, XcmHash); @@ -438,17 +451,35 @@ impl, Price: Get> destination: &mut Option, message: &mut Option>, ) -> Result<((Vec, XcmHash), MultiAssets), SendError> { - let bridged_network = BridgedNetwork::get(); + let (bridged_network, bridged_network_location_parents) = { + let MultiLocation { parents, interior: mut junctions } = BridgedNetwork::get(); + match junctions.take_first() { + Some(GlobalConsensus(network)) => (network, parents), + _ => return Err(SendError::NotApplicable), + } + }; ensure!(&network == &bridged_network, SendError::NotApplicable); // We don't/can't use the `channel` for this adapter. let dest = destination.take().ok_or(SendError::MissingArgument)?; - let universal_dest = match dest.pushed_front_with(GlobalConsensus(bridged_network)) { - Ok(d) => d.into(), - Err((dest, _)) => { - *destination = Some(dest); - return Err(SendError::NotApplicable) - }, - }; + + // Let's resolve the known/supported XCM version for the destination because we don't know + // if it supports the same/latest version. + let (universal_dest, version) = + match dest.pushed_front_with(GlobalConsensus(bridged_network)) { + Ok(d) => { + let version = DestinationVersion::get_version_for(&MultiLocation::from( + AncestorThen(bridged_network_location_parents, d), + )) + .ok_or(SendError::DestinationUnsupported)?; + (d, version) + }, + Err((dest, _)) => { + *destination = Some(dest); + return Err(SendError::NotApplicable) + }, + }; + + // Let's adjust XCM with `UniversalOrigin`, `DescendOrigin` and`SetTopic`. let (local_net, local_sub) = universal_source .take() .ok_or(SendError::MissingArgument)? @@ -463,7 +494,17 @@ impl, Price: Get> if local_sub != Here { message.0.insert(1, DescendOrigin(local_sub)); } - let message = VersionedXcm::from(message); + + // We cannot use the latest `Versioned` because we don't know if the target chain already + // supports the same version. Therefore, we better control the destination version with best + // efforts. + let message = VersionedXcm::from(message) + .into_version(version) + .map_err(|()| SendError::DestinationUnsupported)?; + let universal_dest = VersionedInteriorMultiLocation::from(universal_dest) + .into_version(version) + .map_err(|()| SendError::DestinationUnsupported)?; + let id = maybe_id.unwrap_or_else(|| message.using_encoded(sp_io::hashing::blake2_256)); let blob = BridgeMessage { universal_dest, message }.encode(); Ok(((blob, id), Price::get())) diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 5fcba5e2f54d44b33dc5343b82752c2a001b7807..6b4d893f73c7bf3688b19de4d8f5cb364b496316 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . 
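Summarising the `HaulBlobExporter` change above: `BridgedNetwork` is now a full `MultiLocation` whose interior begins with a `GlobalConsensus` junction, and the new `DestinationVersion` parameter (any `GetVersion` implementation) selects the XCM version the message and universal destination are encoded with, failing with `SendError::DestinationUnsupported` when a downgrade is impossible. A configuration sketch where `MyBlobHauler` stands in for a real `HaulBlob` implementation:

```rust
use frame_support::parameter_types;
use xcm::{latest::prelude::*, AlwaysLatest};
use xcm_builder::HaulBlobExporter;

parameter_types! {
	// The bridged consensus, expressed as a location relative to the local chain.
	pub RemoteNetwork: MultiLocation = AncestorThen(2, GlobalConsensus(Kusama)).into();
	// Fee charged for using the bridge.
	pub ExportPrice: MultiAssets = (Parent, 100u128).into();
}

// `MyBlobHauler` is a placeholder `HaulBlob` implementation; `AlwaysLatest` can be
// swapped for a `GetVersion` type that tracks the peer's advertised XCM version.
pub type ToRemoteExporter =
	HaulBlobExporter<MyBlobHauler, RemoteNetwork, AlwaysLatest, ExportPrice>;
```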
use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{ConstU32, Everything, Nothing}, weights::Weight, }; @@ -32,12 +32,14 @@ use xcm_executor::XcmExecutor; use staging_xcm_builder as xcm_builder; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter as XcmCurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, FixedWeightBounds, - IsChildSystemParachain, IsConcrete, MintLocation, RespectSuspension, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + FixedRateOfFungible, FixedWeightBounds, IsChildSystemParachain, IsConcrete, MintLocation, + RespectSuspension, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, + TakeWeightCredit, }; pub type AccountId = AccountId32; @@ -76,6 +78,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; @@ -142,6 +145,7 @@ parameter_types! { pub type SovereignAccountOf = (ChildParachainConvertsVia, AccountId32Aliases); +#[allow(deprecated)] pub type LocalCurrencyAdapter = XcmCurrencyAdapter< Balances, IsConcrete, @@ -210,11 +214,6 @@ impl xcm_executor::Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Here.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type UniversalLocation = UniversalLocation; @@ -239,8 +238,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 9f0caa80617c38e57b6103d696771e25b5bd883d..32fa6669c0abc6b96e2cb50d201f87b749240306 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -6,10 +6,14 @@ edition.workspace = true license.workspace = true version = "1.0.0" +[lints] +workspace = true + [dependencies] impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = "..", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } @@ -22,7 +26,7 @@ log = { version = "0.4.17", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -34,6 +38,7 @@ std = [ "frame-support/std", "log/std", "parity-scale-codec/std", + "scale-info/std", 
"sp-arithmetic/std", "sp-core/std", "sp-io/std", diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index d869fc6f2dcd2a72d0e20d80a890ebf89a9114cf..cafe12dc587f883a31ac3539aced38e8de29a89e 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -7,11 +7,15 @@ license.workspace = true version = "1.0.0" publish = false +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system" } futures = "0.3.21" +pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } pallet-xcm = { path = "../../pallet-xcm" } polkadot-test-client = { path = "../../../node/test/client" } polkadot-test-runtime = { path = "../../../runtime/test-runtime" } @@ -25,5 +29,5 @@ xcm-executor = { package = "staging-xcm-executor", path = ".." } sp-tracing = { path = "../../../../substrate/primitives/tracing" } [features] -default = [ "std" ] -std = [ "frame-support/std", "sp-runtime/std", "xcm/std" ] +default = ["std"] +std = ["frame-support/std", "sp-runtime/std", "xcm/std"] diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index d8c77f8317e1ff961e3a221b01b7ea45db4be80a..c02cb218885f9e2896b3c7ccf24156138c9e0177 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -76,33 +76,59 @@ fn transact_recursion_limit_works() { sp_tracing::try_init_simple(); let mut client = TestClientBuilder::new().build(); - let mut msg = Xcm(vec![ClearOrigin]); - let max_weight = ::Weigher::weight(&mut msg).unwrap(); - let mut call = polkadot_test_runtime::RuntimeCall::Xcm(pallet_xcm::Call::execute { - message: Box::new(VersionedXcm::from(msg)), - max_weight, - }); - - for _ in 0..11 { - let mut msg = Xcm(vec![ - WithdrawAsset((Parent, 1_000).into()), - BuyExecution { fees: (Parent, 1).into(), weight_limit: Unlimited }, + let base_xcm = |call: polkadot_test_runtime::RuntimeCall| { + Xcm(vec![ + WithdrawAsset((Here, 1_000).into()), + BuyExecution { fees: (Here, 1).into(), weight_limit: Unlimited }, Transact { origin_kind: OriginKind::Native, require_weight_at_most: call.get_dispatch_info().weight, call: call.encode().into(), }, - ]); + ]) + }; + let mut call: Option = None; + // set up transacts with recursive depth of 11 + for depth in (1..12).rev() { + let mut msg; + match depth { + // this one should fail with `XcmError::ExceedsStackLimit` + 11 => { + msg = Xcm(vec![ClearOrigin]); + }, + // this one checks that the inner one (depth 11) fails as expected, + // itself should not fail => should have outcome == Complete + 10 => { + let inner_call = call.take().unwrap(); + let expected_transact_status = + sp_runtime::DispatchError::Module(sp_runtime::ModuleError { + index: 27, + error: [24, 0, 0, 0], + message: Some("LocalExecutionIncomplete"), + }) + .encode() + .into(); + msg = base_xcm(inner_call); + msg.inner_mut().push(ExpectTransactStatus(expected_transact_status)); + }, + // these are the outer 9 calls that expect `ExpectTransactStatus(Success)` + d if d >= 1 && d <= 9 => { + let inner_call = call.take().unwrap(); + msg = base_xcm(inner_call); + msg.inner_mut().push(ExpectTransactStatus(MaybeErrorCode::Success)); + }, + _ => 
unreachable!(), + } let max_weight = ::Weigher::weight(&mut msg).unwrap(); - call = polkadot_test_runtime::RuntimeCall::Xcm(pallet_xcm::Call::execute { - message: Box::new(VersionedXcm::from(msg)), + call = Some(polkadot_test_runtime::RuntimeCall::Xcm(pallet_xcm::Call::execute { + message: Box::new(VersionedXcm::from(msg.clone())), max_weight, - }); + })); } let mut block_builder = client.init_polkadot_block_builder(); - let execute = construct_extrinsic(&client, call, sp_keyring::Sr25519Keyring::Alice, 0); + let execute = construct_extrinsic(&client, call.unwrap(), sp_keyring::Sr25519Keyring::Alice, 0); block_builder.push_polkadot_extrinsic(execute).expect("pushes extrinsic"); @@ -113,11 +139,29 @@ fn transact_recursion_limit_works() { .expect("imports the block"); client.state_at(block_hash).expect("state should exist").inspect_state(|| { - assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( - r.event, - polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted { - outcome: Outcome::Incomplete(_, XcmError::ExceedsStackLimit) - }), + let events = polkadot_test_runtime::System::events(); + // verify 10 pallet_xcm calls were successful + assert_eq!( + polkadot_test_runtime::System::events() + .iter() + .filter(|r| matches!( + r.event, + polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted { + outcome: Outcome::Complete(_) + }), + )) + .count(), + 10 + ); + // verify transaction fees have been paid + assert!(events.iter().any(|r| matches!( + &r.event, + polkadot_test_runtime::RuntimeEvent::TransactionPayment( + pallet_transaction_payment::Event::TransactionFeePaid { + who: payer, + .. + } + ) if *payer == sp_keyring::Sr25519Keyring::Alice.into(), ))); }); } diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e43d7a048992fce7fb0b8f9a79289407a17d8dab..ac256ea14899c1a7305903b575a1178d6377b2b4 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -32,7 +32,7 @@ pub mod traits; use traits::{ validate_export, AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, DropAssets, Enact, ExportXcm, FeeManager, FeeReason, OnResponse, Properties, ShouldExecute, - TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, + TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, XcmAssetTransfers, }; mod assets; @@ -254,6 +254,12 @@ impl ExecuteXcm for XcmExecutor XcmAssetTransfers for XcmExecutor { + type IsReserve = Config::IsReserve; + type IsTeleporter = Config::IsTeleporter; + type AssetTransactor = Config::AssetTransactor; +} + #[derive(Debug)] pub struct ExecutorError { pub index: u32, diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs new file mode 100644 index 0000000000000000000000000000000000000000..5fdc9b15e01541e0f77d126b4cbf4c69ba09a254 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::traits::TransactAsset; +use frame_support::traits::ContainsPair; +use scale_info::TypeInfo; +use sp_runtime::codec::{Decode, Encode}; +use xcm::prelude::*; + +/// Errors related to determining asset transfer support. +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// Invalid non-concrete asset. + NotConcrete, + /// Reserve chain could not be determined for assets. + UnknownReserve, +} + +/// Specify which type of asset transfer is required for a particular `(asset, dest)` combination. +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TransferType { + /// should teleport `asset` to `dest` + Teleport, + /// should reserve-transfer `asset` to `dest`, using local chain as reserve + LocalReserve, + /// should reserve-transfer `asset` to `dest`, using `dest` as reserve + DestinationReserve, + /// should reserve-transfer `asset` to `dest`, using remote chain `MultiLocation` as reserve + RemoteReserve(MultiLocation), +} + +/// A trait for identifying asset transfer type based on `IsTeleporter` and `IsReserve` +/// configurations. +pub trait XcmAssetTransfers { + /// Combinations of (Asset, Location) pairs which we trust as reserves. Meaning + /// reserve-based-transfers are to be used for assets matching this filter. + type IsReserve: ContainsPair; + + /// Combinations of (Asset, Location) pairs which we trust as teleporters. Meaning teleports are + /// to be used for assets matching this filter. + type IsTeleporter: ContainsPair; + + /// How to withdraw and deposit an asset. + type AssetTransactor: TransactAsset; + + /// Determine transfer type to be used for transferring `asset` from local chain to `dest`. 
+ fn determine_for(asset: &MultiAsset, dest: &MultiLocation) -> Result { + if Self::IsTeleporter::contains(asset, dest) { + // we trust destination for teleporting asset + return Ok(TransferType::Teleport) + } else if Self::IsReserve::contains(asset, dest) { + // we trust destination as asset reserve location + return Ok(TransferType::DestinationReserve) + } + + // try to determine reserve location based on asset id/location + let asset_location = match asset.id { + Concrete(location) => Ok(location.chain_location()), + _ => Err(Error::NotConcrete), + }?; + if asset_location == MultiLocation::here() || + Self::IsTeleporter::contains(asset, &asset_location) + { + // if the asset is local, then it's a local reserve + // it's also a local reserve if the asset's location is not `here` but it's a location + // where it can be teleported to `here` => local reserve + Ok(TransferType::LocalReserve) + } else if Self::IsReserve::contains(asset, &asset_location) { + // remote location that is recognized as reserve location for asset + Ok(TransferType::RemoteReserve(asset_location)) + } else { + // remote location that is not configured either as teleporter or reserve => cannot + // determine asset reserve + Err(Error::UnknownReserve) + } + } +} diff --git a/polkadot/xcm/xcm-executor/src/traits/mod.rs b/polkadot/xcm/xcm-executor/src/traits/mod.rs index a9439968fa6ca5b2d1bfae6097d985ae5a887f19..71e75c77e9394129c65505cc1daddb825fd4c077 100644 --- a/polkadot/xcm/xcm-executor/src/traits/mod.rs +++ b/polkadot/xcm/xcm-executor/src/traits/mod.rs @@ -20,10 +20,12 @@ mod conversion; pub use conversion::{CallDispatcher, ConvertLocation, ConvertOrigin, WithOriginFilter}; mod drop_assets; pub use drop_assets::{ClaimAssets, DropAssets}; -mod asset_lock; -pub use asset_lock::{AssetLock, Enact, LockError}; mod asset_exchange; pub use asset_exchange::AssetExchange; +mod asset_lock; +pub use asset_lock::{AssetLock, Enact, LockError}; +mod asset_transfer; +pub use asset_transfer::{Error as AssetTransferError, TransferType, XcmAssetTransfers}; mod export; pub use export::{export_xcm, validate_export, ExportXcm}; mod fee_manager; diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index eedcfa0032af41d2cb69844c54048d8abb5137c9..051e9752f6e485333a6b3d0507f4360000c9a8de 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -6,6 +6,9 @@ authors.workspace = true edition.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } paste = "1.0.7" diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index f0caa5ab48ec821debdcac62f8de6a860caa1d2c..522b7855837008a77f202fe0b2cbd4d65cb2d3f6 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -6,6 +6,9 @@ edition.workspace = true license.workspace = true version = "1.0.0" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } scale-info = { version = "2.10.0", features = ["derive"] } diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs index 03e7c19a9148de935de51ea1ef057bc9d9da2701..85b8ad1c5cb7bceb03ac42320432900d19471e55 100644 --- a/polkadot/xcm/xcm-simulator/example/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs @@ -649,23 +649,4 @@ mod tests { ); }); } - - #[test] - fn 
builder_pattern_works() { - let asset: MultiAsset = (Here, 100u128).into(); - let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); - let message: Xcm<()> = Xcm::builder() - .withdraw_asset(asset.clone().into()) - .buy_execution(asset.clone(), Unlimited) - .deposit_asset(asset.clone().into(), beneficiary) - .build(); - assert_eq!( - message, - Xcm(vec![ - WithdrawAsset(asset.clone().into()), - BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, - DepositAsset { assets: asset.into(), beneficiary }, - ]) - ); - } } diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs index fa9d3300619ad5a9021f95b52969d220df82ac13..69db81deff4fa568d8eb4a83814d07bbb6ee5aaf 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs @@ -19,7 +19,7 @@ use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{ContainsPair, EnsureOrigin, EnsureOriginWithArg, Everything, EverythingBut, Nothing}, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; @@ -40,10 +40,9 @@ use polkadot_parachain_primitives::primitives::{ use xcm::{latest::prelude::*, VersionedXcm}; use xcm_builder::{ Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, ConvertedConcreteId, - CurrencyAdapter as XcmCurrencyAdapter, EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, - IsConcrete, NativeAsset, NoChecking, NonFungiblesAdapter, ParentIsPreset, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, + EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, FungibleAdapter, IsConcrete, + NativeAsset, NoChecking, NonFungiblesAdapter, ParentIsPreset, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, }; use xcm_executor::{ traits::{ConvertLocation, JustTry}, @@ -63,6 +62,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; @@ -164,7 +164,7 @@ impl EnsureOriginWithArg for ForeignCreators { #[cfg(feature = "runtime-benchmarks")] fn try_successful_origin(a: &MultiLocation) -> Result { - Ok(pallet_xcm::Origin::Xcm(a.clone()).into()) + Ok(pallet_xcm::Origin::Xcm(*a).into()) } } @@ -201,7 +201,7 @@ parameter_types! { } pub type LocalAssetTransactor = ( - XcmCurrencyAdapter, LocationToAccountId, AccountId, ()>, + FungibleAdapter, LocationToAccountId, AccountId, ()>, NonFungiblesAdapter< ForeignUniques, ConvertedConcreteId, @@ -399,11 +399,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - pub struct TrustedLockerCase(PhantomData); impl> ContainsPair for TrustedLockerCase @@ -443,8 +438,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index 0fba4cb270d855fbdf88ffc475c23f8a806241e9..24fc56eb7174ba7d8a35ca170b7994c884cf38f6 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -17,7 +17,7 @@ //! Relay chain runtime mock. use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, Everything, Nothing, ProcessMessage, ProcessMessageError}, weights::{Weight, WeightMeter}, }; @@ -36,9 +36,9 @@ use xcm::latest::prelude::*; use xcm_builder::{ Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, AsPrefixedGeneralIndex, ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - ConvertedConcreteId, CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, - FixedWeightBounds, IsConcrete, NoChecking, NonFungiblesAdapter, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, + ConvertedConcreteId, FixedRateOfFungible, FixedWeightBounds, FungibleAdapter, IsConcrete, + NoChecking, NonFungiblesAdapter, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, }; use xcm_executor::{traits::JustTry, Config, XcmExecutor}; @@ -49,6 +49,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; @@ -140,7 +141,7 @@ pub type LocationToAccountId = ( ); pub type LocalAssetTransactor = ( - XcmCurrencyAdapter, LocationToAccountId, AccountId, ()>, + FungibleAdapter, LocationToAccountId, AccountId, ()>, NonFungiblesAdapter< Uniques, ConvertedConcreteId, JustTry>, @@ -199,11 +200,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1).into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -228,8 +224,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index acf28bec4f19480ae8135cb4e7f1845fc8d76566..1d13c76f17103ed84346d15b7495665fbdbd46d7 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -7,6 +7,9 @@ edition.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } honggfuzz = "0.5.55" diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index f9ad0252285c226255ae4554a6695a330c05d3dc..2262d18e86044990c1553d938833e08b77e74047 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -18,7 +18,7 @@ use codec::{Decode, Encode}; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{Everything, Nothing}, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; @@ -37,11 +37,12 @@ use polkadot_parachain_primitives::primitives::{ DmpMessageHandler, Id as ParaId, Sibling, XcmpMessageFormat, XcmpMessageHandler, }; use xcm::{latest::prelude::*, VersionedXcm}; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter as XcmCurrencyAdapter; use xcm_builder::{ - AccountId32Aliases, AllowUnpaidExecutionFrom, CurrencyAdapter as XcmCurrencyAdapter, - EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, IsConcrete, NativeAsset, - ParentIsPreset, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, + AccountId32Aliases, AllowUnpaidExecutionFrom, EnsureXcmOrigin, FixedRateOfFungible, + FixedWeightBounds, IsConcrete, NativeAsset, ParentIsPreset, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, }; use xcm_executor::{Config, XcmExecutor}; @@ -52,6 +53,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; @@ -131,6 +133,7 @@ parameter_types! { pub const MaxAssetsIntoHolding: u32 = 64; } +#[allow(deprecated)] pub type LocalAssetTransactor = XcmCurrencyAdapter, LocationToAccountId, AccountId, ()>; @@ -313,11 +316,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -341,8 +339,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 756cf4803b146486d85d087f2740ea16d75de166..bbf4f1e6cc5b10a9eb2d3723005bbbe25e324fb4 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -17,7 +17,7 @@ //! Relay chain runtime mock. use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{Everything, Nothing, ProcessMessage, ProcessMessageError}, weights::{Weight, WeightMeter}, }; @@ -33,11 +33,13 @@ use polkadot_runtime_parachains::{ origin, shared, }; use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter as XcmCurrencyAdapter; use xcm_builder::{ AccountId32Aliases, AllowUnpaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, FixedWeightBounds, IsConcrete, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, + ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, FixedRateOfFungible, + FixedWeightBounds, IsConcrete, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, }; use xcm_executor::{Config, XcmExecutor}; @@ -48,6 +50,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; @@ -113,6 +116,7 @@ parameter_types! { pub type SovereignAccountOf = (ChildParachainConvertsVia, AccountId32Aliases); +#[allow(deprecated)] pub type LocalAssetTransactor = XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; @@ -163,11 +167,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1).into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -192,8 +191,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl b/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl index 46bb8bcdf72b103a9b72be9a68ff90134a54466f..3e1d8ba771c43a69c917b5357afe36f1ebcfb6f6 100644 --- a/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl +++ b/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl @@ -32,6 +32,8 @@ alice: parachain 2005 block height is at least 10 within 300 seconds alice: parachain 2006 block height is at least 10 within 300 seconds alice: parachain 2007 block height is at least 10 within 300 seconds +alice: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds + # Check preparation time is under 10s. # Check all buckets <= 10. alice: reports histogram polkadot_pvf_preparation_time has at least 1 samples in buckets ["0.1", "0.5", "1", "2", "3", "10"] within 10 seconds @@ -54,8 +56,8 @@ one: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets [" two: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "120", "+Inf"] within 10 seconds # Check execution time. -# There are two different timeout conditions: BACKING_EXECUTION_TIMEOUT(2s) and -# APPROVAL_EXECUTION_TIMEOUT(6s). Currently these are not differentiated by metrics +# There are two different timeout conditions: DEFAULT_BACKING_EXECUTION_TIMEOUT(2s) and +# DEFAULT_APPROVAL_EXECUTION_TIMEOUT(12s). Currently these are not differentiated by metrics # because the metrics are defined in `polkadot-node-core-pvf` which is a level below # the relevant subsystems. # That being said, we will take the simplifying assumption of testing only the diff --git a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml index e70322e13e6bc4a1eabb477f22afff0cc8ed2afb..27cd81dface5ec21ba9650167a1cca62dc8d1489 100644 --- a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml +++ b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml @@ -5,6 +5,10 @@ timeout = 1000 max_validators_per_core = 5 needed_approvals = 8 +[relaychain.genesis.runtime.runtime_genesis_config.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl index a3f1f0669ac9e85cea978ac2b62991cb3a092297..d92820391d53d85fb549878e621000829f570452 100644 --- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl +++ b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl @@ -32,6 +32,10 @@ honest-flaky-validator-1: reports parachain_candidate_disputes_total is at least honest-flaky-validator-1: pause # Wait for 1 full session to pass after the last unconcluded dispute. 
+# +# TODO: replace with assertion for "New session detected" in logs. I think that +# would match on previous log lines, so we may need to programmatically wait for +# a specific session, requiring zombienet v2. sleep 110 seconds # Now resume flaky validators diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml new file mode 100644 index 0000000000000000000000000000000000000000..69eb0804d8cb70c58fbc70abdb091af3a197d2fb --- /dev/null +++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml @@ -0,0 +1,40 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + max_validators_per_core = 1 + needed_approvals = 1 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "honest" + count = 6 + args = ["-lparachain=debug"] + + [[relaychain.nodes]] + image = "{{MALUS_IMAGE}}" + name = "malus" + command = "malus dispute-finalized-candidates" + args = [ "--alice", "-lparachain=debug,MALUS=trace", "--dispute-offset=3" ] + +[[parachains]] +id = 2000 + + [parachains.collator] + image = "{{COL_IMAGE}}" + name = "collator" + command = "undying-collator" + args = ["-lparachain=debug"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..62d5a9768f9ebd0b6eee8e6ef92dae7735fbf791 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl @@ -0,0 +1,29 @@ +Description: Test if disputes triggered on finalized blocks within scope always end as valid. +Network: ./0007-dispute-freshly-finalized.toml +Creds: config + +# Check authority status and peers. +malus: reports node_roles is 4 +honest: reports node_roles is 4 + +# Ensure parachains are registered. +honest: parachain 2000 is registered within 30 seconds + +# Ensure parachains made progress. +honest: parachain 2000 block height is at least 10 within 200 seconds + +# Ensure that malus is already attempting to dispute +malus: log line contains "😈 Disputing candidate with hash:" within 180 seconds + +# Check if disputes are initiated and concluded. 
+honest: reports polkadot_parachain_candidate_disputes_total is at least 2 within 100 seconds +honest: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is at least 2 within 100 seconds +honest: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 100 seconds + +# Check lag - approval +honest: reports polkadot_parachain_approval_checking_finality_lag is 0 + +# Check lag - dispute conclusion +honest: reports polkadot_parachain_disputes_finality_lag is 0 + + diff --git a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml new file mode 100644 index 0000000000000000000000000000000000000000..1ea385c3a42ee8fdf89b7595151a98c26d9b011b --- /dev/null +++ b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml @@ -0,0 +1,40 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + max_validators_per_core = 1 + needed_approvals = 1 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "honest" + count = 6 + args = ["-lparachain=debug"] + + [[relaychain.nodes]] + image = "{{MALUS_IMAGE}}" + name = "malus" + command = "malus dispute-finalized-candidates" + args = [ "--alice", "-lparachain=debug,MALUS=trace", "--dispute-offset=14" ] + +[[parachains]] +id = 2000 + + [parachains.collator] + image = "{{COL_IMAGE}}" + name = "collator" + command = "undying-collator" + args = ["-lparachain=debug"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.zndsl b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..b30c5801a1dac291d8496967b27d2d976b92ce15 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.zndsl @@ -0,0 +1,21 @@ +Description: Test if disputes triggered on finalized blocks out of scope never get to be confirmed and concluded. +Network: ./0008-dispute-old-finalized.toml +Creds: config + +# Check authority status and peers. +malus: reports node_roles is 4 +honest: reports node_roles is 4 + + +# Ensure parachains are registered. +honest: parachain 2000 is registered within 30 seconds + +# Ensure parachains made progress. 
+honest: parachain 2000 block height is at least 20 within 300 seconds + +# Ensure that malus is already attempting to dispute +malus: log line contains "😈 Disputing candidate with hash:" within 180 seconds + +# Ensure that honest nodes don't participate and conclude any disputes +honest: count of log lines containing "Dispute on candidate concluded" is 0 within 100 seconds + diff --git a/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml new file mode 100644 index 0000000000000000000000000000000000000000..19c7015403d7d86b3ece2e7006995e86fc9c0ab7 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml @@ -0,0 +1,115 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + needed_approvals = 4 + relay_vrf_modulo_samples = 6 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "alice" + args = [ "-lparachain=trace,runtime=debug" ] + count = 13 + +[[parachains]] +id = 2000 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=1" + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=1", "--parachain-id=2000"] + +[[parachains]] +id = 2001 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=10" + + [parachains.collator] + name = "collator02" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2001", "--pvf-complexity=10"] + +[[parachains]] +id = 2002 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=100" + + [parachains.collator] + name = "collator03" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2002", "--pvf-complexity=100"] + +[[parachains]] +id = 2003 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=20000 --pvf-complexity=300" + + [parachains.collator] + name = "collator04" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=20000", "--parachain-id=2003", "--pvf-complexity=300"] + +[[parachains]] +id = 2004 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator05" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2004", "--pvf-complexity=300"] + +[[parachains]] +id = 2005 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=20000 --pvf-complexity=400" + + [parachains.collator] + name = "collator06" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=20000", "--pvf-complexity=400", "--parachain-id=2005"] + +[[parachains]] +id = 2006 +addToGenesis = true 
+genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator07" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=300", "--parachain-id=2006"] + +[[parachains]] +id = 2007 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator08" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=300", "--parachain-id=2007"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.zndsl b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..1fc4f678446008d62e94b221fe1ad7344216a49a --- /dev/null +++ b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.zndsl @@ -0,0 +1,32 @@ +Description: Approval voting coalescing does not lag finality +Network: ./0009-approval-voting-coalescing.toml +Creds: config + +# Check authority status. +alice: reports node_roles is 4 + +# Ensure parachains are registered. +alice: parachain 2000 is registered within 60 seconds +alice: parachain 2001 is registered within 60 seconds +alice: parachain 2002 is registered within 60 seconds +alice: parachain 2003 is registered within 60 seconds +alice: parachain 2004 is registered within 60 seconds +alice: parachain 2005 is registered within 60 seconds +alice: parachain 2006 is registered within 60 seconds +alice: parachain 2007 is registered within 60 seconds + +# Ensure parachains made progress. 
+alice: parachain 2000 block height is at least 10 within 300 seconds +alice: parachain 2001 block height is at least 10 within 300 seconds +alice: parachain 2002 block height is at least 10 within 300 seconds +alice: parachain 2003 block height is at least 10 within 300 seconds +alice: parachain 2004 block height is at least 10 within 300 seconds +alice: parachain 2005 block height is at least 10 within 300 seconds +alice: parachain 2006 block height is at least 10 within 300 seconds +alice: parachain 2007 block height is at least 10 within 300 seconds + +alice: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds + +alice: reports polkadot_parachain_approval_checking_finality_lag < 3 + +alice: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds diff --git a/polkadot/zombienet_tests/misc/0002-update-cmd.sh b/polkadot/zombienet_tests/misc/0002-update-cmd.sh new file mode 100755 index 0000000000000000000000000000000000000000..7d0dc53ca0df264f9926b0038137c3586a340adc --- /dev/null +++ b/polkadot/zombienet_tests/misc/0002-update-cmd.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -euxo pipefail + +if [[ $(grep "insecure-validator-i-know-what-i-do" /cfg/zombie.cmd) ]]; then + echo "insecure flag is already part of the cmd"; +else + echo -n " --insecure-validator-i-know-what-i-do" >> /cfg/zombie.cmd; +fi; + +echo "update-cmd" > /tmp/zombiepipe; \ No newline at end of file diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.toml b/polkadot/zombienet_tests/misc/0002-upgrade-node.toml index b6fff6c8cb834ce97724cfd11df3ec761cac1f3e..1edb18abcececa32cadcf3756ac11e66be5f12c6 100644 --- a/polkadot/zombienet_tests/misc/0002-upgrade-node.toml +++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.toml @@ -9,12 +9,10 @@ chain = "rococo-local" [[relaychain.nodes]] name = "alice" args = [ "-lparachain=debug,runtime=debug", "--db paritydb" ] - substrate_cli_args_version = 1 [[relaychain.nodes]] name = "bob" args = [ "-lparachain=debug,runtime=debug", "--db rocksdb" ] - substrate_cli_args_version = 1 [[relaychain.nodes]] name = "charlie" diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl index 9191fb027de0d0240805d6cb65e68adfc4f15942..db0a60ac1df617e5c89dc6a1385c4c106c1ead05 100644 --- a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl +++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl @@ -13,6 +13,11 @@ dave: parachain 2001 block height is at least 10 within 200 seconds # avg 30s in our infra alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds +# update the cmd to add the flag '--insecure-validator-i-know-what-i-do' +# once the base image include the version with this flag we can remove this logic. 
+alice: run ./0002-update-cmd.sh within 60 seconds +bob: run ./0002-update-cmd.sh within 60 seconds +# restart alice: restart after 5 seconds bob: restart after 5 seconds diff --git a/polkadot/zombienet_tests/smoke/0004-configure-broker.js b/polkadot/zombienet_tests/smoke/0004-configure-broker.js new file mode 100644 index 0000000000000000000000000000000000000000..a4939ffe1cb83dea531a362a0015e0b3d7ddb9df --- /dev/null +++ b/polkadot/zombienet_tests/smoke/0004-configure-broker.js @@ -0,0 +1,66 @@ +const assert = require("assert"); + +async function run(nodeName, networkInfo, _jsArgs) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + const calls = [ + // Default broker configuration + api.tx.broker.configure({ + advanceNotice: 2, + interludeLength: 1, + leadinLength: 1, + regionLength: 3, + idealBulkProportion: 100, + limitCoresOffered: null, + renewalBump: 10, + contributionTimeout: 5, + }), + // Make reservation for ParaId 100 (adder-a) every other block + // and ParaId 101 (adder-b) every other block. + api.tx.broker.reserve([ + { + mask: [255, 0, 255, 0, 255, 0, 255, 0, 255, 0], + assignment: { Task: 100 }, + }, + { + mask: [0, 255, 0, 255, 0, 255, 0, 255, 0, 255], + assignment: { Task: 101 }, + }, + ]), + // Start sale with 1 core starting at 1 planck + api.tx.broker.startSales(1, 1), + ]; + const sudo_batch = api.tx.sudo.sudo(api.tx.utility.batch(calls)); + + await new Promise(async (resolve, reject) => { + const unsub = await sudo_batch.signAndSend(alice, (result) => { + console.log(`Current status is ${result.status}`); + if (result.status.isInBlock) { + console.log( + `Transaction included at blockHash ${result.status.asInBlock}` + ); + } else if (result.status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${result.status.asFinalized}` + ); + unsub(); + return resolve(); + } else if (result.isError) { + console.log(`Transaction Error`); + unsub(); + return reject(); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/smoke/0004-configure-relay.js b/polkadot/zombienet_tests/smoke/0004-configure-relay.js new file mode 100644 index 0000000000000000000000000000000000000000..9ca23d86a561709bc6a0a4082f25ed7faa450048 --- /dev/null +++ b/polkadot/zombienet_tests/smoke/0004-configure-relay.js @@ -0,0 +1,43 @@ +const assert = require("assert"); + +async function run(nodeName, networkInfo, _jsArgs) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + const calls = [ + api.tx.configuration.setCoretimeCores({ new: 1 }), + api.tx.coretime.assignCore(0, 20,[[ { task: 1005 }, 57600 ]], null) + ]; + const sudo_batch = api.tx.sudo.sudo(api.tx.utility.batch(calls)); + + await new Promise(async (resolve, reject) => { + const unsub = await sudo_batch.signAndSend(alice, (result) => { + console.log(`Current status is ${result.status}`); + if (result.status.isInBlock) { + console.log( + `Transaction included at blockHash ${result.status.asInBlock}` + ); + } else if (result.status.isFinalized) { + console.log( + 
`Transaction finalized at blockHash ${result.status.asFinalized}` + ); + unsub(); + return resolve(); + } else if (result.isError) { + console.log(`Transaction Error`); + unsub(); + return reject(); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.toml b/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.toml new file mode 100644 index 0000000000000000000000000000000000000000..3bcd0bee3c71b2c2ca369c1029af6f3024367cdf --- /dev/null +++ b/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.toml @@ -0,0 +1,58 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.nodes]] + name = "alice" + args = ["-lruntime=debug,parachain=trace" ] + + [[relaychain.nodes]] + name = "bob" + args = ["-lruntime=debug,parachain=trace" ] + + [[relaychain.nodes]] + name = "charlie" + args = ["-lruntime=debug,parachain=trace" ] + +[[parachains]] +id = 1005 +chain = "coretime-rococo-local" + + [parachains.collator] + name = "coretime-collator" + image = "{{COL_IMAGE}}" + command = "polkadot-parachain" + args = [ "-lruntime=debug,parachain=trace" ] + +[[parachains]] +id = 100 +add_to_genesis = false +register_para = true +onboard_as_parachain = false + + [parachains.collator] + name = "adder-a" + image = "{{COL_IMAGE}}" + command = "adder-collator" + args = [ "-lruntime=debug,parachain=trace" ] + +[[parachains]] +id = 101 +add_to_genesis = false +register_para = true +onboard_as_parachain = false + + [parachains.collator] + name = "adder-b" + image = "{{COL_IMAGE}}" + command = "adder-collator" + args = [ "-lruntime=debug,parachain=trace" ] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl b/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..45e000e0bf8513a7284b1d8082ca5a042d6aa6f0 --- /dev/null +++ b/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl @@ -0,0 +1,19 @@ +Description: Bulk core assignment Smoke +Network: ./0004-coretime-smoke-test.toml +Creds: config + +alice: is up +coretime-collator: is up + +alice: reports block height is at least 3 within 30 seconds +# configure relay chain +alice: js-script ./0004-configure-relay.js with "" return is 0 within 600 secs + +# Wait 2 sessions. The parachain doesn't start block production immediately. +alice: log line contains "New session detected session_index=2" within 600 seconds + +# configure broker chain +coretime-collator: js-script ./0004-configure-broker.js with "" return is 0 within 600 secs + +# TODO: Fix this +# alice: parachain 100 block height is at least 10 within 600 seconds diff --git a/prdoc/.template.prdoc b/prdoc/.template.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..097741f388c4f2be73c9aed9d2b1dcc7dde32cf2 --- /dev/null +++ b/prdoc/.template.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: ... + +doc: + - audience: Node Dev + description: | + ... 
+ +crates: [ ] diff --git a/prdoc/pr_1234.prdoc b/prdoc/1.3.0/pr_1234.prdoc similarity index 83% rename from prdoc/pr_1234.prdoc rename to prdoc/1.3.0/pr_1234.prdoc index cc22a02d88b9d7a105bfe50dd548e9a9e98a34e9..e1e5d71050a86022431ec6fd4a2aefa6928dd0b5 100644 --- a/prdoc/pr_1234.prdoc +++ b/prdoc/1.3.0/pr_1234.prdoc @@ -4,17 +4,10 @@ title: Introduce XcmFeesToAccount fee manager doc: - - audience: Builder + - audience: Runtime User description: | Now all XCM sending, unless done by the system for the system, will be charged delivery fees. All runtimes are now configured to send these delivery fees to a treasury account. The fee formula is `delivery_fee_factor * (base_fee + encoded_msg_len * per_byte_fee)`. -migrations: - db: [] - - runtime: [] - -crates: [] - -host_functions: [] +crates: [ ] diff --git a/prdoc/1.3.0/pr_1255.prdoc b/prdoc/1.3.0/pr_1255.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c00a7c307e99c177338fc7a2735966a0a3429d67 --- /dev/null +++ b/prdoc/1.3.0/pr_1255.prdoc @@ -0,0 +1,21 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Fix for Reward Deficit in the pool + +doc: + - audience: Runtime Dev + description: | + Instead of fragile calculation of current balance by looking at free balance - ED, Nomination Pool now freezes ED in the pool reward account to restrict an account from going below minimum balance. This also has a nice side effect that if ED changes, we know how much is the imbalance in ED frozen in the pool and the current required ED. A pool operator can diligently top up the pool with the deficit in ED or vice versa, withdraw the excess they transferred to the pool. + + notes: + - Introduces new call `adjust_pool_deposit` that allows to top up the deficit or withdraw the excess deposit for the pool. + - Switch to using Fungible trait from Currency trait. + +migrations: + runtime: + - reference: pallet-nomination-pools + description: One time migration of freezing ED from each of the existing pools. + +crates: + - name: pallet-nomination-pools diff --git a/prdoc/pr_1818.prdoc b/prdoc/1.3.0/pr_1818.prdoc similarity index 63% rename from prdoc/pr_1818.prdoc rename to prdoc/1.3.0/pr_1818.prdoc index cbafa02f9af563e88f38a4fb4f74e4ae7c13fc1a..0f59a0f9124c512fcb9c5766fc2968faac07346b 100644 --- a/prdoc/pr_1818.prdoc +++ b/prdoc/1.3.0/pr_1818.prdoc @@ -1,16 +1,9 @@ title: FRAME pallets warning for unchecked weight witness doc: - - audience: Core Dev + - audience: Runtime Dev description: | FRAME pallets now emit a warning when a call uses a function argument that starts with an underscore in its weight declaration. -migrations: - db: [ ] - runtime: [ ] - -host_functions: [] - crates: -- name: "frame-support-procedural" - semver: minor + - name: frame-support-procedural diff --git a/prdoc/pr_1873.prdoc b/prdoc/1.3.0/pr_1873.prdoc similarity index 66% rename from prdoc/pr_1873.prdoc rename to prdoc/1.3.0/pr_1873.prdoc index 6f3bc7646db2a95d8ce8c15093317ed2d2b5af61..c22b732c72f90428f5683357b67fb5308b10c939 100644 --- a/prdoc/pr_1873.prdoc +++ b/prdoc/1.3.0/pr_1873.prdoc @@ -1,15 +1,9 @@ title: Message Queue use proper overweight limit doc: - - audience: Core Dev + - audience: Node Dev description: | Changed the overweight cutoff limit from the full `Config::ServiceWeight` to a lower value that is calculated based on the weight of the functions being called. 
-migrations: - db: [] - - runtime: [] - -crates: ["pallet-message-queue", patch] - -host_functions: [] +crates: + - name: pallet-message-queue diff --git a/prdoc/pr_1913.prdoc b/prdoc/1.3.0/pr_1913.prdoc similarity index 87% rename from prdoc/pr_1913.prdoc rename to prdoc/1.3.0/pr_1913.prdoc index 155057054eb5c5686805bfa7b970b99b401d0850..c2e7627c9acc4fc8f6ff243b010008cc94de383c 100644 --- a/prdoc/pr_1913.prdoc +++ b/prdoc/1.3.0/pr_1913.prdoc @@ -7,13 +7,6 @@ doc: If experiencing stability issues caused by BEEFY, it can be disabled using `--no-beefy` flag. BEEFY doesn't (yet) support warp sync. So, attempting to Warp sync as a validator will throw an error. -migrations: - db: [] - - runtime: [] - crates: - name: polkadot-cli - name: polkadot-service - -host_functions: [] diff --git a/prdoc/pr_1921.prdoc b/prdoc/1.3.0/pr_1921.prdoc similarity index 67% rename from prdoc/pr_1921.prdoc rename to prdoc/1.3.0/pr_1921.prdoc index 5ed0137cd5f9ee850265fe816060d34fbdf9fb38..e71a68fa829c0d74f27c8886f072ab3fcb96e6c4 100644 --- a/prdoc/pr_1921.prdoc +++ b/prdoc/1.3.0/pr_1921.prdoc @@ -1,19 +1,14 @@ title: Fix para-scheduler migration doc: - - audience: Core Dev + - audience: Runtime Dev description: | Changing the `MigrateToV1` migration in the `ParachainScheduler` pallet to be truly idempotent. It is achieved by wrapping it in a `VersionedMigration`. migrations: - db: [] - runtime: - - pallet: "ParachainScheduler" + - reference: ParachainScheduler description: Non-critical fixup for `MigrateToV1`. crates: - - name: "polkadot-runtime-parachains" - semver: patch - -host_functions: [] + - name: polkadot-runtime-parachains diff --git a/prdoc/1.3.0/readme.md b/prdoc/1.3.0/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..3d74fa34247c2708fdbb10c4e49c83e4176f7138 --- /dev/null +++ b/prdoc/1.3.0/readme.md @@ -0,0 +1,2 @@ +Version 1.3.0 does not support `prddoc` yet. +Some prdoc files are provided but the list is NOT complete. diff --git a/prdoc/pr_1178.prdoc b/prdoc/1.4.0/pr_1178.prdoc similarity index 96% rename from prdoc/pr_1178.prdoc rename to prdoc/1.4.0/pr_1178.prdoc index 36c3b05c7a3f0166e606fe2c2ce1c8801ee0a0c2..528e523c4328c3f18551ea4ce3d6c0955dd5a63a 100644 --- a/prdoc/pr_1178.prdoc +++ b/prdoc/1.4.0/pr_1178.prdoc @@ -6,9 +6,9 @@ doc: Changed approval-voting, approval-distribution to send all messages tranche0 assignments in one message. This required: * A new parachains_db version. - * A new validation protocol to support the new message types. + * A new validation protocol to support the new message types. The new logic will be disabled and will be enabled at a later date after all validators have upgraded. - + migrations: db: - name: Parachains database change from v3 to v4. @@ -18,6 +18,5 @@ migrations: crates: - name: "polkadot" - semver: patch host_functions: [] diff --git a/prdoc/pr_1246.prdoc b/prdoc/1.4.0/pr_1246.prdoc similarity index 87% rename from prdoc/pr_1246.prdoc rename to prdoc/1.4.0/pr_1246.prdoc index f9c867812331bfdd18f7c14a38a1dcc735cd2c9c..a4d270c45cb5915760ddfbd60e5e4b3b7c08cd4a 100644 --- a/prdoc/pr_1246.prdoc +++ b/prdoc/1.4.0/pr_1246.prdoc @@ -1,17 +1,17 @@ title: Use the `Message Queue` Pallet for DMP and XCMP dispatch queueing doc: - - audience: Parachain Dev + - audience: Runtime Dev description: Replaces the queueing capabilities of the `DMP and `XCMP-Queue` pallet for incoming messages with the `MessageQueue` pallet. This simplifies the code and improves security. 
migrations: runtime: - - pallet: "cumulus_pallet_dmp_queue" + reference: cumulus_pallet_dmp_queue description: "Messages from the DMP dispatch queue will be moved over to the MQ pallet via `on_initialize`. This happens over multiple blocks and emits a `Completed` event at the end. The pallet can be un-deployed and deleted afterwards. Note that the migration reverses the order of messages, which should be acceptable as a one-off." crates: - - name: "cumulus_pallet_xcmp_queue" + - name: cumulus_pallet_xcmp_queue note: Pallet config must be altered according to the MR description. host_functions: [] diff --git a/prdoc/pr_1256.prdoc b/prdoc/1.4.0/pr_1256.prdoc similarity index 100% rename from prdoc/pr_1256.prdoc rename to prdoc/1.4.0/pr_1256.prdoc diff --git a/prdoc/pr_1805.prdoc b/prdoc/1.4.0/pr_1805.prdoc similarity index 87% rename from prdoc/pr_1805.prdoc rename to prdoc/1.4.0/pr_1805.prdoc index 8a8e6c2fde2665eca306ff3adc8a2e701cc29a7d..30f0fbea3070f1e819df2f12f866d6a6f5a2a78f 100644 --- a/prdoc/pr_1805.prdoc +++ b/prdoc/1.4.0/pr_1805.prdoc @@ -1,7 +1,7 @@ title: Introduce state decoding check after runtime upgrades. doc: - - audience: Core Dev + - audience: Runtime Dev description: | Adds a check to the try-runtime logic that will verify that all pallet on-chain storage still decodes. This can help to spot missing migrations before they become a problem. The check is enabled as soon as the `--checks` option of the `try-runtime` CLI is not `None`. @@ -10,10 +10,8 @@ migrations: runtime: [] -crates: +crates: - name: frame-support - semver: minor - name: frame-support-procedural - semver: minor host_functions: [] diff --git a/prdoc/1.4.0/pr_1926.prdoc b/prdoc/1.4.0/pr_1926.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e7c4293d9fd452017af8e1fe9cd4e623558a8d88 --- /dev/null +++ b/prdoc/1.4.0/pr_1926.prdoc @@ -0,0 +1,24 @@ +title: Adds syntax for marking calls feeless + +doc: + - audience: Runtime Dev + description: | + 1. Adds an attribute `#[pallet::feeless_if]` that can be optionally attached to a `pallet::call`. + 2. Adds a signed extension SkipCheckIfFeeless that wraps a transaction + payment processor to potentially skip payment fees for such calls. + Note that both the attribute and the signed extension are needed to make the call feeless. + +migrations: + db: [] + + runtime: [] + +crates: + - name: frame-support-procedural + - name: pallet-skip-feeless-payment + - name: pallet-example-kitchensink + - name: kitchensink-runtime + - name: node-testing + - name: node-cli + +host_functions: [] diff --git a/prdoc/pr_2086.prdoc b/prdoc/1.4.0/pr_2086.prdoc similarity index 70% rename from prdoc/pr_2086.prdoc rename to prdoc/1.4.0/pr_2086.prdoc index a9bbd0729d5b08b8b2d482ba062ece9e443458a3..3bd568cc139883d70fe7556c18b017f1fc12965a 100644 --- a/prdoc/pr_2086.prdoc +++ b/prdoc/1.4.0/pr_2086.prdoc @@ -1,15 +1,12 @@ title: "Contracts: Add XCM traits to interface with contracts" doc: - - audience: Core Dev + - audience: Runtime Dev description: | We are introducing a new set of `XcmController` traits in `pallet-xcm`. - These traits extract functionality from `pallet-xcm` and provide high-level interaction with XCM. + These traits extract functionality from `pallet-xcm` and provide high-level interaction with XCM. They enable other pallets, like `pallet_contracts`, to rely on these traits instead of tight coupling to `pallet-xcm` itself. 
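A sketch of the `#[pallet::feeless_if]` syntax described in `pr_1926.prdoc` above, assuming the usual pallet scaffolding (a `#[frame_support::pallet]` module and prelude imports) around it; the call, its weight, and the `value == 0` condition are invented for illustration.

```rust
#[pallet::call]
impl<T: Config> Pallet<T> {
    /// Hypothetical call that is charged no fee whenever `value` is zero.
    /// The closure receives references to the origin and to every call argument
    /// and returns `true` when the fee should be skipped.
    #[pallet::call_index(0)]
    #[pallet::weight(Weight::from_parts(10_000, 0))]
    #[pallet::feeless_if(|_origin: &OriginFor<T>, value: &u32| -> bool { *value == 0 })]
    pub fn do_something(origin: OriginFor<T>, value: u32) -> DispatchResult {
        let _who = ensure_signed(origin)?;
        let _ = value; // real pallet logic would go here
        Ok(())
    }
}
```

As the description notes, the runtime must additionally wrap its transaction payment extension in `SkipCheckIfFeeless` for the fee to actually be skipped.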
crates: - - name: "pallet-xcm" - semver: patch - - name: "xcm-executor" - semver: patch - + - name: pallet-xcm + - name: xcm-executor diff --git a/prdoc/pr_2107.prdoc b/prdoc/1.4.0/pr_2107.prdoc similarity index 91% rename from prdoc/pr_2107.prdoc rename to prdoc/1.4.0/pr_2107.prdoc index 0e33680555ace33797db4dcc73f3d003eb1d5a23..be71828cbad5cf76e4ab9c1a5c9d11e2aa61a48c 100644 --- a/prdoc/pr_2107.prdoc +++ b/prdoc/1.4.0/pr_2107.prdoc @@ -4,14 +4,16 @@ title: Add a builder pattern to create XCM programs doc: - - audience: Core Dev + - audience: Runtime Dev description: | XCMs can now be built using a builder pattern like so: + ``` Xcm::builder() .withdraw_asset(assets) .buy_execution(fees, weight_limit) .deposit_asset(assets, beneficiary) .build(); + ``` migrations: db: [] diff --git a/prdoc/pr_2165.prdoc b/prdoc/1.4.0/pr_2165.prdoc similarity index 80% rename from prdoc/pr_2165.prdoc rename to prdoc/1.4.0/pr_2165.prdoc index 31cb691c43aabb58c81de10ca91abedaeaae0bb0..3b10bcfe6a5210bf9dc60ce1d1edf6b653e71b82 100644 --- a/prdoc/pr_2165.prdoc +++ b/prdoc/1.4.0/pr_2165.prdoc @@ -1,7 +1,7 @@ -title: Add sudo::remove_key +title: Add `sudo::remove_key` doc: - - audience: Core Dev + - audience: Runtime User description: | Pallet `Sudo` now has the ability to remove the sudo key via `remove_key`. This is a less-invasive way of rendering the sudo pallet useless without needing a code upgrade. @@ -12,6 +12,5 @@ migrations: crates: - name: pallet-sudo - semver: minor host_functions: [] diff --git a/prdoc/1.4.0/readme.md b/prdoc/1.4.0/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..e1a1055d9185eae47dff4c39269aa14f0e0e0d07 --- /dev/null +++ b/prdoc/1.4.0/readme.md @@ -0,0 +1,2 @@ +Version 1.4.0 does not support `prddoc` yet. +Some prdoc files are provided but the list is NOT complete. diff --git a/prdoc/1.5.0/pr_1370_special.prdoc b/prdoc/1.5.0/pr_1370_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..692a6e03170bfeafcfccdd82edab22e5d44cf905 --- /dev/null +++ b/prdoc/1.5.0/pr_1370_special.prdoc @@ -0,0 +1,9 @@ +title: Rework the event system of `sc-network` +author: altonen +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/pr_1408_prodc-introduction.prdoc b/prdoc/1.5.0/pr_1408_prodc-introduction.prdoc similarity index 76% rename from prdoc/pr_1408_prodc-introduction.prdoc rename to prdoc/1.5.0/pr_1408_prodc-introduction.prdoc index 4b10e0fe2e8139e973ab31cabc7fb181c75f4fba..46f56068e271b6ee1d2810c0434ad8d7f363eb9e 100644 --- a/prdoc/pr_1408_prodc-introduction.prdoc +++ b/prdoc/1.5.0/pr_1408_prodc-introduction.prdoc @@ -1,19 +1,15 @@ # This PR does not need a prdoc but it is provided in order to test title: PRdoc check +author: chevdor +topic: documentation + doc: - - audience: Core Dev + - audience: Node Dev description: | This PRdoc is an **example**. This PR brings support and automated checks for documentation in the form of a [`prdoc`](https://github.com/paritytech/prdoc/) file. 
-migrations: - db: [] - - runtime: [] - -crates: [] - -host_functions: [] +crates: [ ] diff --git a/prdoc/1.5.0/pr_1497_special.prdoc b/prdoc/1.5.0/pr_1497_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3d60354826073a7888d56fff3a5db82605d9af04 --- /dev/null +++ b/prdoc/1.5.0/pr_1497_special.prdoc @@ -0,0 +1,9 @@ +title: Update tick collator for async backing +author: Sophia-Gold +topic: Tests + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_1918_special.prdoc b/prdoc/1.5.0/pr_1918_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9220ee970bcb8681106f991ca543bdff46abdf83 --- /dev/null +++ b/prdoc/1.5.0/pr_1918_special.prdoc @@ -0,0 +1,9 @@ +title: Preserve artifact cache unless stale +author: eagr +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_1946_prdoc_new_schema.prdoc b/prdoc/1.5.0/pr_1946_prdoc_new_schema.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..fae063f6b1ecd349b9d252d6014a52bdd424051a --- /dev/null +++ b/prdoc/1.5.0/pr_1946_prdoc_new_schema.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: New PRDoc Schema + +author: chevdor +topic: documentation + +doc: + - audience: Node Dev + description: &desc | + The new version of prdoc and the new schema is activated in this PR. + + - audience: Runtime Dev + description: *desc + +crates: [] diff --git a/prdoc/1.5.0/pr_1985_special.prdoc b/prdoc/1.5.0/pr_1985_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c4305d6bb295b74ce182f732d160dbccf7eeed87 --- /dev/null +++ b/prdoc/1.5.0/pr_1985_special.prdoc @@ -0,0 +1,9 @@ +title: Enable parallel key scraping +author: eagr +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2001_special.prdoc b/prdoc/1.5.0/pr_2001_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..366b5fddb8b8d3e6f72d756eb5d58c69c7997cfc --- /dev/null +++ b/prdoc/1.5.0/pr_2001_special.prdoc @@ -0,0 +1,9 @@ +title: "cumulus-consensus-common: block import: `delayed_best_block` flag added" +author: michalkucharczyk +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2058_special.prdoc b/prdoc/1.5.0/pr_2058_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..6e3c83b09fa1b80469b16eb8d504f9ec0846f08f --- /dev/null +++ b/prdoc/1.5.0/pr_2058_special.prdoc @@ -0,0 +1,9 @@ +title: "PVF: Add test instructions" +author: mrcnski +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2142.prdoc b/prdoc/1.5.0/pr_2142.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9cd1b23906d05734adddbb4cad7f146ba52481da --- /dev/null +++ b/prdoc/1.5.0/pr_2142.prdoc @@ -0,0 +1,17 @@ +title: Cleanup XCMP `QueueConfigData` + +author: serban300 +topic: runtime + +doc: + - audience: Runtime Dev + description: Removes obsolete fields from the `QueueConfigData` structure. For the remaining fields, if they use the old defaults, we replace them with the new defaults. + +migrations: + runtime: + - reference: cumulus_pallet_xcmp_queue + description: "v4: Removes obsolete fields from the `QueueConfigData` structure. 
For the remaining fields, if they use the old defaults, we replace them with the new defaults." + +crates: [] + +host_functions: [] diff --git a/prdoc/1.5.0/pr_2167_special.prdoc b/prdoc/1.5.0/pr_2167_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..7bbde7002a2a8ef4f17d70de68acedc16ecf3648 --- /dev/null +++ b/prdoc/1.5.0/pr_2167_special.prdoc @@ -0,0 +1,9 @@ +title: "add pallet nomination-pools versioned migration to kitchensink" +author: brunopgalvao +topic: Tests + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2174_special.prdoc b/prdoc/1.5.0/pr_2174_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f23d2803e962c38f248c33a4e58834f7abc97e7f --- /dev/null +++ b/prdoc/1.5.0/pr_2174_special.prdoc @@ -0,0 +1,9 @@ +title: "chain-spec-builder: cleanup" +author: michalkucharczyk +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2182_special.prdoc b/prdoc/1.5.0/pr_2182_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..ad57bf6491634f7531ba1ee0da339bcecbcb0286 --- /dev/null +++ b/prdoc/1.5.0/pr_2182_special.prdoc @@ -0,0 +1,9 @@ +title: "remove retry from backers on failed candidate validation" +author: Jpserrat +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2184_special.prdoc b/prdoc/1.5.0/pr_2184_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..b838bf41ba1570e03ed78411390d580ea8a66ddf --- /dev/null +++ b/prdoc/1.5.0/pr_2184_special.prdoc @@ -0,0 +1,9 @@ +title: Zombienet tests - disputes on finalized blocks +author: Overkillus +topic: Tests + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2221_special.prdoc b/prdoc/1.5.0/pr_2221_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..dbd8c4a1fc14df1faf80f6d03336ab1d48ad243d --- /dev/null +++ b/prdoc/1.5.0/pr_2221_special.prdoc @@ -0,0 +1,9 @@ +title: "PVF worker: switch on seccomp networking restrictions" +author: mrcnski +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2250_special.prdoc b/prdoc/1.5.0/pr_2250_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d3f87b81b92478bb6b10a648c566ce2edd458ccb --- /dev/null +++ b/prdoc/1.5.0/pr_2250_special.prdoc @@ -0,0 +1,9 @@ +title: "crypto: `lazy_static` removed, light parser for address URI added" +author: michalkucharczyk +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2253.prdoc b/prdoc/1.5.0/pr_2253.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3f69bc2461e410518bf63ba168afdc82b8f7188f --- /dev/null +++ b/prdoc/1.5.0/pr_2253.prdoc @@ -0,0 +1,27 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Different builder pattern constructors for XCM + +author: franciscoaguirre +topic: runtime + +doc: + - audience: Runtime Dev + description: | + The `builder()` constructor for XCM programs now only allows building messages that pay for fees, + i.e. messages that would pass the `AllowTopLevelPaidExecutionFrom` barrier. + Another constructor, `builder_unpaid()` requires an explicit `UnpaidExecution` instruction before + anything else. 
+ For building messages without any restriction, `builder_unsafe` can be used. + This has been named like that since in general the other two should be used instead, but it's okay + to use it for teaching purposes or for experimenting. + +migrations: + db: [] + + runtime: [] + +crates: [] + +host_functions: [] diff --git a/prdoc/1.5.0/pr_2265_special.prdoc b/prdoc/1.5.0/pr_2265_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..336adec03abed0bd646cf1806c372d2537799fbd --- /dev/null +++ b/prdoc/1.5.0/pr_2265_special.prdoc @@ -0,0 +1,9 @@ +title: Remove im-online pallet from Rococo and Westend +author: s0me0ne-unkn0wn +topic: Pallets + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2300_special.prdoc b/prdoc/1.5.0/pr_2300_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..407f07663254db09c894cbe812cdbaaa9ed9190a --- /dev/null +++ b/prdoc/1.5.0/pr_2300_special.prdoc @@ -0,0 +1,9 @@ +title: '[testnet] Remove Wococo stuff from BridgeHubRococo/AssetHubRococo' +author: bkontur  +topic: Bridges + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2351_special.prdoc b/prdoc/1.5.0/pr_2351_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..16f9e5d15a797390105d6875107eaf4ee25aa74b --- /dev/null +++ b/prdoc/1.5.0/pr_2351_special.prdoc @@ -0,0 +1,9 @@ +title: "frame-system: Add last_runtime_upgrade_spec_version" +author: bkchr +topic: Frame + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2354_special.prdoc b/prdoc/1.5.0/pr_2354_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..5fbedef036159cc73e12d79922174f539ae27067 --- /dev/null +++ b/prdoc/1.5.0/pr_2354_special.prdoc @@ -0,0 +1,9 @@ +title: "Fix Typo: `PalletXcmExtrinsicsBenchmark`" +author: joepetrowski +topic: Benchmarks + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2361_special.prdoc b/prdoc/1.5.0/pr_2361_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d44b87287c431b2bd4e4198ed054439127a7fdb1 --- /dev/null +++ b/prdoc/1.5.0/pr_2361_special.prdoc @@ -0,0 +1,9 @@ +title: "[ci] Enable zombienet jobs in PRs" +author: alvicsam +topic: Tests + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2368_special.prdoc b/prdoc/1.5.0/pr_2368_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e8ebcb38d30a197af9aa523a512fc4587d2f4074 --- /dev/null +++ b/prdoc/1.5.0/pr_2368_special.prdoc @@ -0,0 +1,9 @@ +title: "implementers-guide: update github link" +author: ordian +topic: Documentation + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2369_special.prdoc b/prdoc/1.5.0/pr_2369_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..ebcc533712da84f649b84903c6cc12570a3557df --- /dev/null +++ b/prdoc/1.5.0/pr_2369_special.prdoc @@ -0,0 +1,9 @@ +title: "[NPoS] Check if staker is exposed in paged exposure storage entries" +author: Ank4n +topic: Pallets + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2377_special.prdoc b/prdoc/1.5.0/pr_2377_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..2985db6f3f82d934ab79a0351a4189514de53332 --- /dev/null +++ b/prdoc/1.5.0/pr_2377_special.prdoc @@ -0,0 +1,9 @@ +title: 
"fix typo" +author: cuteolaf +topic: Documentation + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2378_special.prdoc b/prdoc/1.5.0/pr_2378_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..bdc965000945fecb9a6aeb59231f8da48740d13d --- /dev/null +++ b/prdoc/1.5.0/pr_2378_special.prdoc @@ -0,0 +1,9 @@ +title: "Beefy: small fixes" +author: serban300 +topic: Bridges + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2380_special.prdoc b/prdoc/1.5.0/pr_2380_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..058be28bf5ddf7c7e08b826724e3cb88aeeede53 --- /dev/null +++ b/prdoc/1.5.0/pr_2380_special.prdoc @@ -0,0 +1,9 @@ +title: Deprecate `RewardDestination::Controller` +author: rossbulat +topic: XCM + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2381_special.prdoc b/prdoc/1.5.0/pr_2381_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..eb4020424d7f60abc0c850b464545a64dd28d982 --- /dev/null +++ b/prdoc/1.5.0/pr_2381_special.prdoc @@ -0,0 +1,9 @@ +title: Make collator RPC mode non-experimental +author: skunert +topic: Cumulus + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2385_special.prdoc b/prdoc/1.5.0/pr_2385_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a5239d30652c47dd7de91dab03e1f337798cd1dc --- /dev/null +++ b/prdoc/1.5.0/pr_2385_special.prdoc @@ -0,0 +1,9 @@ +title: "Relax `force_default_xcm_version` for testnet system parachains" +author: bkontur +topic: Cumulus + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2388.prdoc b/prdoc/1.5.0/pr_2388.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..8f79097b8f60c37dd446ea5b757efb2cfb2e2c9f --- /dev/null +++ b/prdoc/1.5.0/pr_2388.prdoc @@ -0,0 +1,30 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Add new flexible `pallet_xcm::transfer_assets()` call/extrinsic + +author: acatangiu +topic: runtime + +doc: + - audience: Runtime Dev + description: | + For complex combinations of asset transfers where assets and fees may have different reserves or + different reserve/teleport trust configurations, users can use the newly added `transfer_assets()` + extrinsic which is more flexible in allowing more complex scenarios. + The new extrinsic enables, for example, a (non-system) parachain to teleport their `ForeignAssets` + assets to `AssetHub` while using (reserve-based) `DOT` to pay fees. + notes: + - Now `(limited_)reserve_transfer_assets()` only allow reserve-based transfers for all assets + including fees, similarly `(limited_)teleport_assets()` only allows teleports for all assets + including fees. 
+ +migrations: + db: [] + + runtime: [] + +crates: + - name: pallet-xcm + +host_functions: [] diff --git a/prdoc/1.5.0/pr_2397_special.prdoc b/prdoc/1.5.0/pr_2397_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..5f07b269b1e236607a5ff24b7c12c0ca03ba1d60 --- /dev/null +++ b/prdoc/1.5.0/pr_2397_special.prdoc @@ -0,0 +1,9 @@ +title: "Pools: Add `MaxUnbonding` to metadata" +author: rossbulat +topic: Pallets + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2406_special.prdoc b/prdoc/1.5.0/pr_2406_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3fdb7ad8cf2f3793f099d0e5387c4ae97fcb55a0 --- /dev/null +++ b/prdoc/1.5.0/pr_2406_special.prdoc @@ -0,0 +1,9 @@ +title: Refactor ValidationError +author: eagr +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2411_special.prdoc b/prdoc/1.5.0/pr_2411_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..0bc01e66903ab3a9fe6dbed87f37b4baeb931e50 --- /dev/null +++ b/prdoc/1.5.0/pr_2411_special.prdoc @@ -0,0 +1,9 @@ +title: "polkadot-node-subsystems: `ChainApiBackend` added + polkadot-debug image version fixed" +author: michalkucharczyk +topic: Tests + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2413_special.prdoc b/prdoc/1.5.0/pr_2413_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..38083ba845b7fc57d666a8fb6a02bf663bae2998 --- /dev/null +++ b/prdoc/1.5.0/pr_2413_special.prdoc @@ -0,0 +1,9 @@ +title: "Update documentation for `SafeMode` and `TxPause` Pallets" +author: wilwade +topic: Documentation + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2426_special.prdoc b/prdoc/1.5.0/pr_2426_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a0f5ab8ac5b8cdb7100893570b4f38ba23d49b9e --- /dev/null +++ b/prdoc/1.5.0/pr_2426_special.prdoc @@ -0,0 +1,9 @@ +title: "PVF: Fix unshare `no such file or directory` error" +author: mrcnski +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2435_special.prdoc b/prdoc/1.5.0/pr_2435_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..b2bb7a2b8155ebb2b3f7fbc304f0ceaed61e9c1f --- /dev/null +++ b/prdoc/1.5.0/pr_2435_special.prdoc @@ -0,0 +1,9 @@ +title: "pallet-staking: Converts all math operations to safe" +author: gpestanaar +topic: Pallets + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2442_special.prdoc b/prdoc/1.5.0/pr_2442_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..52e672e765fe9322242340b64097a903d9aac9c2 --- /dev/null +++ b/prdoc/1.5.0/pr_2442_special.prdoc @@ -0,0 +1,9 @@ +title: "Fixes cumulus README instructions" +author: gpestana +topic: Documentation + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2446_special.prdoc b/prdoc/1.5.0/pr_2446_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9fec1ad139ccef59b36fcfb7c7785c83250e8d07 --- /dev/null +++ b/prdoc/1.5.0/pr_2446_special.prdoc @@ -0,0 +1,9 @@ +title: "sp-api: Move macro related re-exports to `__private`" +author: bkchr +topic: Runtime API + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2450_special.prdoc 
b/prdoc/1.5.0/pr_2450_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..343e71fbf6d77c52dda28a050b875aed0f21913a --- /dev/null +++ b/prdoc/1.5.0/pr_2450_special.prdoc @@ -0,0 +1,9 @@ +title: Adapt test worker to profile flag +author: eagr +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2455_special.prdoc b/prdoc/1.5.0/pr_2455_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..928b84678074b94d27fe316b171fe4ea195c8d71 --- /dev/null +++ b/prdoc/1.5.0/pr_2455_special.prdoc @@ -0,0 +1,9 @@ +title: "Remove `RuntimeApi` dependency on system parachain runtime code" +author: seadanda +topic: "System Parachains" + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2457_special.prdoc b/prdoc/1.5.0/pr_2457_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..ca6401206f468d6a6fd4c93aff3117a00e2a0bba --- /dev/null +++ b/prdoc/1.5.0/pr_2457_special.prdoc @@ -0,0 +1,9 @@ +title: "polkadot-parachain: one chain-spec for all" +author: michalkucharczyk +topic: "System Parachains" + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2459_special.prdoc b/prdoc/1.5.0/pr_2459_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..125f390f4ac9f58f5f0d4c8b6f5869ae47a90809 --- /dev/null +++ b/prdoc/1.5.0/pr_2459_special.prdoc @@ -0,0 +1,9 @@ +title: '[NPoS] Use `EraInfo` to manipulate exposure in fast-unstake tests' +author: Ank4n +topic: Pallets,Tests + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2461_special.prdoc b/prdoc/1.5.0/pr_2461_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..60a46714ca415ce18d8db311ea70df3ea9150808 --- /dev/null +++ b/prdoc/1.5.0/pr_2461_special.prdoc @@ -0,0 +1,9 @@ +title: "PVF: remove audit log access" +author: mrcnski +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2462_special.prdoc b/prdoc/1.5.0/pr_2462_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..ae1f148632762d15955cd2bdff94835acab5043c --- /dev/null +++ b/prdoc/1.5.0/pr_2462_special.prdoc @@ -0,0 +1,9 @@ +title: "relay-chain-consensus: set a fork_choice" +author: michalkucharczyk +topic: Node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2463_special.prdoc b/prdoc/1.5.0/pr_2463_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..0f35d50036f07df927ae9656ecfafc075d83d224 --- /dev/null +++ b/prdoc/1.5.0/pr_2463_special.prdoc @@ -0,0 +1,9 @@ +title: Add `on-chain-release-build` feature for Collectives Westend +author: liamaharon +topic: System Parachains + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2474_special.prdoc b/prdoc/1.5.0/pr_2474_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..42d67b5efa669bf195346ffbdbf6c048161eb378 --- /dev/null +++ b/prdoc/1.5.0/pr_2474_special.prdoc @@ -0,0 +1,9 @@ +title: "Pools: Add ability to configure commission claiming permissions" +author: rossbulat +topic: Pallets + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2483_special.prdoc b/prdoc/1.5.0/pr_2483_special.prdoc new file mode 100644 index 
0000000000000000000000000000000000000000..21fb045cae88f86c00f12f2b8c3484a8b4bdab26 --- /dev/null +++ b/prdoc/1.5.0/pr_2483_special.prdoc @@ -0,0 +1,9 @@ +title: Remove `dmp-queue`` pallet from Rococo Asset Hub and Bridge Hub +author: liamaharon +topic: Frame + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2486.prdoc b/prdoc/1.5.0/pr_2486.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c716f71c34e5a3a62a2fcba48faaf0fc804d307f --- /dev/null +++ b/prdoc/1.5.0/pr_2486.prdoc @@ -0,0 +1,25 @@ +title: "PVF: Add Secure Validator Mode" + +author: mrcnski +topic: node + +doc: + - audience: Node Operator + description: | + Secure Validator Mode has been enabled for Polkadot validators by default. + This enforces PVF validation security, and prevents starting a validator node if some security features are missing on the machine. + SVM can be disabled using the `--insecure-validator-i-know-what-i-do` flag. + +migrations: + db: [] + + runtime: [] + +crates: + - name: polkadot-cli + - name: polkadot-node-core-pvf + - name: polkadot-node-core-pvf-common + - name: polkadot-node-core-pvf-prepare-worker + - name: polkadot-node-core-pvf-execute-worker + +host_functions: [] diff --git a/prdoc/1.5.0/pr_2487_special.prdoc b/prdoc/1.5.0/pr_2487_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3d6a2e11e268caf8ab46ad52c4c09ab67ee39614 --- /dev/null +++ b/prdoc/1.5.0/pr_2487_special.prdoc @@ -0,0 +1,9 @@ +title: "Do not pollute global base path with export genesis/wasm" +author: bkchr +topic: Cumulus + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2501_special.prdoc b/prdoc/1.5.0/pr_2501_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..125b9452c984d93a198930f537e64cd4cbc8bb87 --- /dev/null +++ b/prdoc/1.5.0/pr_2501_special.prdoc @@ -0,0 +1,9 @@ +title: "Staking: `chill_other` takes stash instead of controller" +author: rossbulat +topic: Pallets + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2509_special.prdoc b/prdoc/1.5.0/pr_2509_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..03ebfd80c96d82ff09ceaf78f1b4cb7d7035ecfd --- /dev/null +++ b/prdoc/1.5.0/pr_2509_special.prdoc @@ -0,0 +1,9 @@ +title: "Breaking: Remove long deprecated `AllPalletsWithoutSystemReversed`" +author: skunert +topic: Frame + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2515_special.prdoc b/prdoc/1.5.0/pr_2515_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..4664058f86c82acaae2088a79ca1919507f782a4 --- /dev/null +++ b/prdoc/1.5.0/pr_2515_special.prdoc @@ -0,0 +1,9 @@ +title: Set `frame_system::LastRuntimeUpgrade` after running `try-runtime migrations` +author: liamaharon +topic: Frame + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2516_special.prdoc b/prdoc/1.5.0/pr_2516_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..5d452b63e59601b859a8550cd8621b9fbf9032dd --- /dev/null +++ b/prdoc/1.5.0/pr_2516_special.prdoc @@ -0,0 +1,9 @@ +title: Remove `dmp_queue pallet` from Westend SP runtimes +author: liamaharon +topic: Frame + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2521_special.prdoc b/prdoc/1.5.0/pr_2521_special.prdoc new file mode 100644 index 
0000000000000000000000000000000000000000..3b70150619e577485d434936ecbbbcc858502fc6 --- /dev/null +++ b/prdoc/1.5.0/pr_2521_special.prdoc @@ -0,0 +1,10 @@ +title: 'substrate-node: `NativeElseWasmExecutor` is no longer used' + +author: michalkucharczyk +topic: node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2526_special.prdoc b/prdoc/1.5.0/pr_2526_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..6008d7bfa9d570b1a992f61806df5d1b11320afe --- /dev/null +++ b/prdoc/1.5.0/pr_2526_special.prdoc @@ -0,0 +1,10 @@ +title: Remove `pov-recovery` race condition/Improve zombienet test + +author: skunert +topic: testing + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2552_special.prdoc b/prdoc/1.5.0/pr_2552_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9f0140c8142163489d2f0222764c7944c5057986 --- /dev/null +++ b/prdoc/1.5.0/pr_2552_special.prdoc @@ -0,0 +1,10 @@ +title: Withdraw Assets Before Checking Out in OnReapIdentity impl + +author: joepetrowski +topic: xcm + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2555_special.prdoc b/prdoc/1.5.0/pr_2555_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f817810f433e02c279d4ec2d96b21afdb467c14d --- /dev/null +++ b/prdoc/1.5.0/pr_2555_special.prdoc @@ -0,0 +1,10 @@ +title: Remove dependency on rand's SliceRandom shuffle implementation in `gossip-support` + +author: rphmeier +topic: node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2572_special.prdoc b/prdoc/1.5.0/pr_2572_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9d4c285798cc653d7ae91df9dea62773764621a2 --- /dev/null +++ b/prdoc/1.5.0/pr_2572_special.prdoc @@ -0,0 +1,10 @@ +title: Add missing glossary to ref docs + +author: juangirini +topic: documentation + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2579_special.prdoc b/prdoc/1.5.0/pr_2579_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..2992c92a8c5257ad6dca35eec82dc20b52a19101 --- /dev/null +++ b/prdoc/1.5.0/pr_2579_special.prdoc @@ -0,0 +1,10 @@ +title: "impl guide: update PVF host page; add diagrams" + +author: mrcnsk +topic: documentation + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2581_special.prdoc b/prdoc/1.5.0/pr_2581_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..ebe5855b401604f4e89ff3909854b6c118096a19 --- /dev/null +++ b/prdoc/1.5.0/pr_2581_special.prdoc @@ -0,0 +1,10 @@ +title: 'Bandersnatch: `ring-context` generic over domain size' + +author: davxy +topic: node + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2591.prdoc b/prdoc/1.5.0/pr_2591.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f827e70af8bc97fbd2c035b23d22f983e1e86aaf --- /dev/null +++ b/prdoc/1.5.0/pr_2591.prdoc @@ -0,0 +1,12 @@ +title: Ensure to cleanup state in `remove_member` + +author: bkchr +topic: runtime + +doc: + - audience: Runtime Dev + description: | + Cleans up the state properly if a member of a ranked collective is removed. 
+ +crates: + - name: pallet-ranked-collective diff --git a/prdoc/1.5.0/pr_2602_special.prdoc b/prdoc/1.5.0/pr_2602_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..56896348b4f7fc9d12f8e45d65fb955f166336d9 --- /dev/null +++ b/prdoc/1.5.0/pr_2602_special.prdoc @@ -0,0 +1,10 @@ +title: 'Bridges subtree update' + +author: bkontur +topic: bridges + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/pr_2625_special.prdoc b/prdoc/1.5.0/pr_2625_special.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3ffcf5986602cc682f43b27412db2f8381aae66c --- /dev/null +++ b/prdoc/1.5.0/pr_2625_special.prdoc @@ -0,0 +1,10 @@ +title: Improved `ExportXcm::validate` implementation for BridgeHubs + +author: bkontur +topic: bridges + +doc: + - audience: Runtime Dev + description: n/a + +crates: [] diff --git a/prdoc/1.5.0/readme.md b/prdoc/1.5.0/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..14b6d60331477d757cde68116e83d6f21ca4e036 --- /dev/null +++ b/prdoc/1.5.0/readme.md @@ -0,0 +1,2 @@ +Version 1.5.0 does not fully support `prdoc` yet. +While the list is complete, not all prdoc files have valid or accurate content. diff --git a/prdoc/pr_1191.prdoc b/prdoc/pr_1191.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..26626731be46864e1fc383a02aab1646364c1966 --- /dev/null +++ b/prdoc/pr_1191.prdoc @@ -0,0 +1,21 @@ +title: Approve multiple candidates with a single signature + +doc: + - audience: Node Operator + description: | + Changed approval-voting, approval-distribution to approve multiple candidates with a single message. It adds: + * A new parachains_db version. + * A new validation protocol to support the new message types. + The new logic will be disabled and will be enabled at a later date after all validators have upgraded. + +migrations: + db: + - name: Parachains database change from v4 to v5. + description: | + Approval-voting column format has been updated with several new fields. All existing data will automatically + be migrated to the new values. + +crates: + - name: "polkadot" + +host_functions: [] diff --git a/prdoc/pr_1226.prdoc b/prdoc/pr_1226.prdoc index df7a425b538496d22040f2b5d9151623ea534585..caef324bfd0d35f258af51a7c857fa965dc11bcd 100644 --- a/prdoc/pr_1226.prdoc +++ b/prdoc/pr_1226.prdoc @@ -1,17 +1,12 @@ title: Removed deprecated `Balances::transfer` and `Balances::set_balance_deprecated` functions. doc: - - audience: Builder - description: The Balances pallet's dispatchables `set_balance_deprecated` and `transfer` were deprecated in [paritytech/substrate#12951](https://github.com/paritytech/substrate/pull/12951) and have now been removed. - notes: - - Use `set_balance_deprecated` instead `force_set_balance` and `transfer_allow_death` instead of `transfer`. - -migrations: - db: [] + - audience: Runtime User + description: | + The Balances pallet's dispatchables `set_balance_deprecated` and `transfer` were deprecated in [paritytech/substrate#12951](https://github.com/paritytech/substrate/pull/12951) and have now been removed. - runtime: [] + notes: + - Use `force_set_balance` instead of `set_balance_deprecated` and `transfer_allow_death` instead of `transfer`.
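To make the note above concrete, a hedged fragment constructing the replacement calls; `Runtime`, `dest`, and the amounts are placeholders and the snippet assumes the usual runtime type aliases.

```rust
// `transfer_allow_death` replaces the removed `transfer`.
let transfer = pallet_balances::Call::<Runtime>::transfer_allow_death {
    dest: dest.clone(), // an assumed `MultiAddress` lookup value
    value: 1_000,
};
// `force_set_balance` (a root-only call) replaces the removed `set_balance_deprecated`.
let set_balance = pallet_balances::Call::<Runtime>::force_set_balance {
    who: dest,
    new_free: 5_000,
};
```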
crates: - name: pallet-balances - -host_functions: [] diff --git a/prdoc/pr_1255.prdoc b/prdoc/pr_1255.prdoc deleted file mode 100644 index 793b5c3c8597a785276637a796eb83be5738f42a..0000000000000000000000000000000000000000 --- a/prdoc/pr_1255.prdoc +++ /dev/null @@ -1,22 +0,0 @@ -# Schema: Parity PR Documentation Schema (prdoc) -# See doc at https://github.com/paritytech/prdoc - -title: Fix for Reward Deficit in the pool - -doc: - - audience: Core Dev - description: Instead of fragile calculation of current balance by looking at free balance - ED, Nomination Pool now freezes ED in the pool reward account to restrict an account from going below minimum balance. This also has a nice side effect that if ED changes, we know how much is the imbalance in ED frozen in the pool and the current required ED. A pool operator can diligently top up the pool with the deficit in ED or vice versa, withdraw the excess they transferred to the pool. - notes: - - Introduces new call `adjust_pool_deposit` that allows to top up the deficit or withdraw the excess deposit for the pool. - - Switch to using Fungible trait from Currency trait. - -migrations: - db: [] - - runtime: - - { pallet: "pallet-nomination-pools", description: "One time migration of freezing ED from each of the existing pools."} - -crates: - - name: pallet-nomination-pools - -host_functions: [] \ No newline at end of file diff --git a/prdoc/pr_1289.prdoc b/prdoc/pr_1289.prdoc index f3d8801d9d82efc4be2f9f0b0ee45c1c593ae9f7..059d7608ba6380b5b8d1b0c9edf93b2c619aa34a 100644 --- a/prdoc/pr_1289.prdoc +++ b/prdoc/pr_1289.prdoc @@ -4,25 +4,26 @@ title: Supporting paged rewards allowing all nominators to be rewarded doc: - - audience: Validator + - audience: Node Operator description: | We used to clip top `MaxNominatorRewardedPerValidator` nominators by stake that are eligible for staking reward. This was done to limit computation cost of paying out rewards. This PR introduces paging to reward payouts, meaning we still clip nominators upto MaxExposurePageSize per page and there could be multiple pages of rewards to be paid out. Validators get commission pro-rata to the amount of reward that is paid out for the page. - notes: - - payout_stakers should be called multiple times, once for each page of nominators. - - payout_stakers_by_page can be used to pay out rewards for a specific page. - - Some old non-paged era storage items are deprecated, and can be removed in a future upgrade. + notes: + - payout_stakers should be called multiple times, once for each page of nominators. + - payout_stakers_by_page can be used to pay out rewards for a specific page. + - Some old non-paged era storage items are deprecated, and can be removed in a future upgrade. migrations: db: [] runtime: - - { pallet: "pallet-staking", description: "v14: Migration of era exposure storage items to paged exposures."} + - reference: pallet-staking + description: "v14: Migration of era exposure storage items to paged exposures." crates: - name: pallet-staking -host_functions: [] \ No newline at end of file +host_functions: [] diff --git a/prdoc/pr_1343.prdoc b/prdoc/pr_1343.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..84168230e0afba483db2f653ad5bfe0a934ebf69 --- /dev/null +++ b/prdoc/pr_1343.prdoc @@ -0,0 +1,29 @@ +title: Tasks API - A general system for recognizing and executing service work + +doc: + - audience: Runtime Dev + description: | + The Tasks API allows you to define some service work that can be recognized by a script or an off-chain worker. 
+ Such a script can then create and submit all such work items at any given time. + `#[pallet::tasks_experimental]` provides a convenient way to define such work items. It can be attached to an + `impl` block inside a pallet, whose functions can then be annotated by the following attributes: + 1. `#[pallet::task_list]`: Define an iterator over the available work items for a task + 2. `#[pallet::task_condition]`: Define the conditions for a given work item to be valid + 3. `#[pallet::task_weight]`: Define the weight of a given work item + 4. `#[pallet::task_index]`: Define the index of a given work item + Each such function becomes a variant of the autogenerated enum `Task` for this pallet. + All such enums are aggregated into a `RuntimeTask` by `construct_runtime`. + An example pallet that uses the Tasks API is available at `substrate/frame/example/tasks`. + +migrations: + db: [] + + runtime: [] + +crates: + - name: frame-system + - name: frame-support + - name: frame-support-procedural + - name: pallet-example-tasks + +host_functions: [] diff --git a/prdoc/pr_1454.prdoc b/prdoc/pr_1454.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c96bfcde6cfe83067695ee04654f5a106d23286a --- /dev/null +++ b/prdoc/pr_1454.prdoc @@ -0,0 +1,10 @@ +title: Support XCM as part of Cosmos CosmWasm contract messages + +doc: + - audience: Runtime Dev + description: | + Put the XCM JSON schema behind a feature flag and bumped `bounded-collections` to ensure it has that flag too. + +crates: + - name: staging-xcm + - name: sp-weights diff --git a/prdoc/pr_1479.prdoc b/prdoc/pr_1479.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..33b798290f8327780f4eb172ecba1daa96e38460 --- /dev/null +++ b/prdoc/pr_1479.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Rococo/Westend Coretime Runtime + +doc: + - audience: Runtime User + description: | + Rococo/Westend runtime for the Coretime Chain (a.k.a. "Broker Chain") described in RFC-1. + +crates: [ ] \ No newline at end of file diff --git a/prdoc/pr_1677.prdoc b/prdoc/pr_1677.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9c5bee386ae34adf26be30d44ecb42697c94cc62 --- /dev/null +++ b/prdoc/pr_1677.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "pallet-asset-conversion: Swap Credit" + +doc: + - audience: Runtime Dev + description: | + Introduces a swap implementation that allows the exchange of a credit (aka Negative Imbalance) of one asset for a credit of another asset. + + This is particularly useful when a credit swap is required but may not have sufficient value to meet the ED constraint, hence cannot be deposited into a temporary account first. An example use case is when XCM fees are paid using an asset held in the XCM executor registry that has to be swapped for native currency. + + Additional Updates: + - encapsulates the existing `Swap` trait impl within a transactional context, since partial storage mutation is possible when an error occurs; + - supplied `Currency` and `Assets` impls must be implemented over the same `Balance` type; the `AssetBalance` generic type is dropped. This helps to avoid numerous type conversion and overflow cases.
If those types are different, the conversion should be handled outside of the pallet; + - `Box` asset kind on a pallet level, unbox on a runtime level - here [why](https://substrate.stackexchange.com/questions/10039/boxed-argument-of-a-dispatchable/10103#10103); + - `path` uses `Vec` now, instead of `BoundedVec` since it is never used in PoV; + - removes the `Transfer` event due to its redundancy with the events emitted by `fungible/s` implementations; + - modifies the `SwapExecuted` event type; + +crates: [ ] + diff --git a/prdoc/pr_1694.prdoc b/prdoc/pr_1694.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..24797630efc992224d81ce06b4e6e22c8de1249f --- /dev/null +++ b/prdoc/pr_1694.prdoc @@ -0,0 +1,24 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Agile Coretime Base Relaychain Functionality + +doc: + - audience: Runtime User + description: | + The relay chain is now capable of receiving assignments from the coretime + chain and will schedule parachains and on-demand orders accordingly. + Existing leases and system chains are preserved. They get a reserved + coretime core via a migration. +migrations: + db: [] + runtime: + - reference: polkadot-runtime-parachains + description: | + The claim queue in the scheduler no longer contains Option values and + assignments now contain information necessary to accommodate coretime + features. Also all existing parachains are converted to coretime + assignments. + +crates: + - name: polkadot-runtime-parachains diff --git a/prdoc/pr_2031.prdoc b/prdoc/pr_2031.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..fc2695df52e1b9205e77b22069938c4df99ec773 --- /dev/null +++ b/prdoc/pr_2031.prdoc @@ -0,0 +1,29 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "pallet-asset-conversion: Decoupling Native Currency Dependency" + +doc: + - audience: Runtime Dev + description: | + Decoupling Pallet from the Concept of Native Currency + + Previously, the pallet was intrinsically linked with the concept of native currency, requiring users to provide implementations of the `fungible::*` and `fungibles::*` traits to interact with native and non-native assets. This encapsulated complexity unrelated to the pallet and made it less adaptable in contexts where the native currency concept is absent. + + With this PR, the dependence on `fungible::*` for liquidity-supplying assets has been removed. Instead, the native and non-native currencies' handling is now overseen by a single type that implements the `fungibles::*` traits. To simplify this integration, types have been introduced to facilitate the creation of a union between `fungible::*` and `fungibles::*` implementations, producing a unified `fungibles::*` type. + + One of the reasons driving these changes is the ambition to create a more user-friendly API for the `SwapCredit` implementation. Given that it interacts with two distinct credit types from `fungible` and `fungibles`, a unified type was introduced. Clients now manage potential conversion failures for those credit types.
In certain contexts, it's vital to guarantee that operations are fail-safe, like in this impl - [PR](https://github.com/paritytech/polkadot-sdk/pull/1845), place in [code](https://github.com/paritytech/polkadot-sdk/blob/20b85a5fada8f55c98ba831964f5866ffeadf4da/cumulus/primitives/utility/src/lib.rs#L429). + + Additional Updates: + - abstracted the pool ID and its account derivation logic via trait bounds, along with common implementation offerings; + - removed `inc_providers` on a pool creation for the pool account; + - benchmarks: + -- swap complexity is N, not const; + -- removed `From + Into` bound from `T::Balance`; + -- removed swap/liquidity/.. amount constants, resolve them dynamically based on pallet configuration; + -- migrated to v2 API; + - `OnUnbalanced` handler for the pool creation fee, replacing direct transfers to a specified account ID; + - renamed `MultiAssetId` to `AssetKind` aligning with naming across frame crates; + +crates: + - name: pallet-asset-conversion diff --git a/prdoc/pr_2033.prdoc b/prdoc/pr_2033.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..eeb7ff2b4eed162ae435ff93e9a14f4442a5aacb --- /dev/null +++ b/prdoc/pr_2033.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "`UnionOf` types for merged `fungible` and `fungibles` implementations" + +doc: + - audience: Runtime Dev + description: | + Introduces `UnionOf` types, crafted to merge `fungible` and `fungibles` implementations or two + `fungibles` implementations into a single type implementing `fungibles`. This also addresses + an issue where `ItemOf` initiates a double drop for an imbalance type, leading to inaccurate + total issuance accounting. + +crates: [ ] diff --git a/prdoc/pr_2331.prdoc b/prdoc/pr_2331.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e3daf4c45bd414337535edc4c620a8d553c17f80 --- /dev/null +++ b/prdoc/pr_2331.prdoc @@ -0,0 +1,17 @@ +title: Rename `ExportGenesisStateCommand` to `ExportGenesisHeadCommand` + +doc: + - audience: Node Operator + description: | + The `export-genesis-state` subcommand is now called `export-genesis-head`, but + `export-genesis-state` stays as an alias to not break any scripts. + + - audience: Node Dev + description: | + The struct `ExportGenesisStateCommand` is now called `ExportGenesisHeadCommand`. + So, you only need to rename the import and usage. The `run` function now + takes only a `client` as argument to fetch the genesis header. This way + the exported genesis head respects custom genesis block builders. + +crates: + - name: "cumulus-client-cli" diff --git a/prdoc/pr_2403.prdoc b/prdoc/pr_2403.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f1c4d3ecbaf10785a63df49a606644a06e9e2d73 --- /dev/null +++ b/prdoc/pr_2403.prdoc @@ -0,0 +1,9 @@ +title: Configurable block number provider in pallet-vesting + +doc: + - audience: Runtime Dev + description: | + Adds `BlockNumberProvider` type to pallet-vesting Config trait, allowing for custom providers instead of hardcoding frame-system. + This is particularly useful for parachains wanting to use `cumulus_pallet_parachain_system::RelaychainDataProvider` with `pallet-vesting`.
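A hedged sketch of how the new `pallet-vesting` configuration item from `pr_2403.prdoc` above could look in a runtime; all the other associated types are just the usual wiring and their concrete values here are assumptions.

```rust
impl pallet_vesting::Config for Runtime {
    type RuntimeEvent = RuntimeEvent;
    type Currency = Balances;
    type BlockNumberToBalance = sp_runtime::traits::ConvertInto;
    type MinVestedTransfer = MinVestedTransfer;
    type WeightInfo = pallet_vesting::weights::SubstrateWeight<Runtime>;
    type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons;
    // New with this PR: the block number source is configurable. A parachain could
    // plug in `cumulus_pallet_parachain_system::RelaychainDataProvider<Runtime>` here
    // instead of the local frame-system block number.
    type BlockNumberProvider = frame_system::Pallet<Runtime>;
    const MAX_VESTING_SCHEDULES: u32 = 28;
}
```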
+ +crates: [ ] diff --git a/prdoc/pr_2481.prdoc b/prdoc/pr_2481.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d8736b1afd6eb76c340b1a4f86434be4a8c3d6df --- /dev/null +++ b/prdoc/pr_2481.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "xcm-builder: `HaulBlobExporter` with improved XCM version check." + +doc: + - audience: Runtime Dev + description: | + Version check in `HaulBlobExporter` uses new trait `CheckVersion` to check known/configured destination versions, + ensuring compatibility. `HaulBlobExporter` will attempt to downgrade the message to destination's known version + instead of using the latest version. + +crates: [ ] diff --git a/prdoc/pr_2522.prdoc b/prdoc/pr_2522.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9a98f984bacb510bfd10b888a49e711d5ccfad3a --- /dev/null +++ b/prdoc/pr_2522.prdoc @@ -0,0 +1,12 @@ +title: "Adds Snowbridge to Rococo runtime" + +doc: + - audience: Runtime Dev + description: | + Adds the snowbridge pallets as a git subtree under the bridges directory. Adds Snowbridge + to the Rococo Asset Hub and Bridge Hub runtimes. + + +crates: + - name: asset-hub-rococo-runtime + - name: bridge-hub-rococo-runtime diff --git a/prdoc/pr_2532.prdoc b/prdoc/pr_2532.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d0df0ee4aca95527dc4cc4b8ee77365d9b0ffdbb --- /dev/null +++ b/prdoc/pr_2532.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Westend Fellowship Treasury + +doc: + - audience: Runtime User + description: | + Treasury Pallet Instance for the Fellowship in Westend Collectives. + +crates: [ ] diff --git a/prdoc/pr_2597.prdoc b/prdoc/pr_2597.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..33d8505318416b331c686c1d95ca108f55c46cdd --- /dev/null +++ b/prdoc/pr_2597.prdoc @@ -0,0 +1,17 @@ +title: Make crate visible methods of `OverlayedChanges` public. + +doc: + - audience: Node Dev + description: | + Make some methods of `OverlayedChanges` namely `set_child_storage`, `clear_child_storage`, `clear_prefix` + and `clear_child_prefix` public which only had crate level visibility. + +migrations: + db: [] + + runtime: [] + +crates: + - name: sp-state-machine + +host_functions: [] diff --git a/prdoc/pr_2656.prdoc b/prdoc/pr_2656.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..563218dbde62bbb2c6cd565a2e7e427c9d8845d4 --- /dev/null +++ b/prdoc/pr_2656.prdoc @@ -0,0 +1,10 @@ +title: "pallet-broker: Small improvements to the origin checks" + +doc: + - audience: Runtime User + description: | + Change the permissionless calls `drop_region`, `drop_contribution`, `drop_history` and + `drop_renewal` to allow any kind of origin. 
+ +crates: + - name: "pallet-broker" diff --git a/prdoc/pr_2663-fix-could-not-create-temporary-drectory.prdoc b/prdoc/pr_2663-fix-could-not-create-temporary-drectory.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..2119599fce11cdf7070499d6e0be0fd4e859d14a --- /dev/null +++ b/prdoc/pr_2663-fix-could-not-create-temporary-drectory.prdoc @@ -0,0 +1,17 @@ +title: "PVF: fix unshare 'could not create temporary directory'" + +doc: + - audience: Node Operator + description: | + For validators: fixes the potential warning/error: + "Cannot unshare user namespace and change root, which are Linux-specific kernel security features: could not create a temporary directory in "/tmp/.tmpIcLriO". + +migrations: + db: [] + + runtime: [] + +crates: + - name: polkadot-node-core-pvf + +host_functions: [] diff --git a/prdoc/pr_2682.prdoc b/prdoc/pr_2682.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..eaa5f5a4a9a69782806c0805d3accddd6a50d182 --- /dev/null +++ b/prdoc/pr_2682.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Add Authorize Upgrade Pattern to Frame System" + +doc: + - audience: Runtime User + description: | + Adds the `authorize_upgrade` -> `enact_authorized_upgrade` pattern to `frame-system`. This + will be useful for upgrading bridged chains that are under the governance of Polkadot without + passing entire runtime Wasm blobs over a bridge. + + Notes: + + - Changed `enact_authorized_upgrade` to `apply_authorized_upgrade`. + - Left calls in `parachain-system` and marked as deprecated to prevent breaking the API. They + just call into the `frame-system` functions. + - Deprecated calls will be removed no earlier than June 2024. + - Updated `frame-system` benchmarks to v2 syntax. + +crates: [ ] diff --git a/prdoc/pr_2684.prdoc b/prdoc/pr_2684.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..8960b6460f0dafe8cdf0c382aa6b1a510a74c5d3 --- /dev/null +++ b/prdoc/pr_2684.prdoc @@ -0,0 +1,14 @@ +title: Add XCM FungibleAdapter + +doc: + - audience: Runtime Dev + description: | + A new AssetTransactor has been added to xcm-builder: FungibleAdapter. + It's meant to be used instead of the old CurrencyAdapter for configuring the XCM executor + to handle only one asset. + +crates: + - name: "xcm-builder" + +migrations: [] +host_functions: [] diff --git a/prdoc/pr_2687.prdoc b/prdoc/pr_2687.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..90e635d80529c0693254e6ffa07db5a6c31cbc3e --- /dev/null +++ b/prdoc/pr_2687.prdoc @@ -0,0 +1,18 @@ +title: "pallet-uniques: Move migration over to `VersionedMigration`" + +doc: + - audience: Runtime Dev + description: | + Moves the migration over to `VersionedMigration`. Thus, if you had + used `migrate_to_v1` before in a custom `OnRuntimeUpgrade` implementation + you can now directly use the `MigrateV0ToV1`. + +migrations: + runtime: + - reference: MigrateV0ToV1 + description: | + Migrate the pallet storage from `0` to `1` by initializing + the `CollectionAccount` storage entry from all collections. 
+ +crates: + - name: "pallet-uniques" diff --git a/prdoc/pr_2694.prdoc b/prdoc/pr_2694.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c393dcfeb9a8008d1e8921a2bfa88db79e6a414c --- /dev/null +++ b/prdoc/pr_2694.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "pallet-election-provider-multi-phase: Removes `BetterUnsignedThreshold` from pallet config" + +doc: + - audience: Runtime Dev + description: | + Removes thresholding for accepting solutions better than the last queued for unsigned phase. This is unnecessary + as even without thresholding, the number of solutions that can be submitted to on-chain which is better than the + previous one is limited. + +crates: + - name: "pallet-election-provider-multi-phase" diff --git a/prdoc/pr_2783.prdoc b/prdoc/pr_2783.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..0e4c9906541494b1e61984ed8576600f8fd53cb4 --- /dev/null +++ b/prdoc/pr_2783.prdoc @@ -0,0 +1,12 @@ +title: "Accept Root origin as valid sudo" + +doc: + - audience: Runtime User + description: | + Dispatchables of `pallet-sudo` will now also accept the `Root` origin + as valid `sudo` origin. This enhancement is useful for parachains that + allow the relay chain as a superuser. It enables the relay chain to send + an XCM message to initialize the sudo key. + +crates: + - name: "pallet-sudo" diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json new file mode 100644 index 0000000000000000000000000000000000000000..82215d51866b35895b5e840a8f3a900b161a9cf6 --- /dev/null +++ b/prdoc/schema_user.json @@ -0,0 +1,222 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema#", + "$id": "https://raw.githubusercontent.com/paritytech/prdoc/master/prdoc_schema_user.json", + "version": { + "major": 1, + "minor": 0, + "patch": 0, + "timestamp": 20230817152351 + }, + "title": "Polkadot SDK PRDoc Schema", + "description": "JSON Schema definition for the Polkadot SDK PR documentation", + "type": "object", + "additionalProperties": false, + "properties": { + "title": { + "title": "Title of the change", + "type": "string", + "description": "Title for the PR. This is what will show up in the release notes.\nif needed, you may provide a different title override for each audience in the `doc` property." + }, + "author": { + "title": "Author handle", + "type": "string", + "description": "Author handle" + }, + "topic": { + "title": "Topic", + "type": "string", + "description": "Topic" + }, + + "doc": { + "type": "array", + "title": "Documentation adapted to the audience(s)", + "description": "Description of the PR. 
Provide a description for each relevant audience.\nSee the `audience` property for more documentation about audiences", + "items": { + "$ref": "#/$defs/doc" + }, + "minItems": 1 + }, + + "crates": { + "title": "Crates", + "description": "You have the option to provide a hint about the crates that have noticeable changes.\n This is used during the crate publishing to crates.io and to help users understand the impact of the changes introduced in your PR.", + "type": "array", + "items": { + "$ref": "#/$defs/crate" + } + }, + + "migrations": { + "title": "Migrations (DB & Runtime)", + "description": "It is important for users to be aware of migrations.\nMake sure to mention any migrations in the appropriate sub-properties:\n- db\n- runtime", + "type": "object", + "properties": { + "db": { + "type": "array", + "nullable": false, + "title": "Database Migration", + "description": "List of the Database Migrations or empty array: []", + "items": { + "$ref": "#/$defs/migration_db" + }, + "minItems": 0, + "required": [ + "name", + "description" + ] + }, + "runtime": { + "type": "array", + "title": "Runtime Migration", + "nullable": false, + "description": "List of the Runtime Migrations or empty array: []", + "minItems": 0, + "items": { + "$ref": "#/$defs/migration_runtime" + }, + "required": [ + "db", + "runtime" + ] + } + } + }, + "host_functions": { + "title": "Host Functions", + "description": "List of the host functions involved in this PR.", + "type": "array", + "items": { + "$ref": "#/$defs/host_function" + } + } + }, + "required": [ + "title", + "doc", + "crates" + ], + "$defs": { + "audience": { + "description": "You may pick one or more audiences and address those users with appropriate documentation, information and warning related to the PR.", + "oneOf": [ + {"const": "Node Dev", + "title": "Node Dev", + "description": "Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs. These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol itself."}, + + {"const": "Runtime Dev", + "title": "Runtime Dev", + "description": "All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a pallet. These are people who care about the protocol (WASM), not the meta-protocol (client)."}, + + {"const": "Node Operator", + "title": "Node Operator", + "description": "Those who don't write any code and only run code."}, + + {"const": "Runtime User", + "title": "Runtime User", + "description": "Anyone using the runtime. 
This can be a token holder or a dev writing a front end for a chain."} + ] + }, + "crate": { + "type": "object", + "description": "You have the option here to provide a hint about a crate that has changed to help with the publishing of crates.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "note": { + "type": "string" + } + } + }, + "migration_db": { + "type": "object", + "description": "This property allows the documentation of database migrations.", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "name", + "description" + ] + }, + "migration_runtime": { + "type": "object", + "description": "This property allows the documentation of runtime migrations.", + "properties": { + "reference": { + "title": "Migration reference", + "description": "Reference to the runtime migration", + "type": "string" + }, + "description": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "description" + ] + }, + "doc": { + "type": "object", + "description": "You have the the option to provide different description of your PR for different audiences.", + "additionalProperties": false, + "properties": { + "audience": { + "description": "The selected audience", + "$ref": "#/$defs/audience" + }, + "title": { + "type": "string", + "title": "Title for the audience", + "description": "Optional title override for the PR and for the current audience" + }, + "description": { + "title": "Description for the audience", + "description": "Description of the change", + "type": "string" + } + } + }, + "array_of_strings": { + "description": "An array of strings that can be empty", + "type": "array", + "items": { + "type": "string" + } + }, + "host_function": { + "type": "object", + "additionalProperties": false, + "title": "Host Functions", + "description": "List of host functions and their descriptions", + "properties": { + "name": { + "title": "Host function name", + "description": "Name or identifier to find the host function in the codebase", + "type": "string" + }, + "description": { + "title": "Host function description", + "description": "Short description of the host function", + "type": "string" + }, + "notes": { + "type": "string" + } + }, + "required": [ + "name", + "description" + ] + } + } + } diff --git a/scripts/release/build-changelogs.sh b/scripts/release/build-changelogs.sh new file mode 100755 index 0000000000000000000000000000000000000000..a9275f45a50c479d27ff3cfffcb5bd82f0b815cf --- /dev/null +++ b/scripts/release/build-changelogs.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +export PRODUCT=polkadot +export VERSION=${VERSION:-1.5.0} + +PROJECT_ROOT=`git rev-parse --show-toplevel` +echo $PROJECT_ROOT + +TMP=$(mktemp -d) +TEMPLATE_AUDIENCE="${PROJECT_ROOT}/scripts/release/templates/audience.md.tera" +TEMPLATE_CHANGELOG="${PROJECT_ROOT}/scripts/release/templates/changelog.md.tera" + +DATA_JSON="${TMP}/data.json" +CONTEXT_JSON="${TMP}/context.json" +echo -e "TEMPLATE_AUDIENCE: \t$TEMPLATE_AUDIENCE" +echo -e "DATA_JSON: \t\t$DATA_JSON" +echo -e "CONTEXT_JSON: \t\t$CONTEXT_JSON" + +# Create output folder +OUTPUT="${TMP}/changelogs/$PRODUCT/$VERSION" +echo -e "OUTPUT: \t\t$OUTPUT" +mkdir -p $OUTPUT + +prdoc load -d "$PROJECT_ROOT/prdoc/$VERSION" --json > $DATA_JSON +# ls -al $DATA_JSON + +cat $DATA_JSON | jq ' { "prdoc" : .}' > $CONTEXT_JSON +# ls -al $CONTEXT_JSON + +# Fetch the list of valid audiences 
+SCHEMA_URL=https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json +SCHEMA=$(curl -s $SCHEMA_URL | sed 's|^//.*||') +AUDIENCE_ARRAY=$(echo -E $SCHEMA | jq -r '."$defs".audience.oneOf[] | .const') + +readarray -t audiences < <(echo "$AUDIENCE_ARRAY") +declare -p audiences + + +# Generate a changelog +echo "Generating changelog..." +tera -t "${TEMPLATE_CHANGELOG}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/changelog.md" +echo "Changelog ready in $OUTPUT/changelog.md" + +# Generate a release notes doc per audience +for audience in "${audiences[@]}"; do + audience_id="$(tr [A-Z] [a-z] <<< "$audience")" + audience_id="$(tr ' ' '_' <<< "$audience_id")" + echo "Processing audience: $audience ($audience_id)" + export TARGET_AUDIENCE=$audience + tera -t "${TEMPLATE_AUDIENCE}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/relnote_${audience_id}.md" +done + +# Show the files +tree -s -h -c $OUTPUT/ diff --git a/scripts/release/templates/audience.md.tera b/scripts/release/templates/audience.md.tera new file mode 100644 index 0000000000000000000000000000000000000000..dc507053dd5a1fb0f56c788e0f2b02408ba8221c --- /dev/null +++ b/scripts/release/templates/audience.md.tera @@ -0,0 +1,13 @@ +## Release {{ env.PRODUCT }} {{ env.VERSION }} + +Changelog for `{{ env.TARGET_AUDIENCE }}`. + +{% for file in prdoc -%} +#### PR #{{file.doc_filename.number}}: {{ file.content.title }} +{% for doc_item in file.content.doc %} +{%- if doc_item.audience == env.TARGET_AUDIENCE %} +{{ doc_item.description }} +{% endif -%} + +{%- endfor %} +{%- endfor %} diff --git a/scripts/release/templates/changelog.md.tera b/scripts/release/templates/changelog.md.tera new file mode 100644 index 0000000000000000000000000000000000000000..aaba761e8e47fa567db20c125ed9893c733da5dd --- /dev/null +++ b/scripts/release/templates/changelog.md.tera @@ -0,0 +1,7 @@ +## Changelog for `{{ env.PRODUCT | capitalize }} v{{ env.VERSION }}` + +{% for file in prdoc | sort(attribute="doc_filename.number") -%} +{%- set author= file.content.author | default(value="n/a") -%} +{%- set topic= file.content.topic | default(value="n/a") -%} +- #{{file.doc_filename.number}}: {{ file.content.title }} (@{{ author }}) [{{ topic | capitalize }}] +{% endfor -%} diff --git a/scripts/snowbridge_update_subtree.sh b/scripts/snowbridge_update_subtree.sh new file mode 100755 index 0000000000000000000000000000000000000000..2276bb35469f1e13cd58085e36fa49069123bcef --- /dev/null +++ b/scripts/snowbridge_update_subtree.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# A script to udpate bridges repo as subtree to Cumulus +# Usage: +# ./scripts/update_subtree_snowbridge.sh fetch +# ./scripts/update_subtree_snowbridge.sh patch + +set -e + +SNOWBRIDGE_BRANCH="${SNOWBRIDGE_BRANCH:-main}" +POLKADOT_SDK_BRANCH="${POLKADOT_SDK_BRANCH:-master}" +SNOWBRIDGE_TARGET_DIR="${TARGET_DIR:-bridges/snowbridge}" + +function fetch() { + # the script is able to work only on clean git copy + [[ -z "$(git status --porcelain)" ]] || { + echo >&2 "The git copy must be clean (stash all your changes):"; + git status --porcelain + exit 1; + } + + local snowbridge_remote=$(git remote -v | grep "snowbridge.git (fetch)" | head -n1 | awk '{print $1;}') + if [ -z "$snowbridge_remote" ]; then + echo "Adding new remote: 'snowbridge' repo..." + git remote add -f snowbridge https://github.com/Snowfork/snowbridge.git + snowbridge_remote="snowbridge" + else + echo "Fetching remote: '${snowbridge_remote}' repo..." 
+ git fetch https://github.com/Snowfork/snowbridge.git --prune + fi + + echo "Syncing/updating subtree with remote branch '${snowbridge_remote}/$SNOWBRIDGE_BRANCH' to target directory: '$SNOWBRIDGE_TARGET_DIR'" + git subtree pull --prefix=$SNOWBRIDGE_TARGET_DIR ${snowbridge_remote} $SNOWBRIDGE_BRANCH --squash +} + +function clean() { + echo "Patching/removing unneeded stuff from subtree in target directory: '$SNOWBRIDGE_TARGET_DIR'" + chmod +x $SNOWBRIDGE_TARGET_DIR/parachain/scripts/verify-pallets-build.sh + $SNOWBRIDGE_TARGET_DIR/parachain/scripts/verify-pallets-build.sh --ignore-git-state --no-revert +} + +function create_patch() { + [[ -z "$(git status --porcelain)" ]] || { + echo >&2 "The git copy must be clean (stash all your changes):"; + git status --porcelain + exit 1; + } + echo "Creating diff patch file to apply to snowbridge. No Cargo.toml files will be included in the patch." + git diff snowbridge/$SNOWBRIDGE_BRANCH $POLKADOT_SDK_BRANCH:bridges/snowbridge --diff-filter=ACM -- . ':(exclude)*/Cargo.toml' > snowbridge.patch +} + +case "$1" in + fetch) + fetch + ;; + clean) + clean + ;; + create_patch) + create_patch + ;; + update) + fetch + clean + ;; +esac diff --git a/substrate/Cargo.toml b/substrate/Cargo.toml deleted file mode 100644 index 8fb1be5821bad1f04b483188f190f99866f80a2f..0000000000000000000000000000000000000000 --- a/substrate/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "substrate" -description = "Next-generation framework for blockchain innovation" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.io" -repository.workspace = true -authors.workspace = true -edition.workspace = true -version = "1.0.0" -publish = false - -# The dependencies are only needed for docs. -[dependencies] -simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } - -subkey = { path = "bin/utils/subkey" } -chain-spec-builder = { package = "staging-chain-spec-builder", path = "bin/utils/chain-spec-builder" } - -sc-service = { path = "client/service" } -sc-chain-spec = { path = "client/chain-spec" } -sc-cli = { path = "client/cli" } -sc-consensus-aura = { path = "client/consensus/aura" } -sc-consensus-babe = { path = "client/consensus/babe" } -sc-consensus-grandpa = { path = "client/consensus/grandpa" } -sc-consensus-beefy = { path = "client/consensus/beefy" } -sc-consensus-manual-seal = { path = "client/consensus/manual-seal" } -sc-consensus-pow = { path = "client/consensus/pow" } - -sp-runtime = { path = "primitives/runtime" } -frame-support = { path = "frame/support" } diff --git a/substrate/README.md b/substrate/README.md index f7afa7a894d88f7bf57c8c7f6fb870a3e60c1b73..f69612772627430099e27dc07385c908431cbcb5 100644 --- a/substrate/README.md +++ b/substrate/README.md @@ -3,7 +3,7 @@ [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](#LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/mirrors/polkadot-sdk/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/pipelines) -[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.md) +[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/contributor/CONTRIBUTING.md) [![Stack Exchange](https://img.shields.io/badge/Substrate-Community%20&%20Support-24CC85?logo=stackexchange)](https://substrate.stackexchange.com/)

@@ -26,15 +26,13 @@ here](https://github.com/paritytech/polkadot-sdk/issues) for anything you suspec ## Contributions & Code of Conduct -Please follow the contributions guidelines as outlined in -[`docs/CONTRIBUTING.md`](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md). In all -communications and contributions, this project follows the [Contributor Covenant Code of -Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CODE_OF_CONDUCT.md). +Please follow the contributions guidelines as outlined in [`docs/contributor/CONTRIBUTING.md`](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md). +In all communications and contributions, this project follows the [Contributor Covenant Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md). ## Security The security policy and procedures can be found in -[`docs/SECURITY.md`](https://github.com/paritytech/polkadot-sdk/blob/master/docs/SECURITY.md). +[`docs/contributor/SECURITY.md`](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/SECURITY.md). ## License diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml index 0506d0838f1f930679d23792e6bbe241350a8cc4..532cded68de88c9f4ad05bcf11a2558eb8fa05f4 100644 --- a/substrate/bin/minimal/node/Cargo.toml +++ b/substrate/bin/minimal/node/Cargo.toml @@ -10,6 +10,9 @@ publish = false repository = "https://github.com/substrate-developer-hub/substrate-node-template/" build = "build.rs" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,16 +20,16 @@ targets = ["x86_64-unknown-linux-gnu"] name = "minimal-node" [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" jsonrpsee = { version = "0.16.2", features = ["server"] } serde_json = "1.0.108" sc-cli = { path = "../../../client/cli" } -sc-executor = { path = "../../../client/executor" } -sc-network = { path = "../../../client/network" } -sc-service = { path = "../../../client/service" } +sc-executor = { path = "../../../client/executor" } +sc-network = { path = "../../../client/network" } +sc-service = { path = "../../../client/service" } sc-telemetry = { path = "../../../client/telemetry" } sc-transaction-pool = { path = "../../../client/transaction-pool" } sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } @@ -47,7 +50,7 @@ sp-runtime = { path = "../../../primitives/runtime" } substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } -frame = { path = "../../../frame", features = ["runtime", "experimental"] } +frame = { path = "../../../frame", features = ["experimental", "runtime"] } runtime = { package = "minimal-runtime", path = "../runtime" } [build-dependencies] diff --git a/substrate/bin/minimal/node/src/main.rs b/substrate/bin/minimal/node/src/main.rs index 900651fd1fdb83c6146b67490eb1bd6551a1f9b6..3cf7d98311eaa3cde56813326117adde409b366d 100644 --- a/substrate/bin/minimal/node/src/main.rs +++ b/substrate/bin/minimal/node/src/main.rs @@ -19,11 +19,10 @@ #![warn(missing_docs)] mod chain_spec; -#[macro_use] -mod service; mod cli; mod command; mod rpc; +mod service; fn main() -> sc_cli::Result<()> { command::run() diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml index 
85d56d0638a59f930b0ea293b649d9cbab09559b..296106544bbfdbbb1f7f76b86d5c8ddefb54f917 100644 --- a/substrate/bin/minimal/runtime/Cargo.toml +++ b/substrate/bin/minimal/runtime/Cargo.toml @@ -8,13 +8,16 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. -frame = { path = "../../../frame", default-features = false, features = ["runtime", "experimental"] } -frame-support = { path = "../../../frame/support", default-features = false} +frame = { path = "../../../frame", default-features = false, features = ["experimental", "runtime"] } +frame-support = { path = "../../../frame/support", default-features = false } # pallets that we want to use pallet-balances = { path = "../../../frame/balances", default-features = false } @@ -24,14 +27,14 @@ pallet-transaction-payment = { path = "../../../frame/transaction-payment", defa pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } # genesis builder that allows us to interacto with runtime genesis config -sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false} +sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "frame/std", diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml index 61953631d799879894256a72b5dfde1a2cde5d1c..9d8c4430c211582c0a9cd63e851270273d27e0d1 100644 --- a/substrate/bin/node-template/node/Cargo.toml +++ b/substrate/bin/node-template/node/Cargo.toml @@ -10,6 +10,9 @@ publish = false repository = "https://github.com/substrate-developer-hub/substrate-node-template/" build = "build.rs" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,8 +20,8 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-template" [dependencies] -clap = { version = "4.4.6", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"]} +clap = { version = "4.4.11", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } serde_json = "1.0.108" sc-cli = { path = "../../../client/cli" } @@ -42,7 +45,7 @@ sp-timestamp = { path = "../../../primitives/timestamp" } sp-inherents = { path = "../../../primitives/inherents" } sp-keyring = { path = "../../../primitives/keyring" } frame-system = { path = "../../../frame/system" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false} +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } # These dependencies are used for the node template's RPCs jsonrpsee = { version = "0.16.2", features = ["server"] } @@ -62,7 +65,7 @@ frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli" } node-template-runtime = { path = "../runtime" } # CLI-specific dependencies -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true} +try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } [build-dependencies] 
substrate-build-script-utils = { path = "../../../utils/build-script-utils" } diff --git a/substrate/bin/node-template/node/src/main.rs b/substrate/bin/node-template/node/src/main.rs index 426cbabb6fbf7dc960f48736e092e0072984f23a..8918dd43a01195f1111ac1703070c9de03db254c 100644 --- a/substrate/bin/node-template/node/src/main.rs +++ b/substrate/bin/node-template/node/src/main.rs @@ -1,13 +1,12 @@ //! Substrate Node Template CLI library. #![warn(missing_docs)] -mod chain_spec; -#[macro_use] -mod service; mod benchmarking; +mod chain_spec; mod cli; mod command; mod rpc; +mod service; fn main() -> sc_cli::Result<()> { command::run() diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs index 403202829241ee682a688a435ccdc687ffa3af62..c4a2b2f39d2156339553515de6ad994659eae846 100644 --- a/substrate/bin/node-template/node/src/service.rs +++ b/substrate/bin/node-template/node/src/service.rs @@ -5,35 +5,17 @@ use node_template_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_consensus_grandpa::SharedVoterState; -pub use sc_executor::NativeElseWasmExecutor; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use std::{sync::Arc, time::Duration}; -// Our native executor instance. -pub struct ExecutorDispatch; - -impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { - /// Only enable the benchmarking host functions when we actually want to benchmark. - #[cfg(feature = "runtime-benchmarks")] - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - /// Otherwise we only use the default Substrate host functions. 
- #[cfg(not(feature = "runtime-benchmarks"))] - type ExtendHostFunctions = (); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - node_template_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - node_template_runtime::native_version() - } -} - -pub(crate) type FullClient = - sc_service::TFullClient>; +pub(crate) type FullClient = sc_service::TFullClient< + Block, + RuntimeApi, + sc_executor::WasmExecutor, +>; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; @@ -75,7 +57,7 @@ pub fn new_partial( }) .transpose()?; - let executor = sc_service::new_native_or_wasm_executor(config); + let executor = sc_service::new_wasm_executor::(config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( config, @@ -163,9 +145,9 @@ pub fn new_full(config: Configuration) -> Result { &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), &config.chain_spec, ); - net_config.add_notification_protocol(sc_consensus_grandpa::grandpa_peers_set_config( - grandpa_protocol_name.clone(), - )); + let (grandpa_protocol_config, grandpa_notification_service) = + sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()); + net_config.add_notification_protocol(grandpa_protocol_config); let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( backend.clone(), @@ -316,6 +298,7 @@ pub fn new_full(config: Configuration) -> Result { link: grandpa_link, network, sync: Arc::new(sync_service), + notification_service: grandpa_notification_service, voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), diff --git a/substrate/bin/node-template/pallets/template/Cargo.toml b/substrate/bin/node-template/pallets/template/Cargo.toml index 77183c42cd60c8b6bd9e1e7ad6387f60a5bd1eaa..51410a71c7bcee0267f36bbfcf20c616a5537ce3 100644 --- a/substrate/bin/node-template/pallets/template/Cargo.toml +++ b/substrate/bin/node-template/pallets/template/Cargo.toml @@ -9,6 +9,9 @@ license = "MIT-0" publish = false repository = "https://github.com/substrate-developer-hub/substrate-node-template/" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,10 +20,10 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../frame/benchmarking", default-features = false, optional = true} -frame-support = { path = "../../../../frame/support", default-features = false} -frame-system = { path = "../../../../frame/system", default-features = false} -sp-std = { path = "../../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../../../frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../frame/support", default-features = false } +frame-system = { path = "../../../../frame/system", default-features = false } +sp-std = { path = "../../../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../../../primitives/core" } @@ -28,7 +31,7 @@ sp-io = { path = "../../../../primitives/io" } sp-runtime = { path = "../../../../primitives/runtime" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git 
a/substrate/bin/node-template/pallets/template/src/benchmarking.rs b/substrate/bin/node-template/pallets/template/src/benchmarking.rs index 6c3cae6066b41982af15a56098bcec62080c4e47..5a262417629c579c6ecf5ada30ae803217623766 100644 --- a/substrate/bin/node-template/pallets/template/src/benchmarking.rs +++ b/substrate/bin/node-template/pallets/template/src/benchmarking.rs @@ -1,7 +1,6 @@ //! Benchmarking setup for pallet-template #![cfg(feature = "runtime-benchmarks")] use super::*; -use sp_std::vec; #[allow(unused)] use crate::Pallet as Template; diff --git a/substrate/bin/node-template/pallets/template/src/mock.rs b/substrate/bin/node-template/pallets/template/src/mock.rs index 244ae1b37859ba4405221459d66c4b1298eeca89..8346461e6ed9bd97ff306bc69f11876a5b3391be 100644 --- a/substrate/bin/node-template/pallets/template/src/mock.rs +++ b/substrate/bin/node-template/pallets/template/src/mock.rs @@ -1,5 +1,8 @@ use crate as pallet_template; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU16, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -17,6 +20,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml index 7711ddba34d0bb50e88941067bb7b513475ad7dd..14a64948c0bcbcd50cae79e38a92ec82029df7c8 100644 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ b/substrate/bin/node-template/runtime/Cargo.toml @@ -9,6 +9,9 @@ license = "MIT-0" publish = false repository = "https://github.com/substrate-developer-hub/substrate-node-template/" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,48 +19,48 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -pallet-aura = { path = "../../../frame/aura", default-features = false} -pallet-balances = { path = "../../../frame/balances", default-features = false} -frame-support = { path = "../../../frame/support", default-features = false} -pallet-grandpa = { path = "../../../frame/grandpa", default-features = false} -pallet-sudo = { path = "../../../frame/sudo", default-features = false} -frame-system = { path = "../../../frame/system", default-features = false} +pallet-aura = { path = "../../../frame/aura", default-features = false } +pallet-balances = { path = "../../../frame/balances", default-features = false } +frame-support = { path = "../../../frame/support", default-features = false } +pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } +pallet-sudo = { path = "../../../frame/sudo", default-features = false } +frame-system = { path = "../../../frame/system", default-features = false } frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false} -frame-executive = { path = "../../../frame/executive", default-features = false} -sp-api = { path = 
"../../../primitives/api", default-features = false} -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} +pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } +frame-executive = { path = "../../../frame/executive", default-features = false } +sp-api = { path = "../../../primitives/api", default-features = false } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../primitives/consensus/aura", default-features = false, features = ["serde"] } sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"]} -sp-inherents = { path = "../../../primitives/inherents", default-features = false} -sp-offchain = { path = "../../../primitives/offchain", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } +sp-inherents = { path = "../../../primitives/inherents", default-features = false } +sp-offchain = { path = "../../../primitives/offchain", default-features = false } sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../../primitives/session", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} -sp-storage = { path = "../../../primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false} +sp-session = { path = "../../../primitives/session", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-storage = { path = "../../../primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } # Used for the node template's RPCs -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false} +frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } # Used for runtime benchmarking frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false, optional = true } frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } # Local Dependencies -pallet-template = { path = "../pallets/template", default-features = false} +pallet-template = { path = "../pallets/template", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", 
@@ -119,4 +122,4 @@ try-runtime = [ "pallet-transaction-payment/try-runtime", "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/substrate/bin/node-template/runtime/src/lib.rs index 6aa4cb70fde17b8a6e6d6a113833e883ff727239..5f399edda98780402cfb472f7a3461b9b46ef042 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/substrate/bin/node-template/runtime/src/lib.rs @@ -12,9 +12,7 @@ use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{ - AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, One, Verify, - }, + traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, One, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; @@ -26,7 +24,7 @@ use sp_version::RuntimeVersion; use frame_support::genesis_builder_helper::{build_config, create_default_config}; // A few exports that help ease life for downstream crates. pub use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{ ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo, @@ -151,11 +149,11 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } -// Configure FRAME pallets to include in runtime. - +/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from +/// [`SoloChainDefaultConfig`](`struct@frame_system::config_preludes::SolochainDefaultConfig`), +/// but overridden as needed. +#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - /// The basic call filter to use in dispatchable. - type BaseCallFilter = frame_support::traits::Everything; /// The block type for the runtime. type Block = Block; /// Block & extrinsics weights: base values and limits. @@ -164,42 +162,20 @@ impl frame_system::Config for Runtime { type BlockLength = BlockLength; /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; /// The type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; /// Version of the runtime. type Version = Version; - /// Converts a module to the index of the module in `construct_runtime!`. - /// - /// This type is being generated by `construct_runtime!`. - type PalletInfo = PalletInfo; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The data to be stored in an account. 
type AccountData = pallet_balances::AccountData; - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); /// This is used as an identifier of the chain. 42 is the generic substrate prefix. type SS58Prefix = SS58Prefix; - /// The set code logic, just the default since we're not a parachain. - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } diff --git a/substrate/bin/node-template/rust-toolchain.toml b/substrate/bin/node-template/rust-toolchain.toml index 64daeff68360a3a1cc51d0d8c181bcd6f8f087d7..2a35c6ed07c1c2a667729d1fa558e3da0cd0457f 100644 --- a/substrate/bin/node-template/rust-toolchain.toml +++ b/substrate/bin/node-template/rust-toolchain.toml @@ -6,9 +6,9 @@ components = [ "rust-analyzer", "rust-src", "rust-std", - "rustc-dev", "rustc", + "rustc-dev", "rustfmt", ] -targets = [ "wasm32-unknown-unknown" ] +targets = ["wasm32-unknown-unknown"] profile = "minimal" diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index ee429ee8c0c18ed4239e948c61326f3878fe4b2b..48b3ef1b67e2d786e51ef0ae7d31d99e96c28552 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -9,11 +9,14 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] array-bytes = "6.1" -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } log = "0.4.17" node-primitives = { path = "../primitives" } node-testing = { path = "../testing" } @@ -21,7 +24,7 @@ kitchensink-runtime = { path = "../runtime" } sc-client-api = { path = "../../../client/api" } sp-runtime = { path = "../../../primitives/runtime" } sp-state-machine = { path = "../../../primitives/state-machine" } -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } kvdb = "0.13.0" @@ -31,14 +34,14 @@ sp-core = { path = "../../../primitives/core" } sp-consensus = { path = "../../../primitives/consensus/common" } sc-basic-authorship = { path = "../../../client/basic-authorship" } sp-inherents = { path = "../../../primitives/inherents" } -sp-timestamp = { path = "../../../primitives/timestamp", default-features = false} +sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } sp-tracing = { path = "../../../primitives/tracing" } hash-db = "0.16.0" tempfile = "3.1.0" fs_extra = "1" rand = { version = "0.8.5", features = ["small_rng"] } lazy_static = "1.4.0" -parity-db = "0.4.8" +parity-db = "0.4.12" sc-transaction-pool = { path = "../../../client/transaction-pool" } sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } futures = { version = "0.3.21", features = ["thread-pool"] } diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 5e7ffebaa8edf6e10704897d0bdac0a105adc84f..4f78bd65e8f909d5bfba0f00bd53df4aa925695a 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -11,6 +11,9 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.wasm-pack.profile.release] # `wasm-opt` has some problems on linux, see # https://github.com/rustwasm/wasm-pack/issues/781 etc. 
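The `derive_impl` hunks above replace long hand-written `frame_system::Config` impls with defaults taken from a config prelude. As a compact, self-contained sketch of the same pattern for a test runtime, mirroring the `TestDefaultConfig` usage in the node-template mock (the overrides listed are illustrative, not exhaustive):

```rust
use frame_support::{construct_runtime, derive_impl, traits::ConstU32};

construct_runtime!(
	pub enum Test {
		System: frame_system,
	}
);

// Everything not listed here is supplied by `TestDefaultConfig`; runtime-aggregated
// types such as `RuntimeCall` and `RuntimeEvent` are injected by `derive_impl`.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
	type Block = frame_system::mocking::MockBlock<Test>;
	type MaxConsumers = ConstU32<16>;
}
```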
@@ -38,9 +41,9 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies array-bytes = "6.1" -clap = { version = "4.4.6", features = ["derive"], optional = true } +clap = { version = "4.4.11", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } jsonrpsee = { version = "0.16.2", features = ["server"] } futures = "0.3.21" log = "0.4.17" @@ -79,7 +82,7 @@ sc-consensus-babe = { path = "../../../client/consensus/babe" } grandpa = { package = "sc-consensus-grandpa", path = "../../../client/consensus/grandpa" } sc-rpc = { path = "../../../client/rpc" } sc-basic-authorship = { path = "../../../client/basic-authorship" } -sc-service = { path = "../../../client/service", default-features = false} +sc-service = { path = "../../../client/service", default-features = false } sc-telemetry = { path = "../../../client/telemetry" } sc-executor = { path = "../../../client/executor" } sc-authority-discovery = { path = "../../../client/authority-discovery" } @@ -90,24 +93,25 @@ sc-storage-monitor = { path = "../../../client/storage-monitor" } sc-offchain = { path = "../../../client/offchain" } # frame dependencies +frame-benchmarking = { path = "../../../frame/benchmarking" } frame-system = { path = "../../../frame/system" } frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api" } pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } -pallet-im-online = { path = "../../../frame/im-online", default-features = false} +pallet-im-online = { path = "../../../frame/im-online", default-features = false } +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false } # node-specific dependencies kitchensink-runtime = { path = "../runtime" } node-rpc = { path = "../rpc" } node-primitives = { path = "../primitives" } -node-executor = { package = "staging-node-executor", path = "../executor" } # CLI-specific dependencies -sc-cli = { path = "../../../client/cli", optional = true} -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} -node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true} -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true} +sc-cli = { path = "../../../client/cli", optional = true } +frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } +node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } +try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } serde_json = "1.0.108" [dev-dependencies] @@ -128,27 +132,47 @@ regex = "1.6.0" platforms = "3.0" soketto = "0.7.1" criterion = { version = "0.4.0", features = ["async_tokio"] } -tokio = { version = "1.22.0", features = ["macros", "time", "parking_lot"] } +tokio = { version = "1.22.0", features = ["macros", "parking_lot", "time"] } tokio-util = { version = "0.7.4", features = ["compat"] } wait-timeout = "0.2" substrate-rpc-client = { path = "../../../utils/frame/rpc/client" } pallet-timestamp = { path = "../../../frame/timestamp" } substrate-cli-test-utils = { path = "../../../test-utils/cli" } 
+wat = "1.0" +frame-support = { path = "../../../frame/support" } +node-testing = { path = "../testing" } +pallet-balances = { path = "../../../frame/balances" } +pallet-contracts = { path = "../../../frame/contracts" } +pallet-glutton = { path = "../../../frame/glutton" } +pallet-sudo = { path = "../../../frame/sudo" } +pallet-treasury = { path = "../../../frame/treasury" } +pallet-transaction-payment = { path = "../../../frame/transaction-payment" } +sp-application-crypto = { path = "../../../primitives/application-crypto" } +pallet-root-testing = { path = "../../../frame/root-testing" } +sp-consensus-babe = { path = "../../../primitives/consensus/babe" } +sp-externalities = { path = "../../../primitives/externalities" } +sp-keyring = { path = "../../../primitives/keyring" } +sp-runtime = { path = "../../../primitives/runtime" } +serde_json = "1.0.108" +scale-info = { version = "2.10.0", features = ["derive", "serde"] } +sp-trie = { path = "../../../primitives/trie" } +sp-state-machine = { path = "../../../primitives/state-machine" } + [build-dependencies] -clap = { version = "4.4.6", optional = true } +clap = { version = "4.4.11", optional = true } clap_complete = { version = "4.0.2", optional = true } -node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true} -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} -substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true} -substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true} -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true} +node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } +frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } +substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true } +substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true } +try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } sc-cli = { path = "../../../client/cli", optional = true } pallet-balances = { path = "../../../frame/balances" } sc-storage-monitor = { path = "../../../client/storage-monitor" } [features] -default = [ "cli" ] +default = ["cli"] cli = [ "clap", "clap_complete", @@ -162,13 +186,21 @@ cli = [ ] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "kitchensink-runtime/runtime-benchmarks", + "node-inspect?/runtime-benchmarks", "pallet-asset-tx-payment/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-contracts/runtime-benchmarks", + "pallet-glutton/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", + "pallet-skip-feeless-payment/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", @@ -176,14 +208,22 @@ runtime-benchmarks = [ # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
try-runtime = [ + "frame-support/try-runtime", "frame-system/try-runtime", "kitchensink-runtime/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-tx-payment/try-runtime", "pallet-assets/try-runtime", "pallet-balances/try-runtime", + "pallet-contracts/try-runtime", + "pallet-glutton/try-runtime", "pallet-im-online/try-runtime", + "pallet-root-testing/try-runtime", + "pallet-skip-feeless-payment/try-runtime", + "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-treasury/try-runtime", "sp-runtime/try-runtime", "substrate-cli-test-utils/try-runtime", "try-runtime-cli/try-runtime", @@ -196,3 +236,7 @@ harness = false [[bench]] name = "block_production" harness = false + +[[bench]] +name = "executor" +harness = false diff --git a/substrate/bin/node/executor/benches/bench.rs b/substrate/bin/node/cli/benches/executor.rs similarity index 69% rename from substrate/bin/node/executor/benches/bench.rs rename to substrate/bin/node/cli/benches/executor.rs index 587e76af867cb40b9d5b533869085ce79bc3a205..a326e1a79ea347f169e372581d07dc4f43848e24 100644 --- a/substrate/bin/node/executor/benches/bench.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -22,20 +22,16 @@ use kitchensink_runtime::{ constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, RuntimeCall, RuntimeGenesisConfig, UncheckedExtrinsic, }; -use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; -use sc_executor::{ - Externalities, NativeElseWasmExecutor, RuntimeVersionOf, WasmExecutionMethod, WasmExecutor, - WasmtimeInstantiationStrategy, -}; +use sc_executor::{Externalities, RuntimeVersionOf}; use sp_core::{ storage::well_known_keys, traits::{CallContext, CodeExecutor, RuntimeCode}, }; use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; -use staging_node_executor as node_executor; +use staging_node_cli::service::RuntimeExecutor; criterion_group!(benches, bench_execute_block); criterion_main!(benches); @@ -58,12 +54,6 @@ const HEAP_PAGES: u64 = 20; type TestExternalities = CoreTestExternalities; -#[derive(Debug)] -enum ExecutionMethod { - Native, - Wasm(WasmExecutionMethod), -} - fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) } @@ -80,7 +70,7 @@ fn new_test_ext(genesis_config: &RuntimeGenesisConfig) -> TestExternalities( - executor: &NativeElseWasmExecutor, + executor: &RuntimeExecutor, ext: &mut E, number: BlockNumber, parent_hash: Hash, @@ -113,14 +103,7 @@ fn construct_block( // execute the block to get the real header. 
executor - .call( - ext, - &runtime_code, - "Core_initialize_block", - &header.encode(), - true, - CallContext::Offchain, - ) + .call(ext, &runtime_code, "Core_initialize_block", &header.encode(), CallContext::Offchain) .0 .unwrap(); @@ -131,7 +114,6 @@ fn construct_block( &runtime_code, "BlockBuilder_apply_extrinsic", &i.encode(), - true, CallContext::Offchain, ) .0 @@ -145,7 +127,6 @@ fn construct_block( &runtime_code, "BlockBuilder_finalize_block", &[0u8; 0], - true, CallContext::Offchain, ) .0 @@ -159,7 +140,7 @@ fn construct_block( fn test_blocks( genesis_config: &RuntimeGenesisConfig, - executor: &NativeElseWasmExecutor, + executor: &RuntimeExecutor, ) -> Vec<(Vec, Hash)> { let mut test_ext = new_test_ext(genesis_config); let mut block1_extrinsics = vec![CheckedExtrinsic { @@ -181,56 +162,42 @@ fn test_blocks( fn bench_execute_block(c: &mut Criterion) { let mut group = c.benchmark_group("execute blocks"); - let execution_methods = vec![ - ExecutionMethod::Native, - ExecutionMethod::Wasm(WasmExecutionMethod::Compiled { - instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, - }), - ]; - - for strategy in execution_methods { - group.bench_function(format!("{:?}", strategy), |b| { - let genesis_config = node_testing::genesis::config(); - let use_native = match strategy { - ExecutionMethod::Native => true, - ExecutionMethod::Wasm(..) => false, - }; - - let executor = - NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()); - let runtime_code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()), - hash: vec![1, 2, 3], - heap_pages: None, - }; - - // Get the runtime version to initialize the runtimes cache. - { - let mut test_ext = new_test_ext(&genesis_config); - executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap(); - } - - let blocks = test_blocks(&genesis_config, &executor); - - b.iter_batched_ref( - || new_test_ext(&genesis_config), - |test_ext| { - for block in blocks.iter() { - executor - .call( - &mut test_ext.ext(), - &runtime_code, - "Core_execute_block", - &block.0, - use_native, - CallContext::Offchain, - ) - .0 - .unwrap(); - } - }, - BatchSize::LargeInput, - ); - }); - } + + group.bench_function("wasm", |b| { + let genesis_config = node_testing::genesis::config(); + + let executor = RuntimeExecutor::builder().build(); + let runtime_code = RuntimeCode { + code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()), + hash: vec![1, 2, 3], + heap_pages: None, + }; + + // Get the runtime version to initialize the runtimes cache. 
+ { + let mut test_ext = new_test_ext(&genesis_config); + executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap(); + } + + let blocks = test_blocks(&genesis_config, &executor); + + b.iter_batched_ref( + || new_test_ext(&genesis_config), + |test_ext| { + for block in blocks.iter() { + executor + .call( + &mut test_ext.ext(), + &runtime_code, + "Core_execute_block", + &block.0, + CallContext::Offchain, + ) + .0 + .unwrap(); + } + }, + BatchSize::LargeInput, + ); + }); } diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs index 16d0415ff2637fe4f613cb302bec81a7966224e8..dc28705c2aea9323d0ce84ae901d0206fe513efe 100644 --- a/substrate/bin/node/cli/src/command.rs +++ b/substrate/bin/node/cli/src/command.rs @@ -24,7 +24,6 @@ use crate::{ }; use frame_benchmarking_cli::*; use kitchensink_runtime::{ExistentialDeposit, RuntimeApi}; -use node_executor::ExecutorDispatch; use node_primitives::Block; use sc_cli::{Result, SubstrateCli}; use sc_service::PartialComponents; @@ -89,7 +88,7 @@ pub fn run() -> Result<()> { Some(Subcommand::Inspect(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) }, Some(Subcommand::Benchmark(cmd)) => { let runner = cli.create_runner(cmd)?; diff --git a/substrate/bin/node/cli/src/lib.rs b/substrate/bin/node/cli/src/lib.rs index 2fe238ef316e60a7544b0f828e53b228503df7e6..0ff544932a9a144e82cf9e147c1d0b8718022c47 100644 --- a/substrate/bin/node/cli/src/lib.rs +++ b/substrate/bin/node/cli/src/lib.rs @@ -30,16 +30,14 @@ #![warn(missing_docs)] -pub mod chain_spec; - -#[macro_use] -pub mod service; #[cfg(feature = "cli")] mod benchmarking; +pub mod chain_spec; #[cfg(feature = "cli")] mod cli; #[cfg(feature = "cli")] mod command; +pub mod service; #[cfg(feature = "cli")] pub use cli::*; diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 153dda5c0a523395b8b4604b09000129237f71ef..4f8c6198cdce72c49f22d1d089f3e92a95182fa4 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -26,11 +26,9 @@ use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use frame_system_rpc_runtime_api::AccountNonceApi; use futures::prelude::*; use kitchensink_runtime::RuntimeApi; -use node_executor::ExecutorDispatch; use node_primitives::Block; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_babe::{self, SlotProportion}; -use sc_executor::NativeElseWasmExecutor; use sc_network::{event::Event, NetworkEventStream, NetworkService}; use sc_network_sync::{warp::WarpSyncParams, SyncingService}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; @@ -42,9 +40,25 @@ use sp_core::crypto::Pair; use sp_runtime::{generic, traits::Block as BlockT, SaturatedConversion}; use std::sync::Arc; +/// Host functions required for kitchensink runtime and Substrate node. +#[cfg(not(feature = "runtime-benchmarks"))] +pub type HostFunctions = + (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions); + +/// Host functions required for kitchensink runtime and Substrate node. +#[cfg(feature = "runtime-benchmarks")] +pub type HostFunctions = ( + sp_io::SubstrateHostFunctions, + sp_statement_store::runtime_api::HostFunctions, + frame_benchmarking::benchmarking::HostFunctions, +); + +/// A specialized `WasmExecutor` intended to use accross substrate node. It provides all required +/// HostFunctions. 
+pub type RuntimeExecutor = sc_executor::WasmExecutor; + /// The full client type definition. -pub type FullClient = - sc_service::TFullClient>; +pub type FullClient = sc_service::TFullClient; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = @@ -91,21 +105,24 @@ pub fn create_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: kitchensink_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal( - period, - best_block.saturated_into(), - )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( - tip, None, - ), - ); + let extra: kitchensink_runtime::SignedExtra = + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( + period, + best_block.saturated_into(), + )), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::< + kitchensink_runtime::Runtime, + >::from(tip, None), + ), + ); let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), @@ -171,7 +188,7 @@ pub fn new_partial( }) .transpose()?; - let executor = sc_service::new_native_or_wasm_executor(&config); + let executor = sc_service::new_wasm_executor(&config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( @@ -367,28 +384,28 @@ pub fn new_full_base( let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); - let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec); - net_config.add_notification_protocol(grandpa::grandpa_peers_set_config( - grandpa_protocol_name.clone(), - )); + let (grandpa_protocol_config, grandpa_notification_service) = + grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()); + net_config.add_notification_protocol(grandpa_protocol_config); - let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( - genesis_hash, - config.chain_spec.fork_id(), - ); - net_config.add_notification_protocol(statement_handler_proto.set_config()); + let (statement_handler_proto, statement_config) = + sc_network_statement::StatementHandlerPrototype::new( + genesis_hash, + config.chain_spec.fork_id(), + ); + net_config.add_notification_protocol(statement_config); let mixnet_protocol_name = sc_mixnet::protocol_name(genesis_hash.as_ref(), config.chain_spec.fork_id()); - if let Some(mixnet_config) = &mixnet_config { - net_config.add_notification_protocol(sc_mixnet::peers_set_config( - mixnet_protocol_name.clone(), - mixnet_config, - )); - } + let mixnet_notification_service = mixnet_config.as_ref().map(|mixnet_config| { + let (config, notification_service) = + sc_mixnet::peers_set_config(mixnet_protocol_name.clone(), mixnet_config); + net_config.add_notification_protocol(config); + 
notification_service + }); let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( backend.clone(), @@ -419,6 +436,8 @@ pub fn new_full_base( mixnet_protocol_name, transaction_pool.clone(), Some(keystore_container.keystore()), + mixnet_notification_service + .expect("`NotificationService` exists since mixnet was enabled; qed"), ); task_manager.spawn_handle().spawn("mixnet", None, mixnet); } @@ -587,6 +606,7 @@ pub fn new_full_base( link: grandpa_link, network: network.clone(), sync: Arc::new(sync_service.clone()), + notification_service: grandpa_notification_service, telemetry: telemetry.as_ref().map(|x| x.handle()), voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry: prometheus_registry.clone(), @@ -879,8 +899,9 @@ mod tests { let check_era = frame_system::CheckEra::from(Era::Immortal); let check_nonce = frame_system::CheckNonce::from(index); let check_weight = frame_system::CheckWeight::new(); - let tx_payment = - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None); + let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), + ); let extra = ( check_non_zero_sender, check_spec_version, diff --git a/substrate/bin/node/executor/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs similarity index 94% rename from substrate/bin/node/executor/tests/basic.rs rename to substrate/bin/node/cli/tests/basic.rs index cbceac04e8eaa075079fcb3048b47122e5037b3e..e5a8a397254e5eb321dd053fa2a8dfaabd0cd30c 100644 --- a/substrate/bin/node/executor/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -193,11 +193,9 @@ fn panic_execution_with_foreign_code_gives_error() { t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true) + let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap(); @@ -219,11 +217,9 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { t.insert(>::hashed_key().to_vec(), 69u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true) + let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap(); @@ -256,14 +252,12 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", 
&vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { @@ -298,14 +292,12 @@ fn successful_execution_with_foreign_code_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { @@ -337,7 +329,7 @@ fn full_native_block_import_works() { .base_extrinsic, ); - executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -412,7 +404,7 @@ fn full_native_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); let pot = t.execute_with(|| Treasury::pot()); - executor_call(&mut t, "Core_execute_block", &block2.0, true).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( @@ -554,7 +546,7 @@ fn full_wasm_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); - executor_call(&mut t, "Core_execute_block", &block1.0, false).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -564,7 +556,7 @@ fn full_wasm_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); - executor_call(&mut t, "Core_execute_block", &block2.0, false).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( @@ -717,7 +709,7 @@ fn deploying_wasm_contract_should_work() { let mut t = new_test_ext(compact_code_unwrap()); - executor_call(&mut t, "Core_execute_block", &b.0, false).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &b.0).0.unwrap(); t.execute_with(|| { // Verify that the contract does exist by querying some of its storage items @@ -732,8 +724,7 @@ fn wasm_big_block_import_fails() { set_heap_pages(&mut t.ext(), 4); - let result = - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, false).0; + let result = executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0).0; assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) } @@ -741,7 +732,7 @@ fn wasm_big_block_import_fails() { fn native_big_block_import_succeeds() { let mut t = new_test_ext(compact_code_unwrap()); - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, true) + executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0) .0 .unwrap(); } @@ -754,11 +745,9 @@ fn native_big_block_import_fails_on_fallback() { // block. 
set_heap_pages(&mut t.ext(), 8); - assert!( - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, false,) - .0 - .is_err() - ); + assert!(executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0) + .0 + .is_err()); } #[test] @@ -775,15 +764,9 @@ fn panic_execution_gives_error() { t.insert(>::hashed_key().to_vec(), 0_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - false, - ) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), false) + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap(); @@ -816,13 +799,7 @@ fn successful_execution_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - false, - ) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS); @@ -830,7 +807,7 @@ fn successful_execution_gives_ok() { let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), false) + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 .unwrap(); ApplyExtrinsicResult::decode(&mut &r[..]) @@ -861,7 +838,7 @@ fn should_import_block_with_test_client() { #[test] fn default_config_as_json_works() { let mut t = new_test_ext(compact_code_unwrap()); - let r = executor_call(&mut t, "GenesisBuilder_create_default_config", &vec![], false) + let r = executor_call(&mut t, "GenesisBuilder_create_default_config", &vec![]) .0 .unwrap(); let r = Vec::::decode(&mut &r[..]).unwrap(); diff --git a/substrate/bin/node/executor/tests/common.rs b/substrate/bin/node/cli/tests/common.rs similarity index 91% rename from substrate/bin/node/executor/tests/common.rs rename to substrate/bin/node/cli/tests/common.rs index 2d68c88db9252a1cb43314bfa2a6d5e216b455c1..9019594ff627f2aae89f58e837f55341b6793df0 100644 --- a/substrate/bin/node/executor/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -18,7 +18,7 @@ use codec::{Decode, Encode}; use frame_support::Hashable; use frame_system::offchain::AppCrypto; -use sc_executor::{error::Result, NativeElseWasmExecutor, WasmExecutor}; +use sc_executor::error::Result; use sp_consensus_babe::{ digests::{PreDigest, SecondaryPlainPreDigest}, Slot, BABE_ENGINE_ID, @@ -38,11 +38,10 @@ use kitchensink_runtime::{ constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, Runtime, UncheckedExtrinsic, }; -use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; use sp_externalities::Externalities; -use staging_node_executor as node_executor; +use staging_node_cli::service::RuntimeExecutor; pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test"); @@ -98,15 +97,14 @@ pub fn from_block_number(n: u32) -> Header { Header::new(n, Default::default(), Default::default(), [69; 32].into(), Default::default()) } -pub fn executor() -> NativeElseWasmExecutor { - NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) +pub fn 
executor() -> RuntimeExecutor { + RuntimeExecutor::builder().build() } pub fn executor_call( t: &mut TestExternalities, method: &str, data: &[u8], - use_native: bool, ) -> (Result>, bool) { let mut t = t.ext(); @@ -118,7 +116,7 @@ pub fn executor_call( heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), }; sp_tracing::try_init_simple(); - executor().call(&mut t, &runtime_code, method, data, use_native, CallContext::Onchain) + executor().call(&mut t, &runtime_code, method, data, CallContext::Onchain) } pub fn new_test_ext(code: &[u8]) -> TestExternalities { @@ -169,12 +167,12 @@ pub fn construct_block( }; // execute the block to get the real header. - executor_call(env, "Core_initialize_block", &header.encode(), true).0.unwrap(); + executor_call(env, "Core_initialize_block", &header.encode()).0.unwrap(); for extrinsic in extrinsics.iter() { // Try to apply the `extrinsic`. It should be valid, in the sense that it passes // all pre-inclusion checks. - let r = executor_call(env, "BlockBuilder_apply_extrinsic", &extrinsic.encode(), true) + let r = executor_call(env, "BlockBuilder_apply_extrinsic", &extrinsic.encode()) .0 .expect("application of an extrinsic failed"); @@ -187,7 +185,7 @@ pub fn construct_block( } let header = Header::decode( - &mut &executor_call(env, "BlockBuilder_finalize_block", &[0u8; 0], true).0.unwrap()[..], + &mut &executor_call(env, "BlockBuilder_finalize_block", &[0u8; 0]).0.unwrap()[..], ) .unwrap(); diff --git a/substrate/bin/node/executor/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs similarity index 95% rename from substrate/bin/node/executor/tests/fees.rs rename to substrate/bin/node/cli/tests/fees.rs index 7519ce6e8b1b47b9cfc6eee9d87c52ef8ae25cc3..8c7b3c873157770a8f156a019aa7e43e66460bae 100644 --- a/substrate/bin/node/executor/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -95,7 +95,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { ); // execute a big block. - executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -106,7 +106,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { }); // execute a big block. - executor_call(&mut t, "Core_execute_block", &block2.0, true).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); // weight multiplier is increased for next block. 
t.execute_with(|| { @@ -151,12 +151,10 @@ fn transaction_fee_is_correct() { function: RuntimeCall::Balances(default_transfer_call()), }); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt.clone()), true).0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt.clone())).0; assert!(r.is_ok()); t.execute_with(|| { @@ -247,7 +245,7 @@ fn block_weight_capacity_report() { len / 1024 / 1024, ); - let r = executor_call(&mut t, "Core_execute_block", &block.0, true).0; + let r = executor_call(&mut t, "Core_execute_block", &block.0).0; println!(" || Result = {:?}", r); assert!(r.is_ok()); @@ -310,7 +308,7 @@ fn block_length_capacity_report() { len / 1024 / 1024, ); - let r = executor_call(&mut t, "Core_execute_block", &block.0, true).0; + let r = executor_call(&mut t, "Core_execute_block", &block.0).0; println!(" || Result = {:?}", r); assert!(r.is_ok()); diff --git a/substrate/bin/node/executor/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json similarity index 100% rename from substrate/bin/node/executor/tests/res/default_genesis_config.json rename to substrate/bin/node/cli/tests/res/default_genesis_config.json diff --git a/substrate/bin/node/executor/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs similarity index 100% rename from substrate/bin/node/executor/tests/submit_transaction.rs rename to substrate/bin/node/cli/tests/submit_transaction.rs diff --git a/substrate/bin/node/cli/tests/websocket_server.rs b/substrate/bin/node/cli/tests/websocket_server.rs index 432a4871cd3785a171dcd6f8d20135dd8072a33c..b34fc82b8be85b133374aed65c9ff28a32cdf163 100644 --- a/substrate/bin/node/cli/tests/websocket_server.rs +++ b/substrate/bin/node/cli/tests/websocket_server.rs @@ -205,8 +205,7 @@ impl WsServer { Ok(soketto::Data::Text(len)) => String::from_utf8(buf[..len].to_vec()) .map(Message::Text) .map_err(|err| Box::new(err) as Box<_>), - Ok(soketto::Data::Binary(len)) => Ok(buf[..len].to_vec()) - .map(Message::Binary), + Ok(soketto::Data::Binary(len)) => Ok(Message::Binary(buf[..len].to_vec())), Err(err) => Err(Box::new(err) as Box<_>), }; Some((ret, (receiver, buf))) diff --git a/substrate/bin/node/executor/Cargo.toml b/substrate/bin/node/executor/Cargo.toml deleted file mode 100644 index 595a313d2cb9ba31cbcbcc21cce05a99cd7204a6..0000000000000000000000000000000000000000 --- a/substrate/bin/node/executor/Cargo.toml +++ /dev/null @@ -1,57 +0,0 @@ -[package] -name = "staging-node-executor" -version = "3.0.0-dev" -authors.workspace = true -description = "Substrate node implementation in Rust." 
-edition.workspace = true -license = "Apache-2.0" -homepage = "https://substrate.io" -repository.workspace = true -publish = false - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -scale-info = { version = "2.10.0", features = ["derive", "serde"] } -frame-benchmarking = { path = "../../../frame/benchmarking" } -node-primitives = { path = "../primitives" } -kitchensink-runtime = { path = "../runtime" } -sc-executor = { path = "../../../client/executor" } -sp-core = { path = "../../../primitives/core", features=["serde"] } -sp-keystore = { path = "../../../primitives/keystore" } -sp-state-machine = { path = "../../../primitives/state-machine" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-trie = { path = "../../../primitives/trie" } -sp-statement-store = { path = "../../../primitives/statement-store", features=["serde"] } - -[dev-dependencies] -criterion = "0.4.0" -futures = "0.3.21" -wat = "1.0" -frame-support = { path = "../../../frame/support" } -frame-system = { path = "../../../frame/system" } -node-testing = { path = "../testing" } -pallet-balances = { path = "../../../frame/balances" } -pallet-contracts = { path = "../../../frame/contracts" } -pallet-im-online = { path = "../../../frame/im-online" } -pallet-glutton = { path = "../../../frame/glutton" } -pallet-sudo = { path = "../../../frame/sudo" } -pallet-timestamp = { path = "../../../frame/timestamp" } -pallet-treasury = { path = "../../../frame/treasury" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -pallet-root-testing = { path = "../../../frame/root-testing" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-externalities = { path = "../../../primitives/externalities" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-runtime = { path = "../../../primitives/runtime" } -serde_json = "1.0.108" - -[features] -stress-test = [] - -[[bench]] -name = "bench" -harness = false diff --git a/substrate/bin/node/executor/src/lib.rs b/substrate/bin/node/executor/src/lib.rs deleted file mode 100644 index 3557a16740b8a6407d485a74dc422985805e063b..0000000000000000000000000000000000000000 --- a/substrate/bin/node/executor/src/lib.rs +++ /dev/null @@ -1,40 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A `CodeExecutor` specialization which uses natively compiled runtime when the wasm to be -//! executed is equivalent to the natively compiled code. - -pub use sc_executor::NativeElseWasmExecutor; - -// Declare an instance of the native executor named `ExecutorDispatch`. Include the wasm binary as -// the equivalent wasm code. 
-pub struct ExecutorDispatch; - -impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { - type ExtendHostFunctions = ( - frame_benchmarking::benchmarking::HostFunctions, - sp_statement_store::runtime_api::HostFunctions, - ); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - kitchensink_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - kitchensink_runtime::native_version() - } -} diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 30cc22b0e8c63d7415e230d4e43591d9e4fa3cc3..44de013483ebf54d58dd0d659b59264394de6cd7 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -8,16 +8,27 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = "1.0" sc-cli = { path = "../../../client/cli" } sc-client-api = { path = "../../../client/api" } -sc-service = { path = "../../../client/service", default-features = false} +sc-service = { path = "../../../client/service", default-features = false } sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core" } +sp-io = { path = "../../../primitives/io" } sp-runtime = { path = "../../../primitives/runtime" } +sp-statement-store = { path = "../../../primitives/statement-store" } + +[features] +runtime-benchmarks = [ + "sc-service/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/substrate/bin/node/inspect/src/command.rs b/substrate/bin/node/inspect/src/command.rs index dcecfd788264466b408e4ec466d675cb97781905..e0e25707e31b06b7d53f31bde9a0b809211d8dce 100644 --- a/substrate/bin/node/inspect/src/command.rs +++ b/substrate/bin/node/inspect/src/command.rs @@ -23,18 +23,20 @@ use crate::{ Inspector, }; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; -use sc_service::{Configuration, NativeExecutionDispatch}; +use sc_service::Configuration; use sp_runtime::traits::Block; +type HostFunctions = + (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions); + impl InspectCmd { /// Run the inspect command, passing the inspector. 
- pub fn run(&self, config: Configuration) -> Result<()> + pub fn run(&self, config: Configuration) -> Result<()> where B: Block, RA: Send + Sync + 'static, - D: NativeExecutionDispatch + 'static, { - let executor = sc_service::new_native_or_wasm_executor::(&config); + let executor = sc_service::new_wasm_executor::(&config); let client = sc_service::new_full_client::(&config, None, executor)?; let inspect = Inspector::::new(client); diff --git a/substrate/bin/node/primitives/Cargo.toml b/substrate/bin/node/primitives/Cargo.toml index 77bf7ad467614e7df0fdd68a601a2d614fa9bc0e..24279ad09c3d9f4576a212d7c67ac24be27b8e22 100644 --- a/substrate/bin/node/primitives/Cargo.toml +++ b/substrate/bin/node/primitives/Cargo.toml @@ -9,13 +9,16 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } [features] -default = [ "std" ] -std = [ "sp-core/std", "sp-runtime/std" ] +default = ["std"] +std = ["sp-core/std", "sp-runtime/std"] diff --git a/substrate/bin/node/rpc/Cargo.toml b/substrate/bin/node/rpc/Cargo.toml index 43db4ab9d34f709c43809e5bf909b979eebba6c5..a4a361fadbc169e77d6f78c258fa5e627a788d4c 100644 --- a/substrate/bin/node/rpc/Cargo.toml +++ b/substrate/bin/node/rpc/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 836f90e76542d542ccf2872dc9a6b6a81592374d..693fd673da5d993b7df3ef67c61154ee020a9132 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -10,6 +10,9 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -26,122 +29,123 @@ log = { version = "0.4.17", default-features = false } serde_json = { version = "1.0.108", default-features = false, features = ["alloc", "arbitrary_precision"] } # pallet-asset-conversion: turn on "num-traits" feature -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info", "num-traits"] } +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } # primitives -sp-authority-discovery = { path = "../../../primitives/authority-discovery", default-features = false, features=["serde"] } -sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false, features=["serde"] } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features=["serde"] } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} +sp-authority-discovery = { path = "../../../primitives/authority-discovery", default-features = false, features = ["serde"] } +sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false, features = ["serde"] } +sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", 
default-features = false, features = ["serde"] } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false} -node-primitives = { path = "../primitives", default-features = false} +sp-inherents = { path = "../../../primitives/inherents", default-features = false } +node-primitives = { path = "../primitives", default-features = false } sp-mixnet = { path = "../../../primitives/mixnet", default-features = false } -sp-offchain = { path = "../../../primitives/offchain", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false, features=["serde"] } -sp-std = { path = "../../../primitives/std", default-features = false} -sp-api = { path = "../../../primitives/api", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false, features=["serde"] } -sp-staking = { path = "../../../primitives/staking", default-features = false, features=["serde"] } -sp-storage = { path = "../../../primitives/storage", default-features = false} -sp-session = { path = "../../../primitives/session", default-features = false} -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false} -sp-statement-store = { path = "../../../primitives/statement-store", default-features = false, features=["serde"] } -sp-version = { path = "../../../primitives/version", default-features = false, features=["serde"] } -sp-io = { path = "../../../primitives/io", default-features = false} +sp-offchain = { path = "../../../primitives/offchain", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-api = { path = "../../../primitives/api", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } +sp-staking = { path = "../../../primitives/staking", default-features = false, features = ["serde"] } +sp-storage = { path = "../../../primitives/storage", default-features = false } +sp-session = { path = "../../../primitives/session", default-features = false } +sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } +sp-statement-store = { path = "../../../primitives/statement-store", default-features = false, features = ["serde"] } +sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } +sp-io = { path = "../../../primitives/io", default-features = false } # frame dependencies -frame-executive = { path = "../../../frame/executive", default-features = false} -frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false} -frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false} +frame-executive = { path = "../../../frame/executive", default-features = false } +frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false } +frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false } frame-support = { path = "../../../frame/support", default-features = false, features = ["tuples-96"] } -frame-system = { path = "../../../frame/system", default-features = false} +frame-system = { 
path = "../../../frame/system", default-features = false } frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } -frame-election-provider-support = { path = "../../../frame/election-provider-support", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false} +frame-election-provider-support = { path = "../../../frame/election-provider-support", default-features = false } +frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false } frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-alliance = { path = "../../../frame/alliance", default-features = false} -pallet-asset-conversion = { path = "../../../frame/asset-conversion", default-features = false} -pallet-asset-rate = { path = "../../../frame/asset-rate", default-features = false} -pallet-assets = { path = "../../../frame/assets", default-features = false} -pallet-authority-discovery = { path = "../../../frame/authority-discovery", default-features = false} -pallet-authorship = { path = "../../../frame/authorship", default-features = false} -pallet-babe = { path = "../../../frame/babe", default-features = false} -pallet-bags-list = { path = "../../../frame/bags-list", default-features = false} -pallet-balances = { path = "../../../frame/balances", default-features = false} -pallet-bounties = { path = "../../../frame/bounties", default-features = false} -pallet-broker = { path = "../../../frame/broker", default-features = false} -pallet-child-bounties = { path = "../../../frame/child-bounties", default-features = false} -pallet-collective = { path = "../../../frame/collective", default-features = false} -pallet-contracts = { path = "../../../frame/contracts", default-features = false} -pallet-contracts-primitives = { path = "../../../frame/contracts/primitives", default-features = false} -pallet-conviction-voting = { path = "../../../frame/conviction-voting", default-features = false} -pallet-core-fellowship = { path = "../../../frame/core-fellowship", default-features = false} -pallet-democracy = { path = "../../../frame/democracy", default-features = false} -pallet-election-provider-multi-phase = { path = "../../../frame/election-provider-multi-phase", default-features = false} +pallet-alliance = { path = "../../../frame/alliance", default-features = false } +pallet-asset-conversion = { path = "../../../frame/asset-conversion", default-features = false } +pallet-asset-rate = { path = "../../../frame/asset-rate", default-features = false } +pallet-assets = { path = "../../../frame/assets", default-features = false } +pallet-authority-discovery = { path = "../../../frame/authority-discovery", default-features = false } +pallet-authorship = { path = "../../../frame/authorship", default-features = false } +pallet-babe = { path = "../../../frame/babe", default-features = false } +pallet-bags-list = { path = "../../../frame/bags-list", default-features = false } +pallet-balances = { path = "../../../frame/balances", default-features = false } +pallet-bounties = { path = "../../../frame/bounties", default-features = false } +pallet-broker = { path = "../../../frame/broker", default-features = false } +pallet-child-bounties = { path = "../../../frame/child-bounties", default-features = false } +pallet-collective = { path = "../../../frame/collective", default-features = false } +pallet-contracts = { 
path = "../../../frame/contracts", default-features = false } +pallet-conviction-voting = { path = "../../../frame/conviction-voting", default-features = false } +pallet-core-fellowship = { path = "../../../frame/core-fellowship", default-features = false } +pallet-democracy = { path = "../../../frame/democracy", default-features = false } +pallet-election-provider-multi-phase = { path = "../../../frame/election-provider-multi-phase", default-features = false } pallet-election-provider-support-benchmarking = { path = "../../../frame/election-provider-support/benchmarking", default-features = false, optional = true } -pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", default-features = false} -pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false} -pallet-nis = { path = "../../../frame/nis", default-features = false} -pallet-grandpa = { path = "../../../frame/grandpa", default-features = false} -pallet-im-online = { path = "../../../frame/im-online", default-features = false} -pallet-indices = { path = "../../../frame/indices", default-features = false} -pallet-identity = { path = "../../../frame/identity", default-features = false} -pallet-lottery = { path = "../../../frame/lottery", default-features = false} -pallet-membership = { path = "../../../frame/membership", default-features = false} -pallet-message-queue = { path = "../../../frame/message-queue", default-features = false} +pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", default-features = false } +pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false } +pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false } +pallet-nis = { path = "../../../frame/nis", default-features = false } +pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } +pallet-im-online = { path = "../../../frame/im-online", default-features = false } +pallet-indices = { path = "../../../frame/indices", default-features = false } +pallet-identity = { path = "../../../frame/identity", default-features = false } +pallet-lottery = { path = "../../../frame/lottery", default-features = false } +pallet-membership = { path = "../../../frame/membership", default-features = false } +pallet-message-queue = { path = "../../../frame/message-queue", default-features = false } pallet-mixnet = { path = "../../../frame/mixnet", default-features = false } -pallet-mmr = { path = "../../../frame/merkle-mountain-range", default-features = false} -pallet-multisig = { path = "../../../frame/multisig", default-features = false} -pallet-nfts = { path = "../../../frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../frame/nfts/runtime-api", default-features = false} -pallet-nft-fractionalization = { path = "../../../frame/nft-fractionalization", default-features = false} -pallet-nomination-pools = { path = "../../../frame/nomination-pools", default-features = false} -pallet-nomination-pools-benchmarking = { path = "../../../frame/nomination-pools/benchmarking", default-features = false, optional = true} -pallet-nomination-pools-runtime-api = { path = "../../../frame/nomination-pools/runtime-api", default-features = false} -pallet-offences = { path = "../../../frame/offences", default-features = false} +pallet-mmr = { path = "../../../frame/merkle-mountain-range", default-features = false } +pallet-multisig = { path = "../../../frame/multisig", default-features = false } +pallet-nfts = { 
path = "../../../frame/nfts", default-features = false } +pallet-nfts-runtime-api = { path = "../../../frame/nfts/runtime-api", default-features = false } +pallet-nft-fractionalization = { path = "../../../frame/nft-fractionalization", default-features = false } +pallet-nomination-pools = { path = "../../../frame/nomination-pools", default-features = false } +pallet-nomination-pools-benchmarking = { path = "../../../frame/nomination-pools/benchmarking", default-features = false, optional = true } +pallet-nomination-pools-runtime-api = { path = "../../../frame/nomination-pools/runtime-api", default-features = false } +pallet-offences = { path = "../../../frame/offences", default-features = false } pallet-offences-benchmarking = { path = "../../../frame/offences/benchmarking", default-features = false, optional = true } -pallet-glutton = { path = "../../../frame/glutton", default-features = false} -pallet-preimage = { path = "../../../frame/preimage", default-features = false} -pallet-proxy = { path = "../../../frame/proxy", default-features = false} -pallet-insecure-randomness-collective-flip = { path = "../../../frame/insecure-randomness-collective-flip", default-features = false} -pallet-ranked-collective = { path = "../../../frame/ranked-collective", default-features = false} -pallet-recovery = { path = "../../../frame/recovery", default-features = false} -pallet-referenda = { path = "../../../frame/referenda", default-features = false} -pallet-remark = { path = "../../../frame/remark", default-features = false} -pallet-root-testing = { path = "../../../frame/root-testing", default-features = false} -pallet-salary = { path = "../../../frame/salary", default-features = false} -pallet-session = { path = "../../../frame/session", default-features = false , features = [ "historical" ]} +pallet-glutton = { path = "../../../frame/glutton", default-features = false } +pallet-preimage = { path = "../../../frame/preimage", default-features = false } +pallet-proxy = { path = "../../../frame/proxy", default-features = false } +pallet-insecure-randomness-collective-flip = { path = "../../../frame/insecure-randomness-collective-flip", default-features = false } +pallet-ranked-collective = { path = "../../../frame/ranked-collective", default-features = false } +pallet-recovery = { path = "../../../frame/recovery", default-features = false } +pallet-referenda = { path = "../../../frame/referenda", default-features = false } +pallet-remark = { path = "../../../frame/remark", default-features = false } +pallet-root-testing = { path = "../../../frame/root-testing", default-features = false } +pallet-salary = { path = "../../../frame/salary", default-features = false } +pallet-session = { path = "../../../frame/session", default-features = false, features = ["historical"] } pallet-session-benchmarking = { path = "../../../frame/session/benchmarking", default-features = false, optional = true } -pallet-staking = { path = "../../../frame/staking", default-features = false} -pallet-staking-reward-curve = { path = "../../../frame/staking/reward-curve", default-features = false} -pallet-staking-runtime-api = { path = "../../../frame/staking/runtime-api", default-features = false} -pallet-state-trie-migration = { path = "../../../frame/state-trie-migration", default-features = false} -pallet-statement = { path = "../../../frame/statement", default-features = false} -pallet-scheduler = { path = "../../../frame/scheduler", default-features = false} -pallet-society = { path = "../../../frame/society", 
default-features = false} -pallet-sudo = { path = "../../../frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false} -pallet-tips = { path = "../../../frame/tips", default-features = false} -pallet-treasury = { path = "../../../frame/treasury", default-features = false} -pallet-utility = { path = "../../../frame/utility", default-features = false} -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false} -pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false} -pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false} -pallet-uniques = { path = "../../../frame/uniques", default-features = false} -pallet-vesting = { path = "../../../frame/vesting", default-features = false} -pallet-whitelist = { path = "../../../frame/whitelist", default-features = false} -pallet-tx-pause = { path = "../../../frame/tx-pause", default-features = false} -pallet-safe-mode = { path = "../../../frame/safe-mode", default-features = false} +pallet-staking = { path = "../../../frame/staking", default-features = false } +pallet-staking-reward-curve = { path = "../../../frame/staking/reward-curve", default-features = false } +pallet-staking-runtime-api = { path = "../../../frame/staking/runtime-api", default-features = false } +pallet-state-trie-migration = { path = "../../../frame/state-trie-migration", default-features = false } +pallet-statement = { path = "../../../frame/statement", default-features = false } +pallet-scheduler = { path = "../../../frame/scheduler", default-features = false } +pallet-society = { path = "../../../frame/society", default-features = false } +pallet-sudo = { path = "../../../frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } +pallet-tips = { path = "../../../frame/tips", default-features = false } +pallet-treasury = { path = "../../../frame/treasury", default-features = false } +pallet-utility = { path = "../../../frame/utility", default-features = false } +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false } +pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false } +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false } +pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false } +pallet-uniques = { path = "../../../frame/uniques", default-features = false } +pallet-vesting = { path = "../../../frame/vesting", default-features = false } +pallet-whitelist = { path = "../../../frame/whitelist", default-features = false } +pallet-tx-pause = { path = "../../../frame/tx-pause", default-features = false } +pallet-safe-mode = { path = 
"../../../frame/safe-mode", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] -with-tracing = [ "frame-executive/with-tracing" ] +default = ["std"] +with-tracing = ["frame-executive/with-tracing"] std = [ "codec/std", "frame-benchmarking-pallet-pov/std", @@ -170,7 +174,6 @@ std = [ "pallet-broker/std", "pallet-child-bounties/std", "pallet-collective/std", - "pallet-contracts-primitives/std", "pallet-contracts/std", "pallet-conviction-voting/std", "pallet-core-fellowship/std", @@ -178,6 +181,7 @@ std = [ "pallet-election-provider-multi-phase/std", "pallet-election-provider-support-benchmarking?/std", "pallet-elections-phragmen/std", + "pallet-example-tasks/std", "pallet-fast-unstake/std", "pallet-glutton/std", "pallet-grandpa/std", @@ -212,6 +216,7 @@ std = [ "pallet-scheduler/std", "pallet-session-benchmarking?/std", "pallet-session/std", + "pallet-skip-feeless-payment/std", "pallet-society/std", "pallet-staking-runtime-api/std", "pallet-staking/std", @@ -279,6 +284,7 @@ runtime-benchmarks = [ "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-election-provider-support-benchmarking/runtime-benchmarks", "pallet-elections-phragmen/runtime-benchmarks", + "pallet-example-tasks/runtime-benchmarks", "pallet-fast-unstake/runtime-benchmarks", "pallet-glutton/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", @@ -308,6 +314,7 @@ runtime-benchmarks = [ "pallet-salary/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-session-benchmarking/runtime-benchmarks", + "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", @@ -352,6 +359,7 @@ try-runtime = [ "pallet-democracy/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-elections-phragmen/try-runtime", + "pallet-example-tasks/try-runtime", "pallet-fast-unstake/try-runtime", "pallet-glutton/try-runtime", "pallet-grandpa/try-runtime", @@ -381,6 +389,7 @@ try-runtime = [ "pallet-salary/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", + "pallet-skip-feeless-payment/try-runtime", "pallet-society/try-runtime", "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", @@ -398,3 +407,8 @@ try-runtime = [ "pallet-whitelist/try-runtime", "sp-runtime/try-runtime", ] +experimental = [ + "frame-support/experimental", + "frame-system/experimental", + "pallet-example-tasks/experimental", +] diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 620e89a65e53fb5e7c31e46ef37fbeb0bd008742..4d409a791ba25e99cb5a2011643c953c2d3b3ff0 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -28,7 +28,7 @@ use frame_election_provider_support::{ onchain, BalancingConfig, ElectionDataProvider, SequentialPhragmen, VoteWeight, }; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, instances::{Instance1, Instance2}, @@ -36,8 +36,13 @@ use frame_support::{ pallet_prelude::Get, parameter_types, traits::{ - fungible::{Balanced, Credit, HoldConsideration, ItemOf}, - tokens::{nonfungibles_v2::Inspect, pay::PayAssetFromAccount, GetSalary, PayFromAccount}, + fungible::{ + Balanced, Credit, HoldConsideration, ItemOf, NativeFromLeft, NativeOrWithId, UnionOf, + }, + 
tokens::{ + imbalance::ResolveAssetTo, nonfungibles_v2::Inspect, pay::PayAssetFromAccount, + GetSalary, PayFromAccount, + }, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Contains, Currency, EitherOfDiverse, EqualPrivilegeOnly, Imbalance, InsideBoth, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, Nothing, OnUnbalanced, @@ -57,7 +62,7 @@ use frame_system::{ }; pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Moment, Nonce}; -use pallet_asset_conversion::{NativeOrAssetId, NativeOrAssetIdConverter}; +use pallet_asset_conversion::{Ascending, Chain, WithFirstAsset}; use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600}; use pallet_election_provider_multi_phase::{GeometricDepositBase, SolutionAccuracyOf}; use pallet_identity::legacy::IdentityInfo; @@ -283,34 +288,32 @@ impl pallet_safe_mode::Config for Runtime { type WeightInfo = pallet_safe_mode::weights::SubstrateWeight; } +#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = InsideBoth; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = Indices; type Block = Block; - type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); type SystemWeightInfo = frame_system::weights::SubstrateWeight; type SS58Prefix = ConstU16<42>; - type OnSetCode = (); type MaxConsumers = ConstU32<16>; } impl pallet_insecure_randomness_collective_flip::Config for Runtime {} +impl pallet_example_tasks::Config for Runtime { + type RuntimeTask = RuntimeTask; + type WeightInfo = pallet_example_tasks::weights::SubstrateWeight; +} + impl pallet_utility::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -565,8 +568,15 @@ impl pallet_asset_tx_payment::Config for Runtime { impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; - type OnChargeAssetTransaction = - pallet_asset_conversion_tx_payment::AssetConversionAdapter; + type OnChargeAssetTransaction = pallet_asset_conversion_tx_payment::AssetConversionAdapter< + Balances, + AssetConversion, + Native, + >; +} + +impl pallet_skip_feeless_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; } parameter_types! { @@ -630,6 +640,7 @@ parameter_types! 
{ pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominators: u32 = 64; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); + pub const MaxControllersInDeprecationBatch: u32 = 5900; pub OffchainRepeat: BlockNumber = 5; pub HistoryDepth: u32 = 84; } @@ -672,6 +683,7 @@ impl pallet_staking::Config for Runtime { // This a placeholder, to be introduced in the next PR as an instance of bags-list type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type HistoryDepth = HistoryDepth; type EventListeners = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; @@ -700,8 +712,6 @@ parameter_types! { pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub const SignedDepositByte: Balance = 1 * CENTS; - pub BetterUnsignedThreshold: Perbill = Perbill::from_rational(1u32, 10_000); - // miner configs pub const MultiPhaseUnsignedPriority: TransactionPriority = StakingUnsignedPriority::get() - 1u64; pub MinerMaxWeight: Weight = RuntimeBlockWeights::get() @@ -818,7 +828,6 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type EstimateCallFee = TransactionPayment; type SignedPhase = SignedPhase; type UnsignedPhase = UnsignedPhase; - type BetterUnsignedThreshold = BetterUnsignedThreshold; type BetterSignedThreshold = (); type OffchainRepeat = OffchainRepeat; type MinerTxPriority = MultiPhaseUnsignedPriority; @@ -1349,6 +1358,7 @@ impl pallet_contracts::Config for Runtime { type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Environment = (); + type Xcm = (); } impl pallet_sudo::Config for Runtime { @@ -1394,7 +1404,11 @@ where frame_system::CheckEra::::from(era), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( + tip, None, + ), + ), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -1545,6 +1559,7 @@ impl pallet_vesting::Config for Runtime { type MinVestedTransfer = MinVestedTransfer; type WeightInfo = pallet_vesting::weights::SubstrateWeight; type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; // `VestingInfo` encode length is 36bytes. 28 schedules gets encoded as 1009 bytes, which is the // highest number of schedules that encodes less than 2^10. const MAX_VESTING_SCHEDULES: u32 = 28; @@ -1637,33 +1652,34 @@ impl pallet_assets::Config for Runtime { parameter_types! { pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); - pub AllowMultiAssetPools: bool = true; pub const PoolSetupFee: Balance = 1 * DOLLARS; // should be more or equal to the existential deposit pub const MintMinLiquidity: Balance = 100; // 100 is good enough when the main currency has 10-12 decimals. - pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero. 
+ pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); + pub const Native: NativeOrWithId = NativeOrWithId::Native; } impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type AssetBalance = ::Balance; - type HigherPrecisionBalance = sp_core::U256; - type Assets = Assets; type Balance = u128; - type PoolAssets = PoolAssets; - type AssetId = >::AssetId; - type MultiAssetId = NativeOrAssetId; + type HigherPrecisionBalance = sp_core::U256; + type AssetKind = NativeOrWithId; + type Assets = UnionOf, AccountId>; + type PoolId = (Self::AssetKind, Self::AssetKind); + type PoolLocator = Chain< + WithFirstAsset>, + Ascending>, + >; type PoolAssetId = >::AssetId; + type PoolAssets = PoolAssets; + type PoolSetupFee = PoolSetupFee; + type PoolSetupFeeAsset = Native; + type PoolSetupFeeTarget = ResolveAssetTo; type PalletId = AssetConversionPalletId; type LPFee = ConstU32<3>; // means 0.3% - type PoolSetupFee = PoolSetupFee; - type PoolSetupFeeReceiver = AssetConversionOrigin; type LiquidityWithdrawalFee = LiquidityWithdrawalFee; type WeightInfo = pallet_asset_conversion::weights::SubstrateWeight; - type AllowMultiAssetPools = AllowMultiAssetPools; type MaxSwapPathLength = ConstU32<4>; type MintMinLiquidity = MintMinLiquidity; - type MultiAssetIdConverter = NativeOrAssetIdConverter; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } @@ -1977,7 +1993,6 @@ impl OnUnbalanced> for IntoAuthor { } parameter_types! { - pub storage CoreCount: Option = None; pub storage CoretimeRevenue: Option<(BlockNumber, Balance)> = None; } @@ -1985,36 +2000,24 @@ pub struct CoretimeProvider; impl CoretimeInterface for CoretimeProvider { type AccountId = AccountId; type Balance = Balance; - type BlockNumber = BlockNumber; - fn latest() -> Self::BlockNumber { - System::block_number() - } + type RealyChainBlockNumberProvider = System; fn request_core_count(_count: CoreIndex) {} - fn request_revenue_info_at(_when: Self::BlockNumber) {} + fn request_revenue_info_at(_when: u32) {} fn credit_account(_who: Self::AccountId, _amount: Self::Balance) {} fn assign_core( _core: CoreIndex, - _begin: Self::BlockNumber, + _begin: u32, _assignment: Vec<(CoreAssignment, PartsOf57600)>, - _end_hint: Option, + _end_hint: Option, ) { } - fn check_notify_core_count() -> Option { - let count = CoreCount::get(); - CoreCount::set(&None); - count - } - fn check_notify_revenue_info() -> Option<(Self::BlockNumber, Self::Balance)> { + fn check_notify_revenue_info() -> Option<(u32, Self::Balance)> { let revenue = CoretimeRevenue::get(); CoretimeRevenue::set(&None); revenue } #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_core_count(count: u16) { - CoreCount::set(&Some(count)); - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: Self::BlockNumber, revenue: Self::Balance) { + fn ensure_notify_revenue_info(when: u32, revenue: Self::Balance) { CoretimeRevenue::set(&Some((when, revenue))); } } @@ -2133,7 +2136,9 @@ construct_runtime!( SafeMode: pallet_safe_mode, Statement: pallet_statement, Broker: pallet_broker, + TasksExample: pallet_example_tasks, Mixnet: pallet_mixnet, + SkipFeelessPayment: pallet_skip_feeless_payment, } ); @@ -2160,7 +2165,10 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + pallet_skip_feeless_payment::SkipCheckIfFeeless< + Runtime, + 
pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + >, ); /// Unchecked extrinsic type as expected by this runtime. @@ -2181,9 +2189,10 @@ pub type Executive = frame_executive::Executive< >; // All migrations executed on runtime upgrade as a nested tuple of types implementing -// `OnRuntimeUpgrade`. +// `OnRuntimeUpgrade`. Note: These are examples and do not need to be run directly +// after the genesis block. type Migrations = ( - pallet_nomination_pools::migration::v2::MigrateToV2, + pallet_nomination_pools::migration::versioned::V6ToV7, pallet_alliance::migration::Migration, pallet_contracts::Migration, ); @@ -2220,6 +2229,7 @@ mod benches { [pallet_conviction_voting, ConvictionVoting] [pallet_contracts, Contracts] [pallet_core_fellowship, CoreFellowship] + [tasks_example, TasksExample] [pallet_democracy, Democracy] [pallet_asset_conversion, AssetConversion] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] @@ -2483,7 +2493,7 @@ impl_runtime_apis! { gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> pallet_contracts_primitives::ContractExecResult { + ) -> pallet_contracts::ContractExecResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_call( origin, @@ -2503,10 +2513,10 @@ impl_runtime_apis! { value: Balance, gas_limit: Option, storage_deposit_limit: Option, - code: pallet_contracts_primitives::Code, + code: pallet_contracts::Code, data: Vec, salt: Vec, - ) -> pallet_contracts_primitives::ContractInstantiateResult + ) -> pallet_contracts::ContractInstantiateResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_instantiate( @@ -2527,7 +2537,7 @@ impl_runtime_apis! { code: Vec, storage_deposit_limit: Option, determinism: pallet_contracts::Determinism, - ) -> pallet_contracts_primitives::CodeUploadResult + ) -> pallet_contracts::CodeUploadResult { Contracts::bare_upload_code( origin, @@ -2540,7 +2550,7 @@ impl_runtime_apis! { fn get_storage( address: AccountId, key: Vec, - ) -> pallet_contracts_primitives::GetStorageResult { + ) -> pallet_contracts::GetStorageResult { Contracts::get_storage( address, key @@ -2569,20 +2579,19 @@ impl_runtime_apis! 
{ impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - u128, - NativeOrAssetId + NativeOrWithId > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: NativeOrAssetId, asset2: NativeOrAssetId, amount: u128, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: NativeOrWithId, asset2: NativeOrWithId, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: NativeOrAssetId, asset2: NativeOrAssetId, amount: u128, include_fee: bool) -> Option { + fn quote_price_tokens_for_exact_tokens(asset1: NativeOrWithId, asset2: NativeOrWithId, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: NativeOrAssetId, asset2: NativeOrAssetId) -> Option<(Balance, Balance)> { - AssetConversion::get_reserves(&asset1, &asset2).ok() + fn get_reserves(asset1: NativeOrWithId, asset2: NativeOrWithId) -> Option<(Balance, Balance)> { + AssetConversion::get_reserves(asset1, asset2).ok() } } diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 68f80ab6e83f01d0c6079c62b3a29ee92398eeaf..76188ed446c0870229bcfebb772b6c6d98e09e64 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,22 +22,23 @@ futures = "0.3.21" log = "0.4.17" tempfile = "3.1.0" frame-system = { path = "../../../frame/system" } -node-executor = { package = "staging-node-executor", path = "../executor" } +node-cli = { package = "staging-node-cli", path = "../cli" } node-primitives = { path = "../primitives" } kitchensink-runtime = { path = "../runtime" } pallet-asset-conversion = { path = "../../../frame/asset-conversion" } pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment" } sc-block-builder = { path = "../../../client/block-builder" } sc-client-api = { path = "../../../client/api" } -sc-client-db = { path = "../../../client/db", features = ["rocksdb"]} +sc-client-db = { path = "../../../client/db", features = ["rocksdb"] } sc-consensus = { path = "../../../client/consensus/common" } sc-executor = { path = "../../../client/executor" } sc-service = { path = "../../../client/service", features = [ - "test-helpers", "rocksdb", -]} + "test-helpers", +] } sp-api = { path = "../../../primitives/api" } sp-block-builder = { path = "../../../primitives/block-builder" } sp-blockchain = { path = "../../../primitives/blockchain" } @@ -44,5 +48,5 @@ sp-inherents = { path = "../../../primitives/inherents" } sp-io = { path = "../../../primitives/io" } sp-keyring = { path = "../../../primitives/keyring" } sp-runtime = { path = "../../../primitives/runtime" } -sp-timestamp = { path = "../../../primitives/timestamp", default-features = false} +sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } substrate-test-client = { path = "../../../test-utils/client" } diff --git 
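The asset-conversion configuration above moves to `NativeOrWithId` asset kinds with a `(AssetKind, AssetKind)` pool id resolved through the pool locator (note the `Ascending` locator in the runtime config), and the runtime API follows suit. A rough, self-contained sketch of why the ordering matters — local stand-in types, not the pallet's actual `PoolLocator` implementations:

```rust
// Local stand-in for `NativeOrWithId`: the derived ordering puts `Native` first,
// which is what makes an "ascending" pool id deterministic.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
enum AssetKind {
    Native,
    WithId(u32),
}

/// Canonical pool id: order the pair ascending so (a, b) and (b, a) resolve to
/// the same pool. Returns `None` for a pool of an asset with itself.
fn pool_id(a: AssetKind, b: AssetKind) -> Option<(AssetKind, AssetKind)> {
    match a.cmp(&b) {
        std::cmp::Ordering::Less => Some((a, b)),
        std::cmp::Ordering::Greater => Some((b, a)),
        std::cmp::Ordering::Equal => None,
    }
}

fn main() {
    let ab = pool_id(AssetKind::Native, AssetKind::WithId(7));
    let ba = pool_id(AssetKind::WithId(7), AssetKind::Native);
    assert_eq!(ab, ba); // both orderings locate the same pool
    assert_eq!(pool_id(AssetKind::WithId(1), AssetKind::WithId(1)), None);
    println!("{ab:?}");
}
```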
a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index 89b96c0191ce813a6ba49c1263014da4ff3b1c9d..98d3b968a358a3c46830760219dfe62f314ac496 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -43,7 +43,7 @@ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::{execution_extensions::ExecutionExtensions, UsageProvider}; use sc_client_db::PruningMode; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux}; -use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod, WasmtimeInstantiationStrategy}; +use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_consensus::BlockOrigin; @@ -388,13 +388,11 @@ impl BenchDb { let task_executor = TaskExecutor::new(); let backend = sc_service::new_db_backend(db_config).expect("Should not fail"); - let executor = NativeElseWasmExecutor::new_with_wasm_executor( - sc_executor::WasmExecutor::builder() - .with_execution_method(WasmExecutionMethod::Compiled { - instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, - }) - .build(), - ); + let executor = sc_executor::WasmExecutor::builder() + .with_execution_method(WasmExecutionMethod::Compiled { + instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, + }) + .build(); let client_config = sc_service::ClientConfig::default(); let genesis_block_builder = sc_service::GenesisBlockBuilder::new( diff --git a/substrate/bin/node/testing/src/client.rs b/substrate/bin/node/testing/src/client.rs index 22276833fb665c8396d40d8c52763c7484be46ee..07ba1cdbbfb56d6c13dbd76d61703bb347d19a6f 100644 --- a/substrate/bin/node/testing/src/client.rs +++ b/substrate/bin/node/testing/src/client.rs @@ -23,7 +23,7 @@ use sp_runtime::BuildStorage; pub use substrate_test_client::*; /// Call executor for `kitchensink-runtime` `TestClient`. -pub type ExecutorDispatch = sc_executor::NativeElseWasmExecutor; +use node_cli::service::RuntimeExecutor; /// Default backend type. pub type Backend = sc_client_db::Backend; @@ -31,7 +31,7 @@ pub type Backend = sc_client_db::Backend; /// Test client type. 
pub type Client = client::Client< Backend, - client::LocalCallExecutor, + client::LocalCallExecutor, node_primitives::Block, kitchensink_runtime::RuntimeApi, >; @@ -63,7 +63,7 @@ pub trait TestClientBuilderExt: Sized { impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< node_primitives::Block, - client::LocalCallExecutor, + client::LocalCallExecutor, Backend, GenesisParameters, > @@ -71,8 +71,17 @@ impl TestClientBuilderExt fn new() -> Self { Self::default() } - fn build(self) -> Client { - self.build_with_native_executor(None).0 + let executor = RuntimeExecutor::builder().build(); + use sc_service::client::LocalCallExecutor; + use std::sync::Arc; + let executor = LocalCallExecutor::new( + self.backend().clone(), + executor.clone(), + Default::default(), + ExecutionExtensions::new(None, Arc::new(executor)), + ) + .expect("Creates LocalCallExecutor"); + self.build_with_executor(executor).0 } } diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index 22a8f5deb19f737537227ae21b4c0c20770f7759..9940077c9da638360e26d9722d61fbebdc6d124c 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -78,7 +78,9 @@ pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { frame_system::CheckEra::from(Era::mortal(256, 0)), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + ), ) } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index 0b373b8e9247f417d24fe1335d24093deaa5b590..dcbe26f6a8ffe363c5cd6123add2619d4b72eba4 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,15 +23,8 @@ name = "chain-spec-builder" crate-type = ["rlib"] [dependencies] -ansi_term = "0.12.1" -clap = { version = "4.4.6", features = ["derive"] } -rand = "0.8" -kitchensink-runtime = { version = "3.0.0-dev", path = "../../node/runtime" } +clap = { version = "4.4.11", features = ["derive"] } log = "0.4.17" -node-cli = { package = "staging-node-cli", path = "../../node/cli" } sc-chain-spec = { path = "../../../client/chain-spec" } -sc-keystore = { path = "../../../client/keystore" } serde_json = "1.0.108" -sp-core = { path = "../../../primitives/core" } -sp-keystore = { path = "../../../primitives/keystore" } sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } diff --git a/substrate/bin/utils/chain-spec-builder/bin/main.rs b/substrate/bin/utils/chain-spec-builder/bin/main.rs index 83892afd6ace5c5562cd9bab6faab0b346f7a52e..986293179a91540a2b020aeb39d9285fefef4453 100644 --- a/substrate/bin/utils/chain-spec-builder/bin/main.rs +++ b/substrate/bin/utils/chain-spec-builder/bin/main.rs @@ -17,14 +17,11 @@ // along with this program. If not, see . 
use chain_spec_builder::{ - generate_authority_keys_and_store, generate_chain_spec, generate_chain_spec_for_runtime, - print_seeds, ChainSpecBuilder, ChainSpecBuilderCmd, EditCmd, GenerateCmd, NewCmd, VerifyCmd, + generate_chain_spec_for_runtime, ChainSpecBuilder, ChainSpecBuilderCmd, ConvertToRawCmd, + UpdateCodeCmd, VerifyCmd, }; use clap::Parser; -use node_cli::chain_spec; -use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; use sc_chain_spec::{update_code_in_json_chain_spec, GenericChainSpec}; -use sp_core::{crypto::Ss58Codec, sr25519}; use staging_chain_spec_builder as chain_spec_builder; use std::fs; @@ -32,110 +29,48 @@ fn main() -> Result<(), String> { sp_tracing::try_init_simple(); let builder = ChainSpecBuilder::parse(); - #[cfg(build_type = "debug")] - if matches!(builder.command, ChainSpecBuilderCmd::Generate(_) | ChainSpecBuilderCmd::New(_)) { - println!( - "The chain spec builder builds a chain specification that includes a Substrate runtime \ - compiled as WASM. To ensure proper functioning of the included runtime compile (or run) \ - the chain spec builder binary in `--release` mode.\n", - ); - } - let chain_spec_path = builder.chain_spec_path.to_path_buf(); - let mut write_chain_spec = true; - - let chain_spec_json = match builder.command { - ChainSpecBuilderCmd::Generate(GenerateCmd { - authorities, - nominators, - endowed, - keystore_path, - }) => { - let authorities = authorities.max(1); - let rand_str = || -> String { - OsRng.sample_iter(&Alphanumeric).take(32).map(char::from).collect() - }; - - let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); - let nominator_seeds = (0..nominators).map(|_| rand_str()).collect::>(); - let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); - let sudo_seed = rand_str(); - - print_seeds(&authority_seeds, &nominator_seeds, &endowed_seeds, &sudo_seed); - - if let Some(keystore_path) = keystore_path { - generate_authority_keys_and_store(&authority_seeds, &keystore_path)?; - } - - let nominator_accounts = nominator_seeds - .into_iter() - .map(|seed| { - chain_spec::get_account_id_from_seed::(&seed).to_ss58check() - }) - .collect(); - - let endowed_accounts = endowed_seeds - .into_iter() - .map(|seed| { - chain_spec::get_account_id_from_seed::(&seed).to_ss58check() - }) - .collect(); - let sudo_account = - chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); - - generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account) + match builder.command { + ChainSpecBuilderCmd::Create(cmd) => { + let chain_spec_json = generate_chain_spec_for_runtime(&cmd)?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; }, - ChainSpecBuilderCmd::New(NewCmd { - authority_seeds, - nominator_accounts, - endowed_accounts, - sudo_account, - }) => - generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account), - ChainSpecBuilderCmd::Runtime(cmd) => generate_chain_spec_for_runtime(&cmd), - ChainSpecBuilderCmd::Edit(EditCmd { + ChainSpecBuilderCmd::UpdateCode(UpdateCodeCmd { ref input_chain_spec, ref runtime_wasm_path, - convert_to_raw, }) => { let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; let mut chain_spec_json = - serde_json::from_str::(&chain_spec.as_json(convert_to_raw)?) + serde_json::from_str::(&chain_spec.as_json(false)?) 
.map_err(|e| format!("Conversion to json failed: {e}"))?; - if let Some(path) = runtime_wasm_path { - update_code_in_json_chain_spec( - &mut chain_spec_json, - &fs::read(path.as_path()) - .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], - ); - } - - serde_json::to_string_pretty(&chain_spec_json) - .map_err(|e| format!("to pretty failed: {e}")) + update_code_in_json_chain_spec( + &mut chain_spec_json, + &fs::read(runtime_wasm_path.as_path()) + .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], + ); + + let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json) + .map_err(|e| format!("to pretty failed: {e}"))?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; }, - ChainSpecBuilderCmd::Verify(VerifyCmd { ref input_chain_spec, ref runtime_wasm_path }) => { - write_chain_spec = false; + ChainSpecBuilderCmd::ConvertToRaw(ConvertToRawCmd { ref input_chain_spec }) => { let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; - let mut chain_spec_json = + + let chain_spec_json = serde_json::from_str::(&chain_spec.as_json(true)?) .map_err(|e| format!("Conversion to json failed: {e}"))?; - if let Some(path) = runtime_wasm_path { - update_code_in_json_chain_spec( - &mut chain_spec_json, - &fs::read(path.as_path()) - .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], - ); - }; - serde_json::to_string_pretty(&chain_spec_json) - .map_err(|e| format!("to pretty failed: {e}")) - }, - }?; - if write_chain_spec { - fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string()) - } else { - Ok(()) - } + let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json) + .map_err(|e| format!("Conversion to pretty failed: {e}"))?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; + }, + ChainSpecBuilderCmd::Verify(VerifyCmd { ref input_chain_spec }) => { + let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let _ = serde_json::from_str::(&chain_spec.as_json(true)?) + .map_err(|e| format!("Conversion to json failed: {e}"))?; + }, + }; + Ok(()) } diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 6f21b68c3684a82baeae99786f4483b39a01bbe0..8c78030c885484b7d1f9ba3213dbfa76faa36c52 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -21,31 +21,75 @@ //! A chain-spec is short for `chain-configuration`. See the [`sc-chain-spec`] for more information. //! //! Note that this binary is analogous to the `build-spec` subcommand, contained in typical -//! substrate-based nodes. This particular binary is capable of building a more sophisticated chain -//! specification that can be used with the substrate-node, ie. [`node-cli`]. +//! substrate-based nodes. This particular binary is capable of interacting with +//! [`sp-genesis-builder`] implementation of any provided runtime allowing to build chain-spec JSON +//! files. //! -//! See [`ChainSpecBuilder`] for a list of available commands. +//! See [`ChainSpecBuilderCmd`] for a list of available commands. +//! +//! ## Typical use-cases. +//! ##### Get default config from runtime. +//! +//! Query the default genesis config from the provided `runtime.wasm` and use it in the chain +//! spec. The tool allows specifying where to write the chain spec, and optionally also where the +//! 
write the default genesis state config (which is `/dev/stdout` in the following example): +//! ```text +//! chain-spec-builder --chain_spec_path ./my_chain_spec.json create -r runtime.wasm default /dev/stdout +//! ``` +//! +//! _Note:_ [`GenesisBuilder::create_default_config`][sp-genesis-builder-create] runtime function is +//! called. +//! +//! +//! ##### Generate raw storage chain spec using genesis config patch. +//! +//! Patch the runtime's default genesis config with provided `patch.json` and generate raw +//! storage (`-s`) version of chain spec: +//! +//! ```bash +//! chain-spec-builder create -s -r runtime.wasm patch patch.json +//! ``` +//! +//! _Note:_ [`GenesisBuilder::build_config`][sp-genesis-builder-build] runtime function is called. +//! +//! ##### Generate raw storage chain spec using full genesis config. +//! +//! Build the chain spec using provided full genesis config json file. No defaults will be used: +//! +//! ```bash +//! chain-spec-builder create -s -r runtime.wasm full full-genesis-config.json +//! ``` +//! +//! _Note_: [`GenesisBuilder::build_config`][sp-genesis-builder-build] runtime function is called. +//! +//! ##### Generate human readable chain spec using provided genesis config patch. +//! ```bash +//! chain-spec-builder create -r runtime.wasm patch patch.json +//! ``` +//! +//! ##### Generate human readable chain spec using provided full genesis config. +//! +//! ```bash +//! chain-spec-builder create -r runtime.wasm full full-genesis-config.json +//! ``` +//! +//! ##### Extra tools. +//! The `chain-spec-builder` provides also some extra utilities: [`VerifyCmd`], [`ConvertToRawCmd`], +//! [`UpdateCodeCmd`]. //! //! [`sc-chain-spec`]: ../sc_chain_spec/index.html //! [`node-cli`]: ../node_cli/index.html +//! [`sp-genesis-builder`]: ../sp_genesis_builder/index.html +//! [sp-genesis-builder-create]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.create_default_config +//! [sp-genesis-builder-build]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.build_config -use std::{ - fs, - path::{Path, PathBuf}, -}; +use std::{fs, path::PathBuf}; -use ansi_term::Style; use clap::{Parser, Subcommand}; -use sc_chain_spec::GenesisConfigBuilderRuntimeCaller; - -use node_cli::chain_spec::{self, AccountId}; -use sc_keystore::LocalKeystore; +use sc_chain_spec::{GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; use serde_json::Value; -use sp_core::crypto::{ByteArray, Ss58Codec}; -use sp_keystore::KeystorePtr; -/// A utility to easily create a testnet chain spec definition with a given set -/// of authorities and endowed accounts and/or generate random accounts. +/// A utility to easily create a chain spec definition. #[derive(Debug, Parser)] #[command(rename_all = "kebab-case")] pub struct ChainSpecBuilder { @@ -59,70 +103,25 @@ pub struct ChainSpecBuilder { #[derive(Debug, Subcommand)] #[command(rename_all = "kebab-case")] pub enum ChainSpecBuilderCmd { - New(NewCmd), - Generate(GenerateCmd), - Runtime(RuntimeCmd), - Edit(EditCmd), + Create(CreateCmd), Verify(VerifyCmd), -} - -/// Create a new chain spec with the given authorities, endowed and sudo -/// accounts. Only works for kitchen-sink runtime -#[derive(Parser, Debug)] -#[command(rename_all = "kebab-case")] -pub struct NewCmd { - /// Authority key seed. - #[arg(long, short, required = true)] - pub authority_seeds: Vec, - /// Active nominators (SS58 format), each backing a random subset of the aforementioned - /// authorities. 
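Under the new command set, `update-code` and `convert-to-raw` boil down to the same plumbing seen in the rewritten `main.rs` above: load the chain-spec JSON, adjust it as a `serde_json::Value`, pretty-print, and write it back. A simplified, self-contained sketch of that flow, assuming the `serde_json` and `hex` crates; the real tool delegates the mutation to `update_code_in_json_chain_spec` and `ChainSpec::as_json`, so the flat `"code"` key used here is only a stand-in:

```rust
// Simplified sketch of the update-code flow: read a chain-spec JSON file,
// splice in a new runtime blob, and write the result back pretty-printed.
// Where the code actually lives inside a real chain spec is handled by
// `update_code_in_json_chain_spec`; the top-level "code" field is illustrative only.
use std::fs;

fn update_code(spec_path: &str, wasm_path: &str, out_path: &str) -> Result<(), String> {
    let spec = fs::read_to_string(spec_path).map_err(|e| e.to_string())?;
    let mut json: serde_json::Value =
        serde_json::from_str(&spec).map_err(|e| format!("Conversion to json failed: {e}"))?;

    let wasm =
        fs::read(wasm_path).map_err(|e| format!("Wasm blob file could not be read: {e}"))?;
    json["code"] = serde_json::Value::String(format!("0x{}", hex::encode(wasm)));

    let pretty =
        serde_json::to_string_pretty(&json).map_err(|e| format!("to pretty failed: {e}"))?;
    fs::write(out_path, pretty).map_err(|e| e.to_string())
}

fn main() -> Result<(), String> {
    update_code("chain_spec.json", "runtime.wasm", "chain_spec.updated.json")
}
```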
- #[arg(long, short, default_value = "0")] - pub nominator_accounts: Vec, - /// Endowed account address (SS58 format). - #[arg(long, short)] - pub endowed_accounts: Vec, - /// Sudo account address (SS58 format). - #[arg(long, short)] - pub sudo_account: String, -} - -/// Create a new chain spec with the given number of authorities and endowed -/// accounts. Random keys will be generated as required. -#[derive(Parser, Debug)] -pub struct GenerateCmd { - /// The number of authorities. - #[arg(long, short)] - pub authorities: usize, - /// The number of nominators backing the aforementioned authorities. - /// - /// Will nominate a random subset of `authorities`. - #[arg(long, short, default_value_t = 0)] - pub nominators: usize, - /// The number of endowed accounts. - #[arg(long, short, default_value_t = 0)] - pub endowed: usize, - /// Path to use when saving generated keystores for each authority. - /// - /// At this path, a new folder will be created for each authority's - /// keystore named `auth-$i` where `i` is the authority index, i.e. - /// `auth-0`, `auth-1`, etc. - #[arg(long, short)] - pub keystore_path: Option, + UpdateCode(UpdateCodeCmd), + ConvertToRaw(ConvertToRawCmd), } /// Create a new chain spec by interacting with the provided runtime wasm blob. #[derive(Parser, Debug)] -pub struct RuntimeCmd { - /// The name of chain +pub struct CreateCmd { + /// The name of chain. #[arg(long, short = 'n', default_value = "Custom")] chain_name: String, - /// The chain id + /// The chain id. #[arg(long, short = 'i', default_value = "custom")] chain_id: String, - /// The path to runtime wasm blob + /// The path to runtime wasm blob. #[arg(long, short)] runtime_wasm_path: PathBuf, - /// Export chainspec as raw storage + /// Export chainspec as raw storage. #[arg(long, short = 's')] raw_storage: bool, /// Verify the genesis config. This silently generates the raw storage from genesis config. Any @@ -144,7 +143,6 @@ enum GenesisBuildAction { #[derive(Parser, Debug, Clone)] struct PatchCmd { /// The path to the runtime genesis config patch. - #[arg(long, short)] patch_path: PathBuf, } @@ -152,7 +150,6 @@ struct PatchCmd { #[derive(Parser, Debug, Clone)] struct FullCmd { /// The path to the full runtime genesis config json file. - #[arg(long, short)] config_path: PathBuf, } @@ -163,161 +160,45 @@ struct FullCmd { struct DefaultCmd { /// If provided stores the default genesis config json file at given path (in addition to /// chain-spec). - #[arg(long, short)] default_config_path: Option, } -/// Edits provided input chain spec. Input can be converted into raw storage chain-spec. The code -/// can be updated with the runtime provided in the command line. +/// Updates the code in the provided input chain spec. +/// +/// The code field of the chain spec will be updated with the runtime provided in the +/// command line. This operation supports both plain and raw formats. #[derive(Parser, Debug, Clone)] -pub struct EditCmd { - /// Chain spec to be edited - #[arg(long, short)] +pub struct UpdateCodeCmd { + /// Chain spec to be updated. pub input_chain_spec: PathBuf, - /// The path to new runtime wasm blob to be stored into chain-spec - #[arg(long, short = 'r')] - pub runtime_wasm_path: Option, - /// Convert genesis spec to raw format - #[arg(long, short = 's')] - pub convert_to_raw: bool, + /// The path to new runtime wasm blob to be stored into chain-spec. + pub runtime_wasm_path: PathBuf, } -/// Verifies provided input chain spec. 
If the runtime is provided verification is performed against -/// new runtime. +/// Converts the given chain spec into the raw format. #[derive(Parser, Debug, Clone)] -pub struct VerifyCmd { - /// Chain spec to be edited - #[arg(long, short)] +pub struct ConvertToRawCmd { + /// Chain spec to be converted. pub input_chain_spec: PathBuf, - /// The path to new runtime wasm blob to be stored into chain-spec - #[arg(long, short = 'r')] - pub runtime_wasm_path: Option, -} - -/// Generate the chain spec using the given seeds and accounts. -pub fn generate_chain_spec( - authority_seeds: Vec, - nominator_accounts: Vec, - endowed_accounts: Vec, - sudo_account: String, -) -> Result { - let parse_account = |address: String| { - AccountId::from_string(&address) - .map_err(|err| format!("Failed to parse account address: {:?}", err)) - }; - - let nominator_accounts = nominator_accounts - .into_iter() - .map(parse_account) - .collect::, String>>()?; - - let endowed_accounts = endowed_accounts - .into_iter() - .map(parse_account) - .collect::, String>>()?; - - let sudo_account = parse_account(sudo_account)?; - - let authorities = authority_seeds - .iter() - .map(AsRef::as_ref) - .map(chain_spec::authority_keys_from_seed) - .collect::>(); - - chain_spec::ChainSpec::builder(kitchensink_runtime::wasm_binary_unwrap(), Default::default()) - .with_name("Custom") - .with_id("custom") - .with_chain_type(sc_chain_spec::ChainType::Live) - .with_genesis_config_patch(chain_spec::testnet_genesis( - authorities, - nominator_accounts, - sudo_account, - Some(endowed_accounts), - )) - .build() - .as_json(false) -} - -/// Generate the authority keys and store them in the given `keystore_path`. -pub fn generate_authority_keys_and_store( - seeds: &[String], - keystore_path: &Path, -) -> Result<(), String> { - for (n, seed) in seeds.iter().enumerate() { - let keystore: KeystorePtr = - LocalKeystore::open(keystore_path.join(format!("auth-{}", n)), None) - .map_err(|err| err.to_string())? 
- .into(); - - let (_, _, grandpa, babe, im_online, authority_discovery, mixnet) = - chain_spec::authority_keys_from_seed(seed); - - let insert_key = |key_type, public| { - keystore - .insert(key_type, &format!("//{}", seed), public) - .map_err(|_| format!("Failed to insert key: {}", grandpa)) - }; - - insert_key(sp_core::crypto::key_types::BABE, babe.as_slice())?; - - insert_key(sp_core::crypto::key_types::GRANDPA, grandpa.as_slice())?; - - insert_key(sp_core::crypto::key_types::IM_ONLINE, im_online.as_slice())?; - - insert_key( - sp_core::crypto::key_types::AUTHORITY_DISCOVERY, - authority_discovery.as_slice(), - )?; - - insert_key(sp_core::crypto::key_types::MIXNET, mixnet.as_slice())?; - } - - Ok(()) } -/// Print the given seeds -pub fn print_seeds( - authority_seeds: &[String], - nominator_seeds: &[String], - endowed_seeds: &[String], - sudo_seed: &str, -) { - let header = Style::new().bold().underline(); - let entry = Style::new().bold(); - - println!("{}", header.paint("Authority seeds")); - - for (n, seed) in authority_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("auth-{}:", n)), seed); - } - - println!("{}", header.paint("Nominator seeds")); - - for (n, seed) in nominator_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("nom-{}:", n)), seed); - } - - println!(); - - if !endowed_seeds.is_empty() { - println!("{}", header.paint("Endowed seeds")); - for (n, seed) in endowed_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("endowed-{}:", n)), seed); - } - - println!(); - } - - println!("{}", header.paint("Sudo seed")); - println!("//{}", sudo_seed); +/// Verifies the provided input chain spec. +/// +/// Silently checks if the given input chain spec can be converted to raw. It allows checking whether all +/// RuntimeGenesisConfig fields are properly initialized and whether the json contains any +/// invalid fields. +#[derive(Parser, Debug, Clone)] +pub struct VerifyCmd { + /// Chain spec to be verified. + pub input_chain_spec: PathBuf, } -/// Processes `RuntimeCmd` and returns JSON version of `ChainSpec` -pub fn generate_chain_spec_for_runtime(cmd: &RuntimeCmd) -> Result { +/// Processes `CreateCmd` and returns JSON version of `ChainSpec`.
+pub fn generate_chain_spec_for_runtime(cmd: &CreateCmd) -> Result { let code = fs::read(cmd.runtime_wasm_path.as_path()) .map_err(|e| format!("wasm blob shall be readable {e}"))?; - let builder = chain_spec::ChainSpec::builder(&code[..], Default::default()) + let builder = GenericChainSpec::<()>::builder(&code[..], Default::default()) .with_name(&cmd.chain_name[..]) .with_id(&cmd.chain_id[..]) .with_chain_type(sc_chain_spec::ChainType::Live); diff --git a/substrate/bin/utils/subkey/Cargo.toml b/substrate/bin/utils/subkey/Cargo.toml index 6606d8ac365f9f9e68805808ef6db62662fbeac5..58aa036a631d68e2804b75020d3529f6adfa380f 100644 --- a/substrate/bin/utils/subkey/Cargo.toml +++ b/substrate/bin/utils/subkey/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,5 +20,5 @@ path = "src/main.rs" name = "subkey" [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } sc-cli = { path = "../../../client/cli" } diff --git a/substrate/client/allocator/Cargo.toml b/substrate/client/allocator/Cargo.toml index 31c714180ce57b0684ebfb7a8e27f7d2b42857ea..ef13c1a4573f36665e42b84f3bcbca8436afe45d 100644 --- a/substrate/client/allocator/Cargo.toml +++ b/substrate/client/allocator/Cargo.toml @@ -10,6 +10,9 @@ description = "Collection of allocator implementations." documentation = "https://docs.rs/sc-allocator" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/allocator/src/lib.rs b/substrate/client/allocator/src/lib.rs index e50d7d54c8e97659a60a391d40561f58d13d4d13..70ed764bef8c166177137cf79f80825320ef3cdb 100644 --- a/substrate/client/allocator/src/lib.rs +++ b/substrate/client/allocator/src/lib.rs @@ -18,7 +18,7 @@ //! Collection of allocator implementations. //! //! This crate provides the following allocator implementations: -//! - A freeing-bump allocator: [`FreeingBumpHeapAllocator`](freeing_bump::FreeingBumpHeapAllocator) +//! - A freeing-bump allocator: [`FreeingBumpHeapAllocator`] #![warn(missing_docs)] diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index 2b64c86038dda3ad5a37962c6dfc4a3230f7a4b2..8c50b872914419478c51ca5d498e45243b64082f 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -10,6 +10,9 @@ description = "Substrate client interfaces." 
documentation = "https://docs.rs/sc-client-api" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -28,10 +31,10 @@ sc-utils = { path = "../utils" } sp-api = { path = "../../primitives/api" } sp-blockchain = { path = "../../primitives/blockchain" } sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } sp-database = { path = "../../primitives/database" } sp-externalities = { path = "../../primitives/externalities" } -sp-runtime = { path = "../../primitives/runtime", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-state-machine = { path = "../../primitives/state-machine" } sp-statement-store = { path = "../../primitives/statement-store" } sp-storage = { path = "../../primitives/storage" } diff --git a/substrate/client/api/src/call_executor.rs b/substrate/client/api/src/call_executor.rs index 49b51ccc943edab005390a0726426f106e55b458..d131cbcec00262d8dc65616ab03c31c75d744972 100644 --- a/substrate/client/api/src/call_executor.rs +++ b/substrate/client/api/src/call_executor.rs @@ -21,12 +21,12 @@ use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_core::traits::CallContext; use sp_externalities::Extensions; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, HashingFor}; use sp_state_machine::{OverlayedChanges, StorageProof}; use std::cell::RefCell; use crate::execution_extensions::ExecutionExtensions; -use sp_api::{HashingFor, ProofRecorder}; +use sp_api::ProofRecorder; /// Executor Provider pub trait ExecutorProvider { diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index e334f2f9fb4f68aa55cb7ebcd7440f9462342fb9..46232c74539c6c230652a753b5fcd4b6761600d8 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -25,7 +25,11 @@ use sp_runtime::{ traits::{Block as BlockT, NumberFor}, Justifications, }; -use std::{collections::HashSet, fmt, sync::Arc}; +use std::{ + collections::HashSet, + fmt::{self, Debug}, + sync::Arc, +}; use crate::{blockchain::Info, notifications::StorageEventStream, FinalizeSummary, ImportSummary}; @@ -271,13 +275,18 @@ impl fmt::Display for UsageInfo { } /// Sends a message to the pinning-worker once dropped to unpin a block in the backend. -#[derive(Debug)] pub struct UnpinHandleInner { /// Hash of the block pinned by this handle hash: Block::Hash, unpin_worker_sender: TracingUnboundedSender, } +impl Debug for UnpinHandleInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("UnpinHandleInner").field("pinned_block", &self.hash).finish() + } +} + impl UnpinHandleInner { /// Create a new [`UnpinHandleInner`] pub fn new( diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index 6f927105df0bf46927356e4b897c72c4d5e0e19b..26d3ae73f69f26c0923aa700397d09863003989e 100644 --- a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -91,7 +91,6 @@ impl ExtensionsFactory /// /// This crate aggregates extensions available for the offchain calls /// and is responsible for producing a correct `Extensions` object. -/// for each call, based on required `Capabilities`. 
pub struct ExecutionExtensions { extensions_factory: RwLock>>, read_runtime_version: Arc, @@ -116,8 +115,7 @@ impl ExecutionExtensions { *self.extensions_factory.write() = Box::new(maker); } - /// Based on the execution context and capabilities it produces - /// the extensions object to support desired set of APIs. + /// Produces default extensions based on the input parameters. pub fn extensions( &self, block_hash: Block::Hash, @@ -127,7 +125,6 @@ impl ExecutionExtensions { self.extensions_factory.read().extensions_for(block_hash, block_number); extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone())); - extensions } } diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs index 807bdf0e334725eba64d99eb4edb88215436c073..b933ed1f17e01a8822b958ac6a064fe2f3503926 100644 --- a/substrate/client/api/src/in_mem.rs +++ b/substrate/client/api/src/in_mem.rs @@ -812,9 +812,8 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { #[cfg(test)] mod tests { use crate::{in_mem::Blockchain, NewBlockState}; - use sp_api::HeaderT; use sp_blockchain::Backend; - use sp_runtime::{ConsensusEngineId, Justifications}; + use sp_runtime::{traits::Header as HeaderT, ConsensusEngineId, Justifications}; use substrate_test_runtime::{Block, Header, H256}; pub const ID1: ConsensusEngineId = *b"TST1"; diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index ef2fdcfd485f1b08539e83c9deb82ba82232d58c..a8a28a501ea821c9a954dfb0256808991337d3af 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -10,6 +10,9 @@ repository.workspace = true description = "Substrate authority discovery." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -21,8 +24,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = futures = "0.3.21" futures-timer = "3.0.1" ip_network = "0.4.1" -libp2p = { version = "0.51.3", features = ["kad", "ed25519"] } -multihash = { version = "0.17.0", default-features = false, features = ["std", "sha2"] } +libp2p = { version = "0.51.4", features = ["ed25519", "kad"] } +multihash = { version = "0.18.1", default-features = false, features = [ + "sha2", + "std", +] } log = "0.4.17" prost = "0.11" rand = "0.8.5" @@ -36,7 +42,12 @@ sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } sp-keystore = { path = "../../primitives/keystore" } sp-runtime = { path = "../../primitives/runtime" } -async-trait = "0.1.56" +async-trait = "0.1.74" +multihash-codetable = { version = "0.1.1", features = [ + "digest", + "serde", + "sha2", +] } [dev-dependencies] quickcheck = { version = "1.0.3", default-features = false } diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index a29e74df9accc98c50bdf1608612543299936efa..6db25416dee78323a1531f5868be2601af3eaee0 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -35,7 +35,7 @@ use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; use libp2p::{core::multiaddr, identity::PublicKey, multihash::Multihash, Multiaddr, PeerId}; -use multihash::{Code, MultihashDigest}; +use multihash_codetable::{Code, MultihashDigest}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index 1d60fc7f53e3b5903153132fc48a6f8c026983dc..926909ec7b764ed4f0c90c23f5b2a0c91c101311 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Basic implementation of block-authoring logic." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 2492c4101b29a8fb2e2e49b6bec4bfa38b4a9a52..4477f5f1d776c43ebd9129f4350216d6d638e5c9 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Substrate block builder" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,6 +23,7 @@ sp-api = { path = "../../primitives/api" } sp-block-builder = { path = "../../primitives/block-builder" } sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } +sp-trie = { path = "../../primitives/trie" } sp-inherents = { path = "../../primitives/inherents" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index f62b941fdb1848e879e777b28164d45d6686e2df..258e39d962b2de2397d85a54223c155e821ddfa3 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -42,6 +42,7 @@ use sp_runtime::{ use std::marker::PhantomData; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_trie::proof_size_extension::ProofSizeExt; /// A builder for creating an instance of [`BlockBuilder`]. pub struct BlockBuilderBuilder<'a, B, C> { @@ -235,6 +236,10 @@ where if record_proof { api.record_proof(); + let recorder = api + .proof_recorder() + .expect("Proof recording is enabled in the line above; qed."); + api.register_extension(ProofSizeExt::new(recorder)); } api.set_call_context(CallContext::Onchain); diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index 5b7cdda8ebe4fc6288df89f546a5cb5ade5627de..c870ff19b2ade75ca120a9ea166ea55eaa8faeef 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -9,13 +9,16 @@ repository.workspace = true description = "Substrate chain configurations." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } memmap2 = "0.5.0" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" sc-client-api = { path = "../api" } sc-chain-spec-derive = { path = "derive" } @@ -35,5 +38,5 @@ docify = "0.2.0" [dev-dependencies] substrate-test-runtime = { path = "../../test-utils/runtime" } sp-keyring = { path = "../../primitives/keyring" } -sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } +sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } sp-consensus-babe = { default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index 74b8b656a4042bdd348cf2997fa3b9ae686fe2cd..a63520ffb3103f94358062cfe04e5f85ecc6d1a6 100644 --- a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Macros to derive chain spec extension traits implementation." +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.1" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.38" +syn = "2.0.41" diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index 8d97d941022978198e14015aaa365f12d9b218b6..fe8fcfda216e1fa86215daf93add2aa6adc78bd9 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -784,9 +784,7 @@ fn json_eval_value_at_key( path: &mut VecDeque<&str>, fun: &dyn Fn(&json::Value) -> bool, ) -> bool { - let Some(key) = path.pop_front() else { - return false; - }; + let Some(key) = path.pop_front() else { return false }; if path.is_empty() { doc.as_object().map_or(false, |o| o.get(key).map_or(false, |v| fun(v))) diff --git a/substrate/client/chain-spec/src/extension.rs b/substrate/client/chain-spec/src/extension.rs index 25ab011a05b323bf8874519b2f1efcb5533d4f91..f2939741535f730787846d2a641a2721f5cec9e5 100644 --- a/substrate/client/chain-spec/src/extension.rs +++ b/substrate/client/chain-spec/src/extension.rs @@ -284,7 +284,7 @@ where } } -/// A subset if the `Extension` trait that only allows for quering extensions. +/// A subset of the `Extension` trait that only allows for quering extensions. pub trait GetExtension { /// Get an extension of specific type. 
fn get_any(&self, t: TypeId) -> &dyn Any; diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index 9ccf6b4efb203c6942cb28f1b8040f2982f3c119..d6ef99fafdd0325b0136f3013453465f361bf2bb 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -76,7 +76,6 @@ where &RuntimeCode { heap_pages: None, code_fetcher: self, hash: self.code_hash.clone() }, method, data, - false, CallContext::Offchain, ) .0 @@ -142,7 +141,7 @@ where mod tests { use super::*; use serde_json::{from_str, json}; - pub use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Slot}; + pub use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration}; #[test] fn get_default_config_works() { diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index c415527c372cafb3b6be296a1387c598e516bbd7..80d3815fbc6fdcff595f729e14175bcc94ba488d 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -9,34 +9,37 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" -chrono = "0.4.27" -clap = { version = "4.4.6", features = ["derive", "string", "wrap_help"] } -fdlimit = "0.2.1" +chrono = "0.4.31" +clap = { version = "4.4.11", features = ["derive", "string", "wrap_help"] } +fdlimit = "0.3.0" futures = "0.3.21" itertools = "0.10.3" -libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]} +libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = "0.4.17" -names = { version = "0.13.0", default-features = false } +names = { version = "0.14.0", default-features = false } parity-scale-codec = "3.6.1" rand = "0.8.5" regex = "1.6.0" rpassword = "7.0.0" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" thiserror = "1.0.48" bip39 = "2.0.0" -tokio = { version = "1.22.0", features = ["signal", "rt-multi-thread", "parking_lot"] } +tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "signal"] } sc-client-api = { path = "../api" } -sc-client-db = { path = "../db", default-features = false} +sc-client-db = { path = "../db", default-features = false } sc-keystore = { path = "../keystore" } sc-mixnet = { path = "../mixnet" } sc-network = { path = "../network" } -sc-service = { path = "../service", default-features = false} +sc-service = { path = "../service", default-features = false } sc-telemetry = { path = "../telemetry" } sc-tracing = { path = "../tracing" } sc-utils = { path = "../utils" } @@ -54,5 +57,5 @@ futures-timer = "3.0.1" sp-tracing = { path = "../../primitives/tracing" } [features] -default = [ "rocksdb" ] -rocksdb = [ "sc-client-db/rocksdb" ] +default = ["rocksdb"] +rocksdb = ["sc-client-db/rocksdb"] diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index c55b97675da25d871736d517081280508ce5ed1e..d4a4b7cfdf6d13a5bf4006d6365a6e56de48c848 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -225,7 +225,7 @@ pub enum OffchainWorkerEnabled { #[derive(Debug, Clone, Copy, ValueEnum, PartialEq)] #[value(rename_all = "kebab-case")] pub enum SyncMode { - /// Full sync. Download end verify all blocks. + /// Full sync. Download and verify all blocks. Full, /// Download blocks without executing them. 
Download latest state with proofs. Fast, diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 4d218da6aa89891d3d51c28882978c0b232a8d53..b842df5a690a2a72c51d2fd69bd1324051a07a24 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -605,14 +605,25 @@ pub trait CliConfiguration: Sized { logger.init()?; - if let Some(new_limit) = fdlimit::raise_fd_limit() { - if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + match fdlimit::raise_fd_limit() { + Ok(fdlimit::Outcome::LimitRaised { to, .. }) => + if to < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + warn!( + "Low open file descriptor limit configured for the process. \ + Current value: {:?}, recommended value: {:?}.", + to, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + ); + }, + Ok(fdlimit::Outcome::Unsupported) => { + // Unsupported platform (non-Linux) + }, + Err(error) => { warn!( - "Low open file descriptor limit configured for the process. \ - Current value: {:?}, recommended value: {:?}.", - new_limit, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + "Failed to configure file descriptor limit for the process: \ + {}, recommended value: {:?}.", + error, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, ); - } + }, } Ok(()) diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index 6419e15c62ab4afb25ea3fbccb7379f8646a413a..465372fba17d4bb8036477d587e37f12cdd414d6 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -35,6 +35,7 @@ pub struct SharedParams { /// /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, /// `--alice`, and `--tmp` flags, unless explicitly overridden. + /// It also disables local peer discovery (see --no-mdns and --discover-local) #[arg(long, conflicts_with_all = &["chain"])] pub dev: bool, diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index bc9648f683a880afe7eded0a079efdbcafcd5cc3..89a63a944166250cfa3fd6ca603ba9b7dc3fe4a3 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -9,11 +9,14 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" log = "0.4.17" diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index c8cff0981b36f89e0b6f9120f4c6378f7c6793e7..40c69d5780a537fb63c37601c0403712960c0bea 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -10,17 +10,20 @@ repository.workspace = true documentation = "https://docs.rs/sc-consensus-babe" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" log = "0.4.17" num-bigint = "0.4.3" num-rational = "0.4.1" -num-traits = "0.2.8" +num-traits = "0.2.17" parking_lot = "0.12.1" thiserror = "1.0" fork-tree = { path = "../../../utils/fork-tree" } diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index 
c95d95ae427a4a96a358c1cc772dc5ff76437f6b..b23f3f81d4372dcfa8374d1ec314e9bf3e112b14 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -9,13 +9,16 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } futures = "0.3.21" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0" sc-consensus-babe = { path = ".." } sc-consensus-epochs = { path = "../../epochs" } diff --git a/substrate/client/consensus/babe/src/authorship.rs b/substrate/client/consensus/babe/src/authorship.rs index 3580caba746141a8a9faf9e73216cb8903667acf..fb1722398012b92701f66bcbe5c6c8c292d9084e 100644 --- a/substrate/client/consensus/babe/src/authorship.rs +++ b/substrate/client/consensus/babe/src/authorship.rs @@ -249,7 +249,7 @@ fn claim_primary_slot( .make_bytes::( AUTHORING_SCORE_VRF_CONTEXT, &data.as_ref(), - &vrf_signature.output, + &vrf_signature.pre_output, ) .map(|bytes| u128::from_le_bytes(bytes) < threshold) .unwrap_or_default(); diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index d097f37c325790f134c879d7a38e7a9ba12c559a..38c9e1ff6ac25cc26151c668902838ec65ab6189 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -411,7 +411,7 @@ async fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + ' let mut net = net.lock(); net.poll(cx); for p in net.peers() { - for (h, e) in p.failed_verifications() { + if let Some((h, e)) = p.failed_verifications().into_iter().next() { panic!("Verification failed for {:?}: {}", h, e); } } @@ -580,7 +580,7 @@ fn claim_vrf_check() { }; let data = make_vrf_sign_data(&epoch.randomness.clone(), 0.into(), epoch.epoch_index); let sign = keystore.sr25519_vrf_sign(AuthorityId::ID, &public, &data).unwrap().unwrap(); - assert_eq!(pre_digest.vrf_signature.output, sign.output); + assert_eq!(pre_digest.vrf_signature.pre_output, sign.pre_output); // We expect a SecondaryVRF claim for slot 1 let pre_digest = match claim_slot(1.into(), &epoch, &keystore).unwrap().0 { @@ -589,7 +589,7 @@ fn claim_vrf_check() { }; let data = make_vrf_sign_data(&epoch.randomness.clone(), 1.into(), epoch.epoch_index); let sign = keystore.sr25519_vrf_sign(AuthorityId::ID, &public, &data).unwrap().unwrap(); - assert_eq!(pre_digest.vrf_signature.output, sign.output); + assert_eq!(pre_digest.vrf_signature.pre_output, sign.pre_output); // Check that correct epoch index has been used if epochs are skipped (primary VRF) let slot = Slot::from(103); @@ -601,7 +601,7 @@ fn claim_vrf_check() { let data = make_vrf_sign_data(&epoch.randomness.clone(), slot, fixed_epoch.epoch_index); let sign = keystore.sr25519_vrf_sign(AuthorityId::ID, &public, &data).unwrap().unwrap(); assert_eq!(fixed_epoch.epoch_index, 11); - assert_eq!(claim.vrf_signature.output, sign.output); + assert_eq!(claim.vrf_signature.pre_output, sign.pre_output); // Check that correct epoch index has been used if epochs are skipped (secondary VRF) let slot = Slot::from(100); @@ -613,7 +613,7 @@ fn claim_vrf_check() { let data = make_vrf_sign_data(&epoch.randomness.clone(), slot, 
fixed_epoch.epoch_index); let sign = keystore.sr25519_vrf_sign(AuthorityId::ID, &public, &data).unwrap().unwrap(); assert_eq!(fixed_epoch.epoch_index, 11); - assert_eq!(pre_digest.vrf_signature.output, sign.output); + assert_eq!(pre_digest.vrf_signature.pre_output, sign.pre_output); } // Propose and import a new BABE block on top of the given parent. diff --git a/substrate/client/consensus/babe/src/verification.rs b/substrate/client/consensus/babe/src/verification.rs index 3de5eacc2c519a0b38a9484a687e4975a2c759e6..c6e4ec0c10c135c77f778367d95103c490d9e029 100644 --- a/substrate/client/consensus/babe/src/verification.rs +++ b/substrate/client/consensus/babe/src/verification.rs @@ -185,7 +185,7 @@ fn check_primary_header( .make_bytes::( AUTHORING_SCORE_VRF_CONTEXT, &data.as_ref(), - &pre_digest.vrf_signature.output, + &pre_digest.vrf_signature.pre_output, ) .map(u128::from_le_bytes) .map_err(|_| babe_err(Error::VrfVerificationFailed))?; diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index aae5a44d7fa26da98d90b02d1dcb9dbd22faae06..1736929e9b44b8a7ce3957659344912b79997b1d 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -8,10 +8,13 @@ repository.workspace = true description = "BEEFY Client gadget for substrate" homepage = "https://substrate.io" +[lints] +workspace = true + [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fnv = "1.0.6" futures = "0.3" @@ -38,7 +41,7 @@ sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-runtime = { path = "../../../primitives/runtime" } [dev-dependencies] -serde = "1.0.188" +serde = "1.0.193" tempfile = "3.1.0" tokio = "1.22.0" sc-block-builder = { path = "../../block-builder" } diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index c7464fdc653230100a937408b518c9638b82a976..157b0cc87fc0da237fab6c50e5fda3226bc0206f 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -8,13 +8,16 @@ repository.workspace = true description = "RPC for the BEEFY Client gadget for substrate" homepage = "https://substrate.io" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } log = "0.4" parking_lot = "0.12.1" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0" sc-consensus-beefy = { path = ".." 
} sp-consensus-beefy = { path = "../../../../primitives/consensus/beefy" } @@ -24,6 +27,6 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = "1.0.108" -sc-rpc = { path = "../../../rpc", features = ["test-helpers"]} +sc-rpc = { path = "../../../rpc", features = ["test-helpers"] } substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } tokio = { version = "1.22.0", features = ["macros"] } diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs index 10a6071aae658d2988874092dcaade873870ca37..3827559057dde856d201ebb9c9ff71a1d7d11f47 100644 --- a/substrate/client/consensus/beefy/src/communication/mod.rs +++ b/substrate/client/consensus/beefy/src/communication/mod.rs @@ -67,10 +67,16 @@ pub(crate) mod beefy_protocol_name { /// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`]. pub fn beefy_peers_set_config( gossip_protocol_name: sc_network::ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { - let mut cfg = sc_network::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); +) -> (sc_network::config::NonDefaultSetConfig, Box) { + let (mut cfg, notification_service) = sc_network::config::NonDefaultSetConfig::new( + gossip_protocol_name, + Vec::new(), + 1024 * 1024, + None, + Default::default(), + ); cfg.allow_non_reserved(25, 25); - cfg + (cfg, notification_service) } // cost scalars for reporting peers. diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 89a5d51c88702df76f7e3f0e7bb4f65076531066..e6224cbf3e92b4993b99eb5782c9512f4583bba6 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -38,9 +38,9 @@ use parking_lot::Mutex; use prometheus::Registry; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotifications, Finalizer}; use sc_consensus::BlockImport; -use sc_network::{NetworkRequest, ProtocolName}; +use sc_network::{NetworkRequest, NotificationService, ProtocolName}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing}; -use sp_api::{HeaderT, NumberFor, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::{ Backend as BlockchainBackend, Error as ClientError, HeaderBackend, Result as ClientResult, }; @@ -51,7 +51,7 @@ use sp_consensus_beefy::{ }; use sp_keystore::KeystorePtr; use sp_mmr_primitives::MmrApi; -use sp_runtime::traits::{Block, Zero}; +use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero}; use std::{ collections::{BTreeMap, VecDeque}, marker::PhantomData, @@ -178,6 +178,8 @@ pub struct BeefyNetworkParams { pub network: Arc, /// Syncing service implementing a sync oracle and an event stream for peers. pub sync: Arc, + /// Handle for receiving notification events. + pub notification_service: Box, /// Chain specific BEEFY gossip protocol name. See /// [`communication::beefy_protocol_name::gossip_protocol_name`]. pub gossip_protocol_name: ProtocolName, @@ -243,6 +245,7 @@ pub async fn start_beefy_gadget( let BeefyNetworkParams { network, sync, + notification_service, gossip_protocol_name, justifications_protocol_name, .. 
@@ -264,6 +267,7 @@ pub async fn start_beefy_gadget( let gossip_engine = GossipEngine::new( network.clone(), sync.clone(), + notification_service, gossip_protocol_name.clone(), gossip_validator.clone(), None, @@ -539,25 +543,23 @@ where R: ProvideRuntimeApi, R::Api: BeefyApi, { - debug!(target: LOG_TARGET, "🥩 Try to find validator set active at header: {:?}", at_header); - runtime - .runtime_api() - .validator_set(at_header.hash()) - .ok() - .flatten() - .or_else(|| { - // if state unavailable, fallback to walking up the chain looking for the header - // Digest emitted when validator set active 'at_header' was enacted. - let blockchain = backend.blockchain(); - let mut header = at_header.clone(); - loop { - debug!(target: LOG_TARGET, "🥩 look for auth set change digest in header number: {:?}", *header.number()); - match worker::find_authorities_change::(&header) { - Some(active) => return Some(active), - // Move up the chain. - None => header = blockchain.expect_header(*header.parent_hash()).ok()?, - } + let blockchain = backend.blockchain(); + + // Walk up the chain looking for the validator set active at 'at_header'. Process both state and + // header digests. + debug!(target: LOG_TARGET, "🥩 Trying to find validator set active at header: {:?}", at_header); + let mut header = at_header.clone(); + loop { + if let Ok(Some(active)) = runtime.runtime_api().validator_set(header.hash()) { + return Ok(active) + } else { + debug!(target: LOG_TARGET, "🥩 Looking for auth set change at block number: {:?}", *header.number()); + match worker::find_authorities_change::(&header) { + Some(active) => return Ok(active), + // Move up the chain. Ultimately we'll get it from chain genesis state, or error out + // here. + None => header = blockchain.expect_header(*header.parent_hash())?, } - }) - .ok_or_else(|| ClientError::Backend("Could not find initial validator set".into())) + } + } } diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 3aaa59cbfa1c1c99fdb19c4628b1df19448e0f75..3f800166e26ab72ba338bf146e550c5d0ad56d37 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -72,7 +72,7 @@ use substrate_test_runtime_client::{BlockBuilderExt, ClientExt}; use tokio::time::Duration; const GENESIS_HASH: H256 = H256::zero(); -fn beefy_gossip_proto_name() -> ProtocolName { +pub(crate) fn beefy_gossip_proto_name() -> ProtocolName { gossip_protocol_name(GENESIS_HASH, None) } @@ -371,6 +371,7 @@ async fn voter_init_setup( let mut gossip_engine = sc_network_gossip::GossipEngine::new( net.peer(0).network_service().clone(), net.peer(0).sync_service().clone(), + net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), "/beefy/whatever", gossip_validator, None, @@ -392,6 +393,14 @@ where { let tasks = FuturesUnordered::new(); + let mut notification_services = peers + .iter() + .map(|(peer_id, _, _)| { + let peer = &mut net.peers[*peer_id]; + (*peer_id, peer.take_notification_service(&beefy_gossip_proto_name()).unwrap()) + }) + .collect::>(); + for (peer_id, key, api) in peers.into_iter() { let peer = &net.peers[peer_id]; @@ -409,6 +418,7 @@ where let network_params = crate::BeefyNetworkParams { network: peer.network_service().clone(), sync: peer.sync_service().clone(), + notification_service: notification_services.remove(&peer_id).unwrap(), gossip_protocol_name: beefy_gossip_proto_name(), justifications_protocol_name: on_demand_justif_handler.protocol_name(), _phantom: PhantomData, @@ 
-1045,7 +1055,25 @@ async fn should_initialize_voter_at_custom_genesis() { net.peer(0).client().as_client().finalize_block(hashes[8], None).unwrap(); // load persistent state - nothing in DB, should init at genesis - let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); + // + // NOTE: code from `voter_init_setup()` is moved here because the new network event system + // doesn't allow creating a new `GossipEngine` as the notification handle is consumed by the + // first `GossipEngine` + let known_peers = Arc::new(Mutex::new(KnownPeers::new())); + let (gossip_validator, _) = GossipValidator::new(known_peers); + let gossip_validator = Arc::new(gossip_validator); + let mut gossip_engine = sc_network_gossip::GossipEngine::new( + net.peer(0).network_service().clone(), + net.peer(0).sync_service().clone(), + net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), + "/beefy/whatever", + gossip_validator, + None, + ); + let (beefy_genesis, best_grandpa) = + wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); + let persisted_state = + load_or_init_voter_state(&*backend, &api, beefy_genesis, best_grandpa, 1).unwrap(); // Test initialization at session boundary. // verify voter initialized with single session starting at block `custom_pallet_genesis` (7) @@ -1075,7 +1103,11 @@ async fn should_initialize_voter_at_custom_genesis() { net.peer(0).client().as_client().finalize_block(hashes[10], None).unwrap(); // load persistent state - state preset in DB, but with different pallet genesis - let new_persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); + // the network state persists and uses the old `GossipEngine` initialized for `peer(0)` + let (beefy_genesis, best_grandpa) = + wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); + let new_persisted_state = + load_or_init_voter_state(&*backend, &api, beefy_genesis, best_grandpa, 1).unwrap(); // verify voter initialized with single session starting at block `new_pallet_genesis` (10) let sessions = new_persisted_state.voting_oracle().sessions(); @@ -1371,7 +1403,7 @@ async fn gossipped_finality_proofs() { let api = Arc::new(TestApi::with_validator_set(&validator_set)); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); - let charlie = &net.peers[2]; + let charlie = &mut net.peers[2]; let known_peers = Arc::new(Mutex::new(KnownPeers::::new())); // Charlie will run just the gossip engine and not the full voter. 
let (gossip_validator, _) = GossipValidator::new(known_peers); @@ -1384,6 +1416,7 @@ async fn gossipped_finality_proofs() { let mut charlie_gossip_engine = sc_network_gossip::GossipEngine::new( charlie.network_service().clone(), charlie.sync_service().clone(), + charlie.take_notification_service(&beefy_gossip_proto_name()).unwrap(), beefy_gossip_proto_name(), charlie_gossip_validator.clone(), None, diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 309d8c5135bef96447bc87b1485775fb96f6b255..da73a0d17d7ee612c10f69b1010659693b2ec30e 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -36,7 +36,7 @@ use log::{debug, error, info, log_enabled, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; use sc_network_gossip::GossipEngine; use sc_utils::{mpsc::TracingUnboundedReceiver, notification::NotificationReceiver}; -use sp_api::{BlockId, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ @@ -46,7 +46,7 @@ use sp_consensus_beefy::{ VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; use sp_runtime::{ - generic::OpaqueDigestItemId, + generic::{BlockId, OpaqueDigestItemId}, traits::{Block, Header, NumberFor, Zero}, SaturatedConversion, }; @@ -212,7 +212,7 @@ impl VoterOracle { // Accept any vote for a GRANDPA finalized block in a better round. Ok(( rounds.session_start().max(self.best_beefy_block), - (*self.best_grandpa_block_header.number()).into(), + (*self.best_grandpa_block_header.number()), )) } else { // Current session has mandatory not done. @@ -456,6 +456,7 @@ where .filter(|genesis| *genesis == self.persisted_state.pallet_genesis) .ok_or(Error::ConsensusReset)?; + let mut new_session_added = false; if *header.number() > self.best_grandpa_block() { // update best GRANDPA finalized block we have seen self.persisted_state.set_best_grandpa(header.clone()); @@ -475,9 +476,15 @@ where { if let Some(new_validator_set) = find_authorities_change::(&header) { self.init_session_at(new_validator_set, *header.number()); + new_session_added = true; } } + if new_session_added { + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) + .map_err(|e| Error::Backend(e.to_string()))?; + } + // Update gossip validator votes filter. if let Err(e) = self .persisted_state @@ -848,15 +855,10 @@ where .fuse(), ); + self.process_new_state(); let error = loop { - // Act on changed 'state'. - self.process_new_state(); - // Mutable reference used to drive the gossip engine. let mut gossip_engine = &mut self.comms.gossip_engine; - // Use temp val and report after async section, - // to avoid having to Mutex-wrap `gossip_engine`. - let mut gossip_report: Option = None; // Wait for, and handle external events. 
// The branches below only change 'state', actual voting happens afterwards, @@ -884,10 +886,15 @@ where if let Err(err) = self.triage_incoming_justif(justif) { debug!(target: LOG_TARGET, "🥩 {}", err); } - gossip_report = Some(peer_report); + self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit); + }, + ResponseInfo::PeerReport(peer_report) => { + self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit); + continue; + }, + ResponseInfo::Pending => { + continue; }, - ResponseInfo::PeerReport(peer_report) => gossip_report = Some(peer_report), - ResponseInfo::Pending => (), } }, justif = block_import_justif.next() => { @@ -924,12 +931,15 @@ where }, // Process peer reports. report = self.comms.gossip_report_stream.next() => { - gossip_report = report; + if let Some(PeerReport { who, cost_benefit }) = report { + self.comms.gossip_engine.report(who, cost_benefit); + } + continue; }, } - if let Some(PeerReport { who, cost_benefit }) = gossip_report { - self.comms.gossip_engine.report(who, cost_benefit); - } + + // Act on changed 'state'. + self.process_new_state(); }; // return error _and_ `comms` that can be reused @@ -1064,13 +1074,12 @@ pub(crate) mod tests { use sc_client_api::{Backend as BackendT, HeaderBackend}; use sc_network_sync::SyncingService; use sc_network_test::TestNetFactory; - use sp_api::HeaderT; use sp_blockchain::Backend as BlockchainBackendT; use sp_consensus_beefy::{ generate_equivocation_proof, known_payloads, known_payloads::MMR_ROOT_ID, mmr::MmrRootProvider, Keyring, Payload, SignedCommitment, }; - use sp_runtime::traits::One; + use sp_runtime::traits::{Header as HeaderT, One}; use substrate_test_runtime_client::{ runtime::{Block, Digest, DigestItem, Header}, Backend, @@ -1136,12 +1145,16 @@ pub(crate) mod tests { let api = Arc::new(TestApi::with_validator_set(&genesis_validator_set)); let network = peer.network_service().clone(); let sync = peer.sync_service().clone(); + let notification_service = peer + .take_notification_service(&crate::tests::beefy_gossip_proto_name()) + .unwrap(); let known_peers = Arc::new(Mutex::new(KnownPeers::new())); let (gossip_validator, gossip_report_stream) = GossipValidator::new(known_peers.clone()); let gossip_validator = Arc::new(gossip_validator); let gossip_engine = GossipEngine::new( network.clone(), sync.clone(), + notification_service, "/beefy/1", gossip_validator.clone(), None, diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index f269e3752d435488e9c843260454adbccd325b7b..ba0147b895245bce8f1d55919ec4de203e977e3b 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -9,14 +9,17 @@ repository.workspace = true description = "Collection of common consensus specific imlementations for Substrate (client)" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" -libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"] } +libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = "0.4.17" mockall = "0.11.3" parking_lot = "0.12.1" diff --git a/substrate/client/consensus/epochs/Cargo.toml b/substrate/client/consensus/epochs/Cargo.toml index 07de83980bcf766b55b97a9c8826b26c0978e839..76e4c05a67344c7052cf673127f6e02139402d97 100644 --- 
a/substrate/client/consensus/epochs/Cargo.toml +++ b/substrate/client/consensus/epochs/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 85f98e7546e038fffa55802e8355105a93b1f64a..2adedb5b3b5514e26c982e670e550ea23db5d46e 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -10,13 +10,16 @@ description = "Integration of the GRANDPA finality gadget into substrate." documentation = "https://docs.rs/sc-consensus-grandpa" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.8.2" array-bytes = "6.1" -async-trait = "0.1.57" +async-trait = "0.1.74" dyn-clone = "1.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.21" @@ -53,7 +56,7 @@ sp-runtime = { path = "../../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } -serde = "1.0.188" +serde = "1.0.193" tokio = "1.22.0" sc-network = { path = "../../network" } sc-network-test = { path = "../../network/test" } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index e2f9e40afb2da661cd5003b4755d7ee425b5ee1a..983f7a4339ba2128fe531da4e06f2d7100c2650c 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -9,13 +9,16 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" readme = "README.md" homepage = "https://substrate.io" +[lints] +workspace = true + [dependencies] finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.16" -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } log = "0.4.8" parity-scale-codec = { version = "3.6.1", features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0" sc-client-api = { path = "../../../api" } sc-consensus-grandpa = { path = ".." 
} @@ -26,7 +29,7 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] sc-block-builder = { path = "../../../block-builder" } -sc-rpc = { path = "../../../rpc", features = ["test-helpers"]} +sc-rpc = { path = "../../../rpc", features = ["test-helpers"] } sp-core = { path = "../../../../primitives/core" } sp-consensus-grandpa = { path = "../../../../primitives/consensus/grandpa" } sp-keyring = { path = "../../../../primitives/keyring" } diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs index 6d9e956b41beedf777d994ea6cb21ca8bdd2856c..5c7e1276297a988b3109ae650e121331f57f930d 100644 --- a/substrate/client/consensus/grandpa/src/communication/mod.rs +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -46,7 +46,7 @@ use finality_grandpa::{ Message::{Precommit, Prevote, PrimaryPropose}, }; use parity_scale_codec::{Decode, DecodeAll, Encode}; -use sc_network::{NetworkBlock, NetworkSyncForkRequest, ReputationChange}; +use sc_network::{NetworkBlock, NetworkSyncForkRequest, NotificationService, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_keystore::KeystorePtr; @@ -247,6 +247,7 @@ impl, S: Syncing> NetworkBridge { pub(crate) fn new( service: N, sync: S, + notification_service: Box, config: crate::Config, set_state: crate::environment::SharedVoterSetState, prometheus_registry: Option<&Registry>, @@ -260,6 +261,7 @@ impl, S: Syncing> NetworkBridge { let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( service.clone(), sync.clone(), + notification_service, protocol, validator.clone(), prometheus_registry, diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs index 4a869d0f51520387a75ddb7be58c92b64e6b02f1..fe24fb3cb20ea055e8ebfe385a974f57af5e4396 100644 --- a/substrate/client/consensus/grandpa/src/communication/tests.rs +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -24,16 +24,17 @@ use super::{ }; use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState}; use futures::prelude::*; -use parity_scale_codec::Encode; +use parity_scale_codec::{DecodeAll, Encode}; use sc_network::{ config::{MultiaddrWithPeerId, Role}, event::Event as NetworkEvent, + service::traits::{Direction, MessageSink, NotificationEvent, NotificationService}, types::ProtocolName, Multiaddr, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT as NotificationSender, PeerId, ReputationChange, }; -use sc_network_common::role::ObservedRole; +use sc_network_common::role::{ObservedRole, Roles}; use sc_network_gossip::Validator; use sc_network_sync::{SyncEvent as SyncStreamEvent, SyncEventStream}; use sc_network_test::{Block, Hash}; @@ -74,11 +75,15 @@ impl NetworkPeers for TestNetwork { unimplemented!(); } - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); + fn report_peer(&self, peer_id: PeerId, cost_benefit: ReputationChange) { + let _ = self.sender.unbounded_send(Event::Report(peer_id, cost_benefit)); } - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) {} + fn peer_reputation(&self, _peer_id: &PeerId) -> i32 { + unimplemented!() + } + + fn disconnect_peer(&self, 
_peer_id: PeerId, _protocol: ProtocolName) {} fn accept_unreserved_peers(&self) { unimplemented!(); @@ -123,6 +128,12 @@ impl NetworkPeers for TestNetwork { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, handshake: Vec) -> Option { + Roles::decode_all(&mut &handshake[..]) + .ok() + .and_then(|role| Some(ObservedRole::from(role))) + } } impl NetworkEventStream for TestNetwork { @@ -211,10 +222,70 @@ impl NetworkSyncForkRequest> for TestSync { fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} } +#[derive(Debug)] +pub(crate) struct TestNotificationService { + sender: TracingUnboundedSender, + rx: TracingUnboundedReceiver, +} + +#[async_trait::async_trait] +impl NotificationService for TestNotificationService { + /// Instruct `Notifications` to open a new substream for `peer`. + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Instruct `Notifications` to close substream for `peer`. + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, peer: &PeerId, notification: Vec) { + let _ = self.sender.unbounded_send(Event::WriteNotification(*peer, notification)); + } + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + async fn send_async_notification( + &self, + _peer: &PeerId, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + fn try_set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + /// Get next event from the `Notifications` event stream. + async fn next_event(&mut self) -> Option { + self.rx.next().await + } + + fn clone(&mut self) -> Result, ()> { + unimplemented!(); + } + + fn protocol(&self) -> &ProtocolName { + unimplemented!(); + } + + fn message_sink(&self, _peer: &PeerId) -> Option> { + unimplemented!(); + } +} + pub(crate) struct Tester { pub(crate) net_handle: super::NetworkBridge, gossip_validator: Arc>, pub(crate) events: TracingUnboundedReceiver, + pub(crate) notification_tx: TracingUnboundedSender, } impl Tester { @@ -279,6 +350,9 @@ fn voter_set_state() -> SharedVoterSetState { // needs to run in a tokio runtime. 
pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { let (tx, rx) = tracing_unbounded("test", 100_000); + let (notification_tx, notification_rx) = tracing_unbounded("test-notification", 100_000); + + let notification_service = TestNotificationService { rx: notification_rx, sender: tx.clone() }; let net = TestNetwork { sender: tx }; let sync = TestSync {}; @@ -293,14 +367,22 @@ pub(crate) fn make_test_network() -> (impl Future, TestNetwork) } } - let bridge = - super::NetworkBridge::new(net.clone(), sync, config(), voter_set_state(), None, None); + let bridge = super::NetworkBridge::new( + net.clone(), + sync, + Box::new(notification_service), + config(), + voter_set_state(), + None, + None, + ); ( futures::future::ready(Tester { gossip_validator: bridge.validator.clone(), net_handle: bridge, events: rx, + notification_tx, }), net, ) @@ -385,63 +467,62 @@ fn good_commit_leads_to_relay() { let commit_to_send = encoded_commit.clone(); let network_bridge = tester.net_handle.clone(); - // asking for global communication will cause the test network - // to send us an event asking us for a stream. use it to - // send a message. + // `NetworkBridge` will be operational as soon as it's created and it's + // waiting for events from the network. Send it events that inform that + // a notification stream was opened and that a notification was received. + // + // Since each protocol has its own notification stream, events need not be filtered. let sender_id = id; - let send_message = tester.filter_network_events(move |event| match event { - Event::EventStream(sender) => { - // Add the sending peer and send the commit - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id, - protocol: grandpa_protocol_name::NAME.into(), + + let send_message = async move { + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationStreamOpened { + peer: sender_id, + direction: Direction::Inbound, negotiated_fallback: None, - role: ObservedRole::Full, - received_handshake: vec![], + handshake: Roles::FULL.encode(), + }, + ); + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationReceived { + peer: sender_id, + notification: commit_to_send.clone(), + }, + ); + + // Add a random peer which will be the recipient of this message + let receiver_id = PeerId::random(); + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationStreamOpened { + peer: receiver_id, + direction: Direction::Inbound, + negotiated_fallback: None, + handshake: Roles::FULL.encode(), + }, + ); + + // Announce its local set being on the current set id through a neighbor + // packet, otherwise it won't be eligible to receive the commit + let _ = { + let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 1, }); - let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id, - messages: vec![( - grandpa_protocol_name::NAME.into(), - commit_to_send.clone().into(), - )], - }); + let msg = gossip::GossipMessage::::Neighbor(update); - // Add a random peer which will be the recipient of this message - let receiver_id = PeerId::random(); - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: receiver_id, - protocol: grandpa_protocol_name::NAME.into(), - negotiated_fallback: None, - role: ObservedRole::Full, - received_handshake: vec![], - }); + let _ = tester.notification_tx.unbounded_send( + 
NotificationEvent::NotificationReceived { + peer: receiver_id, + notification: msg.encode(), + }, + ); + }; - // Announce its local set has being on the current set id through a neighbor - // packet, otherwise it won't be eligible to receive the commit - let _ = { - let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket { - round: Round(round), - set_id: SetId(set_id), - commit_finalized_height: 1, - }); - - let msg = gossip::GossipMessage::::Neighbor(update); - - sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: receiver_id, - messages: vec![( - grandpa_protocol_name::NAME.into(), - msg.encode().into(), - )], - }) - }; - - true - }, - _ => false, - }); + tester + } + .boxed(); // when the commit comes in, we'll tell the callback it was good. let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { @@ -537,31 +618,32 @@ fn bad_commit_leads_to_report() { let commit_to_send = encoded_commit.clone(); let network_bridge = tester.net_handle.clone(); - // asking for global communication will cause the test network - // to send us an event asking us for a stream. use it to - // send a message. + // `NetworkBridge` will be operational as soon as it's created and it's + // waiting for events from the network. Send it events that inform that + // a notification stream was opened and that a notification was received. + // + // Since each protocol has its own notification stream, events need not be filtered. let sender_id = id; - let send_message = tester.filter_network_events(move |event| match event { - Event::EventStream(sender) => { - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id, - protocol: grandpa_protocol_name::NAME.into(), + + let send_message = async move { + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationStreamOpened { + peer: sender_id, + direction: Direction::Inbound, negotiated_fallback: None, - role: ObservedRole::Full, - received_handshake: vec![], - }); - let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id, - messages: vec![( - grandpa_protocol_name::NAME.into(), - commit_to_send.clone().into(), - )], - }); + handshake: Roles::FULL.encode(), + }, + ); + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationReceived { + peer: sender_id, + notification: commit_to_send.clone(), + }, + ); - true - }, - _ => false, - }); + tester + } + .boxed(); // when the commit comes in, we'll tell the callback it was bad. 
let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs index da621abd254caf90abf5dfab0511d5e868218168..b7cfc9f5b6019a5cea789dfcf385d7efc4e196e7 100644 --- a/substrate/client/consensus/grandpa/src/lib.rs +++ b/substrate/client/consensus/grandpa/src/lib.rs @@ -67,7 +67,7 @@ use sc_client_api::{ BlockchainEvents, CallExecutor, ExecutorProvider, Finalizer, LockImportRun, StorageProvider, }; use sc_consensus::BlockImport; -use sc_network::types::ProtocolName; +use sc_network::{types::ProtocolName, NotificationService}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; @@ -471,9 +471,6 @@ where Client: ExecutorProvider + HeaderBackend, { fn get(&self) -> Result { - // This implementation uses the Grandpa runtime API instead of reading directly from the - // `GRANDPA_AUTHORITIES_KEY` as the data may have been migrated since the genesis block of - // the chain, whereas the runtime API is backwards compatible. self.executor() .call( self.expect_block_hash_from_id(&BlockId::Number(Zero::zero()))?, @@ -690,6 +687,8 @@ pub struct GrandpaParams { pub network: N, /// Event stream for syncing-related events. pub sync: S, + /// Handle for interacting with `Notifications`. + pub notification_service: Box, /// A voting rule used to potentially restrict target votes. pub voting_rule: VR, /// The prometheus metrics registry. @@ -710,21 +709,21 @@ pub struct GrandpaParams { /// For standard protocol name see [`crate::protocol_standard_name`]. pub fn grandpa_peers_set_config( protocol_name: ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { +) -> (sc_network::config::NonDefaultSetConfig, Box) { use communication::grandpa_protocol_name; - sc_network::config::NonDefaultSetConfig { - notifications_protocol: protocol_name, - fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), + sc_network::config::NonDefaultSetConfig::new( + protocol_name, + grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. - max_notification_size: 1024 * 1024, - handshake: None, - set_config: sc_network::config::SetConfig { + 1024 * 1024, + None, + sc_network::config::SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, }, - } + ) } /// Run a GRANDPA voter as a task. 
Provide configuration and a link to a @@ -747,6 +746,7 @@ where link, network, sync, + notification_service, voting_rule, prometheus_registry, shared_voter_state, @@ -773,6 +773,7 @@ where let network = NetworkBridge::new( network, sync, + notification_service, config.clone(), persistent_data.set_state.clone(), prometheus_registry.as_ref(), diff --git a/substrate/client/consensus/grandpa/src/observer.rs b/substrate/client/consensus/grandpa/src/observer.rs index 8541baa822bb44aeac0613f6870884f904cbca2d..608ff5e46a0e825a6b7480a29258574f121e4aa2 100644 --- a/substrate/client/consensus/grandpa/src/observer.rs +++ b/substrate/client/consensus/grandpa/src/observer.rs @@ -28,6 +28,7 @@ use futures::prelude::*; use log::{debug, info, warn}; use sc_client_api::backend::Backend; +use sc_network::NotificationService; use sc_telemetry::TelemetryHandle; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; @@ -168,6 +169,7 @@ pub fn run_grandpa_observer( link: LinkHalf, network: N, sync: S, + notification_service: Box, ) -> sp_blockchain::Result + Send> where BE: Backend + Unpin + 'static, @@ -189,6 +191,7 @@ where let network = NetworkBridge::new( network, sync, + notification_service, config.clone(), persistent_data.set_state.clone(), None, @@ -414,14 +417,14 @@ mod tests { use futures::executor; - /// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`. Regression - /// test for bug introduced in d4fbb897c and fixed in b7af8b339. + /// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`. + /// Regression test for bug introduced in d4fbb897c and fixed in b7af8b339. /// - /// When polled, `NetworkBridge` forwards reputation change requests from the `GossipValidator` - /// to the underlying `dyn Network`. This test triggers a reputation change by calling - /// `GossipValidator::validate` with an invalid gossip message. After polling the `ObserverWork` - /// which should poll the `NetworkBridge`, the reputation change should be forwarded to the test - /// network. + /// When polled, `NetworkBridge` forwards reputation change requests from the + /// `GossipValidator` to the underlying `dyn Network`. This test triggers a reputation change + /// by calling `GossipValidator::validate` with an invalid gossip message. After polling the + /// `ObserverWork` which should poll the `NetworkBridge`, the reputation change should be + /// forwarded to the test network. #[test] fn observer_work_polls_underlying_network_bridge() { // Create a test network. @@ -463,12 +466,6 @@ mod tests { // validator to the test network. assert!(observer.now_or_never().is_none()); - // Ignore initial event stream request by gossip engine. 
- match tester.events.next().now_or_never() { - Some(Some(Event::EventStream(_))) => {}, - _ => panic!("expected event stream request"), - }; - assert_matches!(tester.events.next().now_or_never(), Some(Some(Event::Report(_, _)))); }); } diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 644befe98853e260ffa5f31ebad5bb839d312248..7e42c2d45c733b8cbc32a599680202399fadaed6 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -317,6 +317,9 @@ fn initialize_grandpa( (net.peers[peer_id].network_service().clone(), link) }; let sync = net.peers[peer_id].sync_service().clone(); + let notification_service = net.peers[peer_id] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); let grandpa_params = GrandpaParams { config: Config { @@ -332,6 +335,7 @@ fn initialize_grandpa( link, network: net_service, sync, + notification_service, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -472,6 +476,9 @@ async fn finalize_3_voters_1_full_observer() { let net_service = net.peers[peer_id].network_service().clone(); let sync = net.peers[peer_id].sync_service().clone(); let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + let notification_service = net.peers[peer_id] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); let grandpa_params = GrandpaParams { config: Config { @@ -487,6 +494,7 @@ async fn finalize_3_voters_1_full_observer() { link, network: net_service, sync, + notification_service, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -557,14 +565,17 @@ async fn transition_3_voters_twice_1_full_observer() { for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { let keystore = create_keystore(local_key); - let (net_service, link, sync) = { - let net = net.lock(); + let (net_service, link, sync, notification_service) = { + let mut net = net.lock(); let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); ( net.peers[peer_id].network_service().clone(), link, net.peers[peer_id].sync_service().clone(), + net.peers[peer_id] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(), ) }; @@ -582,6 +593,7 @@ async fn transition_3_voters_twice_1_full_observer() { link, network: net_service, sync, + notification_service, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1025,6 +1037,9 @@ async fn voter_persists_its_votes() { communication::NetworkBridge::new( net.peers[1].network_service().clone(), net.peers[1].sync_service().clone(), + net.peers[1] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(), config.clone(), set_state, None, @@ -1043,6 +1058,9 @@ async fn voter_persists_its_votes() { (net.peers[0].network_service().clone(), link) }; let sync = net.peers[0].sync_service().clone(); + let notification_service = net.peers[0] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); let grandpa_params = GrandpaParams { config: Config { @@ -1058,6 +1076,7 @@ async fn voter_persists_its_votes() { link, network: net_service, sync, + notification_service, voting_rule: VotingRulesBuilder::default().build(), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1082,6 +1101,9 @@ async fn voter_persists_its_votes() { 
net.add_authority_peer(); let net_service = net.peers[2].network_service().clone(); let sync = net.peers[2].sync_service().clone(); + let notification_service = net.peers[2] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); // but we'll reuse the client from the first peer (alice_voter1) // since we want to share the same database, so that we can // read the persisted state after aborting alice_voter1. @@ -1104,6 +1126,7 @@ async fn voter_persists_its_votes() { link, network: net_service, sync, + notification_service, voting_rule: VotingRulesBuilder::default().build(), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1255,6 +1278,9 @@ async fn finalize_3_voters_1_light_observer() { let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1); let voters = initialize_grandpa(&mut net, authorities); + let notification_service = net.peers[3] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); let observer = observer::run_grandpa_observer( Config { gossip_duration: TEST_GOSSIP_DURATION, @@ -1269,6 +1295,7 @@ async fn finalize_3_voters_1_light_observer() { net.peers[3].data.lock().take().expect("link initialized at startup; qed"), net.peers[3].network_service().clone(), net.peers[3].sync_service().clone(), + notification_service, ) .unwrap(); net.peer(0).push_blocks(20, false); @@ -1317,6 +1344,10 @@ async fn voter_catches_up_to_latest_round_when_behind() { link, network: net.peer(peer_id).network_service().clone(), sync: net.peer(peer_id).sync_service().clone(), + notification_service: net + .peer(peer_id) + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(), voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1409,6 +1440,7 @@ fn test_environment_with_select_chain( keystore: Option, network_service: N, sync_service: S, + notification_service: Box, select_chain: SC, voting_rule: VR, ) -> TestEnvironment @@ -1433,6 +1465,7 @@ where let network = NetworkBridge::new( network_service.clone(), sync_service, + notification_service, config.clone(), set_state.clone(), None, @@ -1462,6 +1495,7 @@ fn test_environment( keystore: Option, network_service: N, sync_service: S, + notification_service: Box, voting_rule: VR, ) -> TestEnvironment, VR> where @@ -1474,6 +1508,7 @@ where keystore, network_service, sync_service, + notification_service, link.select_chain.clone(), voting_rule, ) @@ -1490,14 +1525,22 @@ async fn grandpa_environment_respects_voting_rules() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let mut notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); // add 21 blocks let hashes = peer.push_blocks(21, false); // create an environment with no voting rule restrictions - let unrestricted_env = - test_environment(&link, None, network_service.clone(), sync_service.clone(), ()); + let unrestricted_env = test_environment( + &link, + None, + network_service.clone(), + sync_service.clone(), + notification_service.clone().unwrap(), + (), + ); // another with 3/4 unfinalized chain voting rule restriction let three_quarters_env = test_environment( @@ -1505,6 +1548,7 @@ async fn grandpa_environment_respects_voting_rules() { None, network_service.clone(), sync_service.clone(), + notification_service.clone().unwrap(), voting_rule::ThreeQuartersOfTheUnfinalizedChain, ); @@ -1515,6 
+1559,7 @@ async fn grandpa_environment_respects_voting_rules() { None, network_service.clone(), sync_service, + notification_service, VotingRulesBuilder::default().build(), ); @@ -1608,6 +1653,8 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); let client = peer.client().as_client().clone(); let select_chain = MockSelectChain::default(); @@ -1622,6 +1669,7 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() { None, network_service.clone(), sync_service, + notification_service, select_chain.clone(), voting_rule::BeforeBestBlockBy(5), ); @@ -1669,6 +1717,8 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); let client = peer.client().as_client().clone(); let select_chain = MockSelectChain::default(); @@ -1678,6 +1728,7 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ None, network_service.clone(), sync_service.clone(), + notification_service, select_chain.clone(), voting_rule.clone(), ); @@ -1780,11 +1831,19 @@ async fn grandpa_environment_never_overwrites_round_voter_state() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); let keystore = create_keystore(peers[0]); - let environment = - test_environment(&link, Some(keystore), network_service.clone(), sync_service, ()); + let environment = test_environment( + &link, + Some(keystore), + network_service.clone(), + sync_service, + notification_service, + (), + ); let round_state = || finality_grandpa::round::State::genesis(Default::default()); let base = || Default::default(); @@ -2012,9 +2071,18 @@ async fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); let keystore = create_keystore(alice); - test_environment(&link, Some(keystore), network_service.clone(), sync_service, ()) + test_environment( + &link, + Some(keystore), + network_service.clone(), + sync_service, + notification_service, + (), + ) }; let signed_prevote = { diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index a6430fdf1deece639a443e78ee8c08d627f8e6c3..77cd88dfc194a5d804e300d3bf6a3cc75a3c2c8f 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -9,13 +9,16 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee 
= { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } assert_matches = "1.3.0" -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" futures-timer = "3.0.1" @@ -43,7 +46,7 @@ sp-runtime = { path = "../../../primitives/runtime" } sp-timestamp = { path = "../../../primitives/timestamp" } [dev-dependencies] -tokio = { version = "1.22.0", features = ["rt-multi-thread", "macros"] } +tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } sc-basic-authorship = { path = "../../basic-authorship" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool" } diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index ef32425685b6f127fc67d01ec0acfc5598501897..7077fb84babec0e0a5febd6abac2a0a560e958d9 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -9,11 +9,14 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.1" diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml index 52c528c3028a80edbba90e13a48d22a24fd45d38..801558a276a57a471520515671a5eaebb7e44db5 100644 --- a/substrate/client/consensus/slots/Cargo.toml +++ b/substrate/client/consensus/slots/Cargo.toml @@ -10,11 +10,14 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" futures-timer = "3.0.1" diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index cb9560b6cb62fbd9cab7d335eee1a0a352d69ef2..e833b90b3edeb2b2fd7da2e090aa9183e87ae8a1 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Client backend that uses RocksDB database as storage." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -22,7 +25,7 @@ kvdb-memorydb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" -parity-db = "0.4.8" +parity-db = "0.4.12" parking_lot = "0.12.1" sc-client-api = { path = "../api" } sc-state-db = { path = "../state-db" } @@ -53,7 +56,7 @@ runtime-benchmarks = [ "kitchensink-runtime/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -rocksdb = [ "kvdb-rocksdb" ] +rocksdb = ["kvdb-rocksdb"] [[bench]] name = "state_access" diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index 9f41b74237374334e079970f25e3e14526636c35..aa8e8c9abf295cabcab686d7d2dbfd5e78a41157 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -10,6 +10,9 @@ description = "A crate that provides means of executing/dispatching calls into t documentation = "https://docs.rs/sc-executor" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -55,7 +58,7 @@ name = "bench" harness = false [features] -default = [ "std" ] +default = ["std"] # This crate does not have `no_std` support, we just require this for tests std = [ "sc-runtime-test/std", diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml index 5118279b43b44ad75df3b8882592640c5ec9bd11..b3db6a86a2030ee47c8d241805ee183b7d953af5 100644 --- a/substrate/client/executor/common/Cargo.toml +++ b/substrate/client/executor/common/Cargo.toml @@ -10,6 +10,9 @@ description = "A set of common definitions that are needed for defining executio documentation = "https://docs.rs/sc-executor-common/" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/executor/runtime-test/Cargo.toml b/substrate/client/executor/runtime-test/Cargo.toml index 046e59c08e0251890e83ea818e90c28e652e8f70..82610c4f50c2841fea13c1f859cc242f8ae427c7 100644 --- a/substrate/client/executor/runtime-test/Cargo.toml +++ b/substrate/client/executor/runtime-test/Cargo.toml @@ -9,21 +9,24 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false, features = ["improved_panic_error_reporting"]} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false, features = ["improved_panic_error_reporting"] } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "sp-core/std", "sp-io/std", diff --git 
a/substrate/client/executor/src/executor.rs b/substrate/client/executor/src/executor.rs index 7c292a83da0895e670e203edbad24cf592164446..499bb704b16990de49d2b3311c48fa8ee2c1813e 100644 --- a/substrate/client/executor/src/executor.rs +++ b/substrate/client/executor/src/executor.rs @@ -492,7 +492,6 @@ where runtime_code: &RuntimeCode, method: &str, data: &[u8], - _use_native: bool, context: CallContext, ) -> (Result>, bool) { tracing::trace!( @@ -565,6 +564,8 @@ pub struct NativeElseWasmExecutor { /// Fallback wasm executor. wasm: WasmExecutor>, + + use_native: bool, } impl NativeElseWasmExecutor { @@ -601,7 +602,7 @@ impl NativeElseWasmExecutor { .with_runtime_cache_size(runtime_cache_size) .build(); - NativeElseWasmExecutor { native_version: D::native_version(), wasm } + NativeElseWasmExecutor { native_version: D::native_version(), wasm, use_native: true } } /// Create a new instance using the given [`WasmExecutor`]. @@ -610,7 +611,14 @@ impl NativeElseWasmExecutor { ExtendedHostFunctions, >, ) -> Self { - Self { native_version: D::native_version(), wasm: executor } + Self { native_version: D::native_version(), wasm: executor, use_native: true } + } + + /// Disable the use of the native runtime, making this executor behave just like `WasmExecutor`. + /// + /// The native runtime is enabled by default. + pub fn disable_use_native(&mut self) { + self.use_native = false; + } /// Ignore missing function imports if set true. @@ -645,9 +653,10 @@ impl CodeExecutor for NativeElseWasmExecut runtime_code: &RuntimeCode, method: &str, data: &[u8], - use_native: bool, context: CallContext, ) -> (Result>, bool) { + let use_native = self.use_native; + tracing::trace!( target: "executor", function = %method, @@ -711,7 +720,11 @@ impl CodeExecutor for NativeElseWasmExecut impl Clone for NativeElseWasmExecutor { fn clone(&self) -> Self { - NativeElseWasmExecutor { native_version: D::native_version(), wasm: self.wasm.clone() } + NativeElseWasmExecutor { + native_version: D::native_version(), + wasm: self.wasm.clone(), + use_native: self.use_native, + } } } diff --git a/substrate/client/executor/src/lib.rs b/substrate/client/executor/src/lib.rs index 6ee0ab3512ac0071d51b08a7071d1aaeac53fd2a..25bad81938f383e66eb1e63fb1c8ddcbea3a387f 100644 --- a/substrate/client/executor/src/lib.rs +++ b/substrate/client/executor/src/lib.rs @@ -58,7 +58,7 @@ pub use sc_executor_wasmtime::InstantiationStrategy as WasmtimeInstantiationStra /// Extracts the runtime version of a given runtime code. pub trait RuntimeVersionOf { - /// Extract [`RuntimeVersion`](sp_version::RuntimeVersion) of the given `runtime_code`. + /// Extract [`RuntimeVersion`] of the given `runtime_code`.
fn runtime_version( &self, ext: &mut dyn Externalities, diff --git a/substrate/client/executor/src/wasm_runtime.rs b/substrate/client/executor/src/wasm_runtime.rs index 6dec3abdb20cf5ff50ca8cc3c09102dd7f2a6573..501279a312cc09bdaada09b64895a1e6c5a2c87a 100644 --- a/substrate/client/executor/src/wasm_runtime.rs +++ b/substrate/client/executor/src/wasm_runtime.rs @@ -441,6 +441,7 @@ mod tests { use codec::Encode; use sp_api::{Core, RuntimeApiInfo}; use sp_runtime::RuntimeString; + use sp_version::{create_apis_vec, RuntimeVersion}; use sp_wasm_interface::HostFunctions; use substrate_test_runtime::Block; @@ -470,7 +471,7 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(>::ID, 1)]), + apis: create_apis_vec!([(>::ID, 1)]), }; let version = decode_version(&old_runtime_version.encode()).unwrap(); @@ -486,7 +487,7 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(>::ID, 3)]), + apis: create_apis_vec!([(>::ID, 3)]), }; decode_version(&old_runtime_version.encode()).unwrap_err(); @@ -494,13 +495,13 @@ mod tests { #[test] fn new_runtime_version_decodes() { - let old_runtime_version = sp_api::RuntimeVersion { + let old_runtime_version = RuntimeVersion { spec_name: "test".into(), impl_name: "test".into(), authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(>::ID, 3)]), + apis: create_apis_vec!([(>::ID, 3)]), transaction_version: 3, state_version: 4, }; @@ -509,13 +510,13 @@ mod tests { assert_eq!(3, version.transaction_version); assert_eq!(0, version.state_version); - let old_runtime_version = sp_api::RuntimeVersion { + let old_runtime_version = RuntimeVersion { spec_name: "test".into(), impl_name: "test".into(), authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(>::ID, 4)]), + apis: create_apis_vec!([(>::ID, 4)]), transaction_version: 3, state_version: 4, }; @@ -538,7 +539,7 @@ mod tests { authoring_version: 100, spec_version: 100, impl_version: 100, - apis: sp_api::create_apis_vec!([(>::ID, 4)]), + apis: create_apis_vec!([(>::ID, 4)]), transaction_version: 100, state_version: 1, }; diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index 261d52c0ede3292292cff4f47b1196f7ea55d8ff..f8df23a026e5643129d8f373a316d60113f1a335 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Defines a `WasmRuntime` that uses the Wasmtime JIT to execute." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -25,7 +28,7 @@ wasmtime = { version = "8.0.1", default-features = false, features = [ "cranelift", "jitdump", "parallel-compilation", - "pooling-allocator" + "pooling-allocator", ] } anyhow = "1.0.68" sc-allocator = { path = "../../allocator" } @@ -39,7 +42,7 @@ sp-wasm-interface = { path = "../../../primitives/wasm-interface", features = [" # By default rustix directly calls the appropriate syscalls completely bypassing libc; # this doesn't have any actual benefits for us besides making it harder to debug memory # problems (since then `mmap` etc. cannot be easily hooked into). 
-rustix = { version = "0.36.7", default-features = false, features = ["std", "mm", "fs", "param", "use-libc"] } +rustix = { version = "0.36.7", default-features = false, features = ["fs", "mm", "param", "std", "use-libc"] } [dev-dependencies] wat = "1.0" diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index e185754b07696fde80f61787c659c869ff112842..1c06da1e3c142f58d4fa3ec64cf87577190218f8 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -384,7 +384,9 @@ fn test_max_memory_pages( ) (i32.const -1) ) - (unreachable) + (then + (unreachable) + ) ) (i64.const 0) @@ -421,7 +423,9 @@ fn test_max_memory_pages( ) (i32.const -1) ) - (unreachable) + (then + (unreachable) + ) ) (i64.const 0) diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml index 47e65df3cc1159f2c2e68714f6db9cb8736d3eb8..8373e5a54c1b3a689ecca8de9090889db07f6c9c 100644 --- a/substrate/client/informant/Cargo.toml +++ b/substrate/client/informant/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/keystore/Cargo.toml b/substrate/client/keystore/Cargo.toml index 3fd88ae8b87ed3a1d99e6d0a6c08c1c48dc0159b..7671aac0bd763188d7e175a9be4a84d3464415a4 100644 --- a/substrate/client/keystore/Cargo.toml +++ b/substrate/client/keystore/Cargo.toml @@ -10,6 +10,9 @@ description = "Keystore (and session key management) for ed25519 based chains li documentation = "https://docs.rs/sc-keystore" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/keystore/src/local.rs b/substrate/client/keystore/src/local.rs index 8089dbba035297f3cfe2f6309ac10b124cfa5c4d..3b29f435e2a942ffe96dd02a8009af4d33693995 100644 --- a/substrate/client/keystore/src/local.rs +++ b/substrate/client/keystore/src/local.rs @@ -120,18 +120,18 @@ impl LocalKeystore { Ok(sig) } - fn vrf_output( + fn vrf_pre_output( &self, key_type: KeyTypeId, public: &T::Public, input: &T::VrfInput, - ) -> std::result::Result, TraitError> { - let preout = self + ) -> std::result::Result, TraitError> { + let pre_output = self .0 .read() .key_pair_by_type::(public, key_type)? 
- .map(|pair| pair.vrf_output(input)); - Ok(preout) + .map(|pair| pair.vrf_pre_output(input)); + Ok(pre_output) } } @@ -188,13 +188,13 @@ impl Keystore for LocalKeystore { self.vrf_sign::(key_type, public, data) } - fn sr25519_vrf_output( + fn sr25519_vrf_pre_output( &self, key_type: KeyTypeId, public: &sr25519::Public, input: &sr25519::vrf::VrfInput, - ) -> std::result::Result, TraitError> { - self.vrf_output::(key_type, public, input) + ) -> std::result::Result, TraitError> { + self.vrf_pre_output::(key_type, public, input) } fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { @@ -293,13 +293,13 @@ impl Keystore for LocalKeystore { self.vrf_sign::(key_type, public, data) } - fn bandersnatch_vrf_output( + fn bandersnatch_vrf_pre_output( &self, key_type: KeyTypeId, public: &bandersnatch::Public, input: &bandersnatch::vrf::VrfInput, - ) -> std::result::Result, TraitError> { - self.vrf_output::(key_type, public, input) + ) -> std::result::Result, TraitError> { + self.vrf_pre_output::(key_type, public, input) } fn bandersnatch_ring_vrf_sign( diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml index ae60fd1ce8968befae4105206e65875cb173660f..f6dbaf86c51562377c6528b3456b65abb2547fee 100644 --- a/substrate/client/merkle-mountain-range/Cargo.toml +++ b/substrate/client/merkle-mountain-range/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true description = "MMR Client gadget for substrate" homepage = "https://substrate.io" +[lints] +workspace = true + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index e75c5f1baa86f641bdcf839563c5a995d19adb35..d4ee0b4852256e2a2c67304d706879390258fe22 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -8,13 +8,16 @@ homepage = "https://substrate.io" repository.workspace = true description = "Node-specific RPC methods for interaction with Merkle Mountain Range pallet." 
+[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -serde = { version = "1.0.188", features = ["derive"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } +serde = { version = "1.0.193", features = ["derive"] } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core" } diff --git a/substrate/client/merkle-mountain-range/rpc/src/lib.rs b/substrate/client/merkle-mountain-range/rpc/src/lib.rs index 5be82b600d91425589df1b0e9638067f1c636b25..1653749ffab9411a00f58b1ec097e4eef0a43d3d 100644 --- a/substrate/client/merkle-mountain-range/rpc/src/lib.rs +++ b/substrate/client/merkle-mountain-range/rpc/src/lib.rs @@ -30,14 +30,14 @@ use jsonrpsee::{ }; use serde::{Deserialize, Serialize}; -use sp_api::{ApiExt, NumberFor, ProvideRuntimeApi}; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::{ offchain::{storage::OffchainDb, OffchainDbExt, OffchainStorage}, Bytes, }; use sp_mmr_primitives::{Error as MmrError, Proof}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 86c5a37754afb8c7de3e2cae539e51d47ee57282..e8543b5bdf2cfb03a156255be29249e5811a22c4 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,6 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "4.1" arrayvec = "0.7.2" blake2 = "0.10.4" +bytes = "1" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } futures = "0.3.25" futures-timer = "3.0.2" diff --git a/substrate/client/mixnet/src/packet_dispatcher.rs b/substrate/client/mixnet/src/packet_dispatcher.rs index 856208ecb34260f5cf03c183cf68683dec6710d0..420e0c68847d86cb3cb76db3908994da44c09cb6 100644 --- a/substrate/client/mixnet/src/packet_dispatcher.rs +++ b/substrate/client/mixnet/src/packet_dispatcher.rs @@ -24,7 +24,7 @@ use libp2p_identity::PeerId; use log::{debug, warn}; use mixnet::core::{AddressedPacket, NetworkStatus, Packet, PeerId as CorePeerId}; use parking_lot::Mutex; -use sc_network::{NetworkNotification, ProtocolName}; +use sc_network::NotificationService; use std::{collections::HashMap, future::Future, sync::Arc}; const LOG_TARGET: &str = "mixnet"; @@ -77,41 +77,37 @@ pub struct ReadyPeer { } impl ReadyPeer { - /// If a future is returned, and if that future returns `Some`, this function should be called - /// again to send the next packet queued for the peer; `self` is placed in the `Some` to make - /// this straightforward. Otherwise, we have either sent or dropped all packets queued for the - /// peer, and it can be forgotten about for the time being. + /// If a future is returned, and if that future returns `Some`, this function should be + /// called again to send the next packet queued for the peer; `self` is placed in the `Some` + /// to make this straightforward. 
Otherwise, we have either sent or dropped all packets + /// queued for the peer, and it can be forgotten about for the time being. pub fn send_packet( self, - network: &impl NetworkNotification, - protocol_name: ProtocolName, + notification_service: &Box, ) -> Option>> { - match network.notification_sender(self.id, protocol_name) { - Err(err) => { + match notification_service.message_sink(&self.id) { + None => { debug!( target: LOG_TARGET, - "Failed to get notification sender for peer ID {}: {err}", self.id + "Failed to get message sink for peer ID {}", self.id, ); self.queue.clear(); None }, - Ok(sender) => Some(async move { - match sender.ready().await.and_then(|mut ready| { - let (packet, more_packets) = self.queue.pop(); - let packet = - packet.expect("Should only be called if there is a packet to send"); - ready.send((packet as Box<[_]>).into())?; - Ok(more_packets) - }) { + Some(sink) => Some(async move { + let (packet, more_packets) = self.queue.pop(); + let packet = packet.expect("Should only be called if there is a packet to send"); + + match sink.send_async_notification((packet as Box<[_]>).into()).await { + Ok(_) => more_packets.then_some(self), Err(err) => { debug!( target: LOG_TARGET, - "Notification sender for peer ID {} failed: {err}", self.id + "Failed to send packet to peer ID {}: {err}", self.id, ); self.queue.clear(); None }, - Ok(more_packets) => more_packets.then(|| self), } }), } diff --git a/substrate/client/mixnet/src/protocol.rs b/substrate/client/mixnet/src/protocol.rs index 555c267b86e0c5b95d52df355c552448e9c19ce4..955502a4856a9aa8f143baa1cee7323ed005a078 100644 --- a/substrate/client/mixnet/src/protocol.rs +++ b/substrate/client/mixnet/src/protocol.rs @@ -18,7 +18,10 @@ use super::config::Config; use mixnet::core::PACKET_SIZE; -use sc_network::{config::NonDefaultSetConfig, ProtocolName}; +use sc_network::{ + config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig}, + NotificationService, ProtocolName, +}; /// Returns the protocol name to use for the mixnet controlled by the given chain. pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName { @@ -31,12 +34,26 @@ pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName } /// Returns the peers set configuration for the mixnet protocol. 
-pub fn peers_set_config(name: ProtocolName, config: &Config) -> NonDefaultSetConfig { - let mut set_config = NonDefaultSetConfig::new(name, PACKET_SIZE as u64); +pub fn peers_set_config( + name: ProtocolName, + config: &Config, +) -> (NonDefaultSetConfig, Box) { + let (mut set_config, service) = NonDefaultSetConfig::new( + name, + Vec::new(), + PACKET_SIZE as u64, + None, + SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + ); if config.substrate.num_gateway_slots != 0 { // out_peers is always 0; we are only interested in connecting to mixnodes, which we do by // setting them as reserved nodes set_config.allow_non_reserved(config.substrate.num_gateway_slots, 0); } - set_config + (set_config, service) } diff --git a/substrate/client/mixnet/src/run.rs b/substrate/client/mixnet/src/run.rs index 09020469d5eee42782b1f3178f3402f83d5edbce..14d188df097721bc33c54199ce9ca7bd13c38cb2 100644 --- a/substrate/client/mixnet/src/run.rs +++ b/substrate/client/mixnet/src/run.rs @@ -29,11 +29,12 @@ use super::{ request::{extrinsic_delay, Request, SUBMIT_EXTRINSIC}, sync_with_runtime::sync_with_runtime, }; +use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; use futures::{ future::{pending, Either}, stream::FuturesUnordered, - StreamExt, + FutureExt, StreamExt, }; use log::{debug, error, trace, warn}; use mixnet::{ @@ -43,8 +44,8 @@ use mixnet::{ }; use sc_client_api::{BlockchainEvents, HeaderBackend}; use sc_network::{ - Event::{NotificationStreamClosed, NotificationStreamOpened, NotificationsReceived}, - NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo, ProtocolName, + service::traits::{NotificationEvent, ValidationResult}, + NetworkNotification, NetworkPeers, NetworkStateInfo, NotificationService, ProtocolName, }; use sc_transaction_pool_api::{ LocalTransactionPool, OffchainTransactionPoolFactory, TransactionPool, @@ -154,12 +155,13 @@ pub async fn run( protocol_name: ProtocolName, transaction_pool: Arc

::on_chain_storage_version(); - log::info!( - target: LOG_TARGET, - "Running migration storage v1 for uniques with storage version {:?}", - on_chain_storage_version, - ); - - if on_chain_storage_version < 1 { - let mut count = 0; - for (collection, detail) in Collection::::iter() { - CollectionAccount::::insert(&detail.owner, &collection, ()); - count += 1; +use frame_support::traits::{Get, OnRuntimeUpgrade}; +use sp_std::marker::PhantomData; + +mod v1 { + use super::*; + + /// Actual implementation of the storage migration. + pub struct MigrateToV1Impl(PhantomData<(T, I)>); + + impl, I: 'static> OnRuntimeUpgrade for MigrateToV1Impl { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let mut count = 0; + for (collection, detail) in Collection::::iter() { + CollectionAccount::::insert(&detail.owner, &collection, ()); + count += 1; + } + + log::info!( + target: LOG_TARGET, + "Storage migration v1 for uniques finished.", + ); + + // calculate and return migration weights + T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) } - StorageVersion::new(1).put::

(); - log::info!( - target: LOG_TARGET, - "Running migration storage v1 for uniques with storage version {:?} was complete", - on_chain_storage_version, - ); - // calculate and return migration weights - T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) - } else { - log::warn!( - target: LOG_TARGET, - "Attempted to apply migration to v1 but failed because storage version is {:?}", - on_chain_storage_version, - ); - T::DbWeight::get().reads(1) } } + +/// Migrate the pallet storage from `0` to `1`. +pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + v1::MigrateToV1Impl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/uniques/src/mock.rs b/substrate/frame/uniques/src/mock.rs index 1f62c3c4e93bf1d7b9ab5f51315d1536e34ca041..056c19ec55934ac0e27a9f12fd327473e798ac02 100644 --- a/substrate/frame/uniques/src/mock.rs +++ b/substrate/frame/uniques/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_uniques; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; @@ -41,6 +41,7 @@ construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/uniques/src/tests.rs b/substrate/frame/uniques/src/tests.rs index 52f7df3b5efbc29796a77591ac44efd32429796a..351dac09f7f202a62df508fe71d26bb24662d5bc 100644 --- a/substrate/frame/uniques/src/tests.rs +++ b/substrate/frame/uniques/src/tests.rs @@ -254,8 +254,11 @@ fn transfer_owner_should_work() { Uniques::transfer_ownership(RuntimeOrigin::signed(1), 0, 2), Error::::Unaccepted ); + assert_eq!(System::consumers(&2), 0); assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); + assert_eq!(System::consumers(&2), 1); assert_ok!(Uniques::transfer_ownership(RuntimeOrigin::signed(1), 0, 2)); + assert_eq!(System::consumers(&2), 1); assert_eq!(collections(), vec![(2, 0)]); assert_eq!(Balances::total_balance(&1), 98); diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index 8f7a368709b612376dfc807ae5fb0405cfafc776..4aa75f9f616570966375a71f3bd6999e818f5617 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -9,19 +9,22 @@ repository.workspace = true description = "FRAME utilities pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = 
{ path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -31,7 +34,7 @@ pallet-timestamp = { path = "../timestamp" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index 01e3f5264bff0087a69d313188233e261c941ce5..7b42fa511d100f277d046269f9cbca498d9b918c 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -23,7 +23,7 @@ use super::*; use crate as utility; use frame_support::{ - assert_err_ignore_postinfo, assert_noop, assert_ok, + assert_err_ignore_postinfo, assert_noop, assert_ok, derive_impl, dispatch::{DispatchErrorWithPostInfo, Pays}, error::BadOrigin, parameter_types, storage, @@ -144,6 +144,7 @@ parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::MAX); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; type BlockWeights = BlockWeights; diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index ed13a15bc977c0c99a30b4966c94d96588cd67d0..3b5252d61810644e42913bb00dae47db96b61c0a 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet for manage vesting" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,19 +21,19 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/vesting/src/benchmarking.rs b/substrate/frame/vesting/src/benchmarking.rs index 34aa04607add1a9afad621f4c8fd89dc18876436..311590873d95f84f36d25b907f9fcac8c399106c 100644 --- a/substrate/frame/vesting/src/benchmarking.rs +++ 
b/substrate/frame/vesting/src/benchmarking.rs @@ -55,7 +55,7 @@ fn add_vesting_schedules( let source_lookup = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); - System::::set_block_number(BlockNumberFor::::zero()); + T::BlockNumberProvider::set_block_number(BlockNumberFor::::zero()); let mut total_locked: BalanceOf = Zero::zero(); for _ in 0..n { @@ -116,7 +116,7 @@ benchmarks! { add_vesting_schedules::(caller_lookup, s)?; // At block 21, everything is unlocked. - System::::set_block_number(21u32.into()); + T::BlockNumberProvider::set_block_number(21u32.into()); assert_eq!( Vesting::::vesting_balance(&caller), Some(BalanceOf::::zero()), @@ -173,7 +173,7 @@ benchmarks! { add_locks::(&other, l as u8); add_vesting_schedules::(other_lookup.clone(), s)?; // At block 21 everything is unlocked. - System::::set_block_number(21u32.into()); + T::BlockNumberProvider::set_block_number(21u32.into()); assert_eq!( Vesting::::vesting_balance(&other), @@ -335,7 +335,7 @@ benchmarks! { let total_transferred = add_vesting_schedules::(caller_lookup, s)?; // Go to about half way through all the schedules duration. (They all start at 1, and have a duration of 20 or 21). - System::::set_block_number(11u32.into()); + T::BlockNumberProvider::set_block_number(11u32.into()); // We expect half the original locked balance (+ any remainder that vests on the last block). let expected_balance = total_transferred / 2u32.into(); assert_eq!( diff --git a/substrate/frame/vesting/src/lib.rs b/substrate/frame/vesting/src/lib.rs index dbad7926a30f56a3ec3d565320ea9ea4b1c77561..4101caded4180b25c9dbdc8ecfda295900ddc0cf 100644 --- a/substrate/frame/vesting/src/lib.rs +++ b/substrate/frame/vesting/src/lib.rs @@ -71,8 +71,8 @@ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_runtime::{ traits::{ - AtLeast32BitUnsigned, Bounded, Convert, MaybeSerializeDeserialize, One, Saturating, - StaticLookup, Zero, + AtLeast32BitUnsigned, BlockNumberProvider, Bounded, Convert, MaybeSerializeDeserialize, + One, Saturating, StaticLookup, Zero, }, DispatchError, RuntimeDebug, }; @@ -176,6 +176,9 @@ pub mod pallet { /// the unvested amount. type UnvestedFundsAllowedWithdrawReasons: Get; + /// Provider for the block number. + type BlockNumberProvider: BlockNumberProvider>; + /// Maximum number of vesting schedules an account may have at a given moment. const MAX_VESTING_SCHEDULES: u32; } @@ -565,7 +568,7 @@ impl Pallet { schedules: Vec, BlockNumberFor>>, action: VestingAction, ) -> (Vec, BlockNumberFor>>, BalanceOf) { - let now = >::block_number(); + let now = T::BlockNumberProvider::current_block_number(); let mut total_locked_now: BalanceOf = Zero::zero(); let filtered_schedules = action @@ -649,7 +652,7 @@ impl Pallet { let (mut schedules, mut locked_now) = Self::report_schedule_updates(schedules.to_vec(), action); - let now = >::block_number(); + let now = T::BlockNumberProvider::current_block_number(); if let Some(new_schedule) = Self::merge_vesting_info(now, schedule1, schedule2) { // Merging created a new schedule so we: // 1) need to add it to the accounts vesting schedule collection, @@ -685,7 +688,7 @@ where /// Get the amount that is currently being vested and cannot be transferred out of this account. 
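Aside on the vesting change above: every `current_block_number()` read (and the benchmarks' `set_block_number` calls) now goes through `Config::BlockNumberProvider`, so a runtime can feed the pallet block numbers from any source. Below is a minimal sketch of a conforming provider that simply forwards to `frame_system`; the type name is illustrative only, and a real runtime can just set `type BlockNumberProvider = System;` as the updated mock does.

```rust
use core::marker::PhantomData;
use sp_runtime::traits::BlockNumberProvider;

/// Illustrative provider that forwards to `frame_system`. Wiring
/// `type BlockNumberProvider = System;` in the runtime achieves the same thing.
pub struct SystemBlockNumberProvider<T>(PhantomData<T>);

impl<T: frame_system::Config> BlockNumberProvider for SystemBlockNumberProvider<T> {
    type BlockNumber = frame_system::pallet_prelude::BlockNumberFor<T>;

    fn current_block_number() -> Self::BlockNumber {
        frame_system::Pallet::<T>::block_number()
    }

    // For benchmarking, `set_block_number` can also be overridden; it is assumed
    // here that the trait's default implementation is sufficient.
}
```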
fn vesting_balance(who: &T::AccountId) -> Option> { if let Some(v) = Self::vesting(who) { - let now = >::block_number(); + let now = T::BlockNumberProvider::current_block_number(); let total_locked_now = v.iter().fold(Zero::zero(), |total, schedule| { schedule.locked_at::(now).saturating_add(total) }); diff --git a/substrate/frame/vesting/src/mock.rs b/substrate/frame/vesting/src/mock.rs index 13d6d5ba57a6fbb9379d21fd27dcddd341b27960..3af4a9c962d1b12c82586fd3e6f015950331f27e 100644 --- a/substrate/frame/vesting/src/mock.rs +++ b/substrate/frame/vesting/src/mock.rs @@ -16,7 +16,7 @@ // limitations under the License. use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, WithdrawReasons}, }; use sp_core::H256; @@ -39,6 +39,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; type AccountId = u64; @@ -95,6 +96,7 @@ impl Config for Test { type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; } pub struct ExtBuilder { diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index c52466153205a73d4e1b69fc413c2e389f5faea0..5d9a362f9aacb799cbbf3bd291c01a68f40bbd9e 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -8,18 +8,21 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for whitelisting call, and dispatch from specific origin" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -28,7 +31,7 @@ sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/whitelist/src/mock.rs b/substrate/frame/whitelist/src/mock.rs index 4e70a503c28065b0cebfae714a7c759a5da9233d..200e589c6aa91a3b2bf2bd269d2abf36e059cc92 100644 --- a/substrate/frame/whitelist/src/mock.rs +++ b/substrate/frame/whitelist/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_whitelist; use frame_support::{ - construct_runtime, + 
construct_runtime, derive_impl, traits::{ConstU32, ConstU64, Nothing}, }; use frame_system::EnsureRoot; @@ -44,6 +44,7 @@ construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Nothing; type BlockWeights = (); diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index c3a68af097b0a46ecf6225306f76cfc9de0c06d5..345647cec25df55c864215f64aaf3e8459d1e058 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -9,30 +9,33 @@ repository.workspace = true description = "Substrate runtime api primitives" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } sp-api-proc-macro = { path = "proc-macro" } -sp-core = { path = "../core", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-externalities = { path = "../externalities", default-features = false, optional = true} -sp-version = { path = "../version", default-features = false} -sp-state-machine = { path = "../state-machine", default-features = false, optional = true} -sp-trie = { path = "../trie", default-features = false, optional = true} +sp-core = { path = "../core", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-externalities = { path = "../externalities", default-features = false, optional = true } +sp-version = { path = "../version", default-features = false } +sp-state-machine = { path = "../state-machine", default-features = false, optional = true } +sp-trie = { path = "../trie", default-features = false, optional = true } hash-db = { version = "0.16.0", optional = true } thiserror = { version = "1.0.48", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true} +sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true } log = { version = "0.4.17", default-features = false } [dev-dependencies] sp-test-primitives = { path = "../test-primitives" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "hash-db", @@ -58,7 +61,7 @@ std = [ # building a runtime for registering it on chain. # # This sets the max logging level to `off` for `log`. -disable-logging = [ "log/max_level_off" ] +disable-logging = ["log/max_level_off"] # Do not report the documentation in the metadata. -no-metadata-docs = [ "sp-api-proc-macro/no-metadata-docs" ] -frame-metadata = [ "sp-api-proc-macro/frame-metadata", "sp-metadata-ir" ] +no-metadata-docs = ["sp-api-proc-macro/no-metadata-docs"] +frame-metadata = ["sp-api-proc-macro/frame-metadata", "sp-metadata-ir"] diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index d60b9f1bb4ea6c49fa167d77f33f88d3bef340ca..ae46b45ccbf2eab5aeeceac9cd6fc3d595ad505c 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Macros for declaring and implementing runtime apis." 
documentation = "https://docs.rs/sp-api-proc-macro" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,10 +20,10 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.41", features = ["extra-traits", "fold", "full", "visit"] } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.1" expander = "2.0.0" Inflector = "0.11.4" @@ -29,7 +32,7 @@ assert_matches = "1.3.0" [features] # Required for the doc tests -default = [ "std" ] -std = [ "blake2/std" ] +default = ["std"] +std = ["blake2/std"] no-metadata-docs = [] frame-metadata = [] diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index 370735819f94c1a4a3660dec6ae006f537f1cda5..2b1e65ec88524a0236a464005fb2d1215c1157fe 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -729,7 +729,7 @@ fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result }; let decl = expander::Expander::new("decl_runtime_apis") - .dry(std::env::var("SP_API_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(decl) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index e439a796e28d451ac47f7ed9858591b02dd50d7f..fd81fdb624c1f33bef6d5b71e759648e75899064 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -275,6 +275,7 @@ fn generate_runtime_api_base_structures() -> Result { extensions_generated_for: std::cell::RefCell>, } + #[automatically_derived] impl> #crate_::ApiExt for RuntimeApiImpl { @@ -367,6 +368,7 @@ fn generate_runtime_api_base_structures() -> Result { } } + #[automatically_derived] impl #crate_::ConstructRuntimeApi for RuntimeApi where @@ -389,6 +391,7 @@ fn generate_runtime_api_base_structures() -> Result { } } + #[automatically_derived] impl> RuntimeApiImpl { fn commit_or_rollback_transaction(&self, commit: bool) { let proof = "\ @@ -685,9 +688,11 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result // remove the trait to get just the module path runtime_mod_path.segments.pop(); - let processed_impl = + let mut processed_impl = ApiRuntimeImplToApiRuntimeApiImpl { runtime_block }.process(impl_.clone()); + processed_impl.attrs.push(parse_quote!(#[automatically_derived])); + result.push(processed_impl); } @@ -841,7 +846,7 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { ); let impl_ = expander::Expander::new("impl_runtime_apis") - .dry(std::env::var("SP_API_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(impl_) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs index e261b162b5aa1c96252232b7c079415623bfa43b..c8c1f12d90a1645e1bd3c7df6f91a873c5d9288f 100644 --- a/substrate/primitives/api/proc-macro/src/utils.rs +++ b/substrate/primitives/api/proc-macro/src/utils.rs @@ -19,7 +19,7 @@ use crate::common::API_VERSION_ATTRIBUTE; use inflector::Inflector; use 
proc_macro2::{Span, TokenStream}; use proc_macro_crate::{crate_name, FoundCrate}; -use quote::{format_ident, quote, ToTokens}; +use quote::{format_ident, quote}; use syn::{ parse_quote, spanned::Spanned, token::And, Attribute, Error, FnArg, GenericArgument, Ident, ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, @@ -28,14 +28,14 @@ use syn::{ /// Generates the access to the `sc_client` crate. pub fn generate_crate_access() -> TokenStream { match crate_name("sp-api") { - Ok(FoundCrate::Itself) => quote!(sp_api), + Ok(FoundCrate::Itself) => quote!(sp_api::__private), Ok(FoundCrate::Name(renamed_name)) => { let renamed_name = Ident::new(&renamed_name, Span::call_site()); - quote!(#renamed_name) + quote!(#renamed_name::__private) }, Err(e) => if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") { - let path = format!("{}::deps::{}", name, "sp_api"); + let path = format!("{}::deps::sp_api::__private", name); let path = syn::parse_str::(&path).expect("is a valid path; qed"); quote!( #path ) } else { @@ -261,6 +261,7 @@ pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { /// Extract the documentation from the provided attributes. #[cfg(feature = "frame-metadata")] pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { + use quote::ToTokens; attrs .iter() .filter_map(|attr| { diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index c122a2348b8e2daf36f79eff54f7aaa216ab0f83..1b8b7c40ae074188fb960c8cde1bde0639d983b7 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -70,48 +70,58 @@ // Make doc tests happy extern crate self as sp_api; +/// Private exports used by the macros. +/// +/// This is seen as internal API and can change at any point. 
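For context on the `__private` module introduced here: it is what the `decl_runtime_apis!`/`impl_runtime_apis!` expansions now refer to, and nothing outside the generated code should import from it. A small, hedged illustration (assuming `sp-api` and `codec` as direct dependencies of the downstream crate):

```rust
// Illustration only: `__private` is what the expanded macros reference; it is
// internal API and may change at any point. Hand-written code should keep
// importing `Encode`/`Decode` from `codec` (or the prelude it already uses).
use sp_api::__private::{Decode, Encode};

fn scale_roundtrip(value: u32) -> u32 {
    let bytes = value.encode();
    u32::decode(&mut &bytes[..]).expect("u32 round-trips through SCALE; qed")
}
```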
#[doc(hidden)] -pub use codec::{self, Decode, DecodeLimit, Encode}; -#[doc(hidden)] -#[cfg(feature = "std")] -pub use hash_db::Hasher; -#[doc(hidden)] -pub use scale_info; -#[doc(hidden)] -pub use sp_core::offchain; -#[doc(hidden)] -#[cfg(not(feature = "std"))] -pub use sp_core::to_substrate_wasm_fn_return_value; -#[doc(hidden)] +pub mod __private { + #[cfg(feature = "std")] + mod std_imports { + pub use hash_db::Hasher; + pub use sp_core::traits::CallContext; + pub use sp_externalities::{Extension, Extensions}; + pub use sp_runtime::StateVersion; + pub use sp_state_machine::{ + Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, TrieBackend, + TrieBackendBuilder, + }; + } + #[cfg(feature = "std")] + pub use std_imports::*; + + pub use crate::*; + pub use codec::{self, Decode, DecodeLimit, Encode}; + pub use scale_info; + pub use sp_core::offchain; + #[cfg(not(feature = "std"))] + pub use sp_core::to_substrate_wasm_fn_return_value; + #[cfg(feature = "frame-metadata")] + pub use sp_metadata_ir::{self as metadata_ir, frame_metadata as metadata}; + pub use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, + transaction_validity::TransactionValidity, + RuntimeString, TransactionOutcome, + }; + pub use sp_std::{mem, slice, vec}; + pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; +} + #[cfg(feature = "std")] pub use sp_core::traits::CallContext; use sp_core::OpaqueMetadata; -#[doc(hidden)] #[cfg(feature = "std")] -pub use sp_externalities::{Extension, Extensions}; -#[doc(hidden)] -#[cfg(feature = "frame-metadata")] -pub use sp_metadata_ir::{self as metadata_ir, frame_metadata as metadata}; -#[doc(hidden)] +use sp_externalities::{Extension, Extensions}; +use sp_runtime::traits::Block as BlockT; #[cfg(feature = "std")] -pub use sp_runtime::StateVersion; -#[doc(hidden)] -pub use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, - transaction_validity::TransactionValidity, - RuntimeString, TransactionOutcome, -}; -#[doc(hidden)] +use sp_runtime::traits::HashingFor; #[cfg(feature = "std")] -pub use sp_state_machine::{ - backend::AsTrieBackend, Backend as StateBackend, InMemoryBackend, OverlayedChanges, - StorageProof, TrieBackend, TrieBackendBuilder, -}; -#[doc(hidden)] -pub use sp_std::{mem, slice, vec}; -#[doc(hidden)] -pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; +pub use sp_runtime::TransactionOutcome; +#[cfg(feature = "std")] +pub use sp_state_machine::StorageProof; +#[cfg(feature = "std")] +use sp_state_machine::{backend::AsTrieBackend, Backend as StateBackend, OverlayedChanges}; +use sp_version::RuntimeVersion; #[cfg(feature = "std")] use std::cell::RefCell; @@ -396,14 +406,14 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// Mocks given trait implementations as runtime apis. /// -/// Accepts similar syntax as [`impl_runtime_apis!`] and generates -/// simplified mock implementations of the given runtime apis. The difference in syntax is that -/// the trait does not need to be referenced by a qualified path, methods accept the `&self` -/// parameter and the error type can be specified as associated type. If no error type is -/// specified [`String`] is used as error type. +/// Accepts similar syntax as [`impl_runtime_apis!`] and generates simplified mock +/// implementations of the given runtime apis. 
The difference in syntax is that the trait does +/// not need to be referenced by a qualified path, methods accept the `&self` parameter and the +/// error type can be specified as associated type. If no error type is specified [`String`] is +/// used as error type. /// -/// Besides implementing the given traits, the [`Core`](sp_api::Core) and -/// [`ApiExt`](sp_api::ApiExt) are implemented automatically. +/// Besides implementing the given traits, the [`Core`] and [`ApiExt`] are implemented +/// automatically. /// /// # Example /// diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index f207f5ff02dd0127a2cf072af4305ff0f2f8516d..0346ad270ab03f9040d55dc4621ec47a58659398 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -8,6 +8,9 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 4bd64c974f29ced75ff54eb002711d728a64f258..788d1807f3ba2791c4593ebc7559cdf6d44078ca 100644 --- a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -1,3 +1,21 @@ +error[E0603]: struct `RuntimeVersion` is private + --> tests/ui/impl_incorrect_method_signature.rs:37:27 + | +37 | fn version() -> sp_api::RuntimeVersion { + | ^^^^^^^^^^^^^^ private struct + | +note: the struct `RuntimeVersion` is defined here + --> $WORKSPACE/substrate/primitives/api/src/lib.rs + | + | use sp_version::RuntimeVersion; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ +help: consider importing one of these items instead + | +37 | fn version() -> sp_api::__private::RuntimeVersion { + | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +37 | fn version() -> sp_version::RuntimeVersion { + | ~~~~~~~~~~~~~~~~~~~~~~~~~~ + error[E0053]: method `test` has an incompatible type for trait --> tests/ui/impl_incorrect_method_signature.rs:33:17 | diff --git a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 4c21a3afb9b6a2d13a7b3a0091dec785608c5a69..b4df7c068768c9236e98b44c1c2fa878311942af 100644 --- a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -1,3 +1,21 @@ +error[E0603]: struct `RuntimeVersion` is private + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:39:27 + | +39 | fn version() -> sp_api::RuntimeVersion { + | ^^^^^^^^^^^^^^ private struct + | +note: the struct `RuntimeVersion` is defined here + --> $WORKSPACE/substrate/primitives/api/src/lib.rs + | + | use sp_version::RuntimeVersion; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ +help: consider importing one of these items instead + | +39 | fn version() -> sp_api::__private::RuntimeVersion { + | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +39 | fn version() -> sp_version::RuntimeVersion { + | ~~~~~~~~~~~~~~~~~~~~~~~~~~ + error[E0053]: method `test` has an incompatible type for trait --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:17 | diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 
a4a1bc44a69b9128454a88e56c04e2f37a4ebc0e..33bc22ed84f9220316f8aeca27b2bff1232fe373 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -10,20 +10,23 @@ repository.workspace = true documentation = "https://docs.rs/sp-application-crypto" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../core", default-features = false} +sp-core = { path = "../core", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } -sp-std = { path = "../std", default-features = false} -sp-io = { path = "../io", default-features = false} +serde = { version = "1.0.193", default-features = false, optional = true, features = ["alloc", "derive"] } +sp-std = { path = "../std", default-features = false } +sp-io = { path = "../io", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "full_crypto", @@ -35,7 +38,7 @@ std = [ ] # Serde support without relying on std features. -serde = [ "dep:serde", "scale-info/serde", "sp-core/serde" ] +serde = ["dep:serde", "scale-info/serde", "sp-core/serde"] # This feature enables all crypto primitives for `no_std` builds like microcontrollers # or Intel SGX. @@ -51,7 +54,7 @@ full_crypto = [ # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = [ "sp-core/bls-experimental", "sp-io/bls-experimental" ] +bls-experimental = ["sp-core/bls-experimental", "sp-io/bls-experimental"] # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still diff --git a/substrate/primitives/application-crypto/test/Cargo.toml b/substrate/primitives/application-crypto/test/Cargo.toml index a6f4f108c8f1842b8e0e47f98d65644dedb83dc9..0057606b38e57112e2988d96dbaa342059616ee0 100644 --- a/substrate/primitives/application-crypto/test/Cargo.toml +++ b/substrate/primitives/application-crypto/test/Cargo.toml @@ -9,12 +9,15 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { path = "../../api" } sp-application-crypto = { path = ".." } -sp-core = { path = "../../core", default-features = false} -sp-keystore = { path = "../../keystore", default-features = false} +sp-core = { path = "../../core", default-features = false } +sp-keystore = { path = "../../keystore", default-features = false } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 249aebec68fdd0d93afe57c512cd5a82a5bf2aa6..f7f1da6a9139705a17651fd76db36c6529626c48 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -10,6 +10,9 @@ description = "Minimal fixed point arithmetic primitives and types for runtime." 
documentation = "https://docs.rs/sp-arithmetic" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,20 +22,20 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "max-encoded-len", ] } integer-sqrt = "0.1.2" -num-traits = { version = "0.2.8", default-features = false } +num-traits = { version = "0.2.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } static_assertions = "1.1.0" -sp-std = { path = "../std", default-features = false} +sp-std = { path = "../std", default-features = false } [dev-dependencies] criterion = "0.4.0" primitive-types = "0.12.0" -sp-core = { path = "../core", features = ["full_crypto"]} +sp-core = { path = "../core", features = ["full_crypto"] } rand = "0.8.5" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "num-traits/std", @@ -42,7 +45,7 @@ std = [ "sp-std/std", ] # Serde support without relying on std features. -serde = [ "dep:serde", "scale-info/serde" ] +serde = ["dep:serde", "scale-info/serde"] [[bench]] name = "bench" diff --git a/substrate/primitives/arithmetic/fuzzer/Cargo.toml b/substrate/primitives/arithmetic/fuzzer/Cargo.toml index eded5a954c5a61773a3aa3a3416ffb1355d79a89..b881e8d46dbdcc4576bc685e03bbea23a2d7c00c 100644 --- a/substrate/primitives/arithmetic/fuzzer/Cargo.toml +++ b/substrate/primitives/arithmetic/fuzzer/Cargo.toml @@ -10,6 +10,9 @@ description = "Fuzzer for fixed point arithmetic primitives." documentation = "https://docs.rs/sp-arithmetic-fuzzer" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index e4f44e9da383e2121b419eec2bb94a0e4ea4f7fe..82ec5a3eb9a492eb06b146f9c6e89ff83b086ff0 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -9,19 +9,22 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../api", default-features = false} -sp-application-crypto = { path = "../application-crypto", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-application-crypto = { path = "../application-crypto", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index 269eb53953279a3ccf8940ca92e04f715ed8d2d6..de1ffd9d9e64a4708370db0f5d1d44eee56d5870 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ 
b/substrate/primitives/block-builder/Cargo.toml @@ -9,15 +9,18 @@ repository.workspace = true description = "The block builder runtime api." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false} -sp-inherents = { path = "../inherents", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-inherents = { path = "../inherents", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] -std = [ "sp-api/std", "sp-inherents/std", "sp-runtime/std", "sp-std/std" ] +default = ["std"] +std = ["sp-api/std", "sp-inherents/std", "sp-runtime/std", "sp-std/std"] diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 33db09ce0ac2f41897957a43b8b74497b90e93a1..38b3b2030dc62a77fd7060d2fc241df8a305be71 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -10,6 +10,9 @@ description = "Substrate blockchain traits and primitives." documentation = "https://docs.rs/sp-blockchain" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index 26f02bc3119924981fbab2fd90e0a8446579b6b9..6c797e15ae805e4d257b9fac1b057bfe8842b1e3 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -9,23 +9,26 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../../api", default-features = false} -sp-application-crypto = { path = "../../application-crypto", default-features = false} -sp-consensus-slots = { path = "../slots", default-features = false} -sp-inherents = { path = "../../inherents", default-features = false} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} -sp-timestamp = { path = "../../timestamp", default-features = false} +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-consensus-slots = { path = "../slots", default-features = false } +sp-inherents = { path = "../../inherents", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } +sp-timestamp = { path = "../../timestamp", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index db8bb8cb1540688232493826fff4c554a437f6cf..8690c73e34c33d3c49ada85df0090dc86cc1e4bd 
100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -9,25 +9,28 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } -sp-api = { path = "../../api", default-features = false} -sp-application-crypto = { path = "../../application-crypto", default-features = false} -sp-consensus-slots = { path = "../slots", default-features = false} -sp-core = { path = "../../core", default-features = false} -sp-inherents = { path = "../../inherents", default-features = false} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} -sp-timestamp = { path = "../../timestamp", optional = true} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-consensus-slots = { path = "../slots", default-features = false } +sp-core = { path = "../../core", default-features = false } +sp-inherents = { path = "../../inherents", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } +sp-timestamp = { path = "../../timestamp", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index c083bfd9a313e078e4f82de0a9bb00fc025bb93d..d6b2cdd55e0daddf7e49a132d48bbc8c1741736a 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -33,7 +33,7 @@ use sp_std::vec::Vec; use crate::digests::{NextConfigDescriptor, NextEpochDescriptor}; pub use sp_core::sr25519::vrf::{ - VrfInput, VrfOutput, VrfProof, VrfSignData, VrfSignature, VrfTranscript, + VrfInput, VrfPreOutput, VrfProof, VrfSignData, VrfSignature, VrfTranscript, }; /// Key type for BABE module. diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index 5ff0a2ebc70fb6749da31a630148625ebffc8a57..42383cf14a862b0c16e70261d73260728de0e8db 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -8,29 +8,32 @@ homepage = "https://substrate.io" repository.workspace = true description = "Primitives for BEEFY protocol." 
+[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } -sp-api = { path = "../../api", default-features = false} -sp-application-crypto = { path = "../../application-crypto", default-features = false} -sp-core = { path = "../../core", default-features = false} -sp-io = { path = "../../io", default-features = false} -sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} +serde = { version = "1.0.193", default-features = false, optional = true, features = ["alloc", "derive"] } +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-core = { path = "../../core", default-features = false } +sp-io = { path = "../../io", default-features = false } +sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } strum = { version = "0.24.1", features = ["derive"], default-features = false } lazy_static = "1.4.0" [dev-dependencies] array-bytes = "6.1" -w3f-bls = { version = "0.1.3", features = ["std"]} +w3f-bls = { version = "0.1.3", features = ["std"] } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 5bdf8ce010a10ea75c4dde5dafe0f3641a11c88e..e31c53237be24523da370725176265a978576835 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -133,7 +133,7 @@ pub mod bls_crypto { ::Output: Into<[u8; 32]>, { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { - // `w3f-bls` library uses IETF hashing standard and as such does not exposes + // `w3f-bls` library uses IETF hashing standard and as such does not expose // a choice of hash to field function. // We are directly calling into the library to avoid introducing new host call. // and because BeefyAuthorityId::verify is being called in the runtime so we don't have @@ -157,7 +157,7 @@ pub mod bls_crypto { pub mod ecdsa_bls_crypto { use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE}; use sp_application_crypto::{app_crypto, ecdsa_bls377}; - use sp_core::{crypto::Wraps, ecdsa_bls377::Pair as EcdsaBlsPair, Pair as _}; + use sp_core::{crypto::Wraps, ecdsa_bls377::Pair as EcdsaBlsPair}; app_crypto!(ecdsa_bls377, KEY_TYPE); @@ -167,17 +167,24 @@ pub mod ecdsa_bls_crypto { /// Signature for a BEEFY authority using (ECDSA,BLS) as its crypto. pub type AuthoritySignature = Signature; - impl BeefyAuthorityId for AuthorityId + impl BeefyAuthorityId for AuthorityId where - ::Output: Into<[u8; 32]>, + H: Hash, + H::Output: Into<[u8; 32]>, { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { - // `w3f-bls` library uses IETF hashing standard and as such does not exposes - // a choice of hash to field function. 
- // We are directly calling into the library to avoid introducing new host call. - // and because BeefyAuthorityId::verify is being called in the runtime so we don't have - - EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref()) + // We cannot simply call + // `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())` + // because that invokes ECDSA default verification which performs a Blake2b hash + // which we don't want. This is because ECDSA signatures are meant to be verified + // on the Ethereum network where the Keccak hasher is significantly cheaper than Blake2b. + // See Figure 3 of [OnSc21](https://www.scitepress.org/Papers/2021/106066/106066.pdf) + // for comparison. + EcdsaBlsPair::verify_with_hasher::( + signature.as_inner_ref(), + msg, + self.as_inner_ref(), + ) } } } @@ -257,6 +264,7 @@ pub enum ConsensusLog { /// /// A vote message is a direct vote created by a BEEFY node on every voting round /// and is gossiped to its peers. +// TODO: Remove `Signature` generic type, instead get it from `Id::Signature`. #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct VoteMessage { /// Commit to information extracted from a finalized block @@ -507,11 +515,15 @@ mod tests { let msg = &b"test-message"[..]; let (pair, _) = ecdsa_bls_crypto::Pair::generate(); - let signature: ecdsa_bls_crypto::Signature = pair.as_inner_ref().sign(&msg).into(); + let signature: ecdsa_bls_crypto::Signature = + pair.as_inner_ref().sign_with_hasher::(&msg).into(); // Verification works if same hashing function is used when signing and verifying. assert!(BeefyAuthorityId::::verify(&pair.public(), &signature, msg)); + // Verification doesn't work if we use the verify function provided by the pair_crypto implementation + assert!(!ecdsa_bls_crypto::Pair::verify(&signature, msg, &pair.public())); + // Other public key doesn't work let (other_pair, _) = ecdsa_bls_crypto::Pair::generate(); assert!(!BeefyAuthorityId::::verify(&other_pair.public(), &signature, msg,)); diff --git a/substrate/primitives/consensus/beefy/src/mmr.rs b/substrate/primitives/consensus/beefy/src/mmr.rs index 660506b8763f177591408c526d4c7f8c341ad13c..9ac1624ca752c055b56522cfd92cd0240a1a52a6 100644 --- a/substrate/primitives/consensus/beefy/src/mmr.rs +++ b/substrate/primitives/consensus/beefy/src/mmr.rs @@ -150,8 +150,9 @@ pub use mmr_root_provider::MmrRootProvider; mod mmr_root_provider { use super::*; use crate::{known_payloads, payload::PayloadProvider, Payload}; - use sp_api::{NumberFor, ProvideRuntimeApi}; + use sp_api::ProvideRuntimeApi; use sp_mmr_primitives::MmrApi; + use sp_runtime::traits::NumberFor; use sp_std::{marker::PhantomData, sync::Arc}; /// A [`crate::Payload`] provider where payload is Merkle Mountain Range root hash.
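Note on the hasher symmetry above: the following is a minimal usage sketch mirroring the test added in `substrate/primitives/consensus/beefy/src/lib.rs`. The generic hasher arguments are elided in the hunks above, so `Keccak256` and the exact import paths are assumptions for illustration, not the verbatim upstream code.

```rust
// Sketch only: `Keccak256` and the import paths are assumed, since the generic
// arguments are stripped in the hunks above.
use sp_consensus_beefy::{ecdsa_bls_crypto, BeefyAuthorityId};
use sp_core::{crypto::Wraps, Pair as _};
use sp_runtime::traits::Keccak256;

fn sign_and_verify_with_keccak() {
    let msg = &b"test-message"[..];
    let (pair, _) = ecdsa_bls_crypto::Pair::generate();

    // Sign with the Keccak-based hasher so the ECDSA part stays cheap to verify
    // on Ethereum-style verifiers.
    let signature: ecdsa_bls_crypto::Signature =
        pair.as_inner_ref().sign_with_hasher::<Keccak256>(msg).into();

    // Verification succeeds only when the verifier uses the same hasher.
    assert!(BeefyAuthorityId::<Keccak256>::verify(&pair.public(), &signature, msg));

    // The plain `Pair::verify` path falls back to the default Blake2b-based ECDSA
    // verification, so it is expected to fail for this signature.
    assert!(!ecdsa_bls_crypto::Pair::verify(&signature, msg, &pair.public()));
}
```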
diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index e8f6b806f8c6403296e5cd95907b3a74c4e097f2..afb7a9895fcdc8b684fcfab2eb40d3eb802ea10f 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -10,11 +10,14 @@ description = "Common utilities for building and using consensus engines in subs documentation = "https://docs.rs/sp-consensus/" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" futures = { version = "0.3.21", features = ["thread-pool"] } log = "0.4.17" thiserror = "1.0.48" diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 8757869995d061ef7d5d0f679bfab993a44a7912..238c9868664e10d41ecd8fd7f376351d82c350dd 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -10,6 +10,9 @@ description = "Primitives for GRANDPA integration, suitable for WASM compilation documentation = "https://docs.rs/sp-consensus-grandpa" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,16 +21,16 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive", "alloc"], default-features = false, optional = true } -sp-api = { path = "../../api", default-features = false} -sp-application-crypto = { path = "../../application-crypto", default-features = false} -sp-core = { path = "../../core", default-features = false} -sp-keystore = { path = "../../keystore", default-features = false, optional = true} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} +serde = { version = "1.0.193", features = ["alloc", "derive"], default-features = false, optional = true } +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-core = { path = "../../core", default-features = false } +sp-keystore = { path = "../../keystore", default-features = false, optional = true } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "grandpa/std", diff --git a/substrate/primitives/consensus/grandpa/src/lib.rs b/substrate/primitives/consensus/grandpa/src/lib.rs index baeaee4738e484d23059f4a27dc99093b60412fe..1cf5504c5e7d1b0f17e248e6be45829e6ef15655 100644 --- a/substrate/primitives/consensus/grandpa/src/lib.rs +++ b/substrate/primitives/consensus/grandpa/src/lib.rs @@ -19,13 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] -extern crate alloc; - #[cfg(feature = "serde")] use serde::Serialize; -use codec::{Codec, Decode, Encode, Input}; +use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "std")] use sp_keystore::KeystorePtr; @@ -33,7 +30,7 @@ use sp_runtime::{ traits::{Header as HeaderT, 
NumberFor}, ConsensusEngineId, RuntimeDebug, }; -use sp_std::{borrow::Cow, vec::Vec}; +use sp_std::vec::Vec; /// The log target to be used by client code. pub const CLIENT_LOG_TARGET: &str = "grandpa"; @@ -62,10 +59,6 @@ pub type AuthoritySignature = app::Signature; /// The `ConsensusEngineId` of GRANDPA. pub const GRANDPA_ENGINE_ID: ConsensusEngineId = *b"FRNK"; -/// The storage key for the current set of weighted Grandpa authorities. -/// The value stored is an encoded VersionedAuthorityList. -pub const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; - /// The weight of an authority. pub type AuthorityWeight = u64; @@ -464,60 +457,6 @@ where Some(grandpa::SignedMessage { message, signature, id: public }) } -/// WASM function call to check for pending changes. -pub const PENDING_CHANGE_CALL: &str = "grandpa_pending_change"; -/// WASM function call to get current GRANDPA authorities. -pub const AUTHORITIES_CALL: &str = "grandpa_authorities"; - -/// The current version of the stored AuthorityList type. The encoding version MUST be updated any -/// time the AuthorityList type changes. -const AUTHORITIES_VERSION: u8 = 1; - -/// An AuthorityList that is encoded with a version specifier. The encoding version is updated any -/// time the AuthorityList type changes. This ensures that encodings of different versions of an -/// AuthorityList are differentiable. Attempting to decode an authority list with an unknown -/// version will fail. -#[derive(Default)] -pub struct VersionedAuthorityList<'a>(Cow<'a, AuthorityList>); - -impl<'a> From for VersionedAuthorityList<'a> { - fn from(authorities: AuthorityList) -> Self { - VersionedAuthorityList(Cow::Owned(authorities)) - } -} - -impl<'a> From<&'a AuthorityList> for VersionedAuthorityList<'a> { - fn from(authorities: &'a AuthorityList) -> Self { - VersionedAuthorityList(Cow::Borrowed(authorities)) - } -} - -impl<'a> Into for VersionedAuthorityList<'a> { - fn into(self) -> AuthorityList { - self.0.into_owned() - } -} - -impl<'a> Encode for VersionedAuthorityList<'a> { - fn size_hint(&self) -> usize { - (AUTHORITIES_VERSION, self.0.as_ref()).size_hint() - } - - fn using_encoded R>(&self, f: F) -> R { - (AUTHORITIES_VERSION, self.0.as_ref()).using_encoded(f) - } -} - -impl<'a> Decode for VersionedAuthorityList<'a> { - fn decode(value: &mut I) -> Result { - let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; - if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()) - } - Ok(authorities.into()) - } -} - /// An opaque type used to represent the key ownership proof at the runtime API /// boundary. The inner value is an encoded representation of the actual key /// ownership proof which will be parameterized when defining the runtime. 
At diff --git a/substrate/primitives/consensus/pow/Cargo.toml b/substrate/primitives/consensus/pow/Cargo.toml index cc4e961dbb6e62fc47aa78474655b0135c088a8d..e528d8365ced3721001381f881ad8df1aa2c33c1 100644 --- a/substrate/primitives/consensus/pow/Cargo.toml +++ b/substrate/primitives/consensus/pow/Cargo.toml @@ -9,18 +9,21 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../api", default-features = false} -sp-core = { path = "../../core", default-features = false} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} +sp-api = { path = "../../api", default-features = false } +sp-core = { path = "../../core", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "sp-api/std", diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 696e0a64596163ad18775bc0714d2622a8753651..41385e9d1e9f63eea583b21ea56a9493ba60c826 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -11,22 +11,25 @@ documentation = "https://docs.rs/sp-consensus-sassafras" readme = "README.md" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive"], optional = true } -sp-api = { default-features = false, path = "../../api" } -sp-application-crypto = { default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } -sp-consensus-slots = { default-features = false, path = "../slots" } -sp-core = { default-features = false, path = "../../core", features = ["bandersnatch-experimental"] } -sp-runtime = { default-features = false, path = "../../runtime" } -sp-std = { default-features = false, path = "../../std" } +serde = { version = "1.0.193", default-features = false, features = ["derive"], optional = true } +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } +sp-consensus-slots = { path = "../slots", default-features = false } +sp-core = { path = "../../core", default-features = false, features = ["bandersnatch-experimental"] } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "scale-codec/std", "scale-info/std", diff --git a/substrate/primitives/consensus/sassafras/README.md b/substrate/primitives/consensus/sassafras/README.md index b0f3685494e4e3eb8b68bf15318960f532fe22ce..d6251940a496f52ff28c6471582c1981681bb8ec 100644 --- a/substrate/primitives/consensus/sassafras/README.md +++ b/substrate/primitives/consensus/sassafras/README.md @@ -1,12 +1,6 
@@ Primitives for SASSAFRAS. -# ⚠️ WARNING ⚠️ +- Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41 +- RFC proposal: https://github.com/polkadot-fellows/RFCs/pull/26 -The crate interfaces and structures are highly experimental and may be subject -to significant changes. - -Depends on upstream experimental feature: `bandersnatch-experimental`. - -These structs were mostly extracted from the main SASSAFRAS protocol PR: https://github.com/paritytech/polkadot-sdk/pull/1336. - -Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41 +Depends on `sp-core` feature: `bandersnatch-experimental`. diff --git a/substrate/primitives/consensus/sassafras/src/digests.rs b/substrate/primitives/consensus/sassafras/src/digests.rs index 95a305099de553cbe263fc1acd5605d1670d638b..5274f1309d8251977fe3bf30f2209bf8538f5e19 100644 --- a/substrate/primitives/consensus/sassafras/src/digests.rs +++ b/substrate/primitives/consensus/sassafras/src/digests.rs @@ -48,11 +48,11 @@ pub struct SlotClaim { /// This is mandatory in the first block of each epoch. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct NextEpochDescriptor { + /// Randomness value. + pub randomness: Randomness, /// Authorities list. pub authorities: Vec, - /// Epoch randomness. - pub randomness: Randomness, - /// Epoch configurable parameters. + /// Epoch configuration. /// /// If not present previous epoch parameters are used. pub config: Option, diff --git a/substrate/primitives/consensus/sassafras/src/lib.rs b/substrate/primitives/consensus/sassafras/src/lib.rs index e421e771d406a197cb30429a43b5c57d7f48b9de..1752f76588635f5a80fddabc86023a439361643c 100644 --- a/substrate/primitives/consensus/sassafras/src/lib.rs +++ b/substrate/primitives/consensus/sassafras/src/lib.rs @@ -80,33 +80,43 @@ pub type EquivocationProof = sp_consensus_slots::EquivocationProof, - /// Randomness for the epoch. + /// Epoch index. + pub index: u64, + /// Starting slot of the epoch. + pub start: Slot, + /// Number of slots in the epoch. + pub length: u32, + /// Randomness value. pub randomness: Randomness, + /// Authorities list. + pub authorities: Vec, /// Epoch configuration. pub config: EpochConfiguration, } diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs index d81770c96d9bbec9dc8a625e048173d705a9f7cb..dc0a61990d3ea7248a4bbbbf69a6eed72942719f 100644 --- a/substrate/primitives/consensus/sassafras/src/ticket.rs +++ b/substrate/primitives/consensus/sassafras/src/ticket.rs @@ -62,10 +62,10 @@ pub struct TicketClaim { pub erased_signature: EphemeralSignature, } -/// Computes ticket-id maximum allowed value for a given epoch. +/// Computes a boundary for the [`TicketId`] maximum allowed value for a given epoch. /// -/// Only ticket identifiers below this threshold should be considered for slot -/// assignment. +/// Only ticket identifiers below this threshold should be considered as candidates +/// for slot assignment. /// /// The value is computed as `TicketId::MAX*(redundancy*slots)/(attempts*validators)` /// @@ -76,16 +76,51 @@ pub struct TicketClaim { /// - `validators`: number of validators in epoch. /// /// If `attempts * validators = 0` then we return 0. +/// +/// For details about the formula and its implications refer to the +/// [*probabilities and parameters*](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS#probabilities-and-parameters) +/// paragraph of the w3f introduction to the protocol.
+// TODO: replace with [RFC-26](https://github.com/polkadot-fellows/RFCs/pull/26) +// "Tickets Threshold" paragraph once it is merged pub fn ticket_id_threshold( redundancy: u32, slots: u32, attempts: u32, validators: u32, ) -> TicketId { - let den = attempts as u64 * validators as u64; let num = redundancy as u64 * slots as u64; + let den = attempts as u64 * validators as u64; TicketId::max_value() .checked_div(den.into()) .unwrap_or_default() .saturating_mul(num.into()) } + +#[cfg(test)] +mod tests { + use super::*; + + // This is a trivial example/check which better explains the rationale + // behind the threshold. + // + // After reading this, the formula should become obvious. + #[test] + fn ticket_id_threshold_trivial_check() { + // For an epoch with `s` slots we want to accept a number of tickets equal to ~s·r + let redundancy = 2; + let slots = 1000; + let attempts = 100; + let validators = 500; + + let threshold = ticket_id_threshold(redundancy, slots, attempts, validators); + let threshold = threshold as f64 / TicketId::MAX as f64; + + // We expect that the total number of tickets allowed to be submitted + // is slots*redundancy + let avt = ((attempts * validators) as f64 * threshold) as u32; + assert_eq!(avt, slots * redundancy); + + println!("threshold: {}", threshold); + println!("avt = {}", avt); + } +} diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs index d25a656f9508f7090755af20cfa0418b5954015e..5deacd8e9945bf9dbcc05a0664029e3a2c57c1c0 100644 --- a/substrate/primitives/consensus/sassafras/src/vrf.rs +++ b/substrate/primitives/consensus/sassafras/src/vrf.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Utilities related to VRF input, output and signatures. +//! Utilities related to VRF input, pre-output and signatures. use crate::{Randomness, TicketBody, TicketId}; use scale_codec::Encode; @@ -23,10 +23,16 @@ use sp_consensus_slots::Slot; use sp_std::vec::Vec; pub use sp_core::bandersnatch::{ - ring_vrf::{RingContext, RingProver, RingVerifier, RingVrfSignature}, - vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, + ring_vrf::{RingProver, RingVerifier, RingVerifierData, RingVrfSignature}, + vrf::{VrfInput, VrfPreOutput, VrfSignData, VrfSignature}, }; +/// Ring VRF domain size for Sassafras consensus. +pub const RING_VRF_DOMAIN_SIZE: u32 = 2048; + +/// Bandersnatch VRF [`RingContext`] specialization for Sassafras using [`RING_VRF_DOMAIN_SIZE`]. +pub type RingContext = sp_core::bandersnatch::ring_vrf::RingContext; + fn vrf_input_from_data( domain: &[u8], data: impl IntoIterator>, @@ -84,21 +90,21 @@ pub fn ticket_body_sign_data(ticket_body: &TicketBody, ticket_id_input: VrfInput ) } -/// Make ticket-id from the given VRF input and output. +/// Make ticket-id from the given VRF input and pre-output. /// /// Input should have been obtained via [`ticket_id_input`]. -/// Output should have been obtained from the input directly using the vrf secret key -/// or from the vrf signature outputs. -pub fn make_ticket_id(input: &VrfInput, output: &VrfOutput) -> TicketId { - let bytes = output.make_bytes::<16>(b"ticket-id", input); +/// Pre-output should have been obtained from the input directly using the vrf +/// secret key or from the vrf signature pre-outputs.
+pub fn make_ticket_id(input: &VrfInput, pre_output: &VrfPreOutput) -> TicketId { + let bytes = pre_output.make_bytes::<16>(b"ticket-id", input); u128::from_le_bytes(bytes) } -/// Make revealed key seed from a given VRF input and ouput. +/// Make revealed key seed from a given VRF input and pre-output. /// /// Input should have been obtained via [`revealed_key_input`]. -/// Output should have been obtained from the input directly using the vrf secret key -/// or from the vrf signature outputs. -pub fn make_revealed_key_seed(input: &VrfInput, output: &VrfOutput) -> [u8; 32] { - output.make_bytes::<32>(b"revealed-seed", input) +/// Pre-output should have been obtained from the input directly using the vrf +/// secret key or from the vrf signature pre-outputs. +pub fn make_revealed_key_seed(input: &VrfInput, pre_output: &VrfPreOutput) -> [u8; 32] { + pre_output.make_bytes::<32>(b"revealed-seed", input) } diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index aa899d86e72ca34f39f9bcd924d117deaeff57bc..91bbd1663a9c3d867624acdfd0a17412a8d2a49c 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -9,18 +9,21 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", default-features = false, features = ["derive", "alloc"], optional = true } -sp-std = { path = "../../std", default-features = false} -sp-timestamp = { path = "../../timestamp", default-features = false} +serde = { version = "1.0", default-features = false, features = ["alloc", "derive"], optional = true } +sp-std = { path = "../../std", default-features = false } +sp-timestamp = { path = "../../timestamp", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", @@ -30,4 +33,4 @@ std = [ ] # Serde support without relying on std features. -serde = [ "dep:serde", "scale-info/serde" ] +serde = ["dep:serde", "scale-info/serde"] diff --git a/substrate/primitives/consensus/slots/src/lib.rs b/substrate/primitives/consensus/slots/src/lib.rs index a299ce395ea4fa79dd95bd2b9b9446e537983dd1..b9a032c1bcfcf94228eed82431b551de0414c14d 100644 --- a/substrate/primitives/consensus/slots/src/lib.rs +++ b/substrate/primitives/consensus/slots/src/lib.rs @@ -121,7 +121,20 @@ impl From for u64 { } /// A slot duration defined in milliseconds. -#[derive(Clone, Copy, Debug, Encode, Decode, Hash, PartialOrd, Ord, PartialEq, Eq, TypeInfo)] +#[derive( + Clone, + Copy, + Debug, + Encode, + Decode, + MaxEncodedLen, + Hash, + PartialOrd, + Ord, + PartialEq, + Eq, + TypeInfo, +)] pub struct SlotDuration(u64); impl SlotDuration { diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 79df81e62c6670a1b33ca5f54120cdbb7a01109e..b4d3c8439283c776a598dc0303e8616a862dbc94 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -9,33 +9,34 @@ repository.workspace = true description = "Shareable Substrate types."
documentation = "https://docs.rs/sp-core" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive","max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.188", optional = true, default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", optional = true, default-features = false, features = ["alloc", "derive"] } bounded-collections = { version = "0.1.8", default-features = false } primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } hash-db = { version = "0.16.0", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } bs58 = { version = "0.5.0", default-features = false, optional = true } -rand = { version = "0.8.5", features = ["small_rng"], optional = true } +rand = { version = "0.8.5", features = ["small_rng"], optional = true } substrate-bip39 = { version = "0.4.4", optional = true } bip39 = { version = "2.0.0", default-features = false } -regex = { version = "1.6.0", optional = true } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } -lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.12.1", optional = true } ss58-registry = { version = "1.34.0", default-features = false } -sp-std = { path = "../std", default-features = false} -sp-debug-derive = { path = "../debug-derive", default-features = false} -sp-storage = { path = "../storage", default-features = false} -sp-externalities = { path = "../externalities", optional = true} +sp-std = { path = "../std", default-features = false } +sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-storage = { path = "../storage", default-features = false } +sp-externalities = { path = "../externalities", optional = true } futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.48", optional = true } @@ -49,20 +50,22 @@ array-bytes = { version = "6.1", optional = true } ed25519-zebra = { version = "3.1.0", default-features = false, optional = true } blake2 = { version = "0.10.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } -schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } -merlin = { version = "2.0", default-features = false } -secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } +schnorrkel = { version = "0.11.4", features = ["preaudit_deprecated"], default-features = false } +merlin = { version = "3.0", default-features = false } +secp256k1 = { version = "0.28.0", default-features = false, features = ["alloc", "recovery"], optional = true } sp-core-hashing = { path = "hashing", default-features = false, optional = true } -sp-runtime-interface = { path = "../runtime-interface", 
default-features = false} +sp-runtime-interface = { path = "../runtime-interface", default-features = false } # bls crypto -w3f-bls = { version = "0.1.3", default-features = false, optional = true} +w3f-bls = { version = "0.1.3", default-features = false, optional = true } # bandersnatch crypto -bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "cbc342e", default-features = false, optional = true } +bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "e9782f9", default-features = false, features = ["substrate-curves"], optional = true } [dev-dependencies] criterion = "0.4.0" serde_json = "1.0.108" +lazy_static = "1.4.0" +regex = "1.6.0" sp-core-hashing-proc-macro = { path = "hashing/proc-macro" } [[bench]] @@ -73,7 +76,7 @@ harness = false bench = false [features] -default = [ "std" ] +default = ["std"] std = [ "array-bytes", "bandersnatch_vrfs?/std", @@ -92,7 +95,6 @@ std = [ "hash256-std-hasher/std", "impl-serde/std", "itertools", - "lazy_static", "libsecp256k1/std", "log/std", "merlin/std", @@ -102,7 +104,6 @@ std = [ "primitive-types/serde", "primitive-types/std", "rand", - "regex", "scale-info/std", "schnorrkel/std", "secp256k1/global-context", @@ -155,9 +156,9 @@ full_crypto = [ # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = [ "w3f-bls" ] +bls-experimental = ["w3f-bls"] # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bandersnatch-experimental = [ "bandersnatch_vrfs" ] +bandersnatch-experimental = ["bandersnatch_vrfs"] diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..c6b5a065b6dca7a389e6409fe623018df3cfc083 --- /dev/null +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "sp-core-fuzz" +version = "0.0.0" +publish = false + +[lints] +workspace = true + +[package.metadata] +cargo-fuzz = true + +[dependencies] +lazy_static = "1.4.0" +libfuzzer-sys = "0.4" +regex = "1.10.2" + +sp-core = { path = ".." } + +[[bin]] +name = "fuzz_address_uri" +path = "fuzz_targets/fuzz_address_uri.rs" +test = false +doc = false diff --git a/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs new file mode 100644 index 0000000000000000000000000000000000000000..e2d9e2fc8b0822ae9d984683cdaaf71a97bd7c2e --- /dev/null +++ b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_main] + +extern crate libfuzzer_sys; +extern crate regex; +extern crate sp_core; + +use libfuzzer_sys::fuzz_target; +use regex::Regex; +use sp_core::crypto::AddressUri; + +lazy_static::lazy_static! { + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); +} + +fuzz_target!(|input: &str| { + let regex_result = SECRET_PHRASE_REGEX.captures(input); + let manual_result = AddressUri::parse(input); + assert_eq!(regex_result.is_some(), manual_result.is_ok()); + if manual_result.is_err() { + let _ = format!("{}", manual_result.as_ref().err().unwrap()); + } + if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) { + assert_eq!(regex_result.name("phrase").map(|p| p.as_str()), manual_result.phrase); + + let manual_paths = manual_result + .paths + .iter() + .map(|s| "/".to_string() + s) + .collect::>() + .join(""); + + assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths); + assert_eq!(regex_result.name("password").map(|pass| pass.as_str()), manual_result.pass); + } +}); diff --git a/substrate/primitives/core/hashing/Cargo.toml b/substrate/primitives/core/hashing/Cargo.toml index bd22bd79e7d5f83efdc20640bc01d5dd7933add2..011d312ba90fce623a9d351e8e349b053b676595 100644 --- a/substrate/primitives/core/hashing/Cargo.toml +++ b/substrate/primitives/core/hashing/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Primitive core crate hashing implementation." documentation = "https://docs.rs/sp-core-hashing" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -21,7 +24,7 @@ sha3 = { version = "0.10.0", default-features = false } twox-hash = { version = "1.6.3", default-features = false, features = ["digest_0_10"] } [features] -default = [ "std" ] +default = ["std"] std = [ "blake2b_simd/std", "byteorder/std", diff --git a/substrate/primitives/core/hashing/proc-macro/Cargo.toml b/substrate/primitives/core/hashing/proc-macro/Cargo.toml index 187b5559b931c27df9613b33232426fe77fe8bf0..d379fc38ffb9b88c5af170fe5cd1ea8feb5c8a34 100644 --- a/substrate/primitives/core/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/core/hashing/proc-macro/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "This crate provides procedural macros for calculating static hash." documentation = "https://docs.rs/sp-core-hashing-proc-macro" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,5 +20,5 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "parsing"] } -sp-core-hashing = { path = "..", default-features = false} +syn = { version = "2.0.41", features = ["full", "parsing"] } +sp-core-hashing = { path = "..", default-features = false } diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs new file mode 100644 index 0000000000000000000000000000000000000000..862747c9a4b69947905f384ab0be52e4b776c066 --- /dev/null +++ b/substrate/primitives/core/src/address_uri.rs @@ -0,0 +1,432 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Small utility for parsing an address URI. Replaces the regular expressions used previously. + +#[cfg(all(not(feature = "std"), any(feature = "serde", feature = "full_crypto")))] +use sp_std::{ + alloc::string::{String, ToString}, + vec::Vec, +}; + +/// A container for results of parsing the address URI string. +/// +/// Intended to be the equivalent of: +/// `Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$")` +/// which also handles soft and hard derivation paths: +/// `Regex::new(r"/(/?[^/]+)")` +/// +/// Example: +/// ``` +/// use sp_core::crypto::AddressUri; +/// let manual_result = AddressUri::parse("hello world/s//h///pass"); +/// assert_eq!( +/// manual_result.unwrap(), +/// AddressUri { phrase: Some("hello world"), paths: vec!["s", "/h"], pass: Some("pass") } +/// ); +/// ``` +#[derive(Debug, PartialEq)] +pub struct AddressUri<'a> { + /// Phrase, hexadecimal string, or ss58-compatible string. + pub phrase: Option<&'a str>, + /// Key derivation paths, ordered as in the input string. + pub paths: Vec<&'a str>, + /// Password. + pub pass: Option<&'a str>, +} + +/// Errors that can occur while parsing the address URI. +#[allow(missing_docs)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Error { + #[cfg_attr(feature = "std", error("Invalid character in phrase:\n{0}"))] + InvalidCharacterInPhrase(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Invalid character in password:\n{0}"))] + InvalidCharacterInPass(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Missing character in hard path:\n{0}"))] + MissingCharacterInHardPath(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Missing character in soft path:\n{0}"))] + MissingCharacterInSoftPath(InvalidCharacterInfo), +} + +impl Error { + /// Creates an instance of `Error::InvalidCharacterInPhrase` using given parameters. + pub fn in_phrase(input: &str, pos: usize) -> Self { + Self::InvalidCharacterInPhrase(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::InvalidCharacterInPass` using given parameters. + pub fn in_pass(input: &str, pos: usize) -> Self { + Self::InvalidCharacterInPass(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::MissingCharacterInHardPath` using given parameters. + pub fn in_hard_path(input: &str, pos: usize) -> Self { + Self::MissingCharacterInHardPath(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::MissingCharacterInSoftPath` using given parameters. + pub fn in_soft_path(input: &str, pos: usize) -> Self { + Self::MissingCharacterInSoftPath(InvalidCharacterInfo::new(input, pos)) + } +} + +/// Complementary error information. +/// +/// The structure contains complementary information about parsing the address URI string. +/// The string holds a copy of the original URI string; the 0-based integer indicates the position of the invalid +/// character.
+#[derive(Debug, PartialEq, Eq, Clone)] +pub struct InvalidCharacterInfo(String, usize); + +impl InvalidCharacterInfo { + fn new(info: &str, pos: usize) -> Self { + Self(info.to_string(), pos) + } +} + +impl sp_std::fmt::Display for InvalidCharacterInfo { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let (s, pos) = escape_string(&self.0, self.1); + write!(f, "{s}\n{i}^", i = sp_std::iter::repeat(" ").take(pos).collect::()) + } +} + +/// Escapes the control characters in given string, and recomputes the position if some characters +/// were actually escaped. +fn escape_string(input: &str, pos: usize) -> (String, usize) { + let mut out = String::with_capacity(2 * input.len()); + let mut out_pos = 0; + input + .chars() + .enumerate() + .map(|(i, c)| { + let esc = |c| (i, Some('\\'), c, 2); + match c { + '\t' => esc('t'), + '\n' => esc('n'), + '\r' => esc('r'), + '\x07' => esc('a'), + '\x08' => esc('b'), + '\x0b' => esc('v'), + '\x0c' => esc('f'), + _ => (i, None, c, 1), + } + }) + .for_each(|(i, maybe_escape, c, increment)| { + maybe_escape.map(|e| out.push(e)); + out.push(c); + if i < pos { + out_pos += increment; + } + }); + (out, out_pos) +} + +fn extract_prefix<'a>(input: &mut &'a str, is_allowed: &dyn Fn(char) -> bool) -> Option<&'a str> { + let output = input.trim_start_matches(is_allowed); + let prefix_len = input.len() - output.len(); + let prefix = if prefix_len > 0 { Some(&input[..prefix_len]) } else { None }; + *input = output; + prefix +} + +fn strip_prefix(input: &mut &str, prefix: &str) -> bool { + if let Some(stripped_input) = input.strip_prefix(prefix) { + *input = stripped_input; + true + } else { + false + } +} + +impl<'a> AddressUri<'a> { + /// Parses the given string. + pub fn parse(mut input: &'a str) -> Result { + let initial_input = input; + let initial_input_len = input.len(); + let phrase = extract_prefix(&mut input, &|ch: char| { + ch.is_ascii_digit() || ch.is_ascii_alphabetic() || ch == ' ' + }); + + let mut pass = None; + let mut paths = Vec::new(); + while !input.is_empty() { + let unstripped_input = input; + if strip_prefix(&mut input, "///") { + pass = Some(extract_prefix(&mut input, &|ch: char| ch != '\n').unwrap_or("")); + } else if strip_prefix(&mut input, "//") { + let path = extract_prefix(&mut input, &|ch: char| ch != '/') + .ok_or(Error::in_hard_path(initial_input, initial_input_len - input.len()))?; + assert!(path.len() > 0); + // hard path shall contain leading '/', so take it from unstripped input. + paths.push(&unstripped_input[1..path.len() + 2]); + } else if strip_prefix(&mut input, "/") { + paths.push( + extract_prefix(&mut input, &|ch: char| ch != '/').ok_or( + Error::in_soft_path(initial_input, initial_input_len - input.len()), + )?, + ); + } else { + return Err(if pass.is_some() { + Error::in_pass(initial_input, initial_input_len - input.len()) + } else { + Error::in_phrase(initial_input, initial_input_len - input.len()) + }); + } + } + + Ok(Self { phrase, paths, pass }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use regex::Regex; + + lazy_static::lazy_static! 
{ + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); + } + + fn check_with_regex(input: &str) { + let regex_result = SECRET_PHRASE_REGEX.captures(input); + let manual_result = AddressUri::parse(input); + assert_eq!(regex_result.is_some(), manual_result.is_ok()); + if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) { + assert_eq!( + regex_result.name("phrase").map(|phrase| phrase.as_str()), + manual_result.phrase + ); + + let manual_paths = manual_result + .paths + .iter() + .map(|s| "/".to_string() + s) + .collect::>() + .join(""); + + assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths); + assert_eq!( + regex_result.name("password").map(|phrase| phrase.as_str()), + manual_result.pass + ); + } + } + + fn check(input: &str, result: Result) { + let manual_result = AddressUri::parse(input); + assert_eq!(manual_result, result); + check_with_regex(input); + } + + #[test] + fn test00() { + check("///", Ok(AddressUri { phrase: None, pass: Some(""), paths: vec![] })); + } + + #[test] + fn test01() { + check("////////", Ok(AddressUri { phrase: None, pass: Some("/////"), paths: vec![] })) + } + + #[test] + fn test02() { + check( + "sdasd///asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: Some("asda"), paths: vec![] }), + ); + } + + #[test] + fn test03() { + check( + "sdasd//asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asda"] }), + ); + } + + #[test] + fn test04() { + check("sdasd//a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/a"] })); + } + + #[test] + fn test05() { + let input = "sdasd//"; + check(input, Err(Error::in_hard_path(input, 7))); + } + + #[test] + fn test06() { + check( + "sdasd/xx//asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/asda"] }), + ); + } + + #[test] + fn test07() { + check( + "sdasd/xx//a/b//c///pass", + Ok(AddressUri { + phrase: Some("sdasd"), + pass: Some("pass"), + paths: vec!["xx", "/a", "b", "/c"], + }), + ); + } + + #[test] + fn test08() { + check( + "sdasd/xx//a", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/a"] }), + ); + } + + #[test] + fn test09() { + let input = "sdasd/xx//"; + check(input, Err(Error::in_hard_path(input, 10))); + } + + #[test] + fn test10() { + check( + "sdasd/asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda"] }), + ); + } + + #[test] + fn test11() { + check( + "sdasd/asda//x", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda", "/x"] }), + ); + } + + #[test] + fn test12() { + check("sdasd/a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["a"] })); + } + + #[test] + fn test13() { + let input = "sdasd/"; + check(input, Err(Error::in_soft_path(input, 6))); + } + + #[test] + fn test14() { + check("sdasd", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec![] })); + } + + #[test] + fn test15() { + let input = "sdasd."; + check(input, Err(Error::in_phrase(input, 5))); + } + + #[test] + fn test16() { + let input = "sd.asd/asd.a"; + check(input, Err(Error::in_phrase(input, 2))); + } + + #[test] + fn test17() { + let input = "sd.asd//asd.a"; + check(input, Err(Error::in_phrase(input, 2))); + } + + #[test] + fn test18() { + check( + "sdasd/asd.a", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asd.a"] }), + ); + } + + #[test] + fn test19() { + check( + "sdasd//asd.a", + 
Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asd.a"] }), + ); + } + + #[test] + fn test20() { + let input = "///\n"; + check(input, Err(Error::in_pass(input, 3))); + } + + #[test] + fn test21() { + let input = "///a\n"; + check(input, Err(Error::in_pass(input, 4))); + } + + #[test] + fn test22() { + let input = "sd asd///asd.a\n"; + check(input, Err(Error::in_pass(input, 14))); + } + + #[test] + fn test_invalid_char_info_1() { + let expected = "01234\n^"; + let f = format!("{}", InvalidCharacterInfo::new("01234", 0)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_2() { + let expected = "01\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("01", 1)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_3() { + let expected = "01234\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("01234", 2)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_4() { + let expected = "012\\n456\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\n456", 3)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_5() { + let expected = "012\\n456\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\n456", 5)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_6() { + let expected = "012\\f456\\t89\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\x0c456\t89", 9)); + assert_eq!(expected, f); + } +} diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 78b7f12f9ffd4c5ce9faacb6990f80e091994cf7..463d49fd88901cc3eb7cafaab25ff94c18c64d71 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -20,13 +20,17 @@ //! //! The primitive can operate both as a regular VRF or as an anonymized Ring VRF. -#[cfg(feature = "std")] +#[cfg(feature = "serde")] use crate::crypto::Ss58Codec; use crate::crypto::{ ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, VrfPublic, }; #[cfg(feature = "full_crypto")] use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, VrfSecret}; +#[cfg(feature = "serde")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(all(not(feature = "std"), feature = "serde"))] +use sp_std::alloc::{format, string::String}; use bandersnatch_vrfs::CanonicalSerialize; #[cfg(feature = "full_crypto")] @@ -35,7 +39,7 @@ use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime_interface::pass_by::PassByInner; -use sp_std::{boxed::Box, vec::Vec}; +use sp_std::{vec, vec::Vec}; /// Identifier used to match public keys against bandersnatch-vrf keys. pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); @@ -44,23 +48,12 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); #[cfg(feature = "full_crypto")] pub const SIGNING_CTX: &[u8] = b"BandersnatchSigningContext"; -// Max ring domain size. -const RING_DOMAIN_SIZE: usize = 1024; - #[cfg(feature = "full_crypto")] -const SEED_SERIALIZED_LEN: usize = 32; - -// Short-Weierstrass form serialized sizes. -const PUBLIC_SERIALIZED_LEN: usize = 33; -const SIGNATURE_SERIALIZED_LEN: usize = 65; -const RING_SIGNATURE_SERIALIZED_LEN: usize = 755; -const PREOUT_SERIALIZED_LEN: usize = 33; +const SEED_SERIALIZED_SIZE: usize = 32; -// Max size of serialized ring-vrf context params. 
-// -// This size is dependent on the ring domain size and the actual value -// is equal to the SCALE encoded size of the `KZG` backend. -const RING_CONTEXT_SERIALIZED_LEN: usize = 147716; +const PUBLIC_SERIALIZED_SIZE: usize = 33; +const SIGNATURE_SERIALIZED_SIZE: usize = 65; +const PREOUT_SERIALIZED_SIZE: usize = 33; /// Bandersnatch public key. #[cfg_attr(feature = "full_crypto", derive(Hash))] @@ -77,16 +70,16 @@ const RING_CONTEXT_SERIALIZED_LEN: usize = 147716; MaxEncodedLen, TypeInfo, )] -pub struct Public(pub [u8; PUBLIC_SERIALIZED_LEN]); +pub struct Public(pub [u8; PUBLIC_SERIALIZED_SIZE]); -impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_LEN]> for Public { - fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_LEN]) -> Self { +impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_SIZE]> for Public { + fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_SIZE]) -> Self { Public(raw) } } -impl AsRef<[u8; PUBLIC_SERIALIZED_LEN]> for Public { - fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_LEN] { +impl AsRef<[u8; PUBLIC_SERIALIZED_SIZE]> for Public { + fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_SIZE] { &self.0 } } @@ -107,17 +100,17 @@ impl TryFrom<&[u8]> for Public { type Error = (); fn try_from(data: &[u8]) -> Result { - if data.len() != PUBLIC_SERIALIZED_LEN { + if data.len() != PUBLIC_SERIALIZED_SIZE { return Err(()) } - let mut r = [0u8; PUBLIC_SERIALIZED_LEN]; + let mut r = [0u8; PUBLIC_SERIALIZED_SIZE]; r.copy_from_slice(data); Ok(Self::unchecked_from(r)) } } impl ByteArray for Public { - const LEN: usize = PUBLIC_SERIALIZED_LEN; + const LEN: usize = PUBLIC_SERIALIZED_SIZE; } impl TraitPublic for Public {} @@ -142,16 +135,31 @@ impl sp_std::fmt::Debug for Public { } } +#[cfg(feature = "serde")] +impl Serialize for Public { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(&self.to_ss58check()) + } +} + +#[cfg(feature = "serde")] +impl<'de> Deserialize<'de> for Public { + fn deserialize>(deserializer: D) -> Result { + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } +} + /// Bandersnatch signature. /// /// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as transcript /// `label`. #[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo)] -pub struct Signature([u8; SIGNATURE_SERIALIZED_LEN]); +pub struct Signature([u8; SIGNATURE_SERIALIZED_SIZE]); -impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_LEN]> for Signature { - fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_LEN]) -> Self { +impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { + fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Self { Signature(raw) } } @@ -172,17 +180,17 @@ impl TryFrom<&[u8]> for Signature { type Error = (); fn try_from(data: &[u8]) -> Result { - if data.len() != SIGNATURE_SERIALIZED_LEN { + if data.len() != SIGNATURE_SERIALIZED_SIZE { return Err(()) } - let mut r = [0u8; SIGNATURE_SERIALIZED_LEN]; + let mut r = [0u8; SIGNATURE_SERIALIZED_SIZE]; r.copy_from_slice(data); Ok(Self::unchecked_from(r)) } } impl ByteArray for Signature { - const LEN: usize = SIGNATURE_SERIALIZED_LEN; + const LEN: usize = SIGNATURE_SERIALIZED_SIZE; } impl CryptoType for Signature { @@ -204,7 +212,7 @@ impl sp_std::fmt::Debug for Signature { /// The raw secret seed, which can be used to reconstruct the secret [`Pair`]. 
#[cfg(feature = "full_crypto")] -type Seed = [u8; SEED_SERIALIZED_LEN]; +type Seed = [u8; SEED_SERIALIZED_SIZE]; /// Bandersnatch secret key. #[cfg(feature = "full_crypto")] @@ -232,10 +240,10 @@ impl TraitPair for Pair { /// /// The slice must be 32 bytes long or it will return an error. fn from_seed_slice(seed_slice: &[u8]) -> Result { - if seed_slice.len() != SEED_SERIALIZED_LEN { + if seed_slice.len() != SEED_SERIALIZED_SIZE { return Err(SecretStringError::InvalidSeedLength) } - let mut seed = [0; SEED_SERIALIZED_LEN]; + let mut seed = [0; SEED_SERIALIZED_SIZE]; seed.copy_from_slice(seed_slice); let secret = SecretKey::from_seed(&seed); Ok(Pair { secret, seed }) @@ -266,7 +274,7 @@ impl TraitPair for Pair { fn public(&self) -> Public { let public = self.secret.to_public(); - let mut raw = [0; PUBLIC_SERIALIZED_LEN]; + let mut raw = [0; PUBLIC_SERIALIZED_SIZE]; public .serialize_compressed(raw.as_mut_slice()) .expect("serialization length is constant and checked by test; qed"); @@ -287,7 +295,7 @@ impl TraitPair for Pair { fn verify>(signature: &Signature, data: M, public: &Public) -> bool { let data = vrf::VrfSignData::new_unchecked(SIGNING_CTX, &[data.as_ref()], None); let signature = - vrf::VrfSignature { signature: *signature, outputs: vrf::VrfIosVec::default() }; + vrf::VrfSignature { signature: *signature, pre_outputs: vrf::VrfIosVec::default() }; public.vrf_verify(&data, &signature) } @@ -311,18 +319,18 @@ pub mod vrf { ThinVrfSignature, Transcript, }; - /// Max number of inputs/outputs which can be handled by the VRF signing procedures. + /// Max number of inputs/pre-outputs which can be handled by the VRF signing procedures. /// /// The number is quite arbitrary and chosen to fulfill the use cases found so far. /// If required it can be extended in the future. pub const MAX_VRF_IOS: u32 = 3; - /// Bounded vector used for VRF inputs and outputs. + /// Bounded vector used for VRF inputs and pre-outputs. /// /// Can contain at most [`MAX_VRF_IOS`] elements. pub type VrfIosVec = BoundedVec>; - /// VRF input to construct a [`VrfOutput`] instance and embeddable in [`VrfSignData`]. + /// VRF input to construct a [`VrfPreOutput`] instance and embeddable in [`VrfSignData`]. #[derive(Clone, Debug)] pub struct VrfInput(pub(super) bandersnatch_vrfs::VrfInput); @@ -334,17 +342,17 @@ pub mod vrf { } } - /// VRF (pre)output derived from [`VrfInput`] using a [`VrfSecret`]. + /// VRF pre-output derived from [`VrfInput`] using a [`VrfSecret`]. /// /// This object is used to produce an arbitrary number of verifiable pseudo random /// bytes and is often called pre-output to emphasize that this is not the actual /// output of the VRF but an object capable of generating the output. 
#[derive(Clone, Debug, PartialEq, Eq)] - pub struct VrfOutput(pub(super) bandersnatch_vrfs::VrfPreOut); + pub struct VrfPreOutput(pub(super) bandersnatch_vrfs::VrfPreOut); - impl Encode for VrfOutput { + impl Encode for VrfPreOutput { fn encode(&self) -> Vec { - let mut bytes = [0; PREOUT_SERIALIZED_LEN]; + let mut bytes = [0; PREOUT_SERIALIZED_SIZE]; self.0 .serialize_compressed(bytes.as_mut_slice()) .expect("serialization length is constant and checked by test; qed"); @@ -352,23 +360,26 @@ pub mod vrf { } } - impl Decode for VrfOutput { + impl Decode for VrfPreOutput { fn decode(i: &mut R) -> Result { - let buf = <[u8; PREOUT_SERIALIZED_LEN]>::decode(i)?; - let preout = bandersnatch_vrfs::VrfPreOut::deserialize_compressed(buf.as_slice()) - .map_err(|_| "vrf-preout decode error: bad preout")?; - Ok(VrfOutput(preout)) + let buf = <[u8; PREOUT_SERIALIZED_SIZE]>::decode(i)?; + let preout = + bandersnatch_vrfs::VrfPreOut::deserialize_compressed_unchecked(buf.as_slice()) + .map_err(|_| "vrf-preout decode error: bad preout")?; + Ok(VrfPreOutput(preout)) } } - impl MaxEncodedLen for VrfOutput { + impl EncodeLike for VrfPreOutput {} + + impl MaxEncodedLen for VrfPreOutput { fn max_encoded_len() -> usize { - <[u8; PREOUT_SERIALIZED_LEN]>::max_encoded_len() + <[u8; PREOUT_SERIALIZED_SIZE]>::max_encoded_len() } } - impl TypeInfo for VrfOutput { - type Identity = [u8; PREOUT_SERIALIZED_LEN]; + impl TypeInfo for VrfPreOutput { + type Identity = [u8; PREOUT_SERIALIZED_SIZE]; fn type_info() -> scale_info::Type { Self::Identity::type_info() @@ -384,21 +395,21 @@ pub mod vrf { /// A good explaination of the topic can be found in Merlin [docs](https://merlin.cool/) /// /// The `inputs` is a sequence of [`VrfInput`]s which, during the signing procedure, are - /// first transformed to [`VrfOutput`]s. Both inputs and outputs are then appended to + /// first transformed to [`VrfPreOutput`]s. Both inputs and pre-outputs are then appended to /// the transcript before signing the Fiat-Shamir transform result (the challenge). /// /// In practice, as a user, all these technical details can be easily ignored. /// What is important to remember is: /// - *Transcript* is an object defining the protocol and used to produce the signature. This - /// object doesn't influence the `VrfOutput`s values. - /// - *Vrf inputs* is some additional data which is used to produce *vrf outputs*. This data + /// object doesn't influence the `VrfPreOutput`s values. + /// - *Vrf inputs* is some additional data which is used to produce *vrf pre-outputs*. This data /// will contribute to the signature as well. #[derive(Clone)] pub struct VrfSignData { - /// VRF inputs to be signed. - pub inputs: VrfIosVec, /// Associated protocol transcript. pub transcript: Transcript, + /// VRF inputs to be signed. + pub inputs: VrfIosVec, } impl VrfSignData { @@ -462,22 +473,22 @@ pub mod vrf { /// VRF signature. /// - /// Includes both the transcript `signature` and the `outputs` generated from the + /// Includes both the transcript `signature` and the `pre-outputs` generated from the /// [`VrfSignData::inputs`]. /// /// Refer to [`VrfSignData`] for more details. #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct VrfSignature { - /// VRF (pre)outputs. - pub outputs: VrfIosVec, /// Transcript signature. pub signature: Signature, + /// VRF pre-outputs. 
+ pub pre_outputs: VrfIosVec, } #[cfg(feature = "full_crypto")] impl VrfCrypto for Pair { type VrfInput = VrfInput; - type VrfOutput = VrfOutput; + type VrfPreOutput = VrfPreOutput; type VrfSignData = VrfSignData; type VrfSignature = VrfSignature; } @@ -496,15 +507,15 @@ pub mod vrf { } } - fn vrf_output(&self, input: &Self::VrfInput) -> Self::VrfOutput { - let output = self.secret.vrf_preout(&input.0); - VrfOutput(output) + fn vrf_pre_output(&self, input: &Self::VrfInput) -> Self::VrfPreOutput { + let pre_output = self.secret.vrf_preout(&input.0); + VrfPreOutput(pre_output) } } impl VrfCrypto for Public { type VrfInput = VrfInput; - type VrfOutput = VrfOutput; + type VrfPreOutput = VrfPreOutput; type VrfSignData = VrfSignData; type VrfSignature = VrfSignature; } @@ -512,12 +523,12 @@ pub mod vrf { impl VrfPublic for Public { fn vrf_verify(&self, data: &Self::VrfSignData, signature: &Self::VrfSignature) -> bool { const _: () = assert!(MAX_VRF_IOS == 3, "`MAX_VRF_IOS` expected to be 3"); - let outputs_len = signature.outputs.len(); - if outputs_len != data.inputs.len() { + let pre_outputs_len = signature.pre_outputs.len(); + if pre_outputs_len != data.inputs.len() { return false } // Workaround to overcome backend signature generic over the number of IOs. - match outputs_len { + match pre_outputs_len { 0 => self.vrf_verify_gen::<0>(data, signature), 1 => self.vrf_verify_gen::<1>(data, signature), 2 => self.vrf_verify_gen::<2>(data, signature), @@ -535,11 +546,12 @@ pub mod vrf { let thin_signature: ThinVrfSignature = self.secret.sign_thin_vrf(data.transcript.clone(), &ios); - let outputs: Vec<_> = thin_signature.preouts.into_iter().map(VrfOutput).collect(); - let outputs = VrfIosVec::truncate_from(outputs); + let pre_outputs: Vec<_> = + thin_signature.preouts.into_iter().map(VrfPreOutput).collect(); + let pre_outputs = VrfIosVec::truncate_from(pre_outputs); let mut signature = - VrfSignature { signature: Signature([0; SIGNATURE_SERIALIZED_LEN]), outputs }; + VrfSignature { signature: Signature([0; SIGNATURE_SERIALIZED_SIZE]), pre_outputs }; thin_signature .proof @@ -567,20 +579,20 @@ pub mod vrf { data: &VrfSignData, signature: &VrfSignature, ) -> bool { - let Ok(public) = PublicKey::deserialize_compressed(self.as_slice()) else { + let Ok(public) = PublicKey::deserialize_compressed_unchecked(self.as_slice()) else { return false }; let preouts: [bandersnatch_vrfs::VrfPreOut; N] = - core::array::from_fn(|i| signature.outputs[i].0); + core::array::from_fn(|i| signature.pre_outputs[i].0); // Deserialize only the proof, the rest has already been deserialized // This is another hack used because backend signature type is generic over // the number of ios. - let Ok(proof) = - ThinVrfSignature::<0>::deserialize_compressed(signature.signature.as_ref()) - .map(|s| s.proof) - else { + let Ok(proof) = ThinVrfSignature::<0>::deserialize_compressed_unchecked( + signature.signature.as_ref(), + ) + .map(|s| s.proof) else { return false }; let signature = ThinVrfSignature { proof, preouts }; @@ -591,7 +603,7 @@ pub mod vrf { } } - impl VrfOutput { + impl VrfPreOutput { /// Generate an arbitrary number of bytes from the given `context` and VRF `input`. 
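For reference, a minimal test-style sketch of the renamed API (not part of the patch). It assumes it sits next to the existing tests of this module, and that `VrfSignData::new_unchecked` accepts an iterator of `VrfInput`s as its last argument, mirroring the `None` passed in `verify` above; the labels and domains are illustrative.

#[test]
fn vrf_pre_outputs_round_trip_sketch() {
    // Build one input and a sign-data object carrying it.
    let pair = Pair::from_seed(&[0xcb; SEED_SERIALIZED_SIZE]);
    let input = VrfInput::new(b"example-domain", b"example-data");
    let data = VrfSignData::new_unchecked(b"example-label", &[b"extra-transcript-data"], [input.clone()]);

    // Signing yields one pre-output per input, now exposed as `pre_outputs`.
    let signature = pair.vrf_sign(&data);
    assert_eq!(signature.pre_outputs.len(), 1);

    // Verification and byte derivation work off the same pre-output.
    assert!(pair.public().vrf_verify(&data, &signature));
    let _bytes = signature.pre_outputs[0].make_bytes::<32>(b"out-ctx", &input);
}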
pub fn make_bytes( &self, @@ -609,16 +621,96 @@ pub mod vrf { pub mod ring_vrf { use super::{vrf::*, *}; pub use bandersnatch_vrfs::ring::{RingProof, RingProver, RingVerifier, KZG}; - use bandersnatch_vrfs::{CanonicalDeserialize, PublicKey}; + use bandersnatch_vrfs::{ring::VerifierKey, CanonicalDeserialize, PublicKey}; + + /// Overhead in the domain size with respect to the supported ring size. + /// + /// Some bits of the domain are reserved for the zk-proof to work. + pub const RING_DOMAIN_OVERHEAD: u32 = 257; + + // Max size of serialized ring-vrf context given `domain_len`. + pub(crate) const fn ring_context_serialized_size(domain_len: u32) -> usize { + // const G1_POINT_COMPRESSED_SIZE: usize = 48; + // const G2_POINT_COMPRESSED_SIZE: usize = 96; + const G1_POINT_UNCOMPRESSED_SIZE: usize = 96; + const G2_POINT_UNCOMPRESSED_SIZE: usize = 192; + const OVERHEAD_SIZE: usize = 20; + const G2_POINTS_NUM: usize = 2; + let g1_points_num = 3 * domain_len as usize + 1; + + OVERHEAD_SIZE + + g1_points_num * G1_POINT_UNCOMPRESSED_SIZE + + G2_POINTS_NUM * G2_POINT_UNCOMPRESSED_SIZE + } + + pub(crate) const RING_VERIFIER_DATA_SERIALIZED_SIZE: usize = 388; + pub(crate) const RING_SIGNATURE_SERIALIZED_SIZE: usize = 755; + + /// Remove as soon as serialization is implemented by the backend. + pub struct RingVerifierData { + /// Domain size. + pub domain_size: u32, + /// Verifier key. + pub verifier_key: VerifierKey, + } + + impl From for RingVerifier { + fn from(vd: RingVerifierData) -> RingVerifier { + bandersnatch_vrfs::ring::make_ring_verifier(vd.verifier_key, vd.domain_size as usize) + } + } + + impl Encode for RingVerifierData { + fn encode(&self) -> Vec { + const ERR_STR: &str = "serialization length is constant and checked by test; qed"; + let mut buf = [0; RING_VERIFIER_DATA_SERIALIZED_SIZE]; + self.domain_size.serialize_compressed(&mut buf[..4]).expect(ERR_STR); + self.verifier_key.serialize_compressed(&mut buf[4..]).expect(ERR_STR); + buf.encode() + } + } + + impl Decode for RingVerifierData { + fn decode(i: &mut R) -> Result { + const ERR_STR: &str = "serialization length is constant and checked by test; qed"; + let buf = <[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::decode(i)?; + let domain_size = + ::deserialize_compressed_unchecked(&mut &buf[..4]) + .expect(ERR_STR); + let verifier_key = ::deserialize_compressed_unchecked(&mut &buf[4..]).expect(ERR_STR); + + Ok(RingVerifierData { domain_size, verifier_key }) + } + } + + impl EncodeLike for RingVerifierData {} + + impl MaxEncodedLen for RingVerifierData { + fn max_encoded_len() -> usize { + <[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::max_encoded_len() + } + } + + impl TypeInfo for RingVerifierData { + type Identity = [u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]; - /// Context used to produce ring signatures. + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } + } + + /// Context used to construct ring prover and verifier. + /// + /// Generic parameter `D` represents the ring domain size and drives + /// the max number of supported ring members [`RingContext::max_keyset_size`] + /// which is equal to `D - RING_DOMAIN_OVERHEAD`. #[derive(Clone)] - pub struct RingContext(KZG); + pub struct RingContext(KZG); - impl RingContext { - /// Build an dummy instance used for testing purposes. + impl RingContext { + /// Build a dummy instance for testing purposes.
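To make the size formula above concrete, here is the arithmetic for the 1024-entry domain used by the tests at the bottom of this file (illustrative only; nothing below is added by the patch):

// g1_points_num = 3 * 1024 + 1 = 3073
// ring_context_serialized_size(1024)
//     = 20 + 3073 * 96 + 2 * 192
//     = 20 + 295_008 + 384
//     = 295_412 bytes (uncompressed),
// while the usable ring size is 1024 - RING_DOMAIN_OVERHEAD = 1024 - 257 = 767 members.
// At module scope this could be pinned down with:
//     const _: () = assert!(ring_context_serialized_size(1024) == 295_412);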
pub fn new_testing() -> Self { - Self(KZG::testing_kzg_setup([0; 32], RING_DOMAIN_SIZE as u32)) + Self(KZG::testing_kzg_setup([0; 32], D)) } /// Get the keyset max size. @@ -630,7 +722,7 @@ pub mod ring_vrf { pub fn prover(&self, public_keys: &[Public], public_idx: usize) -> Option { let mut pks = Vec::with_capacity(public_keys.len()); for public_key in public_keys { - let pk = PublicKey::deserialize_compressed(public_key.as_slice()).ok()?; + let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?; pks.push(pk.0.into()); } @@ -643,7 +735,7 @@ pub mod ring_vrf { pub fn verifier(&self, public_keys: &[Public]) -> Option { let mut pks = Vec::with_capacity(public_keys.len()); for public_key in public_keys { - let pk = PublicKey::deserialize_compressed(public_key.as_slice()).ok()?; + let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?; pks.push(pk.0.into()); } @@ -651,50 +743,70 @@ pub mod ring_vrf { let ring_verifier = self.0.init_ring_verifier(verifier_key); Some(ring_verifier) } + + /// Information required for a lazy construction of a ring verifier. + pub fn verifier_data(&self, public_keys: &[Public]) -> Option { + let mut pks = Vec::with_capacity(public_keys.len()); + for public_key in public_keys { + let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?; + pks.push(pk.0.into()); + } + Some(RingVerifierData { + verifier_key: self.0.verifier_key(pks), + domain_size: self.0.domain_size, + }) + } } - impl Encode for RingContext { + impl Encode for RingContext { fn encode(&self) -> Vec { - let mut buf = Box::new([0; RING_CONTEXT_SERIALIZED_LEN]); + let mut buf = vec![0; ring_context_serialized_size(D)]; self.0 - .serialize_compressed(buf.as_mut_slice()) + .serialize_uncompressed(buf.as_mut_slice()) .expect("serialization length is constant and checked by test; qed"); - buf.encode() + buf } } - impl Decode for RingContext { - fn decode(i: &mut R) -> Result { - let buf = >::decode(i)?; - let kzg = - KZG::deserialize_compressed(buf.as_slice()).map_err(|_| "KZG decode error")?; + impl Decode for RingContext { + fn decode(input: &mut R) -> Result { + let mut buf = vec![0; ring_context_serialized_size(D)]; + input.read(&mut buf[..])?; + let kzg = KZG::deserialize_uncompressed_unchecked(buf.as_slice()) + .map_err(|_| "KZG decode error")?; Ok(RingContext(kzg)) } } - impl EncodeLike for RingContext {} + impl EncodeLike for RingContext {} - impl MaxEncodedLen for RingContext { + impl MaxEncodedLen for RingContext { fn max_encoded_len() -> usize { - <[u8; RING_CONTEXT_SERIALIZED_LEN]>::max_encoded_len() + ring_context_serialized_size(D) } } - impl TypeInfo for RingContext { - type Identity = [u8; RING_CONTEXT_SERIALIZED_LEN]; + impl TypeInfo for RingContext { + type Identity = Self; fn type_info() -> scale_info::Type { - Self::Identity::type_info() + let path = scale_info::Path::new("RingContext", module_path!()); + let array_type_def = scale_info::TypeDefArray { + len: ring_context_serialized_size(D) as u32, + type_param: scale_info::MetaType::new::(), + }; + let type_def = scale_info::TypeDef::Array(array_type_def); + scale_info::Type { path, type_params: Vec::new(), type_def, docs: Vec::new() } } } /// Ring VRF signature. #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct RingVrfSignature { - /// VRF (pre)outputs. - pub outputs: VrfIosVec, /// Ring signature. 
- pub signature: [u8; RING_SIGNATURE_SERIALIZED_LEN], + pub signature: [u8; RING_SIGNATURE_SERIALIZED_SIZE], + /// VRF pre-outputs. + pub pre_outputs: VrfIosVec, } #[cfg(feature = "full_crypto")] @@ -727,11 +839,12 @@ pub mod ring_vrf { bandersnatch_vrfs::RingProver { ring_prover: prover, secret: &self.secret } .sign_ring_vrf(data.transcript.clone(), &ios); - let outputs: Vec<_> = ring_signature.preouts.into_iter().map(VrfOutput).collect(); - let outputs = VrfIosVec::truncate_from(outputs); + let pre_outputs: Vec<_> = + ring_signature.preouts.into_iter().map(VrfPreOutput).collect(); + let pre_outputs = VrfIosVec::truncate_from(pre_outputs); let mut signature = - RingVrfSignature { outputs, signature: [0; RING_SIGNATURE_SERIALIZED_LEN] }; + RingVrfSignature { pre_outputs, signature: [0; RING_SIGNATURE_SERIALIZED_SIZE] }; ring_signature .proof @@ -749,7 +862,7 @@ pub mod ring_vrf { /// from which the [`RingVerifier`] has been constructed. pub fn ring_vrf_verify(&self, data: &VrfSignData, verifier: &RingVerifier) -> bool { const _: () = assert!(MAX_VRF_IOS == 3, "`MAX_VRF_IOS` expected to be 3"); - let preouts_len = self.outputs.len(); + let preouts_len = self.pre_outputs.len(); if preouts_len != data.inputs.len() { return false } @@ -769,7 +882,7 @@ pub mod ring_vrf { verifier: &RingVerifier, ) -> bool { let Ok(vrf_signature) = - bandersnatch_vrfs::RingVrfSignature::<0>::deserialize_compressed( + bandersnatch_vrfs::RingVrfSignature::<0>::deserialize_compressed_unchecked( self.signature.as_slice(), ) else { @@ -777,7 +890,7 @@ pub mod ring_vrf { }; let preouts: [bandersnatch_vrfs::VrfPreOut; N] = - core::array::from_fn(|i| self.outputs[i].0); + core::array::from_fn(|i| self.pre_outputs[i].0); let signature = bandersnatch_vrfs::RingVrfSignature { proof: vrf_signature.proof, preouts }; @@ -795,7 +908,11 @@ pub mod ring_vrf { mod tests { use super::{ring_vrf::*, vrf::*, *}; use crate::crypto::{VrfPublic, VrfSecret, DEV_PHRASE}; - const DEV_SEED: &[u8; SEED_SERIALIZED_LEN] = &[0xcb; SEED_SERIALIZED_LEN]; + + const DEV_SEED: &[u8; SEED_SERIALIZED_SIZE] = &[0xcb; SEED_SERIALIZED_SIZE]; + const TEST_DOMAIN_SIZE: u32 = 1024; + + type TestRingContext = RingContext; #[allow(unused)] fn b2h(bytes: &[u8]) -> String { @@ -808,9 +925,10 @@ mod tests { #[test] fn backend_assumptions_sanity_check() { - let kzg = KZG::testing_kzg_setup([0; 32], RING_DOMAIN_SIZE as u32); - assert_eq!(kzg.max_keyset_size(), RING_DOMAIN_SIZE - 257); - assert_eq!(kzg.compressed_size(), RING_CONTEXT_SERIALIZED_LEN); + let kzg = KZG::testing_kzg_setup([0; 32], TEST_DOMAIN_SIZE); + assert_eq!(kzg.max_keyset_size() as u32, TEST_DOMAIN_SIZE - RING_DOMAIN_OVERHEAD); + + assert_eq!(kzg.uncompressed_size(), ring_context_serialized_size(TEST_DOMAIN_SIZE)); let pks: Vec<_> = (0..16) .map(|i| SecretKey::from_seed(&[i as u8; 32]).to_public().0.into()) @@ -819,11 +937,14 @@ mod tests { let secret = SecretKey::from_seed(&[0u8; 32]); let public = secret.to_public(); - assert_eq!(public.compressed_size(), PUBLIC_SERIALIZED_LEN); + assert_eq!(public.compressed_size(), PUBLIC_SERIALIZED_SIZE); let input = VrfInput::new(b"foo", &[]); let preout = secret.vrf_preout(&input.0); - assert_eq!(preout.compressed_size(), PREOUT_SERIALIZED_LEN); + assert_eq!(preout.compressed_size(), PREOUT_SERIALIZED_SIZE); + + let verifier_key = kzg.verifier_key(pks.clone()); + assert_eq!(verifier_key.compressed_size() + 4, RING_VERIFIER_DATA_SERIALIZED_SIZE); let prover_key = kzg.prover_key(pks); let ring_prover = kzg.init_ring_prover(prover_key, 0); @@ -832,12 +953,12 
@@ mod tests { let thin_signature: bandersnatch_vrfs::ThinVrfSignature<0> = secret.sign_thin_vrf(data.transcript.clone(), &[]); - assert_eq!(thin_signature.compressed_size(), SIGNATURE_SERIALIZED_LEN); + assert_eq!(thin_signature.compressed_size(), SIGNATURE_SERIALIZED_SIZE); let ring_signature: bandersnatch_vrfs::RingVrfSignature<0> = bandersnatch_vrfs::RingProver { ring_prover: &ring_prover, secret: &secret } .sign_ring_vrf(data.transcript.clone(), &[]); - assert_eq!(ring_signature.compressed_size(), RING_SIGNATURE_SERIALIZED_LEN); + assert_eq!(ring_signature.compressed_size(), RING_SIGNATURE_SERIALIZED_SIZE); } #[test] @@ -919,11 +1040,11 @@ mod tests { let signature = pair.vrf_sign(&data); let o10 = pair.make_bytes::<32>(b"ctx1", &i1); - let o11 = signature.outputs[0].make_bytes::<32>(b"ctx1", &i1); + let o11 = signature.pre_outputs[0].make_bytes::<32>(b"ctx1", &i1); assert_eq!(o10, o11); let o20 = pair.make_bytes::<48>(b"ctx2", &i2); - let o21 = signature.outputs[1].make_bytes::<48>(b"ctx2", &i2); + let o21 = signature.pre_outputs[1].make_bytes::<48>(b"ctx2", &i2); assert_eq!(o20, o21); } @@ -941,7 +1062,8 @@ mod tests { let bytes = expected.encode(); - let expected_len = data.inputs.len() * PREOUT_SERIALIZED_LEN + SIGNATURE_SERIALIZED_LEN + 1; + let expected_len = + data.inputs.len() * PREOUT_SERIALIZED_SIZE + SIGNATURE_SERIALIZED_SIZE + 1; assert_eq!(bytes.len(), expected_len); let decoded = VrfSignature::decode(&mut bytes.as_slice()).unwrap(); @@ -958,7 +1080,7 @@ mod tests { #[test] fn ring_vrf_sign_verify() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); assert!(pks.len() <= ring_ctx.max_keyset_size()); @@ -984,7 +1106,7 @@ mod tests { #[test] fn ring_vrf_sign_verify_with_out_of_ring_key() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); let pair = Pair::from_seed(DEV_SEED); @@ -1003,7 +1125,7 @@ mod tests { #[test] fn ring_vrf_make_bytes_matches() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); assert!(pks.len() <= ring_ctx.max_keyset_size()); @@ -1022,17 +1144,17 @@ mod tests { let signature = pair.ring_vrf_sign(&data, &prover); let o10 = pair.make_bytes::<32>(b"ctx1", &i1); - let o11 = signature.outputs[0].make_bytes::<32>(b"ctx1", &i1); + let o11 = signature.pre_outputs[0].make_bytes::<32>(b"ctx1", &i1); assert_eq!(o10, o11); let o20 = pair.make_bytes::<48>(b"ctx2", &i2); - let o21 = signature.outputs[1].make_bytes::<48>(b"ctx2", &i2); + let o21 = signature.pre_outputs[1].make_bytes::<48>(b"ctx2", &i2); assert_eq!(o20, o21); } #[test] fn encode_decode_ring_vrf_signature() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); assert!(pks.len() <= ring_ctx.max_keyset_size()); @@ -1055,7 +1177,7 @@ mod tests { let bytes = expected.encode(); let expected_len = - data.inputs.len() * PREOUT_SERIALIZED_LEN + RING_SIGNATURE_SERIALIZED_LEN + 1; + data.inputs.len() * PREOUT_SERIALIZED_SIZE + RING_SIGNATURE_SERIALIZED_SIZE + 1; assert_eq!(bytes.len(), expected_len); let decoded = RingVrfSignature::decode(&mut bytes.as_slice()).unwrap(); @@ -1064,14 
+1186,36 @@ mod tests { #[test] fn encode_decode_ring_vrf_context() { - let ctx1 = RingContext::new_testing(); + let ctx1 = TestRingContext::new_testing(); let enc1 = ctx1.encode(); - assert_eq!(enc1.len(), RingContext::max_encoded_len()); + let _ti = ::type_info(); + + assert_eq!(enc1.len(), ring_context_serialized_size(TEST_DOMAIN_SIZE)); + assert_eq!(enc1.len(), TestRingContext::max_encoded_len()); - let ctx2 = RingContext::decode(&mut enc1.as_slice()).unwrap(); + let ctx2 = TestRingContext::decode(&mut enc1.as_slice()).unwrap(); let enc2 = ctx2.encode(); assert_eq!(enc1, enc2); } + + #[test] + fn encode_decode_verifier_data() { + let ring_ctx = TestRingContext::new_testing(); + + let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + assert!(pks.len() <= ring_ctx.max_keyset_size()); + + let verifier_data = ring_ctx.verifier_data(&pks).unwrap(); + let enc1 = verifier_data.encode(); + + assert_eq!(enc1.len(), RING_VERIFIER_DATA_SERIALIZED_SIZE); + assert_eq!(RingVerifierData::max_encoded_len(), RING_VERIFIER_DATA_SERIALIZED_SIZE); + + let vd2 = RingVerifierData::decode(&mut enc1.as_slice()).unwrap(); + let enc2 = vd2.encode(); + + assert_eq!(enc1, enc2); + } } diff --git a/substrate/primitives/core/src/const_hex2array.rs b/substrate/primitives/core/src/const_hex2array.rs new file mode 100644 index 0000000000000000000000000000000000000000..cd6071028e6cb7c1dedf29a708c603cf0bf6b8e2 --- /dev/null +++ b/substrate/primitives/core/src/const_hex2array.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Provides a const function for converting a hex string to a `u8` array at compile time, when used +//! in the proper context. + +/// Provides a const array from given string literal. +/// +/// Valid characters are `[0-9a-fA-F]`, and the hex string should not start +/// with the `0x` prefix. +#[macro_export] +macro_rules! hex2array { + ($input:expr) => {{ + const BYTES: [u8; $input.len() / 2] = $crate::const_hex2array::private_hex2array($input); + BYTES + }}; +} + +/// Generates array from (static) string literal. +/// +/// Valid characters are `[0-9a-fA-F]`, and the hex string should not start +/// with the `0x` prefix. +/// +/// # Panics +/// +/// The function will panic at compile time when used in a const context if: +/// - The given hex string has an invalid length. +/// - It contains invalid characters. +/// +/// The function will panic at runtime when used in a non-const context if the above conditions are +/// met. 
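A small usage sketch of the macro (the constant names are illustrative): the expansion happens in a const context, so a malformed literal is rejected at build time rather than at run time.

const TWO_BYTES: [u8; 2] = hex2array!("0a10"); // == [10, 16], as in the tests below
const ALICE_PREFIX: [u8; 4] = hex2array!("9c8af77d"); // == [156, 138, 247, 125]
// const BAD: [u8; 1] = hex2array!("0a1"); // fails to build: "hex string length is not valid"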
+#[doc(hidden)] +pub const fn private_hex2array(hex: &str) -> [u8; N] { + const fn c2b(c: u8) -> u8 { + match c as char { + '0'..='9' => c - b'0', + 'a'..='f' => c - (b'a' - 10), + 'A'..='F' => c - (b'A' - 10), + _ => panic!("hex string contains invalid character"), + } + } + let mut output = [0; N]; + let mut i = 0; + if hex.len() != 2 * N { + panic!("hex string length is not valid"); + } + while i < N { + output[i] = 16 * c2b(hex.as_bytes()[2 * i]) + c2b(hex.as_bytes()[2 * i + 1]); + i += 1; + } + output +} + +#[cfg(test)] +mod testh2b { + use super::private_hex2array; + + #[test] + fn t00() { + const T0: [u8; 0] = private_hex2array(""); + const EMPTY: [u8; 0] = []; + assert_eq!(T0, EMPTY); + } + + macro_rules! test_byte { + ($a:expr, $b:expr) => {{ + const X: [u8; 1] = private_hex2array($a); + assert_eq!(X, [$b]); + }}; + } + + #[test] + fn t01() { + test_byte!("00", 0); + test_byte!("01", 1); + test_byte!("02", 2); + test_byte!("03", 3); + test_byte!("04", 4); + test_byte!("05", 5); + test_byte!("06", 6); + test_byte!("07", 7); + test_byte!("08", 8); + test_byte!("09", 9); + test_byte!("0a", 10); + test_byte!("0A", 10); + test_byte!("0b", 11); + test_byte!("0B", 11); + test_byte!("0c", 12); + test_byte!("0C", 12); + test_byte!("0d", 13); + test_byte!("0D", 13); + test_byte!("0e", 14); + test_byte!("0E", 14); + test_byte!("0f", 15); + test_byte!("0F", 15); + } + + #[test] + fn t02() { + const T0: [u8; 2] = private_hex2array("0a10"); + assert_eq!(T0, [10, 16]); + const T1: [u8; 2] = private_hex2array("4545"); + assert_eq!(T1, [69, 69]); + } + + #[test] + fn t02m() { + assert_eq!(hex2array!("0a10"), [10, 16]); + assert_eq!(hex2array!("4545"), [69, 69]); + assert_eq!( + hex2array!("000102030405060708090a0b0c0d0e0f"), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + } + + #[test] + fn t16() { + const T16: [u8; 16] = private_hex2array("000102030405060708090a0b0c0d0e0f"); + + assert_eq!(T16, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + } + + #[test] + fn t33() { + const T33: [u8; 33] = + private_hex2array("9c8af77d3a4e3f6f076853922985b9e6724fc9675329087f47aff1ceaaae772180"); + + assert_eq!( + T33, + [ + 156, 138, 247, 125, 58, 78, 63, 111, 7, 104, 83, 146, 41, 133, 185, 230, 114, 79, + 201, 103, 83, 41, 8, 127, 71, 175, 241, 206, 170, 174, 119, 33, 128 + ] + ); + } + + #[test] + #[should_panic = "hex string length is not valid"] + fn t_panic_incorrect_length2() { + let _ = private_hex2array::<2>("454"); + } + + #[test] + #[should_panic = "hex string contains invalid character"] + fn t_panic_invalid_character() { + let _ = private_hex2array::<2>("45ag"); + } +} diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index d369de5a1c0115b0e0385fe903c6f3e8a16bbaa4..1f3ae7445332d69e2f5e3dbe4eca3aaab16fba6c 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -25,8 +25,6 @@ use codec::{Decode, Encode, MaxEncodedLen}; use itertools::Itertools; #[cfg(feature = "std")] use rand::{rngs::OsRng, RngCore}; -#[cfg(feature = "std")] -use regex::Regex; use scale_info::TypeInfo; #[cfg(feature = "std")] pub use secrecy::{ExposeSecret, SecretString}; @@ -43,6 +41,11 @@ pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58Addres /// Trait to zeroize a memory buffer. 
pub use zeroize::Zeroize; +#[cfg(feature = "std")] +pub use crate::address_uri::AddressUri; +#[cfg(any(feature = "std", feature = "full_crypto"))] +pub use crate::address_uri::Error as AddressUriError; + /// The root phrase for our publicly known keys. pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; @@ -82,8 +85,8 @@ impl> UncheckedInto for S { #[cfg(feature = "full_crypto")] pub enum SecretStringError { /// The overall format was invalid (e.g. the seed phrase contained symbols). - #[cfg_attr(feature = "std", error("Invalid format"))] - InvalidFormat, + #[cfg_attr(feature = "std", error("Invalid format {0}"))] + InvalidFormat(AddressUriError), /// The seed phrase provided is not a valid BIP39 phrase. #[cfg_attr(feature = "std", error("Invalid phrase"))] InvalidPhrase, @@ -101,6 +104,13 @@ pub enum SecretStringError { InvalidPath, } +#[cfg(any(feature = "std", feature = "full_crypto"))] +impl From for SecretStringError { + fn from(e: AddressUriError) -> Self { + Self::InvalidFormat(e) + } +} + /// An error when deriving a key. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(Debug, Clone, PartialEq, Eq)] @@ -208,7 +218,7 @@ impl> From for DeriveJunction { /// An error type for SS58 decoding. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[cfg_attr(not(feature = "std"), derive(Debug))] -#[derive(Clone, Copy, Eq, PartialEq)] +#[derive(Clone, Eq, PartialEq)] #[allow(missing_docs)] #[cfg(any(feature = "full_crypto", feature = "serde"))] pub enum PublicError { @@ -235,6 +245,11 @@ pub enum PublicError { InvalidPath, #[cfg_attr(feature = "std", error("Disallowed SS58 Address Format for this datatype."))] FormatNotAllowed, + #[cfg_attr(feature = "std", error("Password not allowed."))] + PasswordNotAllowed, + #[cfg(feature = "std")] + #[cfg_attr(feature = "std", error("Incorrect URI syntax {0}."))] + MalformedUri(#[from] AddressUriError), } #[cfg(feature = "std")] @@ -414,47 +429,40 @@ pub fn set_default_ss58_version(new_default: Ss58AddressFormat) { DEFAULT_VERSION.store(new_default.into(), core::sync::atomic::Ordering::Relaxed); } -#[cfg(feature = "std")] -lazy_static::lazy_static! { - static ref SS58_REGEX: Regex = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - static ref JUNCTION_REGEX: Regex = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); -} - #[cfg(feature = "std")] impl + AsRef<[u8]> + Public + Derive> Ss58Codec for T { fn from_string(s: &str) -> Result { - let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); + let cap = AddressUri::parse(s)?; + if cap.pass.is_some() { + return Err(PublicError::PasswordNotAllowed); + } + let s = cap.phrase.unwrap_or(DEV_ADDRESS); let addr = if let Some(stripped) = s.strip_prefix("0x") { let d = array_bytes::hex2bytes(stripped).map_err(|_| PublicError::InvalidFormat)?; Self::from_slice(&d).map_err(|()| PublicError::BadLength)? } else { Self::from_ss58check(s)? 
}; - if cap["path"].is_empty() { + if cap.paths.is_empty() { Ok(addr) } else { - let path = - JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); - addr.derive(path).ok_or(PublicError::InvalidPath) + addr.derive(cap.paths.iter().map(DeriveJunction::from)) + .ok_or(PublicError::InvalidPath) } } fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let (addr, v) = Self::from_ss58check_with_version( - cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS), - )?; - if cap["path"].is_empty() { + let cap = AddressUri::parse(s)?; + if cap.pass.is_some() { + return Err(PublicError::PasswordNotAllowed); + } + let (addr, v) = Self::from_ss58check_with_version(cap.phrase.unwrap_or(DEV_ADDRESS))?; + if cap.paths.is_empty() { Ok((addr, v)) } else { - let path = - JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); - addr.derive(path).ok_or(PublicError::InvalidPath).map(|a| (a, v)) + addr.derive(cap.paths.iter().map(DeriveJunction::from)) + .ok_or(PublicError::InvalidPath) + .map(|a| (a, v)) } } } @@ -817,22 +825,15 @@ impl sp_std::str::FromStr for SecretUri { type Err = SecretStringError; fn from_str(s: &str) -> Result { - let cap = SECRET_PHRASE_REGEX.captures(s).ok_or(SecretStringError::InvalidFormat)?; - - let junctions = JUNCTION_REGEX - .captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])) - .collect::>(); - - let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); - let password = cap.name("password"); + let cap = AddressUri::parse(s)?; + let phrase = cap.phrase.unwrap_or(DEV_PHRASE); Ok(Self { phrase: SecretString::from_str(phrase).expect("Returns infallible error; qed"), - password: password.map(|v| { - SecretString::from_str(v.as_str()).expect("Returns infallible error; qed") - }), - junctions, + password: cap + .pass + .map(|v| SecretString::from_str(v).expect("Returns infallible error; qed")), + junctions: cap.paths.iter().map(DeriveJunction::from).collect::>(), }) } } @@ -1108,8 +1109,8 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { pub trait VrfCrypto { /// VRF input. type VrfInput; - /// VRF output. - type VrfOutput; + /// VRF pre-output. + type VrfPreOutput; /// VRF signing data. type VrfSignData; /// VRF signature. @@ -1118,8 +1119,8 @@ pub trait VrfCrypto { /// VRF Secret Key. pub trait VrfSecret: VrfCrypto { - /// Get VRF-specific output . - fn vrf_output(&self, data: &Self::VrfInput) -> Self::VrfOutput; + /// Get VRF-specific pre-output. + fn vrf_pre_output(&self, data: &Self::VrfInput) -> Self::VrfPreOutput; /// Sign VRF-specific data. 
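A hedged sketch of what the parser-based path above yields for a typical secret URI, assuming the usual test-module context of this file (`ExposeSecret` is the re-exported secrecy trait; the URI value is illustrative):

#[test]
fn secret_uri_parse_sketch() {
    // "//Alice///pwd" has no phrase part, one hard junction and a trailing password.
    let uri: SecretUri = "//Alice///pwd".parse().unwrap();
    assert_eq!(uri.phrase.expose_secret(), DEV_PHRASE); // DEV_PHRASE substitutes the empty phrase
    assert_eq!(uri.junctions, vec![DeriveJunction::hard("Alice")]);
    assert!(uri.password.is_some());
    // Public (SS58) parsing, by contrast, rejects URIs carrying a password with `PasswordNotAllowed`.
}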
fn vrf_sign(&self, input: &Self::VrfSignData) -> Self::VrfSignature; diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index 603fa515a30e8b7cefc1093daa5905ccfac1320c..471714582a6bba766cc05ad6e73e870c3d690a93 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -336,7 +336,7 @@ impl Signature { pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option { let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?; let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?; - let message = Message::from_slice(message).expect("Message is 32 bytes; qed"); + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); #[cfg(feature = "std")] let context = SECP256K1; @@ -458,7 +458,7 @@ impl Pair { /// Sign a pre-hashed message pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature { - let message = Message::from_slice(message).expect("Message is 32 bytes; qed"); + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); #[cfg(feature = "std")] let context = SECP256K1; @@ -508,12 +508,7 @@ impl Pair { #[cfg(feature = "full_crypto")] impl Drop for Pair { fn drop(&mut self) { - let ptr = self.secret.as_mut_ptr(); - for off in 0..self.secret.len() { - unsafe { - core::ptr::write_volatile(ptr.add(off), 0); - } - } + self.secret.non_secure_erase() } } @@ -760,7 +755,7 @@ mod test { let msg = [0u8; 32]; let sig1 = pair.sign_prehashed(&msg); let sig2: Signature = { - let message = Message::from_slice(&msg).unwrap(); + let message = Message::from_digest_slice(&msg).unwrap(); SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into() }; assert_eq!(sig1, sig2); diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index ec0641c54668b8b54613dc7b9a1c188489b9dfc4..c7232563cb738a6087eeaebbaec678d36d0128be 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -51,10 +51,13 @@ pub mod hashing; #[cfg(feature = "full_crypto")] pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64}; +pub mod const_hex2array; pub mod crypto; pub mod hexdisplay; pub use paste; +#[cfg(any(feature = "full_crypto", feature = "std"))] +mod address_uri; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; #[cfg(feature = "bls-experimental")] diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index a97b657e7578fc8dc0277c9c5827ad72c51f74a2..960b8469249e22b10352545f26d3940df3da2e77 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -39,7 +39,13 @@ use sp_std::convert::TryFrom; /// ECDSA and BLS12-377 paired crypto scheme #[cfg(feature = "bls-experimental")] pub mod ecdsa_bls377 { - use crate::{bls377, crypto::CryptoTypeId, ecdsa}; + #[cfg(feature = "full_crypto")] + use crate::Hasher; + use crate::{ + bls377, + crypto::{CryptoTypeId, Pair as PairT, UncheckedFrom}, + ecdsa, + }; /// An identifier used to match public keys against BLS12-377 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecb7"); @@ -71,6 +77,60 @@ pub mod ecdsa_bls377 { impl super::CryptoType for Pair { type Pair = Pair; } + + #[cfg(feature = "full_crypto")] + impl Pair { + /// Hashes the `message` with the specified [`Hasher`] before signing with the ECDSA secret + /// component. + /// + /// The hasher does not affect the BLS12-377 component.
This generates a BLS12-377 Signature + /// according to the IETF standard. + pub fn sign_with_hasher(&self, message: &[u8]) -> Signature + where + H: Hasher, + H::Out: Into<[u8; 32]>, + { + let msg_hash = H::hash(message).into(); + + let mut raw: [u8; SIGNATURE_LEN] = [0u8; SIGNATURE_LEN]; + raw[..ecdsa::SIGNATURE_SERIALIZED_SIZE] + .copy_from_slice(self.left.sign_prehashed(&msg_hash).as_ref()); + raw[ecdsa::SIGNATURE_SERIALIZED_SIZE..] + .copy_from_slice(self.right.sign(message).as_ref()); + ::Signature::unchecked_from(raw) + } + + /// Hashes the `message` with the specified [`Hasher`] before verifying with the ECDSA + /// public component. + /// + /// The hasher does not affect the BLS12-377 component. This verifies whether the + /// BLS12-377 signature was hashed and signed according to the IETF standard. + pub fn verify_with_hasher(sig: &Signature, message: &[u8], public: &Public) -> bool + where + H: Hasher, + H::Out: Into<[u8; 32]>, + { + let msg_hash = H::hash(message).into(); + + let Ok(left_pub) = public.0[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].try_into() else { + return false + }; + let Ok(left_sig) = sig.0[0..ecdsa::SIGNATURE_SERIALIZED_SIZE].try_into() else { + return false + }; + if !ecdsa::Pair::verify_prehashed(&left_sig, &msg_hash, &left_pub) { + return false + } + + let Ok(right_pub) = public.0[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].try_into() else { + return false + }; + let Ok(right_sig) = sig.0[ecdsa::SIGNATURE_SERIALIZED_SIZE..].try_into() else { + return false + }; + bls377::Pair::verify(&right_sig, message, &right_pub) + } + } } /// Secure seed length. @@ -455,12 +515,12 @@ where #[cfg(all(test, feature = "bls-experimental"))] mod test { use super::*; - use crate::crypto::DEV_PHRASE; + use crate::{crypto::DEV_PHRASE, KeccakHasher}; use ecdsa_bls377::{Pair, Signature}; use crate::{bls377, ecdsa}; - #[test] + #[test] fn test_length_of_paired_ecdsa_and_bls377_public_key_and_signature_is_correct() { assert_eq!( ::Public::LEN, @@ -617,6 +677,16 @@ mod test { assert_eq!(cmp, public); } + #[test] + fn sign_and_verify_with_hasher_works() { + let pair = + Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let message = b"Something important"; + let signature = pair.sign_with_hasher::(&message[..]); + + assert!(Pair::verify_with_hasher::(&signature, &message[..], &pair.public())); + } + #[test] fn signature_serialization_works() { let pair = diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs index ffa52ef97d1f58f3af2b2f2c62015c411e464ae6..b821055e2c56713067bf096899c2b10b6155c877 100644 --- a/substrate/primitives/core/src/sr25519.rs +++ b/substrate/primitives/core/src/sr25519.rs @@ -555,7 +555,7 @@ pub mod vrf { use crate::crypto::{VrfCrypto, VrfPublic}; use schnorrkel::{ errors::MultiSignatureStage, - vrf::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}, + vrf::{VRF_PREOUT_LENGTH, VRF_PROOF_LENGTH}, SignatureError, }; @@ -628,37 +628,37 @@ pub mod vrf { /// VRF signature data #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct VrfSignature { - /// VRF output. - pub output: VrfOutput, + /// VRF pre-output. + pub pre_output: VrfPreOutput, /// VRF proof. pub proof: VrfProof, } - /// VRF output type suitable for schnorrkel operations. + /// VRF pre-output type suitable for schnorrkel operations.
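A complementary negative check for the paired scheme could look like the sketch below (meant to sit beside `sign_and_verify_with_hasher_works` above; `KeccakHasher` and the seed are taken from that test, the tampered message is illustrative):

#[test]
fn verify_with_hasher_rejects_tampered_message() {
    let pair =
        Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap()));
    let signature = pair.sign_with_hasher::<KeccakHasher>(&b"Something important"[..]);

    // The ECDSA half is checked against the Keccak digest of the message, so any
    // change to the message must invalidate the combined signature.
    assert!(!Pair::verify_with_hasher::<KeccakHasher>(
        &signature,
        &b"Something else"[..],
        &pair.public()
    ));
}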
#[derive(Clone, Debug, PartialEq, Eq)] - pub struct VrfOutput(pub schnorrkel::vrf::VRFOutput); + pub struct VrfPreOutput(pub schnorrkel::vrf::VRFPreOut); - impl Encode for VrfOutput { + impl Encode for VrfPreOutput { fn encode(&self) -> Vec { self.0.as_bytes().encode() } } - impl Decode for VrfOutput { + impl Decode for VrfPreOutput { fn decode(i: &mut R) -> Result { - let decoded = <[u8; VRF_OUTPUT_LENGTH]>::decode(i)?; - Ok(Self(schnorrkel::vrf::VRFOutput::from_bytes(&decoded).map_err(convert_error)?)) + let decoded = <[u8; VRF_PREOUT_LENGTH]>::decode(i)?; + Ok(Self(schnorrkel::vrf::VRFPreOut::from_bytes(&decoded).map_err(convert_error)?)) } } - impl MaxEncodedLen for VrfOutput { + impl MaxEncodedLen for VrfPreOutput { fn max_encoded_len() -> usize { - <[u8; VRF_OUTPUT_LENGTH]>::max_encoded_len() + <[u8; VRF_PREOUT_LENGTH]>::max_encoded_len() } } - impl TypeInfo for VrfOutput { - type Identity = [u8; VRF_OUTPUT_LENGTH]; + impl TypeInfo for VrfPreOutput { + type Identity = [u8; VRF_PREOUT_LENGTH]; fn type_info() -> scale_info::Type { Self::Identity::type_info() @@ -699,7 +699,7 @@ pub mod vrf { #[cfg(feature = "full_crypto")] impl VrfCrypto for Pair { type VrfInput = VrfTranscript; - type VrfOutput = VrfOutput; + type VrfPreOutput = VrfPreOutput; type VrfSignData = VrfSignData; type VrfSignature = VrfSignature; } @@ -717,18 +717,18 @@ pub mod vrf { let proof = self.0.dleq_proove(extra, &inout, true).0; - VrfSignature { output: VrfOutput(inout.to_output()), proof: VrfProof(proof) } + VrfSignature { pre_output: VrfPreOutput(inout.to_preout()), proof: VrfProof(proof) } } - fn vrf_output(&self, input: &Self::VrfInput) -> Self::VrfOutput { - let output = self.0.vrf_create_hash(input.0.clone()).to_output(); - VrfOutput(output) + fn vrf_pre_output(&self, input: &Self::VrfInput) -> Self::VrfPreOutput { + let pre_output = self.0.vrf_create_hash(input.0.clone()).to_preout(); + VrfPreOutput(pre_output) } } impl VrfCrypto for Public { type VrfInput = VrfTranscript; - type VrfOutput = VrfOutput; + type VrfPreOutput = VrfPreOutput; type VrfSignData = VrfSignData; type VrfSignature = VrfSignature; } @@ -739,7 +739,7 @@ pub mod vrf { let public = schnorrkel::PublicKey::from_bytes(self)?; let inout = - signature.output.0.attach_input_hash(&public, data.transcript.0.clone())?; + signature.pre_output.0.attach_input_hash(&public, data.transcript.0.clone())?; let extra = data .extra @@ -762,6 +762,7 @@ pub mod vrf { ScalarFormatError => "Signature error: `ScalarFormatError`".into(), NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), BytesLengthError { .. } => "Signature error: `BytesLengthError`".into(), + InvalidKey => "Signature error: `InvalidKey`".into(), MuSigAbsent { musig_stage: Commitment } => "Signature error: `MuSigAbsent` at stage `Commitment`".into(), MuSigAbsent { musig_stage: Reveal } => @@ -802,19 +803,21 @@ pub mod vrf { &self, context: &[u8], input: &VrfInput, - output: &VrfOutput, + pre_output: &VrfPreOutput, ) -> Result<[u8; N], codec::Error> where [u8; N]: Default, { let pubkey = schnorrkel::PublicKey::from_bytes(&self.0).map_err(convert_error)?; - let inout = - output.0.attach_input_hash(&pubkey, input.0.clone()).map_err(convert_error)?; + let inout = pre_output + .0 + .attach_input_hash(&pubkey, input.0.clone()) + .map_err(convert_error)?; Ok(inout.make_bytes::<[u8; N]>(context)) } } - impl VrfOutput { + impl VrfPreOutput { /// Generate output bytes from the given VRF configuration. 
pub fn make_bytes( &self, @@ -1097,10 +1100,10 @@ mod tests { let input = VrfTranscript::new(b"label", &[(b"domain1", b"data1")]); - let output = pair.vrf_output(&input); + let pre_output = pair.vrf_pre_output(&input); let out1 = pair.make_bytes::<32>(ctx, &input); - let out2 = output.make_bytes::<32>(ctx, &input, &public).unwrap(); + let out2 = pre_output.make_bytes::<32>(ctx, &input, &public).unwrap(); assert_eq!(out1, out2); let extra = VrfTranscript::new(b"extra", &[(b"domain2", b"data2")]); @@ -1108,7 +1111,7 @@ mod tests { let signature = pair.vrf_sign(&data); assert!(public.vrf_verify(&data, &signature)); - let out3 = public.make_bytes::<32>(ctx, &input, &signature.output).unwrap(); + let out3 = public.make_bytes::<32>(ctx, &input, &signature.pre_output).unwrap(); assert_eq!(out2, out3); } @@ -1126,7 +1129,7 @@ mod tests { assert!(public.vrf_verify(&data, &signature)); let out1 = pair.make_bytes::<32>(ctx, &input); - let out2 = public.make_bytes::<32>(ctx, &input, &signature.output).unwrap(); + let out2 = public.make_bytes::<32>(ctx, &input, &signature.pre_output).unwrap(); assert_eq!(out1, out2); // Direct call to backend version of sign after check with extra params @@ -1139,9 +1142,9 @@ mod tests { }) .unwrap(); let signature2 = - VrfSignature { output: VrfOutput(inout.to_output()), proof: VrfProof(proof) }; + VrfSignature { pre_output: VrfPreOutput(inout.to_preout()), proof: VrfProof(proof) }; assert!(public.vrf_verify(&data, &signature2)); - assert_eq!(signature.output, signature2.output); + assert_eq!(signature.pre_output, signature2.pre_output); } } diff --git a/substrate/primitives/core/src/traits.rs b/substrate/primitives/core/src/traits.rs index 9815c84f3396a69401be386a7a5d0b627bddd761..851d89103914e788682ef5c9531861f7c0ed1940 100644 --- a/substrate/primitives/core/src/traits.rs +++ b/substrate/primitives/core/src/traits.rs @@ -51,7 +51,6 @@ pub trait CodeExecutor: Sized + Send + Sync + ReadRuntimeVersion + Clone + 'stat runtime_code: &RuntimeCode, method: &str, data: &[u8], - use_native: bool, context: CallContext, ) -> (Result, Self::Error>, bool); } diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index 1484406e7b2417454e8f1b472e36ae1af71cb82c..3baa8ea5b78421e37ab0e56fb0513c81f5c4a049 100644 --- a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -8,6 +8,9 @@ license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -23,12 +26,12 @@ ark-ed-on-bls12-381-bandersnatch-ext = { version = "0.4.1", default-features = f ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false, optional = true } ark-ed-on-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false, optional = true } -ark-scale = { version = "0.0.11", default-features = false, features = ["hazmat"], optional = true } +ark-scale = { version = "0.0.12", default-features = false, features = ["hazmat"], optional = true } sp-runtime-interface = { path = "../../runtime-interface", default-features = false, optional = true } sp-std = { path = "../../std", default-features = false, optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "ark-bls12-377-ext?/std", "ark-bls12-377?/std", @@ -46,11 +49,11 @@ std = [ "sp-runtime-interface?/std", 
"sp-std?/std", ] -common = [ "ark-ec", "ark-scale", "sp-runtime-interface", "sp-std" ] -bls12-377 = [ "ark-bls12-377", "ark-bls12-377-ext", "common" ] -bls12-381 = [ "ark-bls12-381", "ark-bls12-381-ext", "common" ] -bw6-761 = [ "ark-bw6-761", "ark-bw6-761-ext", "common" ] -ed-on-bls12-377 = [ "ark-ed-on-bls12-377", "ark-ed-on-bls12-377-ext", "common" ] +common = ["ark-ec", "ark-scale", "sp-runtime-interface", "sp-std"] +bls12-377 = ["ark-bls12-377", "ark-bls12-377-ext", "common"] +bls12-381 = ["ark-bls12-381", "ark-bls12-381-ext", "common"] +bw6-761 = ["ark-bw6-761", "ark-bw6-761-ext", "common"] +ed-on-bls12-377 = ["ark-ed-on-bls12-377", "ark-ed-on-bls12-377-ext", "common"] ed-on-bls12-381-bandersnatch = [ "ark-ed-on-bls12-381-bandersnatch", "ark-ed-on-bls12-381-bandersnatch-ext", diff --git a/substrate/primitives/database/Cargo.toml b/substrate/primitives/database/Cargo.toml index 430895236d4f0eec73b8939df14e8075a32cd630..00ccf97c83e95d4a979812b59284e00fe2e74545 100644 --- a/substrate/primitives/database/Cargo.toml +++ b/substrate/primitives/database/Cargo.toml @@ -10,6 +10,9 @@ description = "Substrate database trait." documentation = "https://docs.rs/sp-database" readme = "README.md" +[lints] +workspace = true + [dependencies] kvdb = "0.13.0" parking_lot = "0.12.1" diff --git a/substrate/primitives/database/src/lib.rs b/substrate/primitives/database/src/lib.rs index 012f699552d749a1cca49f9655485841a0d8dca2..42920bbefb499e550efdfc67552aa67b52665859 100644 --- a/substrate/primitives/database/src/lib.rs +++ b/substrate/primitives/database/src/lib.rs @@ -101,7 +101,9 @@ pub trait Database>: Send + Sync { /// This may be faster than `get` since it doesn't allocate. /// Use `with_get` helper function if you need `f` to return a value from `f` fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) { - self.get(col, key).map(|v| f(&v)); + if let Some(v) = self.get(col, key) { + f(&v) + } } /// Check if database supports internal ref counting for state data. diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index c97c8a0a3991e94dd5c57f067fa26f5dbc4f091c..d23e311ee0b2aeb449fdfb1de556120726406cc9 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -9,6 +9,8 @@ repository.workspace = true description = "Macros to derive runtime debug implementation." documentation = "https://docs.rs/sp-debug-derive" +[lints] +workspace = true [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,11 +20,11 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = "2.0.38" +syn = "2.0.41" proc-macro2 = "1.0.56" [features] -default = [ "std" ] +default = ["std"] std = [] # By default `RuntimeDebug` implements `Debug` that outputs `` when `std` is # disabled. 
However, sometimes downstream users need to have the real `Debug` implementation for diff --git a/substrate/primitives/externalities/Cargo.toml b/substrate/primitives/externalities/Cargo.toml index 417eb363867b2d90d1f5be0244171f9bd4ed7bdf..4c7afc38b815f7579e527598b48d9543ac4543bb 100644 --- a/substrate/primitives/externalities/Cargo.toml +++ b/substrate/primitives/externalities/Cargo.toml @@ -10,15 +10,18 @@ description = "Substrate externalities abstraction" documentation = "https://docs.rs/sp-externalities" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } environmental = { version = "1.1.3", default-features = false } -sp-std = { path = "../std", default-features = false} -sp-storage = { path = "../storage", default-features = false} +sp-std = { path = "../std", default-features = false } +sp-storage = { path = "../storage", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "environmental/std", "sp-std/std", "sp-storage/std" ] +default = ["std"] +std = ["codec/std", "environmental/std", "sp-std/std", "sp-storage/std"] diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index cf7ce99571156ed3b1169e62d956a7bbd3407c2a..b376055d605f213efcb1c32f89162803746d8154 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -9,15 +9,18 @@ repository.workspace = true description = "Substrate GenesisConfig builder API" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } [features] -default = [ "std" ] -std = [ "serde_json/std", "sp-api/std", "sp-runtime/std", "sp-std/std" ] +default = ["std"] +std = ["serde_json/std", "sp-api/std", "sp-runtime/std", "sp-std/std"] diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index 4a511c653fd7a07a87117ace4d2607fd25509dea..5c13694ceac1624f7b8860b39dbcd4d7cce48239 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -10,23 +10,26 @@ description = "Provides types and traits for creating and checking inherents." 
documentation = "https://docs.rs/sp-inherents" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.48", optional = true } -sp-runtime = { path = "../runtime", default-features = false, optional = true} -sp-std = { path = "../std", default-features = false} +sp-runtime = { path = "../runtime", default-features = false, optional = true } +sp-std = { path = "../std", default-features = false } [dev-dependencies] futures = "0.3.21" [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index 445104b736e0a08121fff098161b4569f5104b2b..47de957e6bf9eadc375c4f06f4c1beee9843a462 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -11,6 +11,9 @@ documentation = "https://docs.rs/sp-io" readme = "README.md" build = "build.rs" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,28 +21,28 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } -sp-core = { path = "../core", default-features = false} -sp-keystore = { path = "../keystore", default-features = false, optional = true} -sp-std = { path = "../std", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-keystore = { path = "../keystore", default-features = false, optional = true } +sp-std = { path = "../std", default-features = false } libsecp256k1 = { version = "0.7", optional = true } -sp-state-machine = { path = "../state-machine", default-features = false, optional = true} -sp-runtime-interface = { path = "../runtime-interface", default-features = false} -sp-trie = { path = "../trie", default-features = false, optional = true} -sp-externalities = { path = "../externalities", default-features = false} -sp-tracing = { path = "../tracing", default-features = false} +sp-state-machine = { path = "../state-machine", default-features = false, optional = true } +sp-runtime-interface = { path = "../runtime-interface", default-features = false } +sp-trie = { path = "../trie", default-features = false, optional = true } +sp-externalities = { path = "../externalities", default-features = false } +sp-tracing = { path = "../tracing", default-features = false } log = { version = "0.4.17", optional = true } -secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true } +secp256k1 = { version = "0.28.0", features = ["global-context", "recovery"], optional = true } tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.28", default-features = false} +tracing-core = { version = "0.1.32", default-features = false } # Required for backwards compatibility reason, but only used for verifying when `UseDalekExt` is set. 
-ed25519-dalek = { version = "2.0", default-features = false, optional = true } +ed25519-dalek = { version = "2.1", default-features = false, optional = true } [build-dependencies] rustversion = "1.0.6" [features] -default = [ "std" ] +default = ["std"] std = [ "bytes/std", "codec/std", @@ -60,7 +63,7 @@ std = [ "tracing/std", ] -with-tracing = [ "sp-tracing/with-tracing" ] +with-tracing = ["sp-tracing/with-tracing"] # These two features are used for `no_std` builds for the environments which already provides # `#[panic_handler]`, `#[alloc_error_handler]` and `#[global_allocator]`. @@ -92,9 +95,9 @@ improved_panic_error_reporting = [] # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = [ "sp-keystore/bls-experimental" ] +bls-experimental = ["sp-keystore/bls-experimental"] # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bandersnatch-experimental = [ "sp-keystore/bandersnatch-experimental" ] +bandersnatch-experimental = ["sp-keystore/bandersnatch-experimental"] diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index c4182d6ab3a00155584868cc32845160655cea5f..a300152ee66d1288de4633918d08590b483d6423 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -1139,7 +1139,7 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadV)?; let sig = RecoverableSignature::from_compact(&sig[..64], rid) .map_err(|_| EcdsaVerifyError::BadRS)?; - let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed"); + let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed"); let pubkey = SECP256K1 .recover_ecdsa(&msg, &sig) .map_err(|_| EcdsaVerifyError::BadSignature)?; @@ -1185,7 +1185,7 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadV)?; let sig = RecoverableSignature::from_compact(&sig[..64], rid) .map_err(|_| EcdsaVerifyError::BadRS)?; - let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed"); + let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed"); let pubkey = SECP256K1 .recover_ecdsa(&msg, &sig) .map_err(|_| EcdsaVerifyError::BadSignature)?; diff --git a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml index 1ab78eeed453c83d157a0685d3cda452bb2fb80f..80d773b452aed4ffa41501640b723230f5ef6f8a 100644 --- a/substrate/primitives/keyring/Cargo.toml +++ b/substrate/primitives/keyring/Cargo.toml @@ -10,11 +10,13 @@ description = "Keyring support code for the runtime. A set of test accounts." documentation = "https://docs.rs/sp-keyring" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -lazy_static = "1.4.0" strum = { version = "0.24.1", features = ["derive"], default-features = false } sp-core = { path = "../core" } sp-runtime = { path = "../runtime" } @@ -23,4 +25,4 @@ sp-runtime = { path = "../runtime" } # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. 
-bandersnatch-experimental = [ "sp-core/bandersnatch-experimental" ] +bandersnatch-experimental = ["sp-core/bandersnatch-experimental"] diff --git a/substrate/primitives/keyring/src/bandersnatch.rs b/substrate/primitives/keyring/src/bandersnatch.rs index 8de6786a6fbf6384de2308e420201c45a31109f6..eb60f85632725ca9efbac46c63222fc71609d74f 100644 --- a/substrate/primitives/keyring/src/bandersnatch.rs +++ b/substrate/primitives/keyring/src/bandersnatch.rs @@ -21,12 +21,9 @@ pub use sp_core::bandersnatch; use sp_core::{ bandersnatch::{Pair, Public, Signature}, crypto::UncheckedFrom, - ByteArray, Pair as PairT, + hex2array, ByteArray, Pair as PairT, }; -use lazy_static::lazy_static; -use std::{collections::HashMap, ops::Deref, sync::Mutex}; - /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] pub enum Keyring { @@ -74,7 +71,7 @@ impl Keyring { } pub fn public(self) -> Public { - self.pair().public() + Public::from(self) } pub fn to_seed(self) -> String { @@ -129,20 +126,9 @@ impl std::str::FromStr for Keyring { } } -lazy_static! { - static ref PRIVATE_KEYS: Mutex> = - Mutex::new(Keyring::iter().map(|who| (who, who.pair())).collect()); - static ref PUBLIC_KEYS: HashMap = PRIVATE_KEYS - .lock() - .unwrap() - .iter() - .map(|(&who, pair)| (who, pair.public())) - .collect(); -} - impl From for Public { fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap() + Public::unchecked_from(<[u8; PUBLIC_RAW_LEN]>::from(k)) } } @@ -154,32 +140,24 @@ impl From for Pair { impl From for [u8; PUBLIC_RAW_LEN] { fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap().as_ref() - } -} - -impl From for &'static [u8; PUBLIC_RAW_LEN] { - fn from(k: Keyring) -> Self { - PUBLIC_KEYS.get(&k).unwrap().as_ref() - } -} - -impl AsRef<[u8; PUBLIC_RAW_LEN]> for Keyring { - fn as_ref(&self) -> &[u8; PUBLIC_RAW_LEN] { - PUBLIC_KEYS.get(self).unwrap().as_ref() - } -} - -impl AsRef for Keyring { - fn as_ref(&self) -> &Public { - PUBLIC_KEYS.get(self).unwrap() - } -} - -impl Deref for Keyring { - type Target = [u8; PUBLIC_RAW_LEN]; - fn deref(&self) -> &[u8; PUBLIC_RAW_LEN] { - PUBLIC_KEYS.get(self).unwrap().as_ref() + match k { + Keyring::Alice => + hex2array!("9c8af77d3a4e3f6f076853922985b9e6724fc9675329087f47aff1ceaaae772180"), + Keyring::Bob => + hex2array!("1abfbb76dc8374a1a6d93d59a5c81f07c18835f4681a6258aa0f514d363bff4780"), + Keyring::Charlie => + hex2array!("0f4a9990aca3d39a7cd8bf187e2e81a9ea6f9cedb2db405f2fffff384c5dd02680"), + Keyring::Dave => + hex2array!("bd7a87d4dfa89926a408b5acbed554ae3b053fa3532531053295cbabf07d337000"), + Keyring::Eve => + hex2array!("f992d5b8eac8fc004d521bee6edc1174cfa7fae3a1baec8262511ee351f9f85e00"), + Keyring::Ferdie => + hex2array!("1ce2613e89bc5c8e358aad884099cfb576a61176f2f9968cd0d486a04457245180"), + Keyring::One => + hex2array!("a29e03ac273e521274d8e501a6242abd2ab393d7e197221a9113bdf8e2e5b34d00"), + Keyring::Two => + hex2array!("f968d47e819ddb18a9d0f2ebd16501680b1a3f07ee375c6f81310e5f99a04f4d00"), + } } } @@ -206,4 +184,9 @@ mod tests { &Keyring::Bob.public(), )); } + #[test] + fn verify_static_public_keys() { + assert!(Keyring::iter() + .all(|k| { k.pair().public().as_ref() == <[u8; PUBLIC_RAW_LEN]>::from(k) })); + } } diff --git a/substrate/primitives/keyring/src/ed25519.rs b/substrate/primitives/keyring/src/ed25519.rs index 3060bfb1ad9870f8f7a6af51ed57e634931127e8..ade42b294940213664d0553a4e4ec537ce553f15 100644 --- a/substrate/primitives/keyring/src/ed25519.rs +++ b/substrate/primitives/keyring/src/ed25519.rs @@ 
-17,14 +17,12 @@ //! Support code for the runtime. A set of test accounts. -use lazy_static::lazy_static; pub use sp_core::ed25519; use sp_core::{ ed25519::{Pair, Public, Signature}, - ByteArray, Pair as PairT, H256, + hex2array, ByteArray, Pair as PairT, H256, }; use sp_runtime::AccountId32; -use std::{collections::HashMap, ops::Deref}; /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] @@ -93,7 +91,7 @@ impl Keyring { } pub fn public(self) -> Public { - self.pair().public() + Public::from(self) } pub fn to_seed(self) -> String { @@ -128,16 +126,9 @@ impl From for sp_runtime::MultiSigner { } } -lazy_static! { - static ref PRIVATE_KEYS: HashMap = - Keyring::iter().map(|i| (i, i.pair())).collect(); - static ref PUBLIC_KEYS: HashMap = - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect(); -} - impl From for Public { fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap() + Public::from_raw(k.into()) } } @@ -155,38 +146,42 @@ impl From for Pair { impl From for [u8; 32] { fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + match k { + Keyring::Alice => + hex2array!("88dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee"), + Keyring::Bob => + hex2array!("d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae69"), + Keyring::Charlie => + hex2array!("439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f"), + Keyring::Dave => + hex2array!("5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9"), + Keyring::Eve => + hex2array!("1dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b5"), + Keyring::Ferdie => + hex2array!("568cb4a574c6d178feb39c27dfc8b3f789e5f5423e19c71633c748b9acf086b5"), + Keyring::AliceStash => + hex2array!("451781cd0c5504504f69ceec484cc66e4c22a2b6a9d20fb1a426d91ad074a2a8"), + Keyring::BobStash => + hex2array!("292684abbb28def63807c5f6e84e9e8689769eb37b1ab130d79dbfbf1b9a0d44"), + Keyring::CharlieStash => + hex2array!("dd6a6118b6c11c9c9e5a4f34ed3d545e2c74190f90365c60c230fa82e9423bb9"), + Keyring::DaveStash => + hex2array!("1d0432d75331ab299065bee79cdb1bdc2497c597a3087b4d955c67e3c000c1e2"), + Keyring::EveStash => + hex2array!("c833bdd2e1a7a18acc1c11f8596e2e697bb9b42d6b6051e474091a1d43a294d7"), + Keyring::FerdieStash => + hex2array!("199d749dbf4b8135cb1f3c8fd697a390fc0679881a8a110c1d06375b3b62cd09"), + Keyring::One => + hex2array!("16f97016bbea8f7b45ae6757b49efc1080accc175d8f018f9ba719b60b0815e4"), + Keyring::Two => + hex2array!("5079bcd20fd97d7d2f752c4607012600b401950260a91821f73e692071c82bf5"), + } } } impl From for H256 { fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() - } -} - -impl From for &'static [u8; 32] { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } -} - -impl AsRef<[u8; 32]> for Keyring { - fn as_ref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } -} - -impl AsRef for Keyring { - fn as_ref(&self) -> &Public { - (*PUBLIC_KEYS).get(self).unwrap() - } -} - -impl Deref for Keyring { - type Target = [u8; 32]; - fn deref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + k.into() } } @@ -213,4 +208,9 @@ mod tests { &Keyring::Bob.public(), )); } + + #[test] + fn verify_static_public_keys() { + assert!(Keyring::iter().all(|k| { k.pair().public().as_ref() == <[u8; 32]>::from(k) })); + } } diff --git a/substrate/primitives/keyring/src/sr25519.rs 
b/substrate/primitives/keyring/src/sr25519.rs index 914a66b4d837c94b1073d10cfc92187402597700..1c2a2526efb1eccb16cb0696f89cb3fc7583357b 100644 --- a/substrate/primitives/keyring/src/sr25519.rs +++ b/substrate/primitives/keyring/src/sr25519.rs @@ -17,14 +17,13 @@ //! Support code for the runtime. A set of test accounts. -use lazy_static::lazy_static; pub use sp_core::sr25519; use sp_core::{ + hex2array, sr25519::{Pair, Public, Signature}, ByteArray, Pair as PairT, H256, }; use sp_runtime::AccountId32; -use std::{collections::HashMap, ops::Deref}; /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] @@ -93,7 +92,7 @@ impl Keyring { } pub fn public(self) -> Public { - self.pair().public() + Public::from(self) } pub fn to_seed(self) -> String { @@ -165,13 +164,6 @@ impl std::str::FromStr for Keyring { } } -lazy_static! { - static ref PRIVATE_KEYS: HashMap = - Keyring::iter().map(|i| (i, i.pair())).collect(); - static ref PUBLIC_KEYS: HashMap = - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect(); -} - impl From for AccountId32 { fn from(k: Keyring) -> Self { k.to_account_id() @@ -180,7 +172,7 @@ impl From for AccountId32 { impl From for Public { fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap() + Public::from_raw(k.into()) } } @@ -192,38 +184,42 @@ impl From for Pair { impl From for [u8; 32] { fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + match k { + Keyring::Alice => + hex2array!("d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"), + Keyring::Bob => + hex2array!("8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48"), + Keyring::Charlie => + hex2array!("90b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22"), + Keyring::Dave => + hex2array!("306721211d5404bd9da88e0204360a1a9ab8b87c66c1bc2fcdd37f3c2222cc20"), + Keyring::Eve => + hex2array!("e659a7a1628cdd93febc04a4e0646ea20e9f5f0ce097d9a05290d4a9e054df4e"), + Keyring::Ferdie => + hex2array!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c"), + Keyring::AliceStash => + hex2array!("be5ddb1579b72e84524fc29e78609e3caf42e85aa118ebfe0b0ad404b5bdd25f"), + Keyring::BobStash => + hex2array!("fe65717dad0447d715f660a0a58411de509b42e6efb8375f562f58a554d5860e"), + Keyring::CharlieStash => + hex2array!("1e07379407fecc4b89eb7dbd287c2c781cfb1907a96947a3eb18e4f8e7198625"), + Keyring::DaveStash => + hex2array!("e860f1b1c7227f7c22602f53f15af80747814dffd839719731ee3bba6edc126c"), + Keyring::EveStash => + hex2array!("8ac59e11963af19174d0b94d5d78041c233f55d2e19324665bafdfb62925af2d"), + Keyring::FerdieStash => + hex2array!("101191192fc877c24d725b337120fa3edc63d227bbc92705db1e2cb65f56981a"), + Keyring::One => + hex2array!("ac859f8a216eeb1b320b4c76d118da3d7407fa523484d0a980126d3b4d0d220a"), + Keyring::Two => + hex2array!("1254f7017f0b8347ce7ab14f96d818802e7e9e0c0d1b7c9acb3c726b080e7a03"), + } } } impl From for H256 { fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() - } -} - -impl From for &'static [u8; 32] { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } -} - -impl AsRef<[u8; 32]> for Keyring { - fn as_ref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } -} - -impl AsRef for Keyring { - fn as_ref(&self) -> &Public { - (*PUBLIC_KEYS).get(self).unwrap() - } -} - -impl Deref for Keyring { - type Target = [u8; 32]; - fn deref(&self) -> &[u8; 32] { - 
(*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + k.into() } } @@ -250,4 +246,8 @@ mod tests { &Keyring::Bob.public(), )); } + #[test] + fn verify_static_public_keys() { + assert!(Keyring::iter().all(|k| { k.pair().public().as_ref() == <[u8; 32]>::from(k) })); + } } diff --git a/substrate/primitives/keystore/Cargo.toml b/substrate/primitives/keystore/Cargo.toml index ff7c27bf5654839a37a7ab9a02081c68479692f4..d60f5d6c568c50c783554beb2756610d7cae9855 100644 --- a/substrate/primitives/keystore/Cargo.toml +++ b/substrate/primitives/keystore/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Keystore primitives." documentation = "https://docs.rs/sp-core" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,23 +19,23 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } parking_lot = { version = "0.12.1", default-features = false } thiserror = "1.0" -sp-core = { path = "../core", default-features = false} -sp-externalities = { path = "../externalities", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-externalities = { path = "../externalities", default-features = false } [dev-dependencies] -rand = "0.7.2" +rand = "0.8.5" rand_chacha = "0.2.2" [features] -default = [ "std" ] -std = [ "codec/std", "sp-core/std", "sp-externalities/std" ] +default = ["std"] +std = ["codec/std", "sp-core/std", "sp-externalities/std"] # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = [ "sp-core/bls-experimental" ] +bls-experimental = ["sp-core/bls-experimental"] # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bandersnatch-experimental = [ "sp-core/bandersnatch-experimental" ] +bandersnatch-experimental = ["sp-core/bandersnatch-experimental"] diff --git a/substrate/primitives/keystore/src/lib.rs b/substrate/primitives/keystore/src/lib.rs index e415080779cf43774eabb933a9778f1c94b7a489..07c4e2d5fd1dc4b8cc903d34154dbc51a050426c 100644 --- a/substrate/primitives/keystore/src/lib.rs +++ b/substrate/primitives/keystore/src/lib.rs @@ -92,19 +92,19 @@ pub trait Keystore: Send + Sync { data: &sr25519::vrf::VrfSignData, ) -> Result, Error>; - /// Generate an sr25519 VRF output for a given input data. + /// Generate an sr25519 VRF pre-output for a given input data. /// /// Receives [`KeyTypeId`] and an [`sr25519::Public`] key to be able to map /// them to a private key that exists in the keystore. /// /// Returns `None` if the given `key_type` and `public` combination doesn't /// exist in the keystore or an `Err` when something failed. - fn sr25519_vrf_output( + fn sr25519_vrf_pre_output( &self, key_type: KeyTypeId, public: &sr25519::Public, input: &sr25519::vrf::VrfInput, - ) -> Result, Error>; + ) -> Result, Error>; /// Returns all ed25519 public keys for the given key type. fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec; @@ -223,7 +223,7 @@ pub trait Keystore: Send + Sync { input: &bandersnatch::vrf::VrfSignData, ) -> Result, Error>; - /// Generate a bandersnatch VRF (pre)output for a given input data. + /// Generate a bandersnatch VRF pre-output for a given input data. 
/// /// Receives [`KeyTypeId`] and an [`bandersnatch::Public`] key to be able to map /// them to a private key that exists in the keystore. @@ -231,12 +231,12 @@ pub trait Keystore: Send + Sync { /// Returns `None` if the given `key_type` and `public` combination doesn't /// exist in the keystore or an `Err` when something failed. #[cfg(feature = "bandersnatch-experimental")] - fn bandersnatch_vrf_output( + fn bandersnatch_vrf_pre_output( &self, key_type: KeyTypeId, public: &bandersnatch::Public, input: &bandersnatch::vrf::VrfInput, - ) -> Result, Error>; + ) -> Result, Error>; /// Generate a bandersnatch ring-VRF signature for the given data. /// @@ -474,13 +474,13 @@ impl Keystore for Arc { (**self).sr25519_vrf_sign(key_type, public, data) } - fn sr25519_vrf_output( + fn sr25519_vrf_pre_output( &self, key_type: KeyTypeId, public: &sr25519::Public, input: &sr25519::vrf::VrfInput, - ) -> Result, Error> { - (**self).sr25519_vrf_output(key_type, public, input) + ) -> Result, Error> { + (**self).sr25519_vrf_pre_output(key_type, public, input) } fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { @@ -569,13 +569,13 @@ impl Keystore for Arc { } #[cfg(feature = "bandersnatch-experimental")] - fn bandersnatch_vrf_output( + fn bandersnatch_vrf_pre_output( &self, key_type: KeyTypeId, public: &bandersnatch::Public, input: &bandersnatch::vrf::VrfInput, - ) -> Result, Error> { - (**self).bandersnatch_vrf_output(key_type, public, input) + ) -> Result, Error> { + (**self).bandersnatch_vrf_pre_output(key_type, public, input) } #[cfg(feature = "bandersnatch-experimental")] diff --git a/substrate/primitives/keystore/src/testing.rs b/substrate/primitives/keystore/src/testing.rs index 08110e8e497919db42d52d3d6e75194e0d4e3130..585efba02831a442bda511844811a862442cc87d 100644 --- a/substrate/primitives/keystore/src/testing.rs +++ b/substrate/primitives/keystore/src/testing.rs @@ -113,14 +113,14 @@ impl MemoryKeystore { Ok(sig) } - fn vrf_output( + fn vrf_pre_output( &self, key_type: KeyTypeId, public: &T::Public, input: &T::VrfInput, - ) -> Result, Error> { - let preout = self.pair::(key_type, public).map(|pair| pair.vrf_output(input)); - Ok(preout) + ) -> Result, Error> { + let pre_output = self.pair::(key_type, public).map(|pair| pair.vrf_pre_output(input)); + Ok(pre_output) } } @@ -155,13 +155,13 @@ impl Keystore for MemoryKeystore { self.vrf_sign::(key_type, public, data) } - fn sr25519_vrf_output( + fn sr25519_vrf_pre_output( &self, key_type: KeyTypeId, public: &sr25519::Public, input: &sr25519::vrf::VrfInput, - ) -> Result, Error> { - self.vrf_output::(key_type, public, input) + ) -> Result, Error> { + self.vrf_pre_output::(key_type, public, input) } fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { @@ -265,13 +265,13 @@ impl Keystore for MemoryKeystore { } #[cfg(feature = "bandersnatch-experimental")] - fn bandersnatch_vrf_output( + fn bandersnatch_vrf_pre_output( &self, key_type: KeyTypeId, public: &bandersnatch::Public, input: &bandersnatch::vrf::VrfInput, - ) -> Result, Error> { - self.vrf_output::(key_type, public, input) + ) -> Result, Error> { + self.vrf_pre_output::(key_type, public, input) } #[cfg(feature = "bls-experimental")] @@ -443,7 +443,7 @@ mod tests { } #[test] - fn sr25519_vrf_output() { + fn sr25519_vrf_pre_output() { let store = MemoryKeystore::new(); let secret_uri = "//Alice"; @@ -458,16 +458,17 @@ mod tests { ], ); - let result = store.sr25519_vrf_output(SR25519, &pair.public(), &input); + let result = store.sr25519_vrf_pre_output(SR25519, &pair.public(), &input); 
assert!(result.unwrap().is_none()); store .insert(SR25519, secret_uri, pair.public().as_ref()) .expect("Inserts unknown key"); - let preout = store.sr25519_vrf_output(SR25519, &pair.public(), &input).unwrap().unwrap(); + let pre_output = + store.sr25519_vrf_pre_output(SR25519, &pair.public(), &input).unwrap().unwrap(); - let result = preout.make_bytes::<32>(b"rand", &input, &pair.public()); + let result = pre_output.make_bytes::<32>(b"rand", &input, &pair.public()); assert!(result.is_ok()); } @@ -525,7 +526,7 @@ mod tests { let store = MemoryKeystore::new(); - let ring_ctx = bandersnatch::ring_vrf::RingContext::new_testing(); + let ring_ctx = bandersnatch::ring_vrf::RingContext::<1024>::new_testing(); let mut pks: Vec<_> = (0..16) .map(|i| bandersnatch::Pair::from_seed(&[i as u8; 32]).public()) diff --git a/substrate/primitives/maybe-compressed-blob/Cargo.toml b/substrate/primitives/maybe-compressed-blob/Cargo.toml index c6fa7103672fc17097a9c446ab01262612034efc..86f73626c0a0bfdbcac66e8b5e6019602713ad03 100644 --- a/substrate/primitives/maybe-compressed-blob/Cargo.toml +++ b/substrate/primitives/maybe-compressed-blob/Cargo.toml @@ -10,6 +10,9 @@ description = "Handling of blobs, usually Wasm code, which may be compresed" documentation = "https://docs.rs/sp-maybe-compressed-blob" readme = "README.md" +[lints] +workspace = true + [dependencies] thiserror = "1.0" zstd = { version = "0.12.4", default-features = false } diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 5216765825ff71f4b0320df14e372ca851113fe8..dec55a5c8f30d14349cb0a16072358b54bbc2824 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Merkle Mountain Range primitives." +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,19 +19,19 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } -serde = { version = "1.0.188", features = ["derive", "alloc"], default-features = false, optional = true } -sp-api = { path = "../api", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-debug-derive = { path = "../debug-derive", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +serde = { version = "1.0.193", features = ["alloc", "derive"], default-features = false, optional = true } +sp-api = { path = "../api", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } thiserror = "1.0" [dev-dependencies] array-bytes = "6.1" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "log/std", @@ -43,4 +46,4 @@ std = [ ] # Serde support without relying on std features. 
-serde = [ "dep:serde", "scale-info/serde", "sp-core/serde", "sp-runtime/serde" ] +serde = ["dep:serde", "scale-info/serde", "sp-core/serde", "sp-runtime/serde"] diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index 77c21b920f2f8e2f211271098e7479b8bf62f8f2..0dc496bab53130138933996c29a6022298779a59 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Intermediate representation of the runtime metadata." documentation = "https://docs.rs/sp-metadata-ir" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,8 +19,8 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../std", default-features = false} +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "frame-metadata/std", "scale-info/std", "sp-std/std" ] +default = ["std"] +std = ["codec/std", "frame-metadata/std", "scale-info/std", "sp-std/std"] diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index bc6878086cf5eae3f355b0741c356ee57c4fd4c6..6ea7a6cbe8c436359a763effe7075e7099787628 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,7 +23,7 @@ sp-application-crypto = { default-features = false, path = "../application-crypt sp-std = { default-features = false, path = "../std" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index 90418e561f217aeaf28c7d9023839715e8a22bf9..dcd03e7e5e01880b652c56dbe1d187c43f5f1bb3 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -9,24 +9,27 @@ repository.workspace = true description = "NPoS election algorithm primitives" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } -sp-arithmetic = { path = "../arithmetic", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } +sp-arithmetic = { path = "../arithmetic", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } 
[dev-dependencies] rand = "0.8.5" substrate-test-utils = { path = "../../test-utils" } [features] -default = [ "std" ] +default = ["std"] bench = [] std = [ "codec/std",
diff --git a/substrate/primitives/npos-elections/fuzzer/Cargo.toml b/substrate/primitives/npos-elections/fuzzer/Cargo.toml index 5e75f926f87cae29e7ed69621d3222ce3862680a..37eaeea2b822c659177f083465d08913bfdbbf68 100644 --- a/substrate/primitives/npos-elections/fuzzer/Cargo.toml +++ b/substrate/primitives/npos-elections/fuzzer/Cargo.toml @@ -10,13 +10,16 @@ description = "Fuzzer for phragmén implementation." documentation = "https://docs.rs/sp-npos-elections-fuzzer" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } honggfuzz = "0.5" -rand = { version = "0.8", features = ["std", "small_rng"] } +rand = { version = "0.8", features = ["small_rng", "std"] } sp-npos-elections = { path = ".." } sp-runtime = { path = "../../runtime" }
diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs index 62ae0502114823c3e701c39f727d1ce8aca39e16..8d741f4130d9ca724dc6a7566edf8ed6a483e7fa 100644 --- a/substrate/primitives/npos-elections/src/lib.rs +++ b/substrate/primitives/npos-elections/src/lib.rs @@ -22,9 +22,8 @@ //! - [`phragmms`](phragmms::phragmms()): Implements a hybrid approach inspired by Phragmén which is //! executed faster but it can achieve a constant factor approximation of the maximin problem, //! similar to that of the MMS algorithm. -//! - [`balance`](balancing::balance): Implements the star balancing algorithm. This iterative -//! process can push a solution toward being more "balanced", which in turn can increase its -//! score. +//! - [`balance`]: Implements the star balancing algorithm. This iterative process can push a +//! solution toward being more "balanced", which in turn can increase its score. //! //! ### Terminology //!
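// A minimal, illustrative sketch (not part of this patch) of how the `seq_phragmen` and
// star-balancing primitives documented in the hunk above are typically combined: run the
// sequential Phragmén election and let the iterative balancing post-processing run for a
// bounded number of iterations. The exact signatures (`seq_phragmen`, `BalancingConfig`)
// are assumed from the sp-npos-elections crate and may differ slightly between releases.
use sp_npos_elections::{seq_phragmen, BalancingConfig, ElectionResult};
use sp_runtime::Perbill;

fn elect_two_validators() -> Result<ElectionResult<u64, Perbill>, sp_npos_elections::Error> {
	// Candidates and voters are identified by plain account ids in this sketch.
	let candidates = vec![1u64, 2, 3];
	// Each voter is (who, vote weight, the candidates it votes for).
	let voters = vec![(10u64, 1_000u64, vec![1u64, 2]), (20, 1_000, vec![2, 3])];
	// A non-zero iteration count enables the star-balancing step described above,
	// which can push the solution toward a better score.
	let balancing = Some(BalancingConfig { iterations: 10, tolerance: 0 });
	seq_phragmen::<u64, Perbill>(2, candidates, voters, balancing)
}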
diff --git a/substrate/primitives/offchain/Cargo.toml b/substrate/primitives/offchain/Cargo.toml index 5f8821b43c7edac0cb3bd07948bf568ae0ea53da..19d66ae31e9fe6941937a08b50e8dd8fdc762a56 100644 --- a/substrate/primitives/offchain/Cargo.toml +++ b/substrate/primitives/offchain/Cargo.toml @@ -9,14 +9,17 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } [features] -default = [ "std" ] -std = [ "sp-api/std", "sp-core/std", "sp-runtime/std" ] +default = ["std"] +std = ["sp-api/std", "sp-core/std", "sp-runtime/std"] diff --git a/substrate/primitives/panic-handler/Cargo.toml b/substrate/primitives/panic-handler/Cargo.toml index 428062757c1556119c5dfe15cf7e1817f842bdd5..a0df527f56e0818c3eb78309b8171675538d15f6 100644 --- a/substrate/primitives/panic-handler/Cargo.toml +++ b/substrate/primitives/panic-handler/Cargo.toml @@ -10,6 +10,9 @@ description = "Custom panic hook with bug report link" documentation = "https://docs.rs/sp-panic-handler" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/primitives/rpc/Cargo.toml b/substrate/primitives/rpc/Cargo.toml index 77bdcc4f89a1d7a8bfc1666884f4d8804ce497a1..a542b65cdc93dfe79f5a77efad0eac19d6b26a2e 100644 --- a/substrate/primitives/rpc/Cargo.toml +++ b/substrate/primitives/rpc/Cargo.toml @@ -9,12 +9,15 @@ repository.workspace = true description = "Substrate RPC primitives and utilities." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] rustc-hash = "1.1.0" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } sp-core = { path = "../core" } [dev-dependencies] diff --git a/substrate/primitives/runtime-interface/Cargo.toml b/substrate/primitives/runtime-interface/Cargo.toml index 69a0d112a1621196394c374287ce9474f5934b58..a4c8457b598efa713c1b7a6a2444cd8e16edeacc 100644 --- a/substrate/primitives/runtime-interface/Cargo.toml +++ b/substrate/primitives/runtime-interface/Cargo.toml @@ -10,20 +10,23 @@ description = "Substrate runtime interface" documentation = "https://docs.rs/sp-runtime-interface/" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } sp-wasm-interface = { path = "../wasm-interface", default-features = false } -sp-std = { path = "../std", default-features = false} -sp-tracing = { path = "../tracing", default-features = false} +sp-std = { path = "../std", default-features = false } +sp-tracing = { path = "../tracing", default-features = false } sp-runtime-interface-proc-macro = { path = "proc-macro" } -sp-externalities = { path = "../externalities", default-features = false} +sp-externalities = { path = "../externalities", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } static_assertions = "1.0.0" primitive-types = { version = "0.12.0", default-features = false } -sp-storage = { path = "../storage", default-features = false} +sp-storage = { path = "../storage", default-features = false } impl-trait-for-tuples = "0.2.2" [dev-dependencies] @@ -35,7 +38,7 @@ rustversion = "1.0.6" trybuild = "1.0.74" [features] -default = [ "std" ] +default = ["std"] std = [ "bytes/std", "codec/std", diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index fbc49785ae97008e66fcc4b84a7111e998b8dc61..190ccd51ecf07523e5c003e3714876c4d5255000 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "This crate provides procedural macros for usage within the context of the Substrate runtime interface." 
documentation = "https://docs.rs/sp-runtime-interface-proc-macro" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,7 +20,8 @@ proc-macro = true [dependencies] Inflector = "0.11.4" -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.1" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "visit", "fold", "extra-traits"] } +expander = "2.0.0" +syn = { version = "2.0.41", features = ["extra-traits", "fold", "full", "visit"] } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 008d69b32100807d8fefc945a73722eafdefd2b8..d0cc9e7b96bac609c6e534ed00d7dda0d87ae379 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -68,5 +68,11 @@ pub fn runtime_interface_impl( } }; + let res = expander::Expander::new("runtime_interface") + .dry(std::env::var("EXPAND_MACROS").is_err()) + .verbose(true) + .write_to_out_dir(res) + .expect("Does not fail because of IO in OUT_DIR; qed"); + Ok(res) } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/utils.rs b/substrate/primitives/runtime-interface/proc-macro/src/utils.rs index 9818fd6842a639191e1f5e31398646984662f0a7..7d97f9f3e1ca09c7540861f8b0b859d297f72030 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/utils.rs @@ -89,7 +89,7 @@ struct RuntimeInterfaceFunctionSet { impl RuntimeInterfaceFunctionSet { fn new(version: VersionAttribute, trait_item: &TraitItemFn) -> Result { Ok(Self { - latest_version_to_call: version.is_callable().then(|| version.version), + latest_version_to_call: version.is_callable().then_some(version.version), versions: BTreeMap::from([( version.version, RuntimeInterfaceFunction::new(trait_item)?, diff --git a/substrate/primitives/runtime-interface/src/pass_by.rs b/substrate/primitives/runtime-interface/src/pass_by.rs index 8d145669adc3cb6a2b733fcac1d41e15d41eb6ab..103e9c16220542d5a9be5d85301bd45f7906e5e9 100644 --- a/substrate/primitives/runtime-interface/src/pass_by.rs +++ b/substrate/primitives/runtime-interface/src/pass_by.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Provides the [`PassBy`](PassBy) trait to simplify the implementation of the +//! Provides the [`PassBy`] trait to simplify the implementation of the //! runtime interface traits for custom types. //! //! [`Codec`], [`Inner`] and [`Enum`] are the provided strategy implementations. 
diff --git a/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index 8e06aac851f9e62f82082e92bf6b797b7c919a6c..f663c6d47263b27909510d51eb1860e4caa8d12a 100644 --- a/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -9,19 +9,22 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../core", default-features = false} -sp-io = { path = "../../io", default-features = false} -sp-runtime-interface = { path = "..", default-features = false} +sp-core = { path = "../../core", default-features = false } +sp-io = { path = "../../io", default-features = false } +sp-runtime-interface = { path = "..", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "sp-core/std", "sp-io/std", diff --git a/substrate/primitives/runtime-interface/test-wasm/Cargo.toml b/substrate/primitives/runtime-interface/test-wasm/Cargo.toml index 7729f89fa39ab263af141f9a797d90d24937d8f8..ecb3c7f8732dd18fc79918bd55d8b50066b754d4 100644 --- a/substrate/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/substrate/primitives/runtime-interface/test-wasm/Cargo.toml @@ -9,21 +9,24 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } -sp-core = { path = "../../core", default-features = false} -sp-io = { path = "../../io", default-features = false} -sp-runtime-interface = { path = "..", default-features = false} -sp-std = { path = "../../std", default-features = false} +sp-core = { path = "../../core", default-features = false } +sp-io = { path = "../../io", default-features = false } +sp-runtime-interface = { path = "..", default-features = false } +sp-std = { path = "../../std", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "bytes/std", "sp-core/std", diff --git a/substrate/primitives/runtime-interface/test/Cargo.toml b/substrate/primitives/runtime-interface/test/Cargo.toml index feb6a454af1565a874cdec4075da9023a3232560..55d70960989e8888b524a205d8cb0d58fca975d1 100644 --- a/substrate/primitives/runtime-interface/test/Cargo.toml +++ b/substrate/primitives/runtime-interface/test/Cargo.toml @@ -8,12 +8,15 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] tracing = "0.1.29" -tracing-core = "0.1.28" +tracing-core = "0.1.32" sc-executor = { path = "../../../client/executor" } sc-executor-common = { path = "../../../client/executor/common" } sp-io = { path = "../../io" } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index bf6cf93c5f048412342f8176b02d784a2c434b5c..a95efbd6ddd96737b25fb2dc1b330d8abf23e988 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -10,6 +10,9 @@ description = 
"Runtime Modules shared primitive types." documentation = "https://docs.rs/sp-runtime" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -22,13 +25,16 @@ log = { version = "0.4.17", default-features = false } paste = "1.0" rand = { version = "0.8.5", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } -sp-application-crypto = { path = "../application-crypto", default-features = false} -sp-arithmetic = { path = "../arithmetic", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-io = { path = "../io", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-weights = { path = "../weights", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } +sp-application-crypto = { path = "../application-crypto", default-features = false } +sp-arithmetic = { path = "../arithmetic", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-io = { path = "../io", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-weights = { path = "../weights", default-features = false } +docify = { version = "0.2.6" } + +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } [dev-dependencies] rand = "0.8.5" @@ -42,7 +48,7 @@ substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } [features] runtime-benchmarks = [] try-runtime = [] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "either/use_std", diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs index 4b0e017f4517b2948e9e0e45a53efc6fe87c82d6..44325920beee0c1026c4ec7dc68b6943652dfc92 100644 --- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs @@ -26,9 +26,11 @@ use crate::{ transaction_validity::{TransactionSource, TransactionValidity}, }; -/// Definition of something that the external world might want to say; its -/// existence implies that it has been checked and is good, particularly with -/// regards to the signature. +/// Definition of something that the external world might want to say; its existence implies that it +/// has been checked and is good, particularly with regards to the signature. +/// +/// This is typically passed into [`traits::Applyable::apply`], which should execute +/// [`CheckedExtrinsic::function`], alongside all other bits and bobs. 
#[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] pub struct CheckedExtrinsic { /// Who this purports to be from and the number of extrinsics have come before
diff --git a/substrate/primitives/runtime/src/generic/header.rs b/substrate/primitives/runtime/src/generic/header.rs index 82ab9a61f96d8f26579bf704947b74a206b7ec99..0eeef363a06dc95e999a0277563e1c86f46e7028 100644 --- a/substrate/primitives/runtime/src/generic/header.rs +++ b/substrate/primitives/runtime/src/generic/header.rs @@ -21,16 +21,11 @@ use crate::{ codec::{Codec, Decode, Encode}, generic::Digest, scale_info::TypeInfo, - traits::{ - self, AtLeast32BitUnsigned, Hash as HashT, MaybeDisplay, MaybeFromStr, - MaybeSerializeDeserialize, Member, - }, + traits::{self, AtLeast32BitUnsigned, BlockNumber, Hash as HashT, MaybeDisplay, Member}, }; -use codec::{FullCodec, MaxEncodedLen}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use sp_core::U256; -use sp_std::fmt::Debug; /// Abstraction over a block header for a substrate chain. #[derive(Encode, Decode, PartialEq, Eq, Clone, sp_core::RuntimeDebug, TypeInfo)] @@ -79,20 +74,7 @@ where impl traits::Header for Header where - Number: Member - + MaybeSerializeDeserialize - + MaybeFromStr - + Debug - + Default - + sp_std::hash::Hash - + MaybeDisplay - + AtLeast32BitUnsigned - + FullCodec - + Copy - + MaxEncodedLen - + Into - + TryFrom - + TypeInfo, + Number: BlockNumber, Hash: HashT, { type Number = Number;
diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 1cdc0b8e4051b7977b2eea4caa1339b17b4ceb06..6ac381babeea04175c80ab9e8cc798b555a61e1a 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -43,16 +43,37 @@ const EXTRINSIC_FORMAT_VERSION: u8 = 4; /// The `SignaturePayload` of `UncheckedExtrinsic`. type UncheckedSignaturePayload = (Address, Signature, Extra); -/// A extrinsic right from the external world. This is unchecked and so -/// can contain a signature. +/// An extrinsic right from the external world. This is unchecked and so can contain a signature. +/// +/// An extrinsic is formally described as any external data originating from outside the runtime +/// that is fed into the runtime as part of the block body. +/// +/// Inherents are special types of extrinsics that are placed into the block by the block-builder. +/// They are unsigned because the assertion is that they are "inherently true" by virtue of getting +/// past all validators. +/// +/// Transactions are all other statements provided by external entities that the chain deems +/// valuable and decides to include in the block. This value is typically provided in the form of a +/// fee payment, but it could in principle be any other interaction. Transactions are either signed +/// or unsigned. A sensible transaction pool should ensure that only transactions that are +/// worthwhile are considered for block-building. +#[doc = simple_mermaid::mermaid!("../../../../../docs/mermaid/extrinsics.mmd")] +/// This type is by no means enforced within Substrate, but given how generic it is, it is highly +/// likely that for most use-cases it will suffice. Thus, the encoding of this type will dictate +/// exactly what bytes should be sent to a runtime to transact with it.
+/// +/// This can be checked using [`Checkable`], yielding a [`CheckedExtrinsic`], which is the +/// counterpart of this type after its signature (and other non-negotiable validity checks) have +/// passed. #[derive(PartialEq, Eq, Clone)] pub struct UncheckedExtrinsic where Extra: SignedExtension, { - /// The signature, address, number of extrinsics have come before from - /// the same signer and an era describing the longevity of this transaction, - /// if this is a signed extrinsic. + /// The signature, address, number of extrinsics have come before from the same signer and an + /// era describing the longevity of this transaction, if this is a signed extrinsic. + /// + /// `None` if it is unsigned or an inherent. pub signature: Option>, /// The function that should be called. pub function: Call, @@ -286,6 +307,7 @@ where } } +#[docify::export(unchecked_extrinsic_encode_impl)] impl Encode for UncheckedExtrinsic where Address: Encode, diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 0e1d4c31fd7126dc267ada440ede3bafe44e219b..ddf92554c83056f192015a26285408b7fdf33695 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -954,6 +954,32 @@ pub fn print(print: impl traits::Printable) { print.print(); } +/// Utility function to declare string literals backed by an array of length N. +/// +/// The input can be shorter than N, in that case the end of the array is padded with zeros. +/// +/// [`str_array`] is useful when converting strings that end up in the storage as fixed size arrays +/// or in const contexts where static data types have strings that could also end up in the storage. +/// +/// # Example +/// +/// ```rust +/// # use sp_runtime::str_array; +/// const MY_STR: [u8; 6] = str_array("data"); +/// assert_eq!(MY_STR, *b"data\0\0"); +/// ``` +pub const fn str_array(s: &str) -> [u8; N] { + debug_assert!(s.len() <= N, "String literal doesn't fit in array"); + let mut i = 0; + let mut arr = [0; N]; + let s = s.as_bytes(); + while i < s.len() { + arr[i] = s[i]; + i += 1; + } + arr +} + /// Describes on what should happen with a storage transaction. pub enum TransactionOutcome { /// Commit the transaction. diff --git a/substrate/primitives/runtime/src/offchain/storage_lock.rs b/substrate/primitives/runtime/src/offchain/storage_lock.rs index 1b795978447df13d037385d9e7dca09a9dd47596..a2da48721e7591909932e58cb20fffbeedb5f88d 100644 --- a/substrate/primitives/runtime/src/offchain/storage_lock.rs +++ b/substrate/primitives/runtime/src/offchain/storage_lock.rs @@ -156,7 +156,7 @@ pub struct BlockAndTimeDeadline { impl Clone for BlockAndTimeDeadline { fn clone(&self) -> Self { - Self { block_number: self.block_number.clone(), timestamp: self.timestamp } + Self { block_number: self.block_number, timestamp: self.timestamp } } } @@ -250,7 +250,7 @@ impl Lockable for BlockAndTime { /// /// A lock that is persisted in the DB and provides the ability to guard against /// concurrent access in an off-chain worker, with a defined expiry deadline -/// based on the concrete [`Lockable`](Lockable) implementation. +/// based on the concrete [`Lockable`] implementation. pub struct StorageLock<'a, L = Time> { // A storage value ref which defines the DB entry representing the lock. 
value_ref: StorageValueRef<'a>, diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs index ec79f43cabdc36249f56f1829de6cdb2289f9913..2ac4047a80bc0ee3ab0ec3b41edafaff5395a114 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits.rs @@ -38,7 +38,7 @@ pub use sp_arithmetic::traits::{ EnsureOp, EnsureOpAssign, EnsureSub, EnsureSubAssign, IntegerSquareRoot, One, SaturatedConversion, Saturating, UniqueSaturatedFrom, UniqueSaturatedInto, Zero, }; -use sp_core::{self, storage::StateVersion, Hasher, RuntimeDebug, TypeId}; +use sp_core::{self, storage::StateVersion, Hasher, RuntimeDebug, TypeId, U256}; #[doc(hidden)] pub use sp_core::{ parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, @@ -1149,6 +1149,44 @@ pub trait IsMember { fn is_member(member_id: &MemberId) -> bool; } +/// Super trait with all the attributes for a block number. +pub trait BlockNumber: + Member + + MaybeSerializeDeserialize + + MaybeFromStr + + Debug + + sp_std::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32BitUnsigned + + Into + + TryFrom + + Default + + TypeInfo + + MaxEncodedLen + + FullCodec +{ +} + +impl< + T: Member + + MaybeSerializeDeserialize + + MaybeFromStr + + Debug + + sp_std::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32BitUnsigned + + Into + + TryFrom + + Default + + TypeInfo + + MaxEncodedLen + + FullCodec, + > BlockNumber for T +{ +} + /// Something which fulfills the abstract idea of a Substrate header. It has types for a `Number`, /// a `Hash` and a `Hashing`. It provides access to an `extrinsics_root`, `state_root` and /// `parent_hash`, as well as a `digest` and a block `number`. @@ -1158,18 +1196,7 @@ pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + TypeInfo + 'static { /// Header number. - type Number: Member - + MaybeSerializeDeserialize - + MaybeFromStr - + Debug - + sp_std::hash::Hash - + Copy - + MaybeDisplay - + AtLeast32BitUnsigned - + Default - + TypeInfo - + MaxEncodedLen - + FullCodec; + type Number: BlockNumber; /// Header hash type type Hash: HashOutput; /// Hashing algorithm @@ -2265,7 +2292,15 @@ pub trait BlockIdTo { /// Get current block number pub trait BlockNumberProvider { /// Type of `BlockNumber` to provide. - type BlockNumber: Codec + Clone + Ord + Eq + AtLeast32BitUnsigned; + type BlockNumber: Codec + + Clone + + Ord + + Eq + + AtLeast32BitUnsigned + + TypeInfo + + Debug + + MaxEncodedLen + + Copy; /// Returns the current block number. 
/// @@ -2293,6 +2328,13 @@ pub trait BlockNumberProvider { fn set_block_number(_block: Self::BlockNumber) {} } +impl BlockNumberProvider for () { + type BlockNumber = u32; + fn current_block_number() -> Self::BlockNumber { + 0 + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 4c11762ffb7cf67890108e03fbc9dde05a5576b7..25700210feef2c0078992ace19597b72291625cc 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -9,21 +9,24 @@ repository.workspace = true description = "Primitives for sessions" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../api", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", optional = true} -sp-staking = { path = "../staking", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", optional = true } +sp-staking = { path = "../staking", default-features = false } +sp-std = { path = "../std", default-features = false } sp-keystore = { path = "../keystore", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index ef96276a00394eba9297dc5a5305cdf1227bb98b..2c7212651428aafb9cb16df3ea075a844b223cd3 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -9,21 +9,24 @@ repository.workspace = true description = "A crate which contains primitives that are useful for implementation that uses staking approaches in general. Definitions related to sessions, slashing, etc go here." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", @@ -32,4 +35,4 @@ std = [ "sp-runtime/std", "sp-std/std", ] -runtime-benchmarks = [ "sp-runtime/runtime-benchmarks" ] +runtime-benchmarks = ["sp-runtime/runtime-benchmarks"] diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index ec5d9b5ea14e5b930130445c02c35f6a64ee9733..f891a74dbf4d49459a55b198667f141bc256dc5d 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -10,6 +10,9 @@ repository.workspace = true documentation = "https://docs.rs/sp-state-machine" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -22,11 +25,11 @@ rand = { version = "0.8.5", optional = true } smallvec = "1.11.0" thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } -sp-core = { path = "../core", default-features = false} -sp-externalities = { path = "../externalities", default-features = false} -sp-panic-handler = { path = "../panic-handler", optional = true} -sp-std = { path = "../std", default-features = false} -sp-trie = { path = "../trie", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-externalities = { path = "../externalities", default-features = false } +sp-panic-handler = { path = "../panic-handler", optional = true } +sp-std = { path = "../std", default-features = false } +sp-trie = { path = "../trie", default-features = false } trie-db = { version = "0.28.0", default-features = false } [dev-dependencies] @@ -37,7 +40,7 @@ sp-runtime = { path = "../runtime" } assert_matches = "1.5" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "hash-db/std", diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 0e2b9bfdfffcf01ae06933d5e02ff6842a80cc77..5909a30a814c32686acbb6c369831a6d43649a5d 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -142,7 +142,6 @@ pub use crate::{ mod std_reexport { pub use crate::{ basic::BasicExternalities, - error::{Error, ExecutionError}, in_memory_backend::new_in_mem, read_only::{InspectState, ReadOnlyExternalities}, testing::TestExternalities, @@ -289,7 +288,7 @@ mod execution { let result = self .exec - .call(&mut ext, self.runtime_code, self.method, self.call_data, false, self.context) + .call(&mut ext, self.runtime_code, self.method, self.call_data, self.context) .0; 
self.overlay @@ -1120,10 +1119,9 @@ _: &RuntimeCode, _method: &str, _data: &[u8], - use_native: bool, _: CallContext, ) -> (CallResult, bool) { - let using_native = use_native && self.native_available; + let using_native = self.native_available; match (using_native, self.native_succeeds, self.fallback_succeeds) { (true, true, _) | (false, _, true) => ( Ok(vec![
diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 8f2d02fd6840eb33735eb15bcd5c57f8776e6f5c..59589dbbb37e8c37022ab176df0b02c25d53b3b6 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -298,7 +298,7 @@ impl OverlayedMap { /// Call this when control returns from the runtime. /// - /// This commits all dangling transaction left open by the runtime. + /// This rolls back all dangling transactions left open by the runtime. /// Calling this while already outside the runtime will return an error. pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { if let ExecutionMode::Client = self.execution_mode {
diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 28cfecf1dbd62b5387f79cff8938fc6fc9d4bf16..626cf6c3cafe128c98f6c5bedfe5d6cbe1ab958d 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -348,7 +348,7 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. /// /// Can be rolled back or committed when called inside a transaction. - pub(crate) fn set_child_storage( + pub fn set_child_storage( &mut self, child_info: &ChildInfo, key: StorageKey, @@ -373,7 +373,7 @@ impl OverlayedChanges { /// Clear child storage of given storage key. /// /// Can be rolled back or committed when called inside a transaction. - pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) -> u32 { + pub fn clear_child_storage(&mut self, child_info: &ChildInfo) -> u32 { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -391,7 +391,7 @@ impl OverlayedChanges { /// Removes all key-value pairs which keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction. - pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) -> u32 { + pub fn clear_prefix(&mut self, prefix: &[u8]) -> u32 { self.mark_dirty(); self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index()) @@ -400,7 +400,7 @@ impl OverlayedChanges { /// Removes all key-value pairs which keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction - pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) -> u32 { + pub fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) -> u32 { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -498,7 +498,7 @@ impl OverlayedChanges { /// Call this when control returns from the runtime. /// - /// This commits all dangling transaction left open by the runtime. + /// This rolls back all dangling transactions left open by the runtime. /// Calling this while outside the runtime will return an error.
pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { self.top.exit_runtime()?; diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index 7b337b5fd54036059b0c777ff03e51bdfbc74cca..7496463e642100dd6cd6908525e29ee5fc34bf9a 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -33,12 +33,12 @@ use sp_core::storage::{ChildInfo, StateVersion}; #[cfg(feature = "std")] use sp_trie::{ cache::{LocalTrieCache, TrieCache}, - recorder::Recorder, - MemoryDB, StorageProof, + MemoryDB, }; #[cfg(not(feature = "std"))] use sp_trie::{Error, NodeCodec}; -use sp_trie::{MerkleValue, PrefixedMemoryDB}; +use sp_trie::{MerkleValue, PrefixedMemoryDB, StorageProof, TrieRecorderProvider}; + use trie_db::TrieCache as TrieCacheT; #[cfg(not(feature = "std"))] use trie_db::{node::NodeOwned, CachedValue}; @@ -112,8 +112,6 @@ pub struct UnimplementedCacheProvider { // Not strictly necessary, but the H bound allows to use this as a drop-in // replacement for the `LocalTrieCache` in no-std contexts. _phantom: core::marker::PhantomData, - // Statically prevents construction. - _infallible: core::convert::Infallible, } #[cfg(not(feature = "std"))] @@ -156,52 +154,83 @@ impl TrieCacheProvider for UnimplementedCacheProvider { } } +/// Recorder provider that allows construction of a [`TrieBackend`] and satisfies the requirements, +/// but can never be instantiated. +#[cfg(not(feature = "std"))] +pub struct UnimplementedRecorderProvider { + // Not strictly necessary, but the H bound allows to use this as a drop-in + // replacement for the [`sp_trie::recorder::Recorder`] in no-std contexts. + _phantom: core::marker::PhantomData, +} + +#[cfg(not(feature = "std"))] +impl trie_db::TrieRecorder for UnimplementedRecorderProvider { + fn record<'a>(&mut self, _access: trie_db::TrieAccess<'a, H::Out>) { + unimplemented!() + } + + fn trie_nodes_recorded_for_key(&self, _key: &[u8]) -> trie_db::RecordedForKey { + unimplemented!() + } +} + +#[cfg(not(feature = "std"))] +impl TrieRecorderProvider for UnimplementedRecorderProvider { + type Recorder<'a> = UnimplementedRecorderProvider where H: 'a; + + fn drain_storage_proof(self) -> Option { + unimplemented!() + } + + fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> { + unimplemented!() + } +} + #[cfg(feature = "std")] type DefaultCache = LocalTrieCache; #[cfg(not(feature = "std"))] type DefaultCache = UnimplementedCacheProvider; +#[cfg(feature = "std")] +type DefaultRecorder = sp_trie::recorder::Recorder; + +#[cfg(not(feature = "std"))] +type DefaultRecorder = UnimplementedRecorderProvider; + /// Builder for creating a [`TrieBackend`]. -pub struct TrieBackendBuilder, H: Hasher, C = DefaultCache> { +pub struct TrieBackendBuilder< + S: TrieBackendStorage, + H: Hasher, + C = DefaultCache, + R = DefaultRecorder, +> { storage: S, root: H::Out, - #[cfg(feature = "std")] - recorder: Option>, + recorder: Option, cache: Option, } -impl TrieBackendBuilder> +impl TrieBackendBuilder where S: TrieBackendStorage, H: Hasher, { /// Create a new builder instance. pub fn new(storage: S, root: H::Out) -> Self { - Self { - storage, - root, - #[cfg(feature = "std")] - recorder: None, - cache: None, - } + Self { storage, root, recorder: None, cache: None } } } -impl TrieBackendBuilder +impl TrieBackendBuilder where S: TrieBackendStorage, H: Hasher, { /// Create a new builder instance. 
pub fn new_with_cache(storage: S, root: H::Out, cache: C) -> Self { - Self { - storage, - root, - #[cfg(feature = "std")] - recorder: None, - cache: Some(cache), - } + Self { storage, root, recorder: None, cache: Some(cache) } } /// Wrap the given [`TrieBackend`]. /// @@ -210,53 +239,47 @@ where /// backend. /// /// The backend storage and the cache will be taken from `other`. - pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C> { + pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C, R> { TrieBackendBuilder { storage: other.essence.backend_storage(), root: *other.essence.root(), - #[cfg(feature = "std")] recorder: None, cache: other.essence.trie_node_cache.as_ref(), } } /// Use the given optional `recorder` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn with_optional_recorder(self, recorder: Option>) -> Self { + pub fn with_optional_recorder(self, recorder: Option) -> Self { Self { recorder, ..self } } /// Use the given `recorder` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn with_recorder(self, recorder: Recorder) -> Self { + pub fn with_recorder(self, recorder: R) -> Self { Self { recorder: Some(recorder), ..self } } /// Use the given optional `cache` for the to be configured [`TrieBackend`]. - pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { + pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { TrieBackendBuilder { cache, root: self.root, storage: self.storage, - #[cfg(feature = "std")] recorder: self.recorder, } } /// Use the given `cache` for the to be configured [`TrieBackend`]. - pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { + pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { TrieBackendBuilder { cache: Some(cache), root: self.root, storage: self.storage, - #[cfg(feature = "std")] recorder: self.recorder, } } /// Build the configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn build(self) -> TrieBackend { + pub fn build(self) -> TrieBackend { TrieBackend { essence: TrieBackendEssence::new_with_cache_and_recorder( self.storage, @@ -267,27 +290,18 @@ where next_storage_key_cache: Default::default(), } } - - /// Build the configured [`TrieBackend`]. - #[cfg(not(feature = "std"))] - pub fn build(self) -> TrieBackend { - TrieBackend { - essence: TrieBackendEssence::new_with_cache(self.storage, self.root, self.cache), - next_storage_key_cache: Default::default(), - } - } } /// A cached iterator. -struct CachedIter +struct CachedIter where H: Hasher, { last_key: sp_std::vec::Vec, - iter: RawIter, + iter: RawIter, } -impl Default for CachedIter +impl Default for CachedIter where H: Hasher, { @@ -313,23 +327,32 @@ fn access_cache(cell: &CacheCell, callback: impl FnOnce(&mut T) -> R) - } /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. 
-pub struct TrieBackend, H: Hasher, C = DefaultCache> { - pub(crate) essence: TrieBackendEssence, - next_storage_key_cache: CacheCell>>, +pub struct TrieBackend< + S: TrieBackendStorage, + H: Hasher, + C = DefaultCache, + R = DefaultRecorder, +> { + pub(crate) essence: TrieBackendEssence, + next_storage_key_cache: CacheCell>>, } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - TrieBackend +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > TrieBackend where H::Out: Codec, { #[cfg(test)] - pub(crate) fn from_essence(essence: TrieBackendEssence) -> Self { + pub(crate) fn from_essence(essence: TrieBackendEssence) -> Self { Self { essence, next_storage_key_cache: Default::default() } } /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { + pub fn essence(&self) -> &TrieBackendEssence { &self.essence } @@ -361,28 +384,31 @@ where /// Extract the [`StorageProof`]. /// /// This only returns `Some` when there was a recorder set. - #[cfg(feature = "std")] pub fn extract_proof(mut self) -> Option { - self.essence.recorder.take().map(|r| r.drain_storage_proof()) + self.essence.recorder.take().and_then(|r| r.drain_storage_proof()) } } -impl, H: Hasher, C: TrieCacheProvider> sp_std::fmt::Debug - for TrieBackend +impl, H: Hasher, C: TrieCacheProvider, R: TrieRecorderProvider> + sp_std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> Backend - for TrieBackend +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > Backend for TrieBackend where H::Out: Ord + Codec, { type Error = crate::DefaultError; type TrieBackendStorage = S; - type RawIter = crate::trie_backend_essence::RawIter; + type RawIter = crate::trie_backend_essence::RawIter; fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage_hash(key) diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index ad7aeab899c8887e9fcaca2af1577b51f7533b1d..3f789111deeffe27ea4e8d8f7c47de8ea61eb082 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -28,19 +28,19 @@ use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; -use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; #[cfg(feature = "std")] -use sp_trie::recorder::Recorder; +use sp_std::sync::Arc; +use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_first_descedant_value, read_child_trie_hash, read_child_trie_value, read_trie_first_descedant_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, DBValue, KeySpacedDB, MerkleValue, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, - TrieDBRawIterator, TrieRecorder, + TrieDBRawIterator, TrieRecorder, TrieRecorderProvider, }; #[cfg(feature = "std")] -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; // In this module, we only use layout for read operation and empty root, // where V1 and V0 are equivalent. 
use sp_trie::LayoutV1 as Layout; @@ -83,7 +83,7 @@ enum IterState { } /// A raw iterator over the storage. -pub struct RawIter +pub struct RawIter where H: Hasher, { @@ -93,25 +93,26 @@ where child_info: Option, trie_iter: TrieDBRawIterator>, state: IterState, - _phantom: PhantomData<(S, C)>, + _phantom: PhantomData<(S, C, R)>, } -impl RawIter +impl RawIter where H: Hasher, S: TrieBackendStorage, H::Out: Codec + Ord, C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { #[inline] - fn prepare( + fn prepare( &mut self, - backend: &TrieBackendEssence, + backend: &TrieBackendEssence, callback: impl FnOnce( &sp_trie::TrieDB>, &mut TrieDBRawIterator>, - ) -> Option::Out>>>>, - ) -> Option> { + ) -> Option::Out>>>>, + ) -> Option> { if !matches!(self.state, IterState::Pending) { return None } @@ -139,7 +140,7 @@ where } } -impl Default for RawIter +impl Default for RawIter where H: Hasher, { @@ -156,14 +157,15 @@ where } } -impl StorageIterator for RawIter +impl StorageIterator for RawIter where H: Hasher, S: TrieBackendStorage, H::Out: Codec + Ord, C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { - type Backend = crate::TrieBackend; + type Backend = crate::TrieBackend; type Error = crate::DefaultError; #[inline] @@ -204,18 +206,17 @@ where } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher, C> { +pub struct TrieBackendEssence, H: Hasher, C, R> { storage: S, root: H::Out, empty: H::Out, #[cfg(feature = "std")] pub(crate) cache: Arc>>, pub(crate) trie_node_cache: Option, - #[cfg(feature = "std")] - pub(crate) recorder: Option>, + pub(crate) recorder: Option, } -impl, H: Hasher, C> TrieBackendEssence { +impl, H: Hasher, C, R> TrieBackendEssence { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { Self::new_with_cache(storage, root, None) @@ -230,23 +231,22 @@ impl, H: Hasher, C> TrieBackendEssence { #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), trie_node_cache: cache, - #[cfg(feature = "std")] recorder: None, } } /// Create new trie-based backend. - #[cfg(feature = "std")] pub fn new_with_cache_and_recorder( storage: S, root: H::Out, cache: Option, - recorder: Option>, + recorder: Option, ) -> Self { TrieBackendEssence { storage, root, empty: H::hash(&[0u8]), + #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), trie_node_cache: cache, recorder, @@ -289,37 +289,31 @@ impl, H: Hasher, C> TrieBackendEssence { } } -impl, H: Hasher, C: TrieCacheProvider> TrieBackendEssence { +impl, H: Hasher, C: TrieCacheProvider, R: TrieRecorderProvider> + TrieBackendEssence +{ /// Call the given closure passing it the recorder and the cache. /// /// If the given `storage_root` is `None`, `self.root` will be used. 
#[inline] - fn with_recorder_and_cache( + fn with_recorder_and_cache( &self, storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, - ) -> R, - ) -> R { + ) -> RE, + ) -> RE { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut cache = self.trie_node_cache.as_ref().map(|c| c.as_trie_db_cache(storage_root)); let cache = cache.as_mut().map(|c| c as _); - #[cfg(feature = "std")] - { - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); - let recorder = match recorder.as_mut() { - Some(recorder) => Some(recorder as &mut dyn TrieRecorder), - None => None, - }; - callback(recorder, cache) - } - - #[cfg(not(feature = "std"))] - { - callback(None, cache) - } + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; + callback(recorder, cache) } /// Call the given closure passing it the recorder and the cache. @@ -329,15 +323,14 @@ impl, H: Hasher, C: TrieCacheProvider> TrieBackendEs /// the new storage root. This is required to register the changes in the cache /// for the correct storage root. The given `storage_root` corresponds to the root of the "old" /// trie. If the value is not given, `self.root` is used. - #[cfg(feature = "std")] - fn with_recorder_and_cache_for_storage_root( + fn with_recorder_and_cache_for_storage_root( &self, storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, - ) -> (Option, R), - ) -> R { + ) -> (Option, RE), + ) -> RE { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); let recorder = match recorder.as_mut() { @@ -361,46 +354,26 @@ impl, H: Hasher, C: TrieCacheProvider> TrieBackendEs result } - - #[cfg(not(feature = "std"))] - fn with_recorder_and_cache_for_storage_root( - &self, - _storage_root: Option, - callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, - ) -> (Option, R), - ) -> R { - if let Some(local_cache) = self.trie_node_cache.as_ref() { - let mut cache = local_cache.as_trie_db_mut_cache(); - - let (new_root, r) = callback(None, Some(&mut cache)); - - if let Some(new_root) = new_root { - local_cache.merge(cache, new_root); - } - - r - } else { - callback(None, None).1 - } - } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > TrieBackendEssence where H::Out: Codec + Ord, { /// Calls the given closure with a [`TrieDb`] constructed for the given /// storage root and (optionally) child trie. #[inline] - fn with_trie_db( + fn with_trie_db( &self, root: H::Out, child_info: Option<&ChildInfo>, - callback: impl FnOnce(&sp_trie::TrieDB>) -> R, - ) -> R { + callback: impl FnOnce(&sp_trie::TrieDB>) -> RE, + ) -> RE { let backend = self as &dyn HashDBRef>; let db = child_info .as_ref() @@ -609,7 +582,7 @@ where } /// Create a raw iterator over the storage. - pub fn raw_iter(&self, args: IterArgs) -> Result> { + pub fn raw_iter(&self, args: IterArgs) -> Result> { let root = if let Some(child_info) = args.child_info.as_ref() { let root = match self.child_root(&child_info)? 
{ Some(root) => root, @@ -831,19 +804,28 @@ where } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - AsHashDB for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > AsHashDB for TrieBackendEssence { fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> HashDB - for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { @@ -875,8 +857,12 @@ impl, H: Hasher, C: TrieCacheProvider + Send + Sync> } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - HashDBRef for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) @@ -928,7 +914,10 @@ mod test { .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_1); + let essence_1 = + TrieBackendEssence::<_, _, LocalTrieCache<_>, sp_trie::recorder::Recorder<_>>::new( + mdb, root_1, + ); let mdb = essence_1.backend_storage().clone(); let essence_1 = TrieBackend::from_essence(essence_1); @@ -938,7 +927,10 @@ mod test { assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); - let essence_2 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_2); + let essence_2 = + TrieBackendEssence::<_, _, LocalTrieCache<_>, sp_trie::recorder::Recorder<_>>::new( + mdb, root_2, + ); assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index 658229cef220b1be0c8ac25bf5b0993ebd18413d..cacfd08f3ebf4802ee3f2f64cfb6d6b8eee2f454 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -9,32 +9,35 @@ repository.workspace = true description = "A crate which contains primitives related to the statement store" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-api = { path = "../api", default-features = false} -sp-application-crypto = { path = "../application-crypto", default-features = false} -sp-runtime-interface = { path = "../runtime-interface", default-features = false} -sp-externalities = { path = "../externalities", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-api = { path = "../api", 
default-features = false } +sp-application-crypto = { path = "../application-crypto", default-features = false } +sp-runtime-interface = { path = "../runtime-interface", default-features = false } +sp-externalities = { path = "../externalities", default-features = false } thiserror = { version = "1.0", optional = true } # ECIES dependencies -ed25519-dalek = { version = "2.0.0", optional = true } -x25519-dalek = { version = "2.0.0", optional = true, features = ["static_secrets"] } -curve25519-dalek = { version = "4.0.0", optional = true } +ed25519-dalek = { version = "2.1", optional = true } +x25519-dalek = { version = "2.0", optional = true, features = ["static_secrets"] } +curve25519-dalek = { version = "4.1.1", optional = true } aes-gcm = { version = "0.10", optional = true } hkdf = { version = "0.12.0", optional = true } sha2 = { version = "0.10.7", optional = true } -rand = { version = "0.8.5", features = ["small_rng"], optional = true } +rand = { version = "0.8.5", features = ["small_rng"], optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "aes-gcm", "aes-gcm?/std", diff --git a/substrate/primitives/std/Cargo.toml b/substrate/primitives/std/Cargo.toml index 2283a4a97a40f0de065d76ea6371275c6956f39c..f349a7b119688fab1a651f4ea709b76274af2796 100644 --- a/substrate/primitives/std/Cargo.toml +++ b/substrate/primitives/std/Cargo.toml @@ -10,9 +10,12 @@ description = "Lowest-abstraction level for the Substrate runtime: just exports documentation = "https://docs.rs/sp-std" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [features] -default = [ "std" ] +default = ["std"] std = [] diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index 11e574f1c4ced1402ec7488cce7b722bedd7cca5..429c17fde50ef3dc47aef97adbf3ca36387df422 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -10,6 +10,9 @@ repository.workspace = true documentation = "https://docs.rs/sp-storage/" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,12 +20,12 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true, default-features = false } ref-cast = "1.0.0" -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } -sp-debug-derive = { path = "../debug-derive", default-features = false} -sp-std = { path = "../std", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } +sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "impl-serde/std", @@ -32,4 +35,4 @@ std = [ ] # Serde support without relying on std features. 
-serde = [ "dep:serde", "impl-serde" ] +serde = ["dep:serde", "impl-serde"] diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index f8dc40f051c21cdd7b4a2e0f0df36bbd3f103d10..3528d0558f530b2d37711a564974cd5ab63343c9 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -414,12 +414,13 @@ impl ChildTrieParentKeyId { /// /// V0 and V1 uses a same trie implementation, but V1 will write external value node in the trie for /// value with size at least `TRIE_VALUE_NODE_THRESHOLD`. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)] #[cfg_attr(feature = "std", derive(Encode, Decode))] pub enum StateVersion { /// Old state version, no value nodes. V0 = 0, /// New state version can use value nodes. + #[default] V1 = 1, } @@ -432,12 +433,6 @@ impl Display for StateVersion { } } -impl Default for StateVersion { - fn default() -> Self { - StateVersion::V1 - } -} - impl From for u8 { fn from(version: StateVersion) -> u8 { version as u8 diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index a3775d7f61f7b07089910992d3afa1e1283ef671..536cca334ddcecd7e3b45d0f718b438cc7fbd004 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -8,20 +8,23 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive"], optional = true } -sp-application-crypto = { path = "../application-crypto", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["derive"], optional = true } +sp-application-crypto = { path = "../application-crypto", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 44b0fdd831c029fed9f1df4acbcd18705d1782f1..b61f36f2056b8a838e9d5c86d89614f834968f45 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -9,19 +9,22 @@ repository.workspace = true description = "Substrate core types and inherents for timestamps." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } thiserror = { version = "1.0.48", optional = true } -sp-inherents = { path = "../inherents", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-inherents = { path = "../inherents", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/tracing/Cargo.toml b/substrate/primitives/tracing/Cargo.toml index 0f7e217ec3882610afbbba4041f4f1b9a49242c9..0ad3cd0705b3423bdced8be291b45e13f25b672e 100644 --- a/substrate/primitives/tracing/Cargo.toml +++ b/substrate/primitives/tracing/Cargo.toml @@ -9,13 +9,16 @@ repository.workspace = true description = "Instrumentation primitives and macros for Substrate." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] # let's default to wasm32 default-target = "wasm32-unknown-unknown" # with the tracing enabled features = ["with-tracing"] # allowing for linux-gnu here, too, allows for `std` to show up as well -targets = ["x86_64-unknown-linux-gnu", "wasm32-unknown-unknown"] +targets = ["wasm32-unknown-unknown", "x86_64-unknown-linux-gnu"] [dependencies] sp-std = { path = "../std", default-features = false } @@ -23,14 +26,14 @@ codec = { version = "3.6.1", package = "parity-scale-codec", default-features = "derive", ] } tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.28", default-features = false } +tracing-core = { version = "0.1.32", default-features = false } tracing-subscriber = { version = "0.2.25", optional = true, features = [ "tracing-log", ] } [features] -default = [ "std" ] -with-tracing = [ "codec/derive", "codec/full" ] +default = ["std"] +with-tracing = ["codec/derive", "codec/full"] std = [ "codec/std", "sp-std/std", diff --git a/substrate/primitives/transaction-pool/Cargo.toml b/substrate/primitives/transaction-pool/Cargo.toml index d1d38ffa1af809cf527d253661d1baa3db5b94c9..6e66910ac388576d761d0215b570f1c62f970e95 100644 --- a/substrate/primitives/transaction-pool/Cargo.toml +++ b/substrate/primitives/transaction-pool/Cargo.toml @@ -10,13 +10,16 @@ description = "Transaction pool runtime facing API." 
documentation = "https://docs.rs/sp-transaction-pool" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } [features] -default = [ "std" ] -std = [ "sp-api/std", "sp-runtime/std" ] +default = ["std"] +std = ["sp-api/std", "sp-runtime/std"] diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 5a35dd8f11f7daa5e084968ec8719dfe81b35983..e1c50a09c59d28c40fa5c97343cc38621cfe99db 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -9,21 +9,24 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-core = { path = "../core", optional = true} -sp-inherents = { path = "../inherents", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-trie = { path = "../trie", optional = true} +sp-core = { path = "../core", optional = true } +sp-inherents = { path = "../inherents", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-trie = { path = "../trie", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 0822d84a76e6da8bcb2c31c341134c44ecb949d2..79ed5c2000094b422e252b99e2f15505d1b8a3f8 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -10,6 +10,9 @@ homepage = "https://substrate.io" documentation = "https://docs.rs/sp-trie" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,7 +23,6 @@ harness = false [dependencies] ahash = { version = "0.8.2", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -hashbrown = { version = "0.13.2", optional = true } hash-db = { version = "0.16.0", default-features = false } lazy_static = { version = "1.4.0", optional = true } memory-db = { version = "0.32.0", default-features = false } @@ -32,8 +34,9 @@ thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.28.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } -sp-core = { path = "../core", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-externalities = { path = "../externalities", default-features = false } schnellru = { version = "0.2.1", 
optional = true } [dev-dependencies] @@ -44,12 +47,11 @@ trie-standardmap = "0.16.0" sp-runtime = { path = "../runtime" } [features] -default = [ "std" ] +default = ["std"] std = [ "ahash", "codec/std", "hash-db/std", - "hashbrown", "lazy_static", "memory-db/std", "nohash-hasher", @@ -58,6 +60,7 @@ std = [ "scale-info/std", "schnellru", "sp-core/std", + "sp-externalities/std", "sp-runtime/std", "sp-std/std", "thiserror", diff --git a/substrate/primitives/trie/src/cache/shared_cache.rs b/substrate/primitives/trie/src/cache/shared_cache.rs index 01ac41a1e47d995a99bea354d7f6dfaf82c433c2..e3ba94a2af7c1c9a4565169c85d6bb6053107a11 100644 --- a/substrate/primitives/trie/src/cache/shared_cache.rs +++ b/substrate/primitives/trie/src/cache/shared_cache.rs @@ -19,11 +19,11 @@ ///! that combines both caches and is exported to the outside. use super::{CacheSize, NodeCached}; use hash_db::Hasher; -use hashbrown::{hash_set::Entry as SetEntry, HashSet}; use nohash_hasher::BuildNoHashHasher; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use schnellru::LruMap; use std::{ + collections::{hash_map::Entry as SetEntry, HashMap}, hash::{BuildHasher, Hasher as _}, sync::Arc, }; @@ -148,7 +148,7 @@ pub struct SharedValueCacheLimiter { heap_size: usize, /// A set with all of the keys deduplicated to save on memory. - known_storage_keys: HashSet>, + known_storage_keys: HashMap, (), ahash::RandomState>, /// A counter with the number of elements that got evicted from the cache. /// @@ -189,10 +189,10 @@ where } self.heap_size += new_item_heap_size; - entry.insert(); + entry.insert(()); }, SetEntry::Occupied(entry) => { - key.storage_key = entry.get().clone(); + key.storage_key = entry.key().clone(); }, } @@ -491,7 +491,7 @@ impl> SharedValueCache { max_inline_size, max_heap_size, heap_size: 0, - known_storage_keys: Default::default(), + known_storage_keys: HashMap::with_hasher(RANDOM_STATE.clone()), items_evicted: 0, max_items_evicted: 0, // Will be set during `update`. }, @@ -778,7 +778,9 @@ mod tests { assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len()); assert_eq!( 3, // Two instances inside the cache + one extra in `known_storage_keys`. 
- Arc::strong_count(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).unwrap()) + Arc::strong_count( + cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0 + ) ); assert_eq!(key.len(), cache.lru.limiter().heap_size); assert_eq!(cache.lru.len(), 2); @@ -792,7 +794,9 @@ mod tests { assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len()); assert_eq!( 3, - Arc::strong_count(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).unwrap()) + Arc::strong_count( + cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0 + ) ); assert_eq!(key.len(), cache.lru.limiter().heap_size); assert_eq!(cache.lru.len(), 2); @@ -812,7 +816,9 @@ mod tests { assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len()); assert_eq!( 3, - Arc::strong_count(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).unwrap()) + Arc::strong_count( + cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0 + ) ); assert_eq!(key.len(), cache.lru.limiter().heap_size); assert_eq!(cache.lru.len(), 2); @@ -833,7 +839,7 @@ mod tests { assert_eq!(cache.lru.limiter().items_evicted, 2); assert_eq!(10, cache.lru.len()); assert_eq!(10, cache.lru.limiter_mut().known_storage_keys.len()); - assert!(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).is_none()); + assert!(cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).is_none()); assert_eq!(key.len() * 10, cache.lru.limiter().heap_size); assert_eq!(cache.lru.len(), 10); assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size); @@ -854,6 +860,6 @@ mod tests { vec![], ); - assert!(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).is_none()); + assert!(cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).is_none()); } } diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 1a1ed670454dce46f72ef16ff033af1336eadf8a..fd1320b3fbcb1a6c0aa7c7ece504ec2da7c34c24 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -30,6 +30,9 @@ mod storage_proof; mod trie_codec; mod trie_stream; +#[cfg(feature = "std")] +pub mod proof_size_extension; + /// Our `NodeCodec`-specific error. pub use error::Error; /// Various re-exports from the `hash-db` crate. @@ -146,6 +149,29 @@ where } } +/// Type that is able to provide a [`trie_db::TrieRecorder`]. +/// +/// Types implementing this trait can be used to maintain recorded state +/// across operations on different [`trie_db::TrieDB`] instances. +pub trait TrieRecorderProvider { + /// Recorder type that is going to be returned by implementors of this trait. + type Recorder<'a>: trie_db::TrieRecorder + 'a + where + Self: 'a; + + /// Create a [`StorageProof`] derived from the internal state. + fn drain_storage_proof(self) -> Option; + + /// Provide a recorder implementing [`trie_db::TrieRecorder`]. + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_>; +} + +/// Type that is able to provide a proof size estimation. +pub trait ProofSizeProvider { + /// Returns the storage proof size. + fn estimate_encoded_size(&self) -> usize; +} + /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. 
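The `sp-trie` hunk above introduces the `TrieRecorderProvider` and `ProofSizeProvider` traits. To make their intended use concrete, here is a minimal, illustrative sketch (not part of the patch itself): it drives both traits through the existing std `Recorder` from `sp-trie`, together with `Blake2Hasher`/`H256` from `sp-core`; the function name and the underscore-prefixed locals are placeholders introduced only for illustration.

use sp_core::{Blake2Hasher, H256};
use sp_trie::{recorder::Recorder, ProofSizeProvider, StorageProof, TrieRecorderProvider};

// Sketch only: in real code the recorder would be wired into a `TrieDBBuilder`
// or `TrieBackendBuilder` so that trie reads are actually recorded.
fn record_accesses(storage_root: H256) -> Option<StorageProof> {
    let recorder = Recorder::<Blake2Hasher>::default();

    {
        // Hand out a `trie_db::TrieRecorder` for the given root; the returned
        // guard keeps the recorder's internal mutex locked until it is dropped.
        let _trie_recorder = recorder.as_trie_recorder(storage_root);
        // ... perform trie reads through this recorder here ...
    }

    // The recorder doubles as a `ProofSizeProvider`, reporting a running
    // estimate of the encoded proof size.
    let _estimated_size = ProofSizeProvider::estimate_encoded_size(&recorder);

    // Drain the recorded accesses into a `StorageProof` via the provider trait
    // (the inherent `drain_storage_proof` on `Recorder` returns it directly).
    TrieRecorderProvider::<Blake2Hasher>::drain_storage_proof(recorder)
}

The same `Recorder` can also back the `ProofSizeExt` externalities extension added in the next file, since it implements `ProofSizeProvider`.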
diff --git a/substrate/primitives/trie/src/proof_size_extension.rs b/substrate/primitives/trie/src/proof_size_extension.rs new file mode 100644 index 0000000000000000000000000000000000000000..c97f334494afd96c27999810c009643eb948e117 --- /dev/null +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Externalities extension that provides access to the current proof size +//! of the underlying recorder. + +use crate::ProofSizeProvider; + +sp_externalities::decl_extension! { + /// The proof size extension to fetch the current storage proof size + /// in externalities. + pub struct ProofSizeExt(Box); +} + +impl ProofSizeExt { + /// Creates a new instance of [`ProofSizeExt`]. + pub fn new(recorder: T) -> Self { + ProofSizeExt(Box::new(recorder)) + } + + /// Returns the storage proof size. + pub fn storage_proof_size(&self) -> u64 { + self.0.estimate_encoded_size() as _ + } +} diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 154cee3f37dcba79c9eea34c3bec4bfc2fdcc0b7..22a22b33b370994d554415519b4ee42fd82ae891 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -23,7 +23,7 @@ use crate::{NodeCodec, StorageProof}; use codec::Encode; use hash_db::Hasher; -use parking_lot::Mutex; +use parking_lot::{Mutex, MutexGuard}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -80,7 +80,9 @@ impl Default for RecorderInner { /// The trie recorder. /// -/// It can be used to record accesses to the trie and then to convert them into a [`StorageProof`]. +/// Owns the recorded data. Is used to transform data into a storage +/// proof and to provide transaction support. The `as_trie_recorder` method provides a +/// [`trie_db::TrieDB`] compatible recorder that implements the actual recording logic. pub struct Recorder { inner: Arc>>, /// The estimated encoded size of the storage proof this recorder will produce. @@ -105,6 +107,13 @@ impl Clone for Recorder { } impl Recorder { + /// Returns [`RecordedForKey`] per recorded key per trie. + /// + /// There are multiple tries when working with e.g. child tries. + pub fn recorded_keys(&self) -> HashMap<::Out, HashMap, RecordedForKey>> { + self.inner.lock().recorded_keys.clone() + } + /// Returns the recorder as [`TrieRecorder`](trie_db::TrieRecorder) compatible type. /// /// - `storage_root`: The storage root of the trie for which accesses are recorded. This is @@ -112,11 +121,8 @@ impl Recorder { /// /// NOTE: This locks a mutex that stays locked until the return value is dropped. 
#[inline] - pub fn as_trie_recorder( - &self, - storage_root: H::Out, - ) -> impl trie_db::TrieRecorder + '_ { - TrieRecorder:: { + pub fn as_trie_recorder(&self, storage_root: H::Out) -> TrieRecorder<'_, H> { + TrieRecorder:: { inner: self.inner.lock(), storage_root, encoded_size_estimation: self.encoded_size_estimation.clone(), @@ -231,15 +237,33 @@ impl Recorder { } } +impl crate::ProofSizeProvider for Recorder { + fn estimate_encoded_size(&self) -> usize { + Recorder::estimate_encoded_size(self) + } +} + /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. -struct TrieRecorder { - inner: I, +pub struct TrieRecorder<'a, H: Hasher> { + inner: MutexGuard<'a, RecorderInner>, storage_root: H::Out, encoded_size_estimation: Arc, _phantom: PhantomData, } -impl>> TrieRecorder { +impl crate::TrieRecorderProvider for Recorder { + type Recorder<'a> = TrieRecorder<'a, H> where H: 'a; + + fn drain_storage_proof(self) -> Option { + Some(Recorder::drain_storage_proof(self)) + } + + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { + Recorder::as_trie_recorder(&self, storage_root) + } +} + +impl<'a, H: Hasher> TrieRecorder<'a, H> { /// Update the recorded keys entry for the given `full_key`. fn update_recorded_keys(&mut self, full_key: &[u8], access: RecordedForKey) { let inner = self.inner.deref_mut(); @@ -283,9 +307,7 @@ impl>> TrieRecorder } } -impl>> trie_db::TrieRecorder - for TrieRecorder -{ +impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { fn record(&mut self, access: TrieAccess) { let mut encoded_size_update = 0; diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 41a83f01f66a9aed891f348c1e5036b9f47089c3..ed056f7ac368cf59b0351b5b3598e3f6abfba1f4 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -10,6 +10,9 @@ description = "Version module for the Substrate runtime; Provides a function tha documentation = "https://docs.rs/sp-version" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,15 +21,15 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } thiserror = { version = "1.0.48", optional = true } sp-core-hashing-proc-macro = { path = "../core/hashing/proc-macro" } -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-version-proc-macro = { path = "proc-macro", default-features = false} +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-version-proc-macro = { path = "proc-macro", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "impl-serde/std", @@ -39,4 +42,4 @@ std = [ ] # Serde support without relying on std features. 
-serde = [ "dep:serde", "impl-serde", "sp-runtime/serde" ] +serde = ["dep:serde", "impl-serde", "sp-runtime/serde"] diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index 7fce559e3ed633aac290d5e7b550f47fe0d196ba..413a1e9940a990d7bf9e970368bb6d70aaea0b2f 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Macro for defining a runtime version." documentation = "https://docs.rs/sp-api-proc-macro" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.41", features = ["extra-traits", "fold", "full", "visit"] } [dev-dependencies] sp-version = { path = ".." } diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index c7413fec43c42ff4039b9043fb89d14787de4071..9fe5cc1f2d00ba48fc609521194534202e8d046b 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -10,6 +10,9 @@ description = "Types and traits for interfacing between the host and the wasm ru documentation = "https://docs.rs/sp-wasm-interface" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,9 +22,9 @@ impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", optional = true } wasmtime = { version = "8.0.1", default-features = false, optional = true } anyhow = { version = "1.0.68", optional = true } -sp-std = { path = "../std", default-features = false} +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "log/std", "sp-std/std", "wasmtime" ] -wasmtime = [ "anyhow", "dep:wasmtime" ] +default = ["std"] +std = ["codec/std", "log/std", "sp-std/std", "wasmtime"] +wasmtime = ["anyhow", "dep:wasmtime"] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 6642f97029ff67b73c4072b7e930f579ae2507c6..c01e1a5a07f6571c8d6439250356b6efae41fee0 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -9,38 +9,46 @@ repository.workspace = true description = "Types and traits for interfacing between the host and the wasm runtime." 
documentation = "https://docs.rs/sp-wasm-interface" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] +bounded-collections = { version = "0.1.4", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, optional = true, features = ["alloc", "derive"] } smallvec = "1.11.0" -sp-arithmetic = { path = "../arithmetic", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-debug-derive = { path = "../debug-derive", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-arithmetic = { path = "../arithmetic", default-features = false } +sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-std = { path = "../std", default-features = false } +schemars = { version = "0.8.3", default-features = false, optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ + "bounded-collections/std", "codec/std", "scale-info/std", "serde/std", "sp-arithmetic/std", - "sp-core/std", "sp-debug-derive/std", "sp-std/std", ] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. -full-metadata-docs = [ "scale-info/docs" ] +full-metadata-docs = ["scale-info/docs"] # Serde support without relying on std features. serde = [ + "bounded-collections/serde", "dep:serde", "scale-info/serde", "sp-arithmetic/serde", - "sp-core/serde", +] + +json-schema = [ + "dep:schemars", ] diff --git a/substrate/primitives/weights/src/lib.rs b/substrate/primitives/weights/src/lib.rs index ececb622fa0f55d401498a408ff8bb7195efa37d..ef431bddee265fab663efd1e5b039a1c3e78bdc4 100644 --- a/substrate/primitives/weights/src/lib.rs +++ b/substrate/primitives/weights/src/lib.rs @@ -27,6 +27,7 @@ extern crate self as sp_weights; mod weight_meter; mod weight_v2; +use bounded_collections::Get; use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "serde")] @@ -36,7 +37,6 @@ use sp_arithmetic::{ traits::{BaseArithmetic, SaturatedConversion, Unsigned}, Perbill, }; -use sp_core::Get; use sp_debug_derive::RuntimeDebug; pub use weight_meter::*; @@ -270,7 +270,7 @@ pub type NoFee = FixedFee<0, T>; /// # Example /// /// ``` -/// # use sp_core::ConstU128; +/// # use bounded_collections::ConstU128; /// # use sp_weights::ConstantMultiplier; /// // Results in a multiplier of 10 for each unit of weight (or length) /// type LengthToFee = ConstantMultiplier::>; @@ -360,7 +360,7 @@ mod tests { #[test] fn constant_fee_works() { - use sp_core::ConstU128; + use bounded_collections::ConstU128; assert_eq!( ConstantMultiplier::>::weight_to_fee(&Weight::zero()), 0 diff --git a/substrate/primitives/weights/src/weight_v2.rs b/substrate/primitives/weights/src/weight_v2.rs index d692aaff8f5ab98d7cd3e701f09b85e3c9465d9c..3c10929f433b6fd23a88eb9138df54f8b67aba8d 100644 --- a/substrate/primitives/weights/src/weight_v2.rs +++ b/substrate/primitives/weights/src/weight_v2.rs @@ -23,6 +23,7 @@ use super::*; #[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Copy, Clone, Debug, Default)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = 
"json-schema", derive(schemars::JsonSchema))] pub struct Weight { #[codec(compact)] /// The weight of computational time used based on some reference hardware. diff --git a/substrate/scripts/ci/deny.toml b/substrate/scripts/ci/deny.toml index 1afb4a4f693d13f4e28f1d8ad4a86a6fe4e35743..b1dbf773e31f5297f42da64795a0cb5388d986ef 100644 --- a/substrate/scripts/ci/deny.toml +++ b/substrate/scripts/ci/deny.toml @@ -5,7 +5,7 @@ unlicensed = "deny" # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. allow = [ - "MPL-2.0", + "MPL-2.0", ] # List of explicitly disallowed licenses # See https://spdx.org/licenses/ for list of possible licenses @@ -34,70 +34,70 @@ confidence-threshold = 0.8 # Allow 1 or more licenses on a per-crate basis, so that particular licenses # aren't accepted for every possible crate as with the normal allow list exceptions = [ - # Each entry is the crate and version constraint, and its specific allow list - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "chain-spec-builder" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "mmr-gadget" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-bench" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "staging-node-cli" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-inspect" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-template-release" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-testing" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-authority-discovery" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-basic-authorship" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-block-builder" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec-derive" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-cli" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-db" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-aura" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-epochs" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-manual-seal" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-pow" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-slots" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-common" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmi" }, - { allow = ["GPL-3.0 WITH 
Classpath-exception-2.0"], name = "sc-executor-wasmtime" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-informant" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-keystore" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-mixnet" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-bitswap" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-common" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-gossip" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-light" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-sync" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-transactions" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-statement" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-offchain" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-peerset" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-proposer-metrics" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-server" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-spec-v2" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-runtime-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-state-db" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-statement-store" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-storage-monitor" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-sysinfo" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-telemetry" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-tracing" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "subkey" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "substrate" }, + # Each entry is the crate and version constraint, and its specific allow list + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "chain-spec-builder" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "mmr-gadget" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-bench" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-inspect" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-template-release" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-testing" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-authority-discovery" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-basic-authorship" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-block-builder" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec" }, + { 
allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec-derive" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-cli" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-db" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-aura" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-epochs" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-manual-seal" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-pow" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-slots" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-common" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmi" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmtime" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-informant" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-keystore" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-mixnet" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-bitswap" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-common" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-gossip" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-light" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-statement" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-sync" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-test" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-transactions" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-offchain" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-peerset" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-proposer-metrics" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-server" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-spec-v2" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-runtime-test" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service-test" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-state-db" }, + { allow = 
["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-statement-store" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-storage-monitor" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-sysinfo" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-telemetry" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-tracing" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "staging-node-cli" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "subkey" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "substrate" }, ] # Some crates don't have (easily) machine readable licensing information, @@ -114,6 +114,6 @@ expression = "MIT AND ISC AND OpenSSL" # and the crate will be checked normally, which may produce warnings or errors # depending on the rest of your configuration license-files = [ - # Each entry is a crate relative path, and the (opaque) hash of its contents - { path = "LICENSE", hash = 0xbd0eed23 } + # Each entry is a crate relative path, and the (opaque) hash of its contents + { path = "LICENSE", hash = 0xbd0eed23 }, ] diff --git a/substrate/scripts/ci/node-template-release/Cargo.toml b/substrate/scripts/ci/node-template-release/Cargo.toml index 73ffce8645b868a07b2cc77f1f00e47d0339ae79..ca9759d596317bd07ac994c725f42937f520d5b1 100644 --- a/substrate/scripts/ci/node-template-release/Cargo.toml +++ b/substrate/scripts/ci/node-template-release/Cargo.toml @@ -7,11 +7,14 @@ license = "GPL-3.0 WITH Classpath-exception-2.0" homepage = "https://substrate.io" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } flate2 = "1.0" fs_extra = "1.3" glob = "0.3" diff --git a/substrate/src/lib.rs b/substrate/src/lib.rs deleted file mode 100644 index b5a583fcfcf1ed1f67ac3e9473392807dd4ae1a9..0000000000000000000000000000000000000000 --- a/substrate/src/lib.rs +++ /dev/null @@ -1,242 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! # Substrate -//! -//! Substrate is a Rust framework for building blockchains in a modular and extensible way. While in -//! itself un-opinionated, it is the main engine behind the Polkadot ecosystem. -//! -//! [![github]](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/) - [![polkadot]](https://polkadot.network) -//! -//! This crate in itself does not contain any code and is just meant ot be a documentation hub for -//! substrate-based crates. -//! -//! ## Overview -//! 
-//! Substrate approaches blockchain development with an acknowledgement of a few self-evident -//! truths: -//! -//! 1. Society and technology evolves. -//! 2. Humans are fallible. -//! -//! This, specifically, makes the task of designing a correct, safe and long-lasting blockchain -//! system hard. -//! -//! Nonetheless, in order to achieve this goal, substrate embraces the following: -//! -//! 1. Use of **Rust** as a modern, and safe programming language, which limits human error through -//! various means, most notably memory safety. -//! 2. Substrate is written from the ground-up with a generic, modular and extensible design. This -//! ensures that software components can be easily swapped and upgraded. Examples of this is -//! multiple consensus mechanisms provided by Substrate, as listed below. -//! 3. Lastly, the final blockchain system created with the above properties needs to be -//! upgradeable. In order to achieve this, Substrate is designed as a meta-protocol, whereby the -//! application logic of the blockchain (called "Runtime") is encoded as a Wasm blob, and is -//! stored onchain. The rest of the system (called "Client") acts as the executor of the Wasm -//! blob. -//! -//! In essence, the meta-protocol of all Substrate based chains is the "Runtime as Wasm blob" -//! accord. This enables the Runtime to become inherently upgradeable (without forks). The upgrade -//! is merely a matter of the Wasm blob being changed in the chain state, which is, in principle, -//! same as updating an account's balance. -//! -//! ### Architecture -//! -//! Therefore, Substrate can be visualized as follows: -#![doc = simple_mermaid::mermaid!("../../docs/mermaid/substrate_simple.mmd")] -//! -//! The client and the runtime of course need to communicate. This is done through two concepts: -//! -//! 1. Host functions: a way for the (Wasm) runtime to talk to the client. All host functions are -//! defined in [`sp-io`]. For example, [`sp-io::storage`] are the set of host functions that -//! allow the runtime to read and write data to the on-chain state. -//! 2. Runtime APIs: a way for the client to talk to the Wasm runtime. Runtime APIs are defined -//! using macros and utilities in [`sp-api`]. For example, [`sp-api::Core`] is the most basic -//! runtime API that any blockchain must implement in order to be able to (re) execute blocks. -#![doc = simple_mermaid::mermaid!("../../docs/mermaid/substrate_client_runtime.mmd")] -//! -//! [`FRAME`], Substrate's default runtime development library takes the above even further by -//! embracing a declarative programming model whereby correctness is enhanced and the system is -//! highly configurable through parameterization. -//! -//! All in all, this design enables all substrate-based chains to achieve forkless, self-enacting -//! upgrades out of the box. Combined with governance abilities that are shipped with `FRAME`, this -//! enables a chain to survive the test of time. -//! -//! ## How to Get Stared -//! -//! Most developers want to leave the client side code as-is, and focus on the runtime. To do so, -//! look into the [`frame`] crate, which is the entry point crate into runtime development with -//! FRAME. -//! -//! > Side note, it is entirely possible to craft a substrate-based runtime without FRAME, an -//! > example of which can be found [here](https://github.com/JoshOrndorff/frameless-node-template). -//! -//! In more broad terms, the following avenues exist into developing with substrate: -//! -//! 
* **Templates**: A number of substrate-based templates exist and they can be used for various -//! purposes, with zero to little additional code needed. All of these templates contain runtimes -//! that are highly configurable and are likely suitable for basic needs. -//! * [`FRAME`]: If need, one can customize that runtime even further, by using `FRAME` and -//! developing custom modules. -//! * **Core**: To the contrary, some developers may want to customize the client side software to -//! achieve novel goals such as a new consensus engine, or a new database backend. While -//! Substrate's main configurability is in the runtime, the client is also highly generic and can -//! be customized to a great extent. -//! -//! ## Structure -//! -//! Substrate is a massive cargo workspace with hundreds of crates, therefore it is useful to know -//! how to navigate its crates. -//! -//! In broad terms, it is divided into three categories: -//! -//! * `sc-*` (short for *substrate-client*) crates, located under `./client` folder. These are all -//! the client crates. Notable examples are crates such as [`sc-network`], various consensus -//! crates, [`sc-rpc-api`] and [`sc-client-db`], all of which are expected to reside in the client -//! side. -//! * `sp-*` (short for *substrate-primitives*) crates, located under `./primitives` folder. These -//! are the traits that glue the client and runtime together, but are not opinionated about what -//! framework is using for building the runtime. Notable examples are [`sp-api`] and [`sp-io`], -//! which form the communication bridge between the client and runtime. -//! * `pallet-*` and `frame-*` crates, located under `./frame` folder. These are the crates related -//! to FRAME. See [`frame`] for more information. -//! -//! ### Wasm Build -//! -//! Many of the Substrate crates, such as entire `sp-*`, need to compile to both Wasm (when a Wasm -//! runtime is being generated) and native (for example, when testing). To achieve this, Substrate -//! follows the convention of the Rust community, and uses a `feature = "std"` to signify that a -//! crate is being built with the standard library, and is built for native. Otherwise, it is built -//! for `no_std`. -//! -//! This can be summarized in `#![cfg_attr(not(feature = "std"), no_std)]`, which you can often find -//! in any Substrate-based runtime. -//! -//! Substrate-based runtimes use [`substrate-wasm-builder`] in their `build.rs` to automatically -//! build their Wasm files as a part of normal build commandsOnce built, the wasm file is placed in -//! `./target/{debug|release}/wbuild/{runtime_name}.wasm`. -//! -//! ### Binaries -//! -//! Multiple binaries are shipped with substrate, the most important of which are located in the -//! `./bin` folder. -//! -//! * [`node`] is an extensive substrate node that contains the superset of all runtime and client -//! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the -//! modules that are provided with `FRAME`. This node and runtime is only used for testing and -//! demonstration. -//! * [`chain-spec-builder`]: Utility to build more detailed [chain-spec][`sc-chain-spec`] for the -//! aforementioned node. Other projects typically contain a `build-spec` subcommand that does the -//! same. -//! * [`node-template`]: a template node that contains a minimal set of features and can act as a -//! starting point of a project. -//! * [`subkey`]: Substrate's key management utility. -//! -//! ### Anatomy of a Binary Crate -//! -//! 
From the above, [`node`] and [`node-template`] are essentially blueprints of a substrate-based -//! project, as the name of the latter is implying. Each substrate-based project typically contains -//! the following: -//! -//! * Under `./runtime`, a `./runtime/src/lib.rs` which is the top level runtime amalgamator file. -//! This file typically contains the [`frame_support::construct_runtime`] macro, which is the -//! final definition of a runtime. -//! -//! * Under `./node`, a `main.rs`, which is the point, and a `./service.rs`, which contains all the -//! client side components. Skimming this file yields an overview of the networking, database, -//! consensus and similar client side components. -//! -//! > The above two are conventions, not rules. -//! -//! ## Parachain? -//! -//! As noted above, Substrate is the main engine behind the Polkadot ecosystem. One of the ways -//! through which Polkadot can be utilized is by building "parachains", blockchains that are -//! connected to Polkadot's shared security. -//! -//! To build a parachain, one could use -//! [`Cumulus`](https://github.com/paritytech/polkadot-sdk/tree/master/cumulus), the library on top -//! of Substrate, empowering any substrate-based chain to be a Polkadot parachain. -//! -//! ## Where To Go Next? -//! -//! Additional noteworthy crates within substrate: -//! -//! - Chain specification of a Substrate node: -//! - [`sc-chain-spec`] -//! - RPC APIs of a Substrate node: [`sc-rpc-api`]/[`sc-rpc`] -//! - CLI Options of a Substrate node: [`sc-cli`] -//! - All of the consensus related crates provided by Substrate: -//! - [`sc-consensus-aura`] -//! - [`sc-consensus-babe`] -//! - [`sc-consensus-grandpa`] -//! - [`sc-consensus-beefy`] -//! - [`sc-consensus-manual-seal`] -//! - [`sc-consensus-pow`] -//! -//! Additional noteworthy external resources: -//! -//! - [Substrate Developer Hub](https://substrate.dev) -//! - [Parity Tech's Documentation Hub](https://paritytech.github.io/) -//! - [Frontier: Substrate's Ethereum Compatibility Library](https://paritytech.github.io/frontier/) -//! - [Polkadot Wiki](https://wiki.polkadot.network/en/) -//! -//! Notable upstream crates: -//! -//! - [`parity-scale-codec`](https://github.com/paritytech/parity-scale-codec) -//! - [`parity-db`](https://github.com/paritytech/parity-db) -//! - [`trie`](https://github.com/paritytech/trie) -//! - [`parity-common`](https://github.com/paritytech/parity-common) -//! -//! Templates: -//! -//! - classic [`substrate-node-template`](https://github.com/substrate-developer-hub/substrate-node-template) -//! - classic [cumulus-parachain-template](https://github.com/substrate-developer-hub/substrate-parachain-template) -//! - [`extended-parachain-template`](https://github.com/paritytech/extended-parachain-template) -//! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template) -//! -//! [polkadot]: -//! https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white -//! [github]: -//! https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github -//! [`FRAME`]: ../frame/index.html -//! [`sp-io`]: ../sp_io/index.html -//! [`sp-api`]: ../sp_api/index.html -//! [`sp-api`]: ../sp_api/index.html -//! [`sc-client-db`]: ../sc_client_db/index.html -//! [`sc-chain-spec`]: ../sc_chain_spec/index.html -//! [`sc-network`]: ../sc_network/index.html -//! [`sc-rpc-api`]: ../sc_rpc_api/index.html -//! [`sc-rpc`]: ../sc_rpc/index.html -//! [`sc-cli`]: ../sc_cli/index.html -//! 
[`sc-consensus-aura`]: ../sc_consensus_aura/index.html -//! [`sc-consensus-babe`]: ../sc_consensus_babe/index.html -//! [`sc-consensus-grandpa`]: ../sc_consensus_grandpa/index.html -//! [`sc-consensus-beefy`]: ../sc_consensus_beefy/index.html -//! [`sc-consensus-manual-seal`]: ../sc_consensus_manual_seal/index.html -//! [`sc-consensus-pow`]: ../sc_consensus_pow/index.html -//! [`node`]: ../node_cli/index.html -//! [`node-template`]: ../node_template/index.html -//! [`kitchensink_runtime`]: ../kitchensink_runtime/index.html -//! [`subkey`]: ../subkey/index.html -//! [`chain-spec-builder`]: ../chain_spec_builder/index.html -//! [`substrate-wasm-builder`]: https://crates.io/crates/substrate-wasm-builder - -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(rustdoc::private_intra_doc_links)] diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index 31bdc0f663a10734d1e4775f1fb03c63ffb9f1ed..526ed7c049c708c257cb22fc44efb1dc7d6fe43b 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Substrate test utilities" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,5 +20,5 @@ futures = "0.3.16" tokio = { version = "1.22.0", features = ["macros", "time"] } [dev-dependencies] -trybuild = { version = "1.0.74", features = [ "diff" ] } +trybuild = { version = "1.0.74", features = ["diff"] } sc-service = { path = "../client/service" } diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index 022db32c34f1c5c2308435271474f31208b159fa..d654a3aaa7258668c066657b27e3eb97deec6b19 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -26,4 +29,4 @@ sc-service = { path = "../../client/service" } futures = "0.3.28" [features] -try-runtime = [ "node-cli/try-runtime" ] +try-runtime = ["node-cli/try-runtime"] diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 032fbaf4e654c99d08d0d8b3dc9f2bec1bd04640..2e343f4155b4b41a10f3f553e25b601c634e5723 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -9,26 +9,29 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" sc-client-api = { path = "../../client/api" } sc-client-db = { path = "../../client/db", default-features = false, features = [ "test-helpers", -]} +] } sc-consensus = { path = "../../client/consensus/common" } sc-executor = { path = "../../client/executor" } sc-offchain = { path = "../../client/offchain" } sc-service = { path = "../../client/service", default-features = false, features = [ "test-helpers", -]} +] } sp-blockchain = { path = "../../primitives/blockchain" } sp-consensus = { path = "../../primitives/consensus/common" } sp-core = { path = "../../primitives/core" } diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index 
084dd2a1861cd3b73c5d3f6d89f6cd84c81ac0ef..f383f7c3dc3e06d1ce5bcdafbb8ab3729a20734f 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -263,9 +263,10 @@ impl D: sc_executor::NativeExecutionDispatch + 'static, Backend: sc_client_api::backend::Backend + 'static, { - let executor = executor.into().unwrap_or_else(|| { + let mut executor = executor.into().unwrap_or_else(|| { NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) }); + executor.disable_use_native(); let executor = LocalCallExecutor::new( self.backend.clone(), executor.clone(), diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 2f1e192eded0d608eb157058f7b2c0cef7e9c49b..881a77f1f92835b0315dcc4fcd03dbb33167a955 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -9,41 +9,44 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } +sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false, features = ["serde"] } sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } -sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false} -sp-block-builder = { path = "../../primitives/block-builder", default-features = false} +sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } +sp-block-builder = { path = "../../primitives/block-builder", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-inherents = { path = "../../primitives/inherents", default-features = false} -sp-keyring = { path = "../../primitives/keyring", optional = true} -sp-offchain = { path = "../../primitives/offchain", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -frame-support = { path = "../../frame/support", default-features = false} -sp-version = { path = "../../primitives/version", default-features = false} -sp-session = { path = "../../primitives/session", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} +sp-inherents = { path = "../../primitives/inherents", default-features = false } +sp-keyring = { path = "../../primitives/keyring", optional = true } +sp-offchain = { path = "../../primitives/offchain", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +frame-support = { path = "../../frame/support", default-features = false } +sp-version = { path = "../../primitives/version", default-features = false } +sp-session = { path = "../../primitives/session", default-features = false } +sp-api = 
{ path = "../../primitives/api", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -pallet-babe = { path = "../../frame/babe", default-features = false} -pallet-balances = { path = "../../frame/balances", default-features = false} -frame-executive = { path = "../../frame/executive", default-features = false} -frame-system = { path = "../../frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false} -pallet-timestamp = { path = "../../frame/timestamp", default-features = false} +pallet-babe = { path = "../../frame/babe", default-features = false } +pallet-balances = { path = "../../frame/balances", default-features = false } +frame-executive = { path = "../../frame/executive", default-features = false } +frame-system = { path = "../../frame/system", default-features = false } +frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false } +pallet-timestamp = { path = "../../frame/timestamp", default-features = false } sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-trie = { path = "../../primitives/trie", default-features = false} -sp-transaction-pool = { path = "../../primitives/transaction-pool", default-features = false} +sp-trie = { path = "../../primitives/trie", default-features = false } +sp-transaction-pool = { path = "../../primitives/transaction-pool", default-features = false } trie-db = { version = "0.28.0", default-features = false } -sc-service = { path = "../../client/service", default-features = false, features = ["test-helpers"], optional = true} -sp-state-machine = { path = "../../primitives/state-machine", default-features = false} -sp-externalities = { path = "../../primitives/externalities", default-features = false} +sc-service = { path = "../../client/service", default-features = false, features = ["test-helpers"], optional = true } +sp-state-machine = { path = "../../primitives/state-machine", default-features = false } +sp-externalities = { path = "../../primitives/externalities", default-features = false } # 3rd party array-bytes = { version = "6.1", optional = true } @@ -58,14 +61,14 @@ sp-consensus = { path = "../../primitives/consensus/common" } substrate-test-runtime-client = { path = "client" } sp-tracing = { path = "../../primitives/tracing" } json-patch = { version = "1.0.0", default-features = false } -serde = { version = "1.0.188", features = ["alloc", "derive"], default-features = false } +serde = { version = "1.0.193", features = ["alloc", "derive"], default-features = false } serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } [build-dependencies] substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "array-bytes", @@ -108,4 +111,4 @@ std = [ "trie-db/std", ] # Special feature to disable logging -disable-logging = [ "sp-api/disable-logging" ] +disable-logging = ["sp-api/disable-logging"] diff --git a/substrate/test-utils/runtime/client/Cargo.toml b/substrate/test-utils/runtime/client/Cargo.toml index 40cfa8ab1b7092ab2263690fba9a314e0dcd7a24..cbb964f6785237ff45b58f6b702b9a3e4a3abbbe 100644 --- a/substrate/test-utils/runtime/client/Cargo.toml +++ b/substrate/test-utils/runtime/client/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = 
true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 1a4e9fd04667c830ad912316bf7fe6fcaae11c41..16ab467772f2f60a4b6496da88b9129cc04851f5 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -27,7 +27,7 @@ pub mod substrate_test_pallet; use codec::{Decode, Encode}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -342,6 +342,7 @@ parameter_types! { .build_or_panic(); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::pallet::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index cb6ee6d79f448da2c161073f1c08fc6134047537..b52a897438b6854a066a95e51ba49bd0eddd7f84 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index 6bb1d5f0f1e69b6c3601c4a8806e2e992e977de9..441f89b790f1b3bde9db9a67877f978c60a824f2 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true description = "A no-std/Substrate compatible library to construct binary merkle tree." homepage = "https://substrate.io" +[lints] +workspace = true + [dependencies] array-bytes = { version = "6.1", optional = true } log = { version = "0.4", default-features = false, optional = true } @@ -20,6 +23,6 @@ sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } [features] -debug = [ "array-bytes", "log" ] -default = [ "debug", "std" ] -std = [ "hash-db/std", "log/std", "sp-core/std", "sp-runtime/std" ] +debug = ["array-bytes", "log"] +default = ["debug", "std"] +std = ["hash-db/std", "log/std", "sp-core/std", "sp-runtime/std"] diff --git a/substrate/utils/build-script-utils/Cargo.toml b/substrate/utils/build-script-utils/Cargo.toml index ab15d5552c29a6a17dac404537a9cfc3ee59fdfe..464647ea723e0398937a0dc49f0f00f749786e41 100644 --- a/substrate/utils/build-script-utils/Cargo.toml +++ b/substrate/utils/build-script-utils/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Crate with utility functions for `build.rs` scripts." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/utils/build-script-utils/src/git.rs b/substrate/utils/build-script-utils/src/git.rs index 057ee0af15f7623ba7e9fddbd799dc75f6605e1a..430a3e17c190a9042af6356076047163403823fa 100644 --- a/substrate/utils/build-script-utils/src/git.rs +++ b/substrate/utils/build-script-utils/src/git.rs @@ -15,7 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
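
The `#[derive_impl(...)]` attribute added to `substrate/test-utils/runtime/src/lib.rs` above (and to the doc-test in `substrate/utils/frame/rpc/support` further down) pulls any associated types that are not spelled out from `frame_system::config_preludes::TestDefaultConfig`, so a test `Config` impl only needs to list what it overrides. A minimal sketch of that pattern, not taken from this diff and using a hypothetical `Test` runtime:

```rust
use frame_support::{construct_runtime, derive_impl};

construct_runtime! {
    pub enum Test {
        System: frame_system,
    }
}

// Anything not listed below is filled in from `TestDefaultConfig`.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
    // The block type is still spelled out here; everything else comes from the defaults.
    type Block = frame_system::mocking::MockBlock<Test>;
}
```
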
-use std::{env, fs, fs::File, io, io::Read, path::PathBuf}; +use std::{ + env, fs, + fs::File, + io, + io::Read, + path::{Path, PathBuf}, +}; /// Make sure the calling `build.rs` script is rerun when `.git/HEAD` or the ref of `.git/HEAD` /// changed. @@ -55,7 +61,7 @@ pub fn rerun_if_git_head_changed() { } // Code taken from https://github.com/rustyhorde/vergen/blob/8d522db8c8e16e26c0fc9ea8e6b0247cbf5cca84/src/output/envvar.rs -fn get_git_paths(path: &PathBuf) -> Result>, io::Error> { +fn get_git_paths(path: &Path) -> Result>, io::Error> { let git_dir_or_file = path.join(".git"); if let Ok(metadata) = fs::metadata(&git_dir_or_file) { diff --git a/substrate/utils/build-script-utils/src/version.rs b/substrate/utils/build-script-utils/src/version.rs index f6a9ff9554abc5249b74a1f052d61e2517eb52af..d85c78d2c9974ef35483d7c9726e5f16ce2f0b45 100644 --- a/substrate/utils/build-script-utils/src/version.rs +++ b/substrate/utils/build-script-utils/src/version.rs @@ -25,7 +25,7 @@ pub fn generate_cargo_keys() { // We deliberately set the length here to `11` to ensure that // the emitted hash is always of the same length; otherwise // it can (and will!) vary between different build environments. - match Command::new("git").args(&["rev-parse", "--short=11", "HEAD"]).output() { + match Command::new("git").args(["rev-parse", "--short=11", "HEAD"]).output() { Ok(o) if o.status.success() => { let sha = String::from_utf8_lossy(&o.stdout).trim().to_owned(); Cow::from(sha) @@ -59,3 +59,34 @@ fn get_version(impl_commit: &str) -> String { impl_commit ) } + +/// Generate `SUBSTRATE_WASMTIME_VERSION` +pub fn generate_wasmtime_version() { + generate_dependency_version("wasmtime", "SUBSTRATE_WASMTIME_VERSION"); +} + +fn generate_dependency_version(dep: &str, env_var: &str) { + // we only care about the root + match std::process::Command::new("cargo") + .args(["tree", "--depth=0", "--locked", "--package", dep]) + .output() + { + Ok(output) if output.status.success() => { + let version = String::from_utf8_lossy(&output.stdout); + + // vX.X.X + if let Some(ver) = version.strip_prefix(&format!("{} v", dep)) { + println!("cargo:rustc-env={}={}", env_var, ver); + } else { + println!("cargo:warning=Unexpected result {}", version); + } + }, + + // command errors out when it could not find the given dependency + // or when having multiple versions of it + Ok(output) => + println!("cargo:warning=`cargo tree` {}", String::from_utf8_lossy(&output.stderr)), + + Err(err) => println!("cargo:warning=Could not run `cargo tree`: {}", err), + } +} diff --git a/substrate/utils/fork-tree/Cargo.toml b/substrate/utils/fork-tree/Cargo.toml index eea500641fe4eaf7631f97e1adcf2cefcda87288..27bb908986f8e35e9caafc9448167ba68c41ec51 100644 --- a/substrate/utils/fork-tree/Cargo.toml +++ b/substrate/utils/fork-tree/Cargo.toml @@ -10,6 +10,9 @@ description = "Utility library for managing tree-like ordered data with logic fo documentation = "https://docs.rs/fork-tree" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index b67d08a85c2eb7f97c5d9900d14f86301a8bc60f..b9495fa46c2b738845d930f9afa1c93729d6e11f 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -9,13 +9,16 @@ repository.workspace = true description = "CLI for benchmarking FRAME" readme = "README.md" +[lints] +workspace = true + 
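
The new `generate_wasmtime_version` helper in `substrate/utils/build-script-utils/src/version.rs` above shells out to `cargo tree --depth=0 --locked --package wasmtime` and emits the result as a `cargo:rustc-env=SUBSTRATE_WASMTIME_VERSION=...` line. A hedged sketch of how a crate could consume it, assuming the helper is re-exported from the crate root of `substrate-build-script-utils` in the same way `generate_cargo_keys` already is:

```rust
// build.rs of the consuming crate (hypothetical): run the helpers so cargo sets the
// environment variables while this crate is compiled.
fn main() {
    substrate_build_script_utils::generate_cargo_keys();
    substrate_build_script_utils::generate_wasmtime_version();
}
```

```rust
// In the crate itself: `cargo:rustc-env` is only visible to the crate whose build script
// emitted it, so read it at compile time. It stays `None` when `cargo tree` failed,
// e.g. because several wasmtime versions ended up in the dependency graph.
pub const WASMTIME_VERSION: Option<&str> = option_env!("SUBSTRATE_WASMTIME_VERSION");
```
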
[package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" chrono = "0.4" -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } comfy-table = { version = "7.0.1", default-features = false } handlebars = "4.2.2" @@ -24,9 +27,9 @@ itertools = "0.10.3" lazy_static = "1.4.0" linked-hash-map = "0.5.4" log = "0.4.17" -rand = { version = "0.8.4", features = ["small_rng"] } +rand = { version = "0.8.5", features = ["small_rng"] } rand_pcg = "0.3.1" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" thiserror = "1.0.48" thousands = "0.2.0" @@ -34,11 +37,11 @@ frame-benchmarking = { path = "../../../frame/benchmarking" } frame-support = { path = "../../../frame/support" } frame-system = { path = "../../../frame/system" } sc-block-builder = { path = "../../../client/block-builder" } -sc-cli = { path = "../../../client/cli", default-features = false} +sc-cli = { path = "../../../client/cli", default-features = false } sc-client-api = { path = "../../../client/api" } -sc-client-db = { path = "../../../client/db", default-features = false} +sc-client-db = { path = "../../../client/db", default-features = false } sc-executor = { path = "../../../client/executor" } -sc-service = { path = "../../../client/service", default-features = false} +sc-service = { path = "../../../client/service", default-features = false } sc-sysinfo = { path = "../../../client/sysinfo" } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } @@ -56,7 +59,7 @@ sp-wasm-interface = { path = "../../../primitives/wasm-interface" } gethostname = "0.2.3" [features] -default = [ "rocksdb" ] +default = ["rocksdb"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -65,4 +68,4 @@ runtime-benchmarks = [ "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -rocksdb = [ "sc-cli/rocksdb", "sc-client-db/rocksdb" ] +rocksdb = ["sc-cli/rocksdb", "sc-client-db/rocksdb"] diff --git a/substrate/utils/frame/benchmarking-cli/src/block/bench.rs b/substrate/utils/frame/benchmarking-cli/src/block/bench.rs index a028c7d438e80ac80409c2ae7ddc1d683de21052..ef8dc29dde8407ca26d0912bc2d65c0bc39d73d0 100644 --- a/substrate/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/substrate/utils/frame/benchmarking-cli/src/block/bench.rs @@ -25,9 +25,13 @@ use sc_cli::{Error, Result}; use sc_client_api::{ Backend as ClientBackend, BlockBackend, HeaderBackend, StorageProvider, UsageProvider, }; -use sp_api::{ApiExt, Core, HeaderT, ProvideRuntimeApi}; +use sp_api::{ApiExt, Core, ProvideRuntimeApi}; use sp_blockchain::Error::RuntimeApiError; -use sp_runtime::{generic::BlockId, traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, + DigestItem, OpaqueExtrinsic, +}; use sp_storage::StorageKey; use clap::Args; diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/write.rs b/substrate/utils/frame/benchmarking-cli/src/storage/write.rs index 4def1909ead5e0ef1675eeb8ca9bcd777b606005..65941497bda41f94b1230f73d59b187895cee4d9 100644 --- a/substrate/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/substrate/utils/frame/benchmarking-cli/src/storage/write.rs @@ -18,10 +18,10 @@ use sc_cli::Result; use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider}; use 
sc_client_db::{DbHash, DbState, DbStateBuilder}; -use sp_api::StateBackend; use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Transaction}; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; +use sp_state_machine::Backend as StateBackend; use sp_trie::PrefixedMemoryDB; use log::{info, trace}; diff --git a/substrate/utils/frame/frame-utilities-cli/Cargo.toml b/substrate/utils/frame/frame-utilities-cli/Cargo.toml index 24c04f47391e8157a45f830986a7e3f6585ebb1b..7e0c0241947fc9e15e6cff8b014dc7dc1cd07673 100644 --- a/substrate/utils/frame/frame-utilities-cli/Cargo.toml +++ b/substrate/utils/frame/frame-utilities-cli/Cargo.toml @@ -10,8 +10,11 @@ description = "cli interface for FRAME" documentation = "https://docs.rs/substrate-frame-cli" readme = "README.md" +[lints] +workspace = true + [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } frame-support = { path = "../../../frame/support" } frame-system = { path = "../../../frame/system" } sc-cli = { path = "../../../client/cli" } diff --git a/substrate/utils/frame/generate-bags/Cargo.toml b/substrate/utils/frame/generate-bags/Cargo.toml index ac22197c5ac4b3fe25de85eb704572420595cb91..4afb2a80b7710beae446c5f95d6f4e8dc727ff9b 100644 --- a/substrate/utils/frame/generate-bags/Cargo.toml +++ b/substrate/utils/frame/generate-bags/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Bag threshold generation script for pallet-bag-list" +[lints] +workspace = true + [dependencies] # FRAME frame-support = { path = "../../../frame/support" } @@ -17,5 +20,5 @@ pallet-staking = { path = "../../../frame/staking" } sp-staking = { path = "../../../primitives/staking" } # third party -chrono = { version = "0.4.27" } +chrono = { version = "0.4.31" } num-format = "0.4.3" diff --git a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml index 13e61138356231a4c0c56da19bc61abbbf7286cc..4614caa7f7b35c04ae7eff4a31cad4cae9ae7a0e 100644 --- a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -9,9 +9,12 @@ repository.workspace = true description = "Bag threshold generation script for pallet-bag-list and kitchensink-runtime." publish = false +[lints] +workspace = true + [dependencies] kitchensink-runtime = { path = "../../../../bin/node/runtime" } generate-bags = { path = ".." 
} # third-party -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 7067aed238aca08b5b03ae99f568ad1777751911..bd5a51eeec629f776c9a219b1dd93dd7a23855b7 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "An externalities provided environment that can load itself from remote nodes or cached files" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.16.2", features = ["http-client"] } codec = { package = "parity-scale-codec", version = "3.6.1" } log = "0.4.17" -serde = "1.0.188" +serde = "1.0.193" sp-core = { path = "../../../primitives/core" } sp-state-machine = { path = "../../../primitives/state-machine" } sp-io = { path = "../../../primitives/io" } diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 71e9320ebeeb272faa156d758dd6571c9cb148a9..5c7a36867ff6ea1398daed763505b49e13b4ca03 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -47,6 +47,7 @@ use std::{ fs, ops::{Deref, DerefMut}, path::{Path, PathBuf}, + sync::Arc, time::{Duration, Instant}, }; use substrate_rpc_client::{rpc_params, BatchRequestBuilder, ChainApi, ClientT, StateApi}; @@ -298,6 +299,7 @@ impl Default for SnapshotConfig { } /// Builder for remote-externalities. +#[derive(Clone)] pub struct Builder { /// Custom key-pairs to be injected into the final externalities. The *hashed* keys and values /// must be given. @@ -400,41 +402,134 @@ where }) } - /// Get all the keys at `prefix` at `hash` using the paged, safe RPC methods. - async fn rpc_get_keys_paged( + /// Get keys with `prefix` at `block` in a parallel manner. + async fn rpc_get_keys_parallel( &self, - prefix: StorageKey, - at: B::Hash, + prefix: &StorageKey, + block: B::Hash, + parallel: usize, + ) -> Result, &'static str> { + /// Divide the workload and return the start key of each chunks. Guaranteed to return a + /// non-empty list. 
+ fn gen_start_keys(prefix: &StorageKey) -> Vec { + let mut prefix = prefix.as_ref().to_vec(); + let scale = 32usize.saturating_sub(prefix.len()); + + // no need to divide workload + if scale < 9 { + prefix.extend(vec![0; scale]); + return vec![StorageKey(prefix)] + } + + let chunks = 16; + let step = 0x10000 / chunks; + let ext = scale - 2; + + (0..chunks) + .map(|i| { + let mut key = prefix.clone(); + let start = i * step; + key.extend(vec![(start >> 8) as u8, (start & 0xff) as u8]); + key.extend(vec![0; ext]); + StorageKey(key) + }) + .collect() + } + + let start_keys = gen_start_keys(&prefix); + let start_keys: Vec> = start_keys.iter().map(Some).collect(); + let mut end_keys: Vec> = start_keys[1..].to_vec(); + end_keys.push(None); + + // use a semaphore to limit max scraping tasks + let parallel = Arc::new(tokio::sync::Semaphore::new(parallel)); + let builder = Arc::new(self.clone()); + let mut handles = vec![]; + + for (start_key, end_key) in start_keys.into_iter().zip(end_keys) { + let permit = parallel + .clone() + .acquire_owned() + .await + .expect("semaphore is not closed until the end of loop"); + + let builder = builder.clone(); + let prefix = prefix.clone(); + let start_key = start_key.cloned(); + let end_key = end_key.cloned(); + + let handle = tokio::spawn(async move { + let res = builder + .rpc_get_keys_in_range(&prefix, block, start_key.as_ref(), end_key.as_ref()) + .await; + drop(permit); + res + }); + + handles.push(handle); + } + + parallel.close(); + + let keys = futures::future::join_all(handles) + .await + .into_iter() + .filter_map(|res| match res { + Ok(Ok(keys)) => Some(keys), + _ => None, + }) + .flatten() + .collect::>(); + + Ok(keys) + } + + /// Get all keys with `prefix` within the given range at `block`. + /// Both `start_key` and `end_key` are optional if you want an open-ended range. + async fn rpc_get_keys_in_range( + &self, + prefix: &StorageKey, + block: B::Hash, + start_key: Option<&StorageKey>, + end_key: Option<&StorageKey>, ) -> Result, &'static str> { - let mut last_key: Option = None; - let mut all_keys: Vec = vec![]; - let keys = loop { + let mut last_key: Option<&StorageKey> = start_key; + let mut keys: Vec = vec![]; + + loop { // This loop can hit the node with very rapid requests, occasionally causing it to // error out in CI (https://github.com/paritytech/substrate/issues/14129), so we retry. 
let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); let get_page_closure = - || self.get_keys_single_page(Some(prefix.clone()), last_key.clone(), at); - let page = Retry::spawn(retry_strategy, get_page_closure).await?; - let page_len = page.len(); + || self.get_keys_single_page(Some(prefix.clone()), last_key.cloned(), block); + let mut page = Retry::spawn(retry_strategy, get_page_closure).await?; - all_keys.extend(page); + // avoid duplicated keys across workloads + if let (Some(last), Some(end)) = (page.last(), end_key) { + if last >= end { + page.retain(|key| key < end); + } + } + let page_len = page.len(); + keys.extend(page); + last_key = keys.last(); + + // scraping out of range or no more matches, + // we are done either way if page_len < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize { log::debug!(target: LOG_TARGET, "last page received: {}", page_len); - break all_keys - } else { - let new_last_key = - all_keys.last().expect("all_keys is populated; has .last(); qed"); - log::debug!( - target: LOG_TARGET, - "new total = {}, full page received: {}", - all_keys.len(), - HexDisplay::from(new_last_key) - ); - last_key = Some(new_last_key.clone()); - }; - }; + break + } + + log::debug!( + target: LOG_TARGET, + "new total = {}, full page received: {}", + keys.len(), + HexDisplay::from(last_key.expect("full page received, cannot be None")) + ); + } Ok(keys) } @@ -529,7 +624,7 @@ where "Batch request failed ({}/{} retries). Error: {}", retries, Self::MAX_RETRIES, - e.to_string() + e ); // after 2 subsequent failures something very wrong is happening. log a warning // and reset the batch size down to 1. @@ -590,7 +685,7 @@ where /// map them to values one by one. /// /// This can work with public nodes. But, expect it to be darn slow. - pub(crate) async fn rpc_get_pairs_paged( + pub(crate) async fn rpc_get_pairs( &self, prefix: StorageKey, at: B::Hash, @@ -598,8 +693,10 @@ where ) -> Result, &'static str> { let start = Instant::now(); let mut sp = Spinner::with_timer(Spinners::Dots, "Scraping keys...".into()); + // TODO We could start downloading when having collected the first batch of keys + // https://github.com/paritytech/polkadot-sdk/issues/2494 let keys = self - .rpc_get_keys_paged(prefix.clone(), at) + .rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS) .await? .into_iter() .collect::>(); @@ -628,9 +725,9 @@ where .unwrap() .progress_chars("=>-"), ); - let payloads_chunked = payloads.chunks((&payloads.len() / Self::PARALLEL_REQUESTS).max(1)); + let payloads_chunked = payloads.chunks((payloads.len() / Self::PARALLEL_REQUESTS).max(1)); let requests = payloads_chunked.map(|payload_chunk| { - Self::get_storage_data_dynamic_batch_size(&client, payload_chunk.to_vec(), &bar) + Self::get_storage_data_dynamic_batch_size(client, payload_chunk.to_vec(), &bar) }); // Execute the requests and move the Result outside. let storage_data_result: Result, _> = @@ -644,7 +741,7 @@ where }, }; bar.finish_with_message("✅ Downloaded key values"); - print!("\n"); + println!(); // Check if we got responses for all submitted requests. 
assert_eq!(keys.len(), storage_data.len()); @@ -778,8 +875,9 @@ where pending_ext: &mut TestExternalities>, ) -> Result { let child_roots = top_kv - .into_iter() - .filter_map(|(k, _)| is_default_child_storage_key(k.as_ref()).then(|| k.clone())) + .iter() + .filter(|(k, _)| is_default_child_storage_key(k.as_ref())) + .map(|(k, _)| k.clone()) .collect::>(); if child_roots.is_empty() { @@ -799,11 +897,10 @@ where let mut child_kv = vec![]; for prefixed_top_key in child_roots { let child_keys = - Self::rpc_child_get_keys(&client, &prefixed_top_key, StorageKey(vec![]), at) - .await?; + Self::rpc_child_get_keys(client, &prefixed_top_key, StorageKey(vec![]), at).await?; let child_kv_inner = - Self::rpc_child_get_storage_paged(&client, &prefixed_top_key, child_keys, at) + Self::rpc_child_get_storage_paged(client, &prefixed_top_key, child_keys, at) .await?; let prefixed_top_key = PrefixedStorageKey::new(prefixed_top_key.clone().0); @@ -846,7 +943,7 @@ where for prefix in &config.hashed_prefixes { let now = std::time::Instant::now(); let additional_key_values = - self.rpc_get_pairs_paged(StorageKey(prefix.to_vec()), at, pending_ext).await?; + self.rpc_get_pairs(StorageKey(prefix.to_vec()), at, pending_ext).await?; let elapsed = now.elapsed(); log::info!( target: LOG_TARGET, @@ -1110,7 +1207,7 @@ mod test_prelude { pub(crate) type Block = RawBlock>; pub(crate) fn init_logger() { - let _ = sp_tracing::try_init_simple(); + sp_tracing::try_init_simple(); } } @@ -1440,4 +1537,26 @@ mod remote_tests { .unwrap() .execute_with(|| {}); } + + #[tokio::test] + async fn can_fetch_in_parallel() { + init_logger(); + + let uri = String::from("wss://kusama-bridge-hub-rpc.polkadot.io:443"); + let mut builder = Builder::::new() + .mode(Mode::Online(OnlineConfig { transport: uri.into(), ..Default::default() })); + builder.init_remote_client().await.unwrap(); + + let at = builder.as_online().at.unwrap(); + + let prefix = StorageKey(vec![13]); + let paged = builder.rpc_get_keys_in_range(&prefix, at, None, None).await.unwrap(); + let para = builder.rpc_get_keys_parallel(&prefix, at, 4).await.unwrap(); + assert_eq!(paged, para); + + let prefix = StorageKey(vec![]); + let paged = builder.rpc_get_keys_in_range(&prefix, at, None, None).await.unwrap(); + let para = builder.rpc_get_keys_parallel(&prefix, at, 8).await.unwrap(); + assert_eq!(paged, para); + } } diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index d0f323c096ff965daec69432e0d0278539f22bda..1e8a298726eb8d710f9b78c02628e3da4a0bcb5f 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -8,13 +8,16 @@ homepage = "https://substrate.io" repository.workspace = true description = "Shared JSON-RPC client" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.16.2", features = ["ws-client"] } sc-rpc-api = { path = "../../../../client/rpc-api" } -async-trait = "0.1.57" +async-trait = "0.1.74" serde = "1" sp-runtime = { path = "../../../../primitives/runtime" } log = "0.4" diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index d25e6ec67c9a841f7a908cb6d4c6a229250cc905..368273d609fcb08c67047ce5486cc5b32c014ba7 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true 
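
The reworked key scraping in `substrate/utils/frame/remote-externalities/src/lib.rs` above splits the 32-byte key space under a prefix into 16 contiguous ranges, keyed by the next two bytes in steps of 0x1000, and downloads them concurrently under a `tokio::sync::Semaphore` cap; each range's start key serves as the previous range's exclusive end key so duplicated keys across workloads are filtered out. A standalone sketch of just the range-splitting step, not part of the diff:

```rust
// Standalone sketch of the range-splitting idea behind the new `gen_start_keys` helper.
// Beneath `prefix`, 32-byte keys are split into 16 ranges by the next two bytes; each
// start key is zero-padded to the full 32 bytes.
fn start_keys(prefix: &[u8]) -> Vec<Vec<u8>> {
    let scale = 32usize.saturating_sub(prefix.len());
    if scale < 9 {
        // Too little room under the prefix to be worth splitting.
        let mut only = prefix.to_vec();
        only.extend(vec![0; scale]);
        return vec![only];
    }

    let chunks = 16usize;
    let step = 0x10000 / chunks; // 0x1000
    let ext = scale - 2; // zero padding after the two range-selecting bytes

    (0..chunks)
        .map(|i| {
            let mut key = prefix.to_vec();
            let start = i * step;
            key.extend(vec![(start >> 8) as u8, (start & 0xff) as u8]);
            key.extend(vec![0; ext]);
            key
        })
        .collect()
}

fn main() {
    // A 2-byte prefix yields starts 0x0d010000…, 0x0d011000…, …, 0x0d01f000… (32 bytes each).
    for key in start_keys(&[0x0d, 0x01]) {
        println!("0x{}", key.iter().map(|b| format!("{b:02x}")).collect::<String>());
    }
}
```
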
description = "Node-specific RPC methods for interaction with state trie migration." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -21,7 +24,7 @@ sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } trie-db = "0.28.0" -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } # Substrate Dependencies sc-client-api = { path = "../../../../client/api" } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 22283fbf4506cccc0cbfbda3fabdb852c5b89168..1cc6d8e98b365e5fcfd8bc6004b4d13b8c1a0ae4 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Substrate RPC for FRAME's support" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -21,7 +24,7 @@ sp-storage = { path = "../../../../primitives/storage" } [dev-dependencies] scale-info = "2.10.0" -jsonrpsee = { version = "0.16.2", features = ["ws-client", "jsonrpsee-types"] } +jsonrpsee = { version = "0.16.2", features = ["jsonrpsee-types", "ws-client"] } tokio = "1.22.0" sp-core = { path = "../../../../primitives/core" } sp-runtime = { path = "../../../../primitives/runtime" } diff --git a/substrate/utils/frame/rpc/support/src/lib.rs b/substrate/utils/frame/rpc/support/src/lib.rs index 2d8e45cbfc69f009a808f13cac9ba4170dfcf2eb..bb5098293c2859a444b108086a79ef829869428f 100644 --- a/substrate/utils/frame/rpc/support/src/lib.rs +++ b/substrate/utils/frame/rpc/support/src/lib.rs @@ -34,7 +34,7 @@ use sp_storage::{StorageData, StorageKey}; /// # use jsonrpsee::core::Error as RpcError; /// # use jsonrpsee::ws_client::WsClientBuilder; /// # use codec::Encode; -/// # use frame_support::{construct_runtime, traits::ConstU32}; +/// # use frame_support::{construct_runtime, derive_impl, traits::ConstU32}; /// # use substrate_frame_rpc_support::StorageQuery; /// # use sc_rpc_api::state::StateApiClient; /// # use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; @@ -49,6 +49,7 @@ use sp_storage::{StorageData, StorageKey}; /// # /// # type Hash = sp_core::H256; /// # +/// # #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] /// # impl frame_system::Config for TestRuntime { /// # type BaseCallFilter = (); /// # type BlockWeights = (); @@ -62,6 +63,7 @@ use sp_storage::{StorageData, StorageKey}; /// # type Lookup = IdentityLookup; /// # type Block = frame_system::mocking::MockBlock; /// # type RuntimeEvent = RuntimeEvent; +/// # type RuntimeTask = RuntimeTask; /// # type BlockHashCount = (); /// # type DbWeight = (); /// # type Version = (); diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 77c3b7eeee3c0cb04203ddc50f10558a1ff02030..84c3265c93d36e90fda0e6bea0cb261456fe6676 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -9,12 +9,15 @@ repository.workspace = true description = "FRAME's system exposed over Substrate RPC" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version 
= "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } futures = "0.3.21" log = "0.4.17" frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 6be4306193ce41437d80f4a2f6bf53b1432f4f83..e7ae9a6d3dbf8bbc91839b1f82515bb79b5ef993 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -8,11 +8,14 @@ homepage = "https://substrate.io" repository.workspace = true description = "Cli command runtime testing and dry-running" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -remote-externalities = { package = "frame-remote-externalities" , path = "../../remote-externalities" } +remote-externalities = { package = "frame-remote-externalities", path = "../../remote-externalities" } sc-cli = { path = "../../../../client/cli" } sc-executor = { path = "../../../../client/executor" } sp-consensus-aura = { path = "../../../../primitives/consensus/aura" } @@ -31,15 +34,15 @@ sp-version = { path = "../../../../primitives/version" } sp-debug-derive = { path = "../../../../primitives/debug-derive" } sp-api = { path = "../../../../primitives/api" } sp-weights = { path = "../../../../primitives/weights" } -frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true} +frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true } substrate-rpc-client = { path = "../../rpc/client" } -async-trait = "0.1.57" -clap = { version = "4.4.6", features = ["derive"] } +async-trait = "0.1.74" +clap = { version = "4.4.11", features = ["derive"] } hex = { version = "0.4.3", default-features = false } log = "0.4.17" parity-scale-codec = "3.6.1" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" zstd = { version = "0.12.4", default-features = false } diff --git a/substrate/utils/prometheus/Cargo.toml b/substrate/utils/prometheus/Cargo.toml index bf999a66111f9456ff12b02613a9eeb3af2f197e..252998d94bd1cb79736bbee907b61700b4ec2724 100644 --- a/substrate/utils/prometheus/Cargo.toml +++ b/substrate/utils/prometheus/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 2550122ad4b4c038367ffe9ddf4b6ad45fe76245..2154cfca177c9ea07fc642d2cfc967eeeb505a85 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -4,11 +4,13 @@ version = "5.0.0-dev" authors.workspace = true description = "Utility for building WASM binaries" edition.workspace = true -readme = "README.md" repository.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,7 +20,7 @@ build-helper = "0.1.1" cargo_metadata = "0.15.4" strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.1.0" -toml = "0.7.3" +toml = "0.8.2" walkdir = "2.3.2" sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } filetime = "0.2.16" diff --git a/substrate/utils/wasm-builder/README.md 
b/substrate/utils/wasm-builder/README.md deleted file mode 100644 index db32f5cbc955cc36b056c612caa65c1ef45ca683..0000000000000000000000000000000000000000 --- a/substrate/utils/wasm-builder/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Wasm builder is a utility for building a project as a Wasm binary - -The Wasm builder is a tool that integrates the process of building the WASM binary of your project into the main `cargo` -build process. - -## Project setup - -A project that should be compiled as a Wasm binary needs to: - -1. Add a `build.rs` file. -2. Add `wasm-builder` as dependency into `build-dependencies` (can be made optional and only enabled when `std` feature - is used). - -The `build.rs` file needs to contain the following code: - -```rust -fn main() { - #[cfg(feature = "std")] - { - substrate_wasm_builder::WasmBuilder::new() - // Tell the builder to build the project (crate) this `build.rs` is part of. - .with_current_project() - // Make sure to export the `heap_base` global, this is required by Substrate - .export_heap_base() - // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) - .import_memory() - // Build it. - .build(); - } -} -``` - -As the final step, you need to add the following to your project: - -```rust -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -``` - -This will include the generated Wasm binary as two constants `WASM_BINARY` and `WASM_BINARY_BLOATY`. The former is a -compact Wasm binary and the latter is the Wasm binary as being generated by the compiler. Both variables have -`Option<&'static [u8]>` as type. - -### Features - -Wasm builder supports to enable cargo features while building the Wasm binary. By default it will enable all features in -the wasm build that are enabled for the native build except the `default` and `std` features. Besides that, wasm builder -supports the special `runtime-wasm` feature. This `runtime-wasm` feature will be enabled by the wasm builder when it -compiles the Wasm binary. If this feature is not present, it will not be enabled. - -## Environment variables - -By using environment variables, you can configure which Wasm binaries are built and how: - -- `SKIP_WASM_BUILD` - Skips building any Wasm binary. This is useful when only native should be recompiled. If this is - the first run and there doesn't exist a Wasm binary, this will set both variables to `None`. -- `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are `release` or `debug`. By - default the build type is equal to the build type used by the main build. -- `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the variable needs to change. - As wasm-builder instructs `cargo` to watch for file changes this environment variable should only - be required in certain circumstances. -- `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. -- `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -- `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path needs to be absolute. -- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The format needs to be the same - as used by cargo, e.g. `nightly-2020-02-20`. -- `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to prevent network access. - Useful in offline environments. 
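
The README being removed above documents that the generated `wasm_binary.rs` exposes `WASM_BINARY` and `WASM_BINARY_BLOATY` as `Option<&'static [u8]>`, and that both end up as `None` when the Wasm build is skipped (for example via `SKIP_WASM_BUILD`). A small sketch of the guard a runtime crate typically adds around that, with a hypothetical helper name not taken from this diff:

```rust
// Pulls in the `WASM_BINARY` / `WASM_BINARY_BLOATY` constants generated by wasm-builder.
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));

/// Returns the compact Wasm blob, panicking with a readable message when the Wasm build
/// was skipped (e.g. `SKIP_WASM_BUILD=1`), in which case `WASM_BINARY` is `None`.
pub fn wasm_binary_unwrap() -> &'static [u8] {
    WASM_BINARY.expect(
        "wasm binary not available; rebuild without `SKIP_WASM_BUILD` so wasm-builder can produce it",
    )
}
```
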
- -Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. Where -`PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will be `NODE_RUNTIME`. - -## Prerequisites - -Wasm builder requires the following prerequisites for building the Wasm binary: - -- rust nightly + `wasm32-unknown-unknown` toolchain - -or - -- rust stable and version at least 1.68.0 + `wasm32-unknown-unknown` toolchain - -If a specific rust is installed with `rustup`, it is important that the wasm target is installed as well. For example if -installing the rust from 20.02.2020 using `rustup install nightly-2020-02-20`, the wasm target needs to be installed as -well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. - -License: Apache-2.0 diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index c9011f97be711d9338c3cd572f4c105a910ca581..ec85fd1ffddbf8f9aa54aec0e840830c99861a11 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -87,6 +87,9 @@ //! required as we walk up from the target directory until we find a `Cargo.toml`. If the target //! directory is changed for the build, this environment variable can be used to point to the //! actual workspace. +//! - `WASM_BUILD_STD` - Sets whether the Rust's standard library crates will also be built. This is +//! necessary to make sure the standard library crates only use the exact WASM feature set that +//! our executor supports. Enabled by default. //! - `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to //! prevent network access. Useful in offline environments. //! @@ -158,6 +161,9 @@ const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; /// Environment variable that hints the workspace we are building. const WASM_BUILD_WORKSPACE_HINT: &str = "WASM_BUILD_WORKSPACE_HINT"; +/// Environment variable to set whether we'll build `core`/`std`. +const WASM_BUILD_STD: &str = "WASM_BUILD_STD"; + /// Write to the given `file` if the `content` is different. fn write_file_if_changed(file: impl AsRef, content: impl AsRef) { if fs::read_to_string(file.as_ref()).ok().as_deref() != Some(content.as_ref()) { @@ -282,6 +288,12 @@ impl CargoCommand { self.version } + /// Returns whether this version of the toolchain supports nightly features. + fn supports_nightly_features(&self) -> bool { + self.version.map_or(false, |version| version.is_nightly) || + env::var("RUSTC_BOOTSTRAP").is_ok() + } + /// Check if the supplied cargo command supports our Substrate wasm environment. /// /// This means that either the cargo version is at minimum 1.68.0 or this is a nightly cargo. @@ -332,3 +344,26 @@ impl std::ops::Deref for CargoCommandVersioned { fn color_output_enabled() -> bool { env::var(crate::WASM_BUILD_NO_COLOR).is_err() } + +/// Fetches a boolean environment variable. Will exit the process if the value is invalid. +fn get_bool_environment_variable(name: &str) -> Option { + let value = env::var_os(name)?; + + // We're comparing `OsString`s here so we can't use a `match`. + if value == "1" { + Some(true) + } else if value == "0" { + Some(false) + } else { + build_helper::warning!( + "the '{}' environment variable has an invalid value; it must be either '1' or '0'", + name + ); + std::process::exit(1); + } +} + +/// Returns whether we need to also compile the standard library when compiling the runtime. 
+fn build_std_required() -> bool { + crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(true) +} diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index 8e81e6774faa68b0081a5cb0b679175160440042..2cdbdd2798ebc91293e927ac9cb7080c52959ae1 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -47,14 +47,11 @@ pub(crate) fn check() -> Result { check_wasm_toolchain_installed(cargo_command) } -/// Create the project that will be used to check that the wasm toolchain is installed and to -/// extract the rustc version. -fn create_check_toolchain_project(project_dir: &Path) { - let lib_rs_file = project_dir.join("src/lib.rs"); - let main_rs_file = project_dir.join("src/main.rs"); - let build_rs_file = project_dir.join("build.rs"); - let manifest_path = project_dir.join("Cargo.toml"); +/// Creates a minimal dummy crate at the given path and returns the manifest path. +fn create_minimal_crate(project_dir: &Path) -> std::path::PathBuf { + fs::create_dir_all(project_dir.join("src")).expect("Creating src dir does not fail; qed"); + let manifest_path = project_dir.join("Cargo.toml"); write_file_if_changed( &manifest_path, r#" @@ -62,120 +59,99 @@ fn create_check_toolchain_project(project_dir: &Path) { name = "wasm-test" version = "1.0.0" edition = "2021" - build = "build.rs" - - [lib] - name = "wasm_test" - crate-type = ["cdylib"] [workspace] "#, ); - write_file_if_changed(lib_rs_file, "pub fn test() {}"); - - // We want to know the rustc version of the rustc that is being used by our cargo command. - // The cargo command is determined by some *very* complex algorithm to find the cargo command - // that supports nightly. - // The best solution would be if there is a `cargo rustc --version` command, which sadly - // doesn't exists. So, the only available way of getting the rustc version is to build a project - // and capture the rustc version in this build process. This `build.rs` is exactly doing this. - // It gets the rustc version by calling `rustc --version` and exposing it in the `RUSTC_VERSION` - // environment variable. - write_file_if_changed( - build_rs_file, - r#" - fn main() { - let rustc_cmd = std::env::var("RUSTC").ok().unwrap_or_else(|| "rustc".into()); - - let rustc_version = std::process::Command::new(rustc_cmd) - .arg("--version") - .output() - .ok() - .and_then(|o| String::from_utf8(o.stdout).ok()); - - println!( - "cargo:rustc-env=RUSTC_VERSION={}", - rustc_version.unwrap_or_else(|| "unknown rustc version".into()), - ); - } - "#, - ); - // Just prints the `RURSTC_VERSION` environment variable that is being created by the - // `build.rs` script. 
- write_file_if_changed( - main_rs_file, - r#" - fn main() { - println!("{}", env!("RUSTC_VERSION")); - } - "#, - ); + + write_file_if_changed(project_dir.join("src/main.rs"), "fn main() {}"); + manifest_path } fn check_wasm_toolchain_installed( cargo_command: CargoCommand, ) -> Result { let temp = tempdir().expect("Creating temp dir does not fail; qed"); - fs::create_dir_all(temp.path().join("src")).expect("Creating src dir does not fail; qed"); - create_check_toolchain_project(temp.path()); - - let err_msg = print_error_message("Rust WASM toolchain not installed, please install it!"); - let manifest_path = temp.path().join("Cargo.toml").display().to_string(); - - let mut build_cmd = cargo_command.command(); - // Chdir to temp to avoid including project's .cargo/config.toml - // by accident - it can happen in some CI environments. - build_cmd.current_dir(&temp); - build_cmd.args(&[ - "build", - "--target=wasm32-unknown-unknown", - "--manifest-path", - &manifest_path, - ]); + let manifest_path = create_minimal_crate(temp.path()).display().to_string(); + + let prepare_command = |subcommand| { + let mut cmd = cargo_command.command(); + // Chdir to temp to avoid including project's .cargo/config.toml + // by accident - it can happen in some CI environments. + cmd.current_dir(&temp); + cmd.args(&[ + subcommand, + "--target=wasm32-unknown-unknown", + "--manifest-path", + &manifest_path, + ]); + + if super::color_output_enabled() { + cmd.arg("--color=always"); + } - if super::color_output_enabled() { - build_cmd.arg("--color=always"); + // manually set the `CARGO_TARGET_DIR` to prevent a cargo deadlock + let target_dir = temp.path().join("target").display().to_string(); + cmd.env("CARGO_TARGET_DIR", &target_dir); + + // Make sure the host's flags aren't used here, e.g. if an alternative linker is specified + // in the RUSTFLAGS then the check we do here will break unless we clear these. + cmd.env_remove("CARGO_ENCODED_RUSTFLAGS"); + cmd.env_remove("RUSTFLAGS"); + cmd + }; + + let err_msg = + print_error_message("Rust WASM toolchain is not properly installed; please install it!"); + let build_result = prepare_command("build").output().map_err(|_| err_msg.clone())?; + if !build_result.status.success() { + return match String::from_utf8(build_result.stderr) { + Ok(ref err) if err.contains("the `wasm32-unknown-unknown` target may not be installed") => + Err(print_error_message("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\ + You can install it with `rustup target add wasm32-unknown-unknown` if you're using `rustup`.")), + + // Apparently this can happen when we're running on a non Tier 1 platform. + Ok(ref err) if err.contains("linker `rust-lld` not found") => + Err(print_error_message("Cannot compile the WASM runtime: `rust-lld` not found!")), + + Ok(ref err) => Err(format!( + "{}\n\n{}\n{}\n{}{}\n", + err_msg, + Color::Yellow.bold().paint("Further error information:"), + Color::Yellow.bold().paint("-".repeat(60)), + err, + Color::Yellow.bold().paint("-".repeat(60)), + )), + + Err(_) => Err(err_msg), + }; } - let mut run_cmd = cargo_command.command(); - // Chdir to temp to avoid including project's .cargo/config.toml - // by accident - it can happen in some CI environments. 
- run_cmd.current_dir(&temp); - run_cmd.args(&["run", "--manifest-path", &manifest_path]); - - // manually set the `CARGO_TARGET_DIR` to prevent a cargo deadlock - let target_dir = temp.path().join("target").display().to_string(); - build_cmd.env("CARGO_TARGET_DIR", &target_dir); - run_cmd.env("CARGO_TARGET_DIR", &target_dir); - - // Make sure the host's flags aren't used here, e.g. if an alternative linker is specified - // in the RUSTFLAGS then the check we do here will break unless we clear these. - build_cmd.env_remove("CARGO_ENCODED_RUSTFLAGS"); - run_cmd.env_remove("CARGO_ENCODED_RUSTFLAGS"); - build_cmd.env_remove("RUSTFLAGS"); - run_cmd.env_remove("RUSTFLAGS"); - - build_cmd.output().map_err(|_| err_msg.clone()).and_then(|s| { - if s.status.success() { - let version = run_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()); - Ok(CargoCommandVersioned::new( - cargo_command, - version.unwrap_or_else(|| "unknown rustc version".into()), - )) - } else { - match String::from_utf8(s.stderr) { - Ok(ref err) if err.contains("linker `rust-lld` not found") => - Err(print_error_message("`rust-lld` not found, please install it!")), - Ok(ref err) => Err(format!( - "{}\n\n{}\n{}\n{}{}\n", - err_msg, - Color::Yellow.bold().paint("Further error information:"), - Color::Yellow.bold().paint("-".repeat(60)), - err, - Color::Yellow.bold().paint("-".repeat(60)), - )), - Err(_) => Err(err_msg), + let mut run_cmd = prepare_command("rustc"); + run_cmd.args(&["-q", "--", "--version"]); + + let version = run_cmd + .output() + .ok() + .and_then(|o| String::from_utf8(o.stdout).ok()) + .unwrap_or_else(|| "unknown rustc version".into()); + + if crate::build_std_required() { + let mut sysroot_cmd = prepare_command("rustc"); + sysroot_cmd.args(&["-q", "--", "--print", "sysroot"]); + if let Some(sysroot) = + sysroot_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()) + { + let src_path = + Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); + if !src_path.exists() { + return Err(print_error_message( + "Cannot compile the WASM runtime: no standard library sources found!\n\ + You can install them with `rustup component add rust-src` if you're using `rustup`.", + )) } } - }) + } + + Ok(CargoCommandVersioned::new(cargo_command, version)) } diff --git a/substrate/utils/wasm-builder/src/version.rs b/substrate/utils/wasm-builder/src/version.rs index e4f7d98be61876840a9ca70f3e29176ba92c5a99..3a0a306d737dbb5c51eb50d8926101f0320b0b5e 100644 --- a/substrate/utils/wasm-builder/src/version.rs +++ b/substrate/utils/wasm-builder/src/version.rs @@ -212,4 +212,21 @@ mod tests { version_1_69_0, ); } + + #[test] + fn parse_rustc_version() { + let version = Version::extract("rustc 1.73.0 (cc66ad468 2023-10-03)").unwrap(); + assert_eq!( + version, + Version { + major: 1, + minor: 73, + patch: 0, + is_nightly: false, + year: Some(2023), + month: Some(10), + day: Some(03), + } + ); + } } diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index c41e0935d750737c9a083846dfc75495ca789b83..5bf44c2c9b20e5fa5f304c4f2b02c9c2bc671c35 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -18,7 +18,7 @@ use crate::{write_file_if_changed, CargoCommandVersioned, OFFLINE}; use build_helper::rerun_if_changed; -use cargo_metadata::{CargoOpt, Metadata, MetadataCommand}; +use cargo_metadata::{DependencyKind, Metadata, MetadataCommand}; use 
parity_wasm::elements::{deserialize_buffer, Module}; use std::{ borrow::ToOwned, @@ -89,8 +89,7 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { cargo_manifest.to_path_buf() }; - let mut crate_metadata_command = create_metadata_command(cargo_manifest); - crate_metadata_command.features(CargoOpt::AllFeatures); + let crate_metadata_command = create_metadata_command(cargo_manifest); let crate_metadata = crate_metadata_command .exec() @@ -751,6 +750,25 @@ fn build_bloaty_blob( build_cmd.arg("--offline"); } + // Our executor currently only supports the WASM MVP feature set, however nowadays + // when compiling WASM the Rust compiler has more features enabled by default. + // + // We do set the `-C target-cpu=mvp` flag to make sure that *our* code gets compiled + // in a way that is compatible with our executor, however this doesn't affect Rust's + // standard library crates (`std`, `core` and `alloc`) which are by default precompiled + // and still can make use of these extra features. + // + // So here we force the compiler to also compile the standard library crates for us + // to make sure that they also only use the MVP features. + if crate::build_std_required() { + // Unfortunately this is still a nightly-only flag, but FWIW it is pretty widely used + // so it's unlikely to break without a replacement. + build_cmd.arg("-Z").arg("build-std"); + if !cargo_cmd.supports_nightly_features() { + build_cmd.env("RUSTC_BOOTSTRAP", "1"); + } + } + println!("{}", colorize_info_message("Information that should be included in a bug report.")); println!("{} {:?}", colorize_info_message("Executing build command:"), build_cmd); println!("{} {}", colorize_info_message("Using rustc version:"), cargo_cmd.rustc_version()); @@ -915,6 +933,11 @@ fn generate_rerun_if_changed_instructions( packages.insert(DeduplicatePackage::from(package)); while let Some(dependency) = dependencies.pop() { + // Ignore all dev dependencies + if dependency.kind == DependencyKind::Development { + continue; + } + let path_or_git_dep = dependency.source.as_ref().map(|s| s.starts_with("git+")).unwrap_or(true); @@ -948,6 +971,7 @@ fn generate_rerun_if_changed_instructions( println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_RUSTFLAGS_ENV); println!("cargo:rerun-if-env-changed={}", crate::WASM_TARGET_DIRECTORY); println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TOOLCHAIN); + println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_STD); } /// Track files and paths related to the given package to rerun `build.rs` on any relevant change. @@ -967,9 +991,7 @@ fn package_rerun_if_changed(package: &DeduplicatePackage) { p.path() == manifest_path || !p.path().is_dir() || !p.path().join("Cargo.toml").exists() }) .filter_map(|p| p.ok().map(|p| p.into_path())) - .filter(|p| { - p.is_dir() || p.extension().map(|e| e == "rs" || e == "toml").unwrap_or_default() - }) + .filter(|p| p.extension().map(|e| e == "rs" || e == "toml").unwrap_or_default()) .for_each(rerun_if_changed); }
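The wasm-builder hunks above introduce the `WASM_BUILD_STD` switch in three places (`lib.rs`, `prerequisites.rs` and `wasm_project.rs`), so the overall behaviour is easy to lose in the interleaved removals. As a reading aid, here is a condensed sketch of how the flag is parsed and applied; the free functions and their names are illustrative only and are not part of the crate, and the error handling is simplified compared to the real `get_bool_environment_variable`.

```rust
use std::{env, process::Command};

/// Illustrative only: mirrors how wasm-builder gates `-Z build-std`.
/// `WASM_BUILD_STD` must be "1" or "0"; when it is unset, the standard
/// library *is* rebuilt for the runtime (the default in the hunks above).
fn build_std_requested() -> bool {
    match env::var("WASM_BUILD_STD").as_deref() {
        Ok("1") | Err(_) => true,
        Ok("0") => false,
        Ok(other) => panic!("WASM_BUILD_STD must be '1' or '0', got {other:?}"),
    }
}

/// Adds the build-std related arguments to a `cargo build` invocation.
/// `toolchain_is_nightly` would come from parsing `rustc --version`.
fn apply_build_std(build_cmd: &mut Command, toolchain_is_nightly: bool) {
    if build_std_requested() {
        // Rebuild `core`/`std`/`alloc` so that they are limited to the WASM
        // MVP feature set, matching what `-C target-cpu=mvp` already does for
        // the runtime's own crates.
        build_cmd.arg("-Z").arg("build-std");
        if !toolchain_is_nightly {
            // `-Z` flags are nightly-only; setting RUSTC_BOOTSTRAP=1 lets a
            // stable toolchain accept them, as the wasm_project.rs hunk does.
            build_cmd.env("RUSTC_BOOTSTRAP", "1");
        }
    }
}
```

The `RUSTC_BOOTSTRAP=1` fallback is what keeps runtimes building on stable toolchains while still getting a freshly built standard library restricted to MVP features.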

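The reworked toolchain check in `prerequisites.rs` is similarly spread across several hunks. The sketch below condenses the same sequence into one self-contained function, using plain `std::process::Command` in place of the crate's `CargoCommand` wrapper; error messages are shortened and the coloured output, temp-dir handling and `RUSTFLAGS` clearing are omitted.

```rust
use std::{path::Path, process::Command};

/// Condensed, illustrative version of the check added above: build a dummy
/// crate for `wasm32-unknown-unknown`, classify the common failure modes,
/// read the rustc version through `cargo rustc`, and, when the standard
/// library has to be rebuilt, require the `rust-src` component.
fn check_wasm_toolchain(manifest_path: &str, build_std: bool) -> Result<String, String> {
    let build = Command::new("cargo")
        .args(["build", "--target=wasm32-unknown-unknown", "--manifest-path", manifest_path])
        .output()
        .map_err(|e| format!("failed to spawn cargo: {e}"))?;

    if !build.status.success() {
        let stderr = String::from_utf8_lossy(&build.stderr);
        return Err(if stderr.contains("may not be installed") {
            "missing target; run `rustup target add wasm32-unknown-unknown`".into()
        } else if stderr.contains("linker `rust-lld` not found") {
            "`rust-lld` not found".into()
        } else {
            format!("cannot compile the WASM runtime:\n{stderr}")
        });
    }

    // The real code routes both queries below through the same prepared
    // `cargo rustc` command (same target, manifest path and cleared RUSTFLAGS).
    let version = Command::new("cargo")
        .args(["rustc", "--manifest-path", manifest_path, "-q", "--", "--version"])
        .output()
        .ok()
        .and_then(|o| String::from_utf8(o.stdout).ok())
        .unwrap_or_else(|| "unknown rustc version".into());

    if build_std {
        let sysroot = Command::new("rustc")
            .args(["--print", "sysroot"])
            .output()
            .ok()
            .and_then(|o| String::from_utf8(o.stdout).ok())
            .ok_or_else(|| "could not query the sysroot".to_string())?;
        if !Path::new(sysroot.trim()).join("lib/rustlib/src/rust").exists() {
            return Err("no standard library sources; run `rustup component add rust-src`".into());
        }
    }

    Ok(version)
}
```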
, keystore: Option, + mut notification_service: Box, ) where B: Block, C: BlockchainEvents + ProvideRuntimeApi + HeaderBackend, C::Api: MixnetApi, S: SyncOracle, - N: NetworkStateInfo + NetworkEventStream + NetworkNotification + NetworkPeers, + N: NetworkStateInfo + NetworkNotification + NetworkPeers, P: TransactionPool + LocalTransactionPool + 'static, { let local_peer_id = network.local_peer_id(); @@ -189,7 +191,6 @@ pub async fn run( } else { None }; - let mut network_events = network.event_stream("mixnet").fuse(); let mut next_forward_packet_delay = MaybeInfDelay::new(None); let mut next_authored_packet_delay = MaybeInfDelay::new(None); let mut ready_peers = FuturesUnordered::new(); @@ -248,33 +249,36 @@ pub async fn run( } } - event = network_events.select_next_some() => match event { - NotificationStreamOpened { remote, protocol, .. } - if protocol == protocol_name => packet_dispatcher.add_peer(&remote), - NotificationStreamClosed { remote, protocol } - if protocol == protocol_name => packet_dispatcher.remove_peer(&remote), - NotificationsReceived { remote, messages } => { - for message in messages { - if message.0 == protocol_name { - match message.1.as_ref().try_into() { - Ok(packet) => handle_packet(packet, - &mut mixnet, &mut request_manager, &mut reply_manager, - &mut extrinsic_queue, &config.substrate), - Err(_) => debug!(target: LOG_TARGET, - "Dropped incorrectly sized packet ({} bytes) from {remote}", - message.1.len(), - ), - } - } + event = notification_service.next_event().fuse() => match event { + None => todo!(), + Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { + let _ = result_tx.send(ValidationResult::Accept); + }, + Some(NotificationEvent::NotificationStreamOpened { peer, .. }) => { + packet_dispatcher.add_peer(&peer); + }, + Some(NotificationEvent::NotificationStreamClosed { peer }) => { + packet_dispatcher.remove_peer(&peer); + }, + Some(NotificationEvent::NotificationReceived { peer, notification }) => { + let notification: Bytes = notification.into(); + + match notification.as_ref().try_into() { + Ok(packet) => handle_packet(packet, + &mut mixnet, &mut request_manager, &mut reply_manager, + &mut extrinsic_queue, &config.substrate), + Err(_) => debug!(target: LOG_TARGET, + "Dropped incorrectly sized packet ({} bytes) from {peer}", + notification.len(), + ), } - } - _ => () + }, }, _ = next_forward_packet_delay => { if let Some(packet) = mixnet.pop_next_forward_packet() { if let Some(ready_peer) = packet_dispatcher.dispatch(packet) { - if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) { + if let Some(fut) = ready_peer.send_packet(¬ification_service) { ready_peers.push(fut); } } @@ -288,7 +292,7 @@ pub async fn run( _ = next_authored_packet_delay => { if let Some(packet) = mixnet.pop_next_authored_packet(&packet_dispatcher) { if let Some(ready_peer) = packet_dispatcher.dispatch(packet) { - if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) { + if let Some(fut) = ready_peer.send_packet(¬ification_service) { ready_peers.push(fut); } } @@ -297,7 +301,7 @@ pub async fn run( ready_peer = ready_peers.select_next_some() => { if let Some(ready_peer) = ready_peer { - if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) { + if let Some(fut) = ready_peer.send_packet(¬ification_service) { ready_peers.push(fut); } } diff --git a/substrate/client/mixnet/src/sync_with_runtime.rs b/substrate/client/mixnet/src/sync_with_runtime.rs index 
4a80b3c75f43d88ac5fd714e55fd8e7cb4c26d1c..f3be96025410dbbbf28cb24197a6e09ff436cae9 100644 --- a/substrate/client/mixnet/src/sync_with_runtime.rs +++ b/substrate/client/mixnet/src/sync_with_runtime.rs @@ -196,6 +196,7 @@ where #[cfg(test)] mod tests { use super::*; + use multiaddr::multiaddr; #[test] fn fixup_empty_external_addresses() { diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index 95e26a232c1dcf58d792c649db882722407077bd..c53c53fb1350fd9c2e213298f5853ecdee9079a4 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -10,6 +10,9 @@ repository.workspace = true documentation = "https://docs.rs/sc-network-gossip" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] ahash = "0.8.2" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = "0.51.3" +libp2p = "0.51.4" log = "0.4.17" schnellru = "0.2.1" tracing = "0.1.29" @@ -29,5 +32,7 @@ sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] tokio = "1.22.0" +async-trait = "0.1.74" +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } quickcheck = { version = "1.0.3", default-features = false } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index 8f7d490757b3ec1400d0dce44a4d70e642ef45be..1d6a4bdd0c086996ed45a7074a1a51bcf34f12d9 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -21,7 +21,11 @@ use crate::{ Network, Syncing, Validator, }; -use sc_network::{event::Event, types::ProtocolName, ReputationChange}; +use sc_network::{ + service::traits::{NotificationEvent, ValidationResult}, + types::ProtocolName, + NotificationService, ReputationChange, +}; use sc_network_sync::SyncEvent; use futures::{ @@ -48,10 +52,10 @@ pub struct GossipEngine { periodic_maintenance_interval: futures_timer::Delay, protocol: ProtocolName, - /// Incoming events from the network. - network_event_stream: Pin + Send>>, /// Incoming events from the syncing service. sync_event_stream: Pin + Send>>, + /// Handle for polling notification-related events. + notification_service: Box, /// Outgoing events to the consumer. message_sinks: HashMap>>, /// Buffered messages (see [`ForwardingState`]). @@ -81,6 +85,7 @@ impl GossipEngine { pub fn new( network: N, sync: S, + notification_service: Box, protocol: impl Into, validator: Arc>, metrics_registry: Option<&Registry>, @@ -91,17 +96,16 @@ impl GossipEngine { S: Syncing + Send + Clone + 'static, { let protocol = protocol.into(); - let network_event_stream = network.event_stream("network-gossip"); let sync_event_stream = sync.event_stream("network-gossip"); GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), network: Box::new(network), sync: Box::new(sync), + notification_service, periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), protocol, - network_event_stream, sync_event_stream, message_sinks: HashMap::new(), forwarding_state: ForwardingState::Idle, @@ -125,7 +129,7 @@ impl GossipEngine { /// Broadcast all messages with given topic. 
pub fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { - self.state_machine.broadcast_topic(&mut *self.network, topic, force); + self.state_machine.broadcast_topic(&mut self.notification_service, topic, force); } /// Get data of valid, incoming messages for a topic (but might have expired meanwhile). @@ -150,19 +154,21 @@ impl GossipEngine { /// Send all messages with given topic to a peer. pub fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { - self.state_machine.send_topic(&mut *self.network, who, topic, force) + self.state_machine.send_topic(&mut self.notification_service, who, topic, force) } /// Multicast a message to all peers. pub fn gossip_message(&mut self, topic: B::Hash, message: Vec, force: bool) { - self.state_machine.multicast(&mut *self.network, topic, message, force) + self.state_machine + .multicast(&mut self.notification_service, topic, message, force) } /// Send addressed message to the given peers. The message is not kept or multicast /// later on. pub fn send_message(&mut self, who: Vec, data: Vec) { for who in &who { - self.state_machine.send_message(&mut *self.network, who, data.clone()); + self.state_machine + .send_message(&mut self.notification_service, who, data.clone()); } } @@ -173,6 +179,11 @@ impl GossipEngine { pub fn announce(&self, block: B::Hash, associated_data: Option>) { self.sync.announce_block(block, associated_data); } + + /// Consume [`GossipEngine`] and return the notification service. + pub fn take_notification_service(self) -> Box { + self.notification_service + } } impl Future for GossipEngine { @@ -184,46 +195,56 @@ impl Future for GossipEngine { 'outer: loop { match &mut this.forwarding_state { ForwardingState::Idle => { - let net_event_stream = this.network_event_stream.poll_next_unpin(cx); + let next_notification_event = + this.notification_service.next_event().poll_unpin(cx); let sync_event_stream = this.sync_event_stream.poll_next_unpin(cx); - if net_event_stream.is_pending() && sync_event_stream.is_pending() { + if next_notification_event.is_pending() && sync_event_stream.is_pending() { break } - match net_event_stream { + match next_notification_event { Poll::Ready(Some(event)) => match event { - Event::NotificationStreamOpened { remote, protocol, role, .. } => - if protocol == this.protocol { - this.state_machine.new_peer(&mut *this.network, remote, role); - }, - Event::NotificationStreamClosed { remote, protocol } => { - if protocol == this.protocol { - this.state_machine - .peer_disconnected(&mut *this.network, remote); - } + NotificationEvent::ValidateInboundSubstream { + peer, + handshake, + result_tx, + .. + } => { + // only accept peers whose role can be determined + let result = this + .network + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); }, - Event::NotificationsReceived { remote, messages } => { - let messages = messages - .into_iter() - .filter_map(|(engine, data)| { - if engine == this.protocol { - Some(data.to_vec()) - } else { - None - } - }) - .collect(); - + NotificationEvent::NotificationStreamOpened { + peer, handshake, .. 
+ } => { + let Some(role) = this.network.peer_role(peer, handshake) else { + log::debug!(target: "gossip", "role for {peer} couldn't be determined"); + continue + }; + + this.state_machine.new_peer( + &mut this.notification_service, + peer, + role, + ); + }, + NotificationEvent::NotificationStreamClosed { peer } => { + this.state_machine + .peer_disconnected(&mut this.notification_service, peer); + }, + NotificationEvent::NotificationReceived { peer, notification } => { let to_forward = this.state_machine.on_incoming( &mut *this.network, - remote, - messages, + &mut this.notification_service, + peer, + vec![notification], ); - this.forwarding_state = ForwardingState::Busy(to_forward.into()); }, - Event::Dht(_) => {}, }, // The network event stream closed. Do the same for [`GossipValidator`]. Poll::Ready(None) => { @@ -306,7 +327,7 @@ impl Future for GossipEngine { while let Poll::Ready(()) = this.periodic_maintenance_interval.poll_unpin(cx) { this.periodic_maintenance_interval.reset(PERIODIC_MAINTENANCE_INTERVAL); - this.state_machine.tick(&mut *this.network); + this.state_machine.tick(&mut this.notification_service); this.message_sinks.retain(|_, sinks| { sinks.retain(|sink| !sink.is_closed()); @@ -328,15 +349,19 @@ impl futures::future::FusedFuture for GossipEngine { mod tests { use super::*; use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext}; + use codec::{DecodeAll, Encode}; use futures::{ - channel::mpsc::{unbounded, UnboundedSender}, + channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, executor::{block_on, block_on_stream}, future::poll_fn, }; use quickcheck::{Arbitrary, Gen, QuickCheck}; use sc_network::{ - config::MultiaddrWithPeerId, NetworkBlock, NetworkEventStream, NetworkNotification, - NetworkPeers, NotificationSenderError, NotificationSenderT as NotificationSender, + config::MultiaddrWithPeerId, + service::traits::{Direction, MessageSink, NotificationEvent}, + Event, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSenderError, NotificationSenderT as NotificationSender, NotificationService, + Roles, }; use sc_network_common::role::ObservedRole; use sc_network_sync::SyncEventStream; @@ -351,14 +376,10 @@ mod tests { use substrate_test_runtime_client::runtime::Block; #[derive(Clone, Default)] - struct TestNetwork { - inner: Arc>, - } + struct TestNetwork {} #[derive(Clone, Default)] - struct TestNetworkInner { - event_senders: Vec>, - } + struct TestNetworkInner {} impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { @@ -373,9 +394,13 @@ mod tests { unimplemented!(); } - fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) {} + fn report_peer(&self, _peer_id: PeerId, _cost_benefit: ReputationChange) {} + + fn peer_reputation(&self, _peer_id: &PeerId) -> i32 { + unimplemented!() + } - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { + fn disconnect_peer(&self, _peer_id: PeerId, _protocol: ProtocolName) { unimplemented!(); } @@ -422,14 +447,17 @@ mod tests { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, handshake: Vec) -> Option { + Roles::decode_all(&mut &handshake[..]) + .ok() + .and_then(|role| Some(ObservedRole::from(role))) + } } impl NetworkEventStream for TestNetwork { fn event_stream(&self, _name: &'static str) -> Pin + Send>> { - let (tx, rx) = unbounded(); - self.inner.lock().unwrap().event_senders.push(tx); - - Box::pin(rx) + unimplemented!(); } } @@ -501,6 +529,58 @@ mod tests { } } 
+ #[derive(Debug)] + pub(crate) struct TestNotificationService { + rx: UnboundedReceiver, + } + + #[async_trait::async_trait] + impl sc_network::service::traits::NotificationService for TestNotificationService { + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec) { + unimplemented!(); + } + + async fn send_async_notification( + &self, + _peer: &PeerId, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } + + async fn set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + fn try_set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + async fn next_event(&mut self) -> Option { + self.rx.next().await + } + + fn clone(&mut self) -> Result, ()> { + unimplemented!(); + } + + fn protocol(&self) -> &ProtocolName { + unimplemented!(); + } + + fn message_sink(&self, _peer: &PeerId) -> Option> { + unimplemented!(); + } + } + struct AllowAll; impl Validator for AllowAll { fn validate( @@ -521,16 +601,19 @@ mod tests { fn returns_when_network_event_stream_closes() { let network = TestNetwork::default(); let sync = Arc::new(TestSync::default()); + let (tx, rx) = unbounded(); + let notification_service = Box::new(TestNotificationService { rx }); let mut gossip_engine = GossipEngine::::new( network.clone(), sync, + notification_service, "/my_protocol", Arc::new(AllowAll {}), None, ); - // Drop network event stream sender side. - drop(network.inner.lock().unwrap().event_senders.pop()); + // drop notification service sender side. + drop(tx); block_on(poll_fn(move |ctx| { if let Poll::Pending = gossip_engine.poll_unpin(ctx) { @@ -550,42 +633,37 @@ mod tests { let remote_peer = PeerId::random(); let network = TestNetwork::default(); let sync = Arc::new(TestSync::default()); + let (mut tx, rx) = unbounded(); + let notification_service = Box::new(TestNotificationService { rx }); let mut gossip_engine = GossipEngine::::new( network.clone(), sync.clone(), + notification_service, protocol.clone(), Arc::new(AllowAll {}), None, ); - let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); - // Register the remote peer. - event_sender - .start_send(Event::NotificationStreamOpened { - remote: remote_peer, - protocol: protocol.clone(), - negotiated_fallback: None, - role: ObservedRole::Authority, - received_handshake: vec![], - }) - .expect("Event stream is unbounded; qed."); + tx.send(NotificationEvent::NotificationStreamOpened { + peer: remote_peer, + direction: Direction::Inbound, + negotiated_fallback: None, + handshake: Roles::FULL.encode(), + }) + .await + .unwrap(); let messages = vec![vec![1], vec![2]]; - let events = messages - .iter() - .cloned() - .map(|m| Event::NotificationsReceived { - remote: remote_peer, - messages: vec![(protocol.clone(), m.into())], - }) - .collect::>(); // Send first event before subscribing. - event_sender - .start_send(events[0].clone()) - .expect("Event stream is unbounded; qed."); + tx.send(NotificationEvent::NotificationReceived { + peer: remote_peer, + notification: messages[0].clone().into(), + }) + .await + .unwrap(); let mut subscribers = vec![]; for _ in 0..2 { @@ -593,9 +671,12 @@ mod tests { } // Send second event after subscribing. 
- event_sender - .start_send(events[1].clone()) - .expect("Event stream is unbounded; qed."); + tx.send(NotificationEvent::NotificationReceived { + peer: remote_peer, + notification: messages[1].clone().into(), + }) + .await + .unwrap(); tokio::spawn(gossip_engine); @@ -672,6 +753,8 @@ mod tests { let remote_peer = PeerId::random(); let network = TestNetwork::default(); let sync = Arc::new(TestSync::default()); + let (mut tx, rx) = unbounded(); + let notification_service = Box::new(TestNotificationService { rx }); let num_channels_per_topic = channels.iter().fold( HashMap::new(), @@ -699,6 +782,7 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), sync.clone(), + notification_service, protocol.clone(), Arc::new(TestValidator {}), None, @@ -724,22 +808,18 @@ mod tests { } } - let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); - // Register the remote peer. - event_sender - .start_send(Event::NotificationStreamOpened { - remote: remote_peer, - protocol: protocol.clone(), - negotiated_fallback: None, - role: ObservedRole::Authority, - received_handshake: vec![], - }) - .expect("Event stream is unbounded; qed."); + tx.start_send(NotificationEvent::NotificationStreamOpened { + peer: remote_peer, + direction: Direction::Inbound, + negotiated_fallback: None, + handshake: Roles::FULL.encode(), + }) + .unwrap(); // Send messages into the network event stream. for (i_notification, messages) in notifications.iter().enumerate() { - let messages = messages + let messages: Vec> = messages .into_iter() .enumerate() .map(|(i_message, Message { topic })| { @@ -752,13 +832,17 @@ mod tests { message.push(i_notification.try_into().unwrap()); message.push(i_message.try_into().unwrap()); - (protocol.clone(), message.into()) + message.into() }) .collect(); - event_sender - .start_send(Event::NotificationsReceived { remote: remote_peer, messages }) - .expect("Event stream is unbounded; qed."); + for message in messages { + tx.start_send(NotificationEvent::NotificationReceived { + peer: remote_peer, + notification: message, + }) + .unwrap(); + } } let mut received_msgs_per_topic_all_chan = HashMap::::new(); diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index 4bfb5a7d37f49bf6d8325006dd65420cdbb7fb42..069d7cdba16599b4b4da0965a5d8e4588478d633 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -23,7 +23,7 @@ use libp2p::PeerId; use schnellru::{ByLength, LruMap}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network::types::ProtocolName; +use sc_network::{types::ProtocolName, NotificationService}; use sc_network_common::role::ObservedRole; use sp_runtime::traits::{Block as BlockT, Hash, HashingFor}; use std::{collections::HashMap, iter, sync::Arc, time, time::Instant}; @@ -74,33 +74,33 @@ struct MessageEntry { /// Local implementation of `ValidatorContext`. struct NetworkContext<'g, 'p, B: BlockT> { gossip: &'g mut ConsensusGossip, - network: &'p mut dyn Network, + notification_service: &'p mut Box, } impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { /// Broadcast all messages with given topic to peers that do not have it yet. 
fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { - self.gossip.broadcast_topic(self.network, topic, force); + self.gossip.broadcast_topic(self.notification_service, topic, force); } /// Broadcast a message to all peers that have not received it previously. fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool) { - self.gossip.multicast(self.network, topic, message, force); + self.gossip.multicast(self.notification_service, topic, message, force); } /// Send addressed message to a peer. fn send_message(&mut self, who: &PeerId, message: Vec) { - self.network.write_notification(*who, self.gossip.protocol.clone(), message); + self.notification_service.send_sync_notification(who, message); } /// Send all messages with given topic to a peer. fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { - self.gossip.send_topic(self.network, who, topic, force); + self.gossip.send_topic(self.notification_service, who, topic, force); } } fn propagate<'a, B: BlockT, I>( - network: &mut dyn Network, + notification_service: &mut Box, protocol: ProtocolName, messages: I, intent: MessageIntent, @@ -147,7 +147,7 @@ where ?message, "Propagating message", ); - network.write_notification(*id, protocol.clone(), message.clone()); + notification_service.send_sync_notification(id, message.clone()); } } } @@ -191,7 +191,12 @@ impl ConsensusGossip { } /// Handle new connected peer. - pub fn new_peer(&mut self, network: &mut dyn Network, who: PeerId, role: ObservedRole) { + pub fn new_peer( + &mut self, + notification_service: &mut Box, + who: PeerId, + role: ObservedRole, + ) { tracing::trace!( target:"gossip", %who, @@ -202,7 +207,7 @@ impl ConsensusGossip { self.peers.insert(who, PeerConsensus { known_messages: Default::default() }); let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; + let mut context = NetworkContext { gossip: self, notification_service }; validator.new_peer(&mut context, &who, role); } @@ -233,30 +238,35 @@ impl ConsensusGossip { } /// Call when a peer has been disconnected to stop tracking gossip status. - pub fn peer_disconnected(&mut self, network: &mut dyn Network, who: PeerId) { + pub fn peer_disconnected( + &mut self, + notification_service: &mut Box, + who: PeerId, + ) { let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; + let mut context = NetworkContext { gossip: self, notification_service }; validator.peer_disconnected(&mut context, &who); self.peers.remove(&who); } /// Perform periodic maintenance - pub fn tick(&mut self, network: &mut dyn Network) { + pub fn tick(&mut self, notification_service: &mut Box) { self.collect_garbage(); if Instant::now() >= self.next_broadcast { - self.rebroadcast(network); + self.rebroadcast(notification_service); self.next_broadcast = Instant::now() + REBROADCAST_INTERVAL; } } /// Rebroadcast all messages to all peers. - fn rebroadcast(&mut self, network: &mut dyn Network) { + fn rebroadcast(&mut self, notification_service: &mut Box) { let messages = self .messages .iter() .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); + propagate( - network, + notification_service, self.protocol.clone(), messages, MessageIntent::PeriodicRebroadcast, @@ -266,7 +276,12 @@ impl ConsensusGossip { } /// Broadcast all messages with given topic. 
- pub fn broadcast_topic(&mut self, network: &mut dyn Network, topic: B::Hash, force: bool) { + pub fn broadcast_topic( + &mut self, + notification_service: &mut Box, + topic: B::Hash, + force: bool, + ) { let messages = self.messages.iter().filter_map(|entry| { if entry.topic == topic { Some((&entry.message_hash, &entry.topic, &entry.message)) @@ -276,7 +291,7 @@ impl ConsensusGossip { }); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; propagate( - network, + notification_service, self.protocol.clone(), messages, intent, @@ -327,6 +342,7 @@ impl ConsensusGossip { pub fn on_incoming( &mut self, network: &mut dyn Network, + notification_service: &mut Box, who: PeerId, messages: Vec>, ) -> Vec<(B::Hash, TopicNotification)> { @@ -367,7 +383,7 @@ impl ConsensusGossip { // validate the message let validation = { let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; + let mut context = NetworkContext { gossip: self, notification_service }; validator.validate(&mut context, &who, &message) }; @@ -414,7 +430,7 @@ impl ConsensusGossip { /// Send all messages with given topic to a peer. pub fn send_topic( &mut self, - network: &mut dyn Network, + notification_service: &mut Box, who: &PeerId, topic: B::Hash, force: bool, @@ -443,7 +459,7 @@ impl ConsensusGossip { ?entry.message, "Sending topic message", ); - network.write_notification(*who, self.protocol.clone(), entry.message.clone()); + notification_service.send_sync_notification(who, entry.message.clone()); } } } @@ -451,7 +467,7 @@ impl ConsensusGossip { /// Multicast a message to all peers. pub fn multicast( &mut self, - network: &mut dyn Network, + notification_service: &mut Box, topic: B::Hash, message: Vec, force: bool, @@ -460,7 +476,7 @@ impl ConsensusGossip { self.register_message_hashed(message_hash, topic, message.clone(), None); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; propagate( - network, + notification_service, self.protocol.clone(), iter::once((&message_hash, &topic, &message)), intent, @@ -471,7 +487,12 @@ impl ConsensusGossip { /// Send addressed message to a peer. The message is not kept or multicast /// later on. 
- pub fn send_message(&mut self, network: &mut dyn Network, who: &PeerId, message: Vec) { + pub fn send_message( + &mut self, + notification_service: &mut Box, + who: &PeerId, + message: Vec, + ) { let peer = match self.peers.get_mut(who) { None => return, Some(peer) => peer, @@ -488,7 +509,7 @@ impl ConsensusGossip { ); peer.known_messages.insert(message_hash); - network.write_notification(*who, self.protocol.clone(), message); + notification_service.send_sync_notification(who, message) } } @@ -524,9 +545,9 @@ mod tests { use crate::multiaddr::Multiaddr; use futures::prelude::*; use sc_network::{ - config::MultiaddrWithPeerId, event::Event, NetworkBlock, NetworkEventStream, - NetworkNotification, NetworkPeers, NotificationSenderError, - NotificationSenderT as NotificationSender, ReputationChange, + config::MultiaddrWithPeerId, event::Event, service::traits::NotificationEvent, MessageSink, + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSenderError, NotificationSenderT as NotificationSender, ReputationChange, }; use sp_runtime::{ testing::{Block as RawBlock, ExtrinsicWrapper, H256}, @@ -600,11 +621,15 @@ mod tests { unimplemented!(); } - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.inner.lock().unwrap().peer_reports.push((who, cost_benefit)); + fn report_peer(&self, peer_id: PeerId, cost_benefit: ReputationChange) { + self.inner.lock().unwrap().peer_reports.push((peer_id, cost_benefit)); + } + + fn peer_reputation(&self, _peer_id: &PeerId) -> i32 { + unimplemented!() } - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { + fn disconnect_peer(&self, _peer_id: PeerId, _protocol: ProtocolName) { unimplemented!(); } @@ -651,6 +676,10 @@ mod tests { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, _handshake: Vec) -> Option { + None + } } impl NetworkEventStream for NoOpNetwork { @@ -691,6 +720,62 @@ mod tests { } } + #[derive(Debug, Default)] + struct NoOpNotificationService {} + + #[async_trait::async_trait] + impl NotificationService for NoOpNotificationService { + /// Instruct `Notifications` to open a new substream for `peer`. + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Instruct `Notifications` to close substream for `peer`. + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec) { + unimplemented!(); + } + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + async fn send_async_notification( + &self, + _peer: &PeerId, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + fn try_set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + /// Get next event from the `Notifications` event stream. 
+ async fn next_event(&mut self) -> Option { + None + } + + fn clone(&mut self) -> Result, ()> { + unimplemented!(); + } + + fn protocol(&self) -> &ProtocolName { + unimplemented!(); + } + + fn message_sink(&self, _peer: &PeerId) -> Option> { + unimplemented!(); + } + } + #[test] fn collects_garbage() { struct AllowOne; @@ -773,20 +858,28 @@ mod tests { fn peer_is_removed_on_disconnect() { let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); - let mut network = NoOpNetwork::default(); + let mut notification_service: Box = + Box::new(NoOpNotificationService::default()); let peer_id = PeerId::random(); - consensus.new_peer(&mut network, peer_id, ObservedRole::Full); + consensus.new_peer(&mut notification_service, peer_id, ObservedRole::Full); assert!(consensus.peers.contains_key(&peer_id)); - consensus.peer_disconnected(&mut network, peer_id); + consensus.peer_disconnected(&mut notification_service, peer_id); assert!(!consensus.peers.contains_key(&peer_id)); } #[test] fn on_incoming_ignores_discarded_messages() { + let mut notification_service: Box = + Box::new(NoOpNotificationService::default()); let to_forward = ConsensusGossip::::new(Arc::new(DiscardAll), "/foo".into(), None) - .on_incoming(&mut NoOpNetwork::default(), PeerId::random(), vec![vec![1, 2, 3]]); + .on_incoming( + &mut NoOpNetwork::default(), + &mut notification_service, + PeerId::random(), + vec![vec![1, 2, 3]], + ); assert!( to_forward.is_empty(), @@ -798,11 +891,14 @@ mod tests { #[test] fn on_incoming_ignores_unregistered_peer() { let mut network = NoOpNetwork::default(); + let mut notification_service: Box = + Box::new(NoOpNotificationService::default()); let remote = PeerId::random(); let to_forward = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None) .on_incoming( &mut network, + &mut notification_service, // Unregistered peer. 
remote, vec![vec![1, 2, 3]], @@ -822,18 +918,20 @@ mod tests { let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); let mut network = NoOpNetwork::default(); + let mut notification_service: Box = + Box::new(NoOpNotificationService::default()); let peer_id = PeerId::random(); - consensus.new_peer(&mut network, peer_id, ObservedRole::Full); + consensus.new_peer(&mut notification_service, peer_id, ObservedRole::Full); assert!(consensus.peers.contains_key(&peer_id)); let peer_id2 = PeerId::random(); - consensus.new_peer(&mut network, peer_id2, ObservedRole::Full); + consensus.new_peer(&mut notification_service, peer_id2, ObservedRole::Full); assert!(consensus.peers.contains_key(&peer_id2)); let message = vec![vec![1, 2, 3]]; - consensus.on_incoming(&mut network, peer_id, message.clone()); - consensus.on_incoming(&mut network, peer_id2, message.clone()); + consensus.on_incoming(&mut network, &mut notification_service, peer_id, message.clone()); + consensus.on_incoming(&mut network, &mut notification_service, peer_id2, message.clone()); assert_eq!( vec![(peer_id, rep::GOSSIP_SUCCESS)], diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 7b0536addda4337eacfd9a116ced47253e1d7089..6fa88457e989d82e8f93bcef2e6c1de6fddb45a1 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -10,6 +10,9 @@ repository.workspace = true documentation = "https://docs.rs/sc-network" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -25,7 +28,7 @@ fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" ip_network = "0.4.1" -libp2p = { version = "0.51.3", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "tcp", "tokio", "yamux", "websocket", "request-response"] } +libp2p = { version = "0.51.4", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"] } linked_hash_set = "0.1.3" log = "0.4.17" mockall = "0.11.3" @@ -33,11 +36,13 @@ parking_lot = "0.12.1" partial_sort = "0.2.0" pin-project = "1.0.12" rand = "0.8.5" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" smallvec = "1.11.0" thiserror = "1.0" -unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } +tokio = { version = "1.22.0", features = ["macros", "sync"] } +tokio-stream = "0.1.7" +unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] } zeroize = "1.4.3" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-client-api = { path = "../api" } diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml index 412d603163d8c4ce340adaf2fee2059920142313..cc919d2977ea1f4368ca93879232b341f6703507 100644 --- a/substrate/client/network/bitswap/Cargo.toml +++ b/substrate/client/network/bitswap/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true documentation = "https://docs.rs/sc-network-bitswap" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -23,7 +26,7 @@ libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = "0.4.17" prost = "0.11" thiserror = "1.0" -unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } +unsigned-varint = { version = 
"0.7.1", features = ["asynchronous_codec", "futures"] } sc-client-api = { path = "../../api" } sc-network = { path = ".." } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml index 65c8e1d71c721b99447e8292d036a3312f9cfc1a..8e5ad61d5e4b28f09d97ec3f5c8570a8faa5014d 100644 --- a/substrate/client/network/common/Cargo.toml +++ b/substrate/client/network/common/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true documentation = "https://docs.rs/sc-network-sync" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" bitflags = "1.3.2" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", diff --git a/substrate/client/network/common/src/role.rs b/substrate/client/network/common/src/role.rs index fd02c00e2324afb59e10c441b5018ea292149fad..11b7a7924c46ad90c4662221db7c50866e81e195 100644 --- a/substrate/client/network/common/src/role.rs +++ b/substrate/client/network/common/src/role.rs @@ -28,7 +28,7 @@ use codec::{self, Encode, EncodeLike, Input, Output}; /// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a /// > node says about itself, while `ObservedRole` is a `Role` merged with the /// > information known locally about that node. -#[derive(Debug, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ObservedRole { /// Full node. Full, @@ -45,6 +45,18 @@ impl ObservedRole { } } +impl From for ObservedRole { + fn from(roles: Roles) -> Self { + if roles.is_authority() { + ObservedRole::Authority + } else if roles.is_full() { + ObservedRole::Full + } else { + ObservedRole::Light + } + } +} + /// Role of the local node. 
#[derive(Debug, Clone)] pub enum Role { diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index f426cda7fc860a22fba6000935afb234d738f767..c75d14f0deb69531ae77a0498f1d865ca9b4ebea 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true documentation = "https://docs.rs/sc-network-light" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,7 +22,7 @@ prost-build = "0.11" async-channel = "1.8.0" array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ - "derive", + "derive", ] } futures = "0.3.21" libp2p-identity = { version = "0.1.3", features = ["peerid"] } diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index 0aa724818e02a3b4db78eefebfb7223ca7e04c1f..745550412fc219a4ca481deb0cd3405a020e3fbe 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -22,12 +22,13 @@ use crate::{ peer_info, peer_store::PeerStoreHandle, protocol::{CustomMessageOutcome, NotificationsSink, Protocol}, + protocol_controller::SetId, request_responses::{self, IfDisconnected, ProtocolConfig, RequestFailure}, + service::traits::Direction, types::ProtocolName, ReputationChange, }; -use bytes::Bytes; use futures::channel::oneshot; use libp2p::{ core::Multiaddr, identify::Info as IdentifyInfo, identity::PublicKey, kad::RecordKey, @@ -35,11 +36,10 @@ use libp2p::{ }; use parking_lot::Mutex; -use sc_network_common::role::{ObservedRole, Roles}; use sp_runtime::traits::Block as BlockT; use std::{collections::HashSet, sync::Arc, time::Duration}; -pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, ResponseFailure}; +pub use crate::request_responses::{InboundFailure, OutboundFailure, ResponseFailure}; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] @@ -97,8 +97,10 @@ pub enum BehaviourOut { NotificationStreamOpened { /// Node we opened the substream with. remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + /// Set ID. + set_id: SetId, + /// Direction of the stream. + direction: Direction, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. @@ -106,8 +108,6 @@ pub enum BehaviourOut { negotiated_fallback: Option, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, - /// Role of the remote. - role: ObservedRole, /// Received handshake. received_handshake: Vec, }, @@ -120,8 +120,8 @@ pub enum BehaviourOut { NotificationStreamReplaced { /// Id of the peer we are connected to. remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + /// Set ID. + set_id: SetId, /// Replacement for the previous [`NotificationsSink`]. notifications_sink: NotificationsSink, }, @@ -131,16 +131,18 @@ pub enum BehaviourOut { NotificationStreamClosed { /// Node we closed the substream with. remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + /// Set ID. + set_id: SetId, }, /// Received one or more messages from the given node using the given protocol. 
NotificationsReceived { /// Node we received the message from. remote: PeerId, + /// Set ID. + set_id: SetId, /// Concerned protocol and associated message. - messages: Vec<(ProtocolName, Bytes)>, + notification: Vec, }, /// We have obtained identity information from a peer, including the addresses it is listening @@ -272,44 +274,33 @@ impl Behaviour { } } -fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { - if roles.is_authority() { - ObservedRole::Authority - } else if roles.is_full() { - ObservedRole::Full - } else { - ObservedRole::Light - } -} - impl From for BehaviourOut { fn from(event: CustomMessageOutcome) -> Self { match event { CustomMessageOutcome::NotificationStreamOpened { remote, - protocol, + set_id, + direction, negotiated_fallback, - roles, received_handshake, notifications_sink, } => BehaviourOut::NotificationStreamOpened { remote, - protocol, + set_id, + direction, negotiated_fallback, - role: reported_roles_to_observed_role(roles), received_handshake, notifications_sink, }, CustomMessageOutcome::NotificationStreamReplaced { remote, - protocol, + set_id, notifications_sink, - } => BehaviourOut::NotificationStreamReplaced { remote, protocol, notifications_sink }, - CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => - BehaviourOut::NotificationStreamClosed { remote, protocol }, - CustomMessageOutcome::NotificationsReceived { remote, messages } => - BehaviourOut::NotificationsReceived { remote, messages }, - CustomMessageOutcome::None => BehaviourOut::None, + } => BehaviourOut::NotificationStreamReplaced { remote, set_id, notifications_sink }, + CustomMessageOutcome::NotificationStreamClosed { remote, set_id } => + BehaviourOut::NotificationStreamClosed { remote, set_id }, + CustomMessageOutcome::NotificationsReceived { remote, set_id, notification } => + BehaviourOut::NotificationsReceived { remote, set_id, notification }, } } } diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs index 124d73a74dbce43b4c4f004c93ef0c43d31c7262..24e96843c32d62187a666e1e7ee0a053c5af2482 100644 --- a/substrate/client/network/src/config.rs +++ b/substrate/client/network/src/config.rs @@ -23,10 +23,11 @@ pub use crate::{ discovery::DEFAULT_KADEMLIA_REPLICATION_FACTOR, - protocol::NotificationsSink, + protocol::{notification_service, NotificationsSink, ProtocolHandlePair}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, + service::traits::NotificationService, types::ProtocolName, }; @@ -47,7 +48,6 @@ pub use sc_network_common::{ ExHashT, }; -use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::Block as BlockT; use std::{ @@ -454,14 +454,14 @@ impl Default for SetConfig { /// /// > **Note**: As new fields might be added in the future, please consider using the `new` method /// > and modifiers instead of creating this struct manually. -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct NonDefaultSetConfig { /// Name of the notifications protocols of this set. A substream on this set will be /// considered established once this protocol is open. /// /// > **Note**: This field isn't present for the default set, as this is handled internally /// > by the networking code. 
- pub notifications_protocol: ProtocolName, + protocol_name: ProtocolName, /// If the remote reports that it doesn't support the protocol indicated in the /// `notifications_protocol` field, then each of these fallback names will be tried one by @@ -469,37 +469,84 @@ pub struct NonDefaultSetConfig { /// /// If a fallback is used, it will be reported in /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` - pub fallback_names: Vec, + fallback_names: Vec, /// Handshake of the protocol /// /// NOTE: Currently custom handshakes are not fully supported. See issue #5685 for more /// details. This field is temporarily used to allow moving the hardcoded block announcement /// protocol out of `protocol.rs`. - pub handshake: Option, + handshake: Option, /// Maximum allowed size of single notifications. - pub max_notification_size: u64, + max_notification_size: u64, /// Base configuration. - pub set_config: SetConfig, + set_config: SetConfig, + + /// Notification handle. + /// + /// Notification handle is created during `NonDefaultSetConfig` creation and its other half, + /// `Box` is given to the protocol created the config and + /// `ProtocolHandle` is given to `Notifications` when it initializes itself. This handle allows + /// `Notifications ` to communicate with the protocol directly without relaying events through + /// `sc-network.` + protocol_handle_pair: ProtocolHandlePair, } impl NonDefaultSetConfig { /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. - pub fn new(notifications_protocol: ProtocolName, max_notification_size: u64) -> Self { - Self { - notifications_protocol, - max_notification_size, - fallback_names: Vec::new(), - handshake: None, - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, + /// Also returns an object which allows the protocol to communicate with `Notifications`. + pub fn new( + protocol_name: ProtocolName, + fallback_names: Vec, + max_notification_size: u64, + handshake: Option, + set_config: SetConfig, + ) -> (Self, Box) { + let (protocol_handle_pair, notification_service) = + notification_service(protocol_name.clone()); + ( + Self { + protocol_name, + max_notification_size, + fallback_names, + handshake, + set_config, + protocol_handle_pair, }, - } + notification_service, + ) + } + + /// Get reference to protocol name. + pub fn protocol_name(&self) -> &ProtocolName { + &self.protocol_name + } + + /// Get reference to fallback protocol names. + pub fn fallback_names(&self) -> impl Iterator { + self.fallback_names.iter() + } + + /// Get reference to handshake. + pub fn handshake(&self) -> &Option { + &self.handshake + } + + /// Get maximum notification size. + pub fn max_notification_size(&self) -> u64 { + self.max_notification_size + } + + /// Get reference to `SetConfig`. + pub fn set_config(&self) -> &SetConfig { + &self.set_config + } + + /// Take `ProtocolHandlePair` from `NonDefaultSetConfig` + pub fn take_protocol_handle(self) -> ProtocolHandlePair { + self.protocol_handle_pair } /// Modifies the configuration to allow non-reserved nodes. @@ -703,9 +750,6 @@ pub struct Params { /// Block announce protocol configuration pub block_announce_config: NonDefaultSetConfig, - - /// TX channel for direct communication with `SyncingEngine` and `Protocol`. - pub tx: TracingUnboundedSender>, } /// Full network configuration. 
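With `NonDefaultSetConfig::new` now returning the protocol's half of the notification handle, a protocol builds its set configuration and keeps the returned service for itself. The sketch below is illustrative only: the flattened diff elides generic parameters, so the fallback-name element type, the handshake wrapper, and the `Box<dyn NotificationService>` return type are inferred, and the import paths are assumed re-exports of `sc-network`.

```rust
use sc_network::{
    config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig},
    NotificationService, ProtocolName,
};

fn build_set_config() -> (NonDefaultSetConfig, Box<dyn NotificationService>) {
    // The returned `Box<dyn NotificationService>` is the protocol's half of the
    // handle documented on `protocol_handle_pair`; the config half is handed to
    // `sc-network`, which gives the embedded `ProtocolHandle` to `Notifications`.
    NonDefaultSetConfig::new(
        ProtocolName::from("/example/notifications/1"), // hypothetical protocol name
        Vec::new(),  // no fallback names
        1024 * 1024, // max notification size, in bytes
        None,        // no custom handshake: the encoded `Roles` are used instead
        SetConfig {
            in_peers: 25,
            out_peers: 25,
            reserved_nodes: Vec::new(),
            non_reserved_mode: NonReservedPeerMode::Accept,
        },
    )
}
```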
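The service returned above is also how inbound substreams are now validated: `Notifications` parks the peer in `PeerState::Incoming`, the `ProtocolController` pre-accepts it, and the owning protocol gets the final say (see `peerset_report_preaccept` and `protocol_report_accept` further down). A consumer loop might look roughly like the sketch below; `next_event`, `NotificationEvent`, and its field names are assumptions about the new service API, which is not shown in this part of the diff, while `ValidationResult` and `Direction` do appear in the patch.

```rust
use sc_network::{service::traits::ValidationResult, NotificationService};
// Assumed path and name for the event type delivered by the service; its
// definition lives in the new `service/traits.rs`, which is outside this diff.
use sc_network::service::traits::NotificationEvent;

async fn run_protocol(mut service: Box<dyn NotificationService>) {
    while let Some(event) = service.next_event().await {
        match event {
            // Inbound substream: answer the validation request so `Notifications`
            // can move the peer out of `PeerState::Incoming`.
            NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } => {
                let accept = !handshake.is_empty(); // placeholder protocol-specific check
                let _ = result_tx.send(if accept {
                    ValidationResult::Accept
                } else {
                    ValidationResult::Reject
                });
                let _ = peer;
            },
            // Notifications now arrive through the service instead of the
            // `Event::NotificationsReceived` stream.
            NotificationEvent::NotificationReceived { peer, notification } => {
                let _ = (peer, notification);
            },
            _ => {},
        }
    }
}
```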
diff --git a/substrate/client/network/src/error.rs b/substrate/client/network/src/error.rs index f0828fb821f3504b63669ebfd663cc703388a437..01e8356fb55355cd50633d0fe8d2f9f565957b23 100644 --- a/substrate/client/network/src/error.rs +++ b/substrate/client/network/src/error.rs @@ -68,6 +68,15 @@ pub enum Error { /// Name of the protocol registered multiple times. protocol: ProtocolName, }, + /// Peer does not exist. + #[error("Peer `{0}` does not exist.")] + PeerDoesntExist(PeerId), + /// Channel closed. + #[error("Channel closed")] + ChannelClosed, + /// Connection closed. + #[error("Connection closed")] + ConnectionClosed, } // Make `Debug` use the `Display` implementation. diff --git a/substrate/client/network/src/event.rs b/substrate/client/network/src/event.rs index 2913f0b55225f07df8005026deae5edcf19d85bf..dc4fd53a49aa9d198cc23c26dec3d68e239838bf 100644 --- a/substrate/client/network/src/event.rs +++ b/substrate/client/network/src/event.rs @@ -19,14 +19,12 @@ //! Network event types. These are are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. -use crate::{types::ProtocolName, NotificationsSink}; +use crate::types::ProtocolName; use bytes::Bytes; -use futures::channel::oneshot; use libp2p::{kad::record::Key, PeerId}; -use sc_network_common::{role::ObservedRole, sync::message::BlockAnnouncesHandshake}; -use sp_runtime::traits::Block as BlockT; +use sc_network_common::role::ObservedRole; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -92,46 +90,3 @@ pub enum Event { messages: Vec<(ProtocolName, Bytes)>, }, } - -/// Event sent to `SyncingEngine` -// TODO: remove once `NotificationService` is implemented. -pub enum SyncEvent { - /// Opened a substream with the given node with the given notifications protocol. - /// - /// The protocol is always one of the notification protocols that have been registered. - NotificationStreamOpened { - /// Node we opened the substream with. - remote: PeerId, - /// Received handshake. - received_handshake: BlockAnnouncesHandshake, - /// Notification sink. - sink: NotificationsSink, - /// Is the connection inbound. - inbound: bool, - /// Channel for reporting accept/reject of the substream. - tx: oneshot::Sender, - }, - - /// Closed a substream with the given node. Always matches a corresponding previous - /// `NotificationStreamOpened` message. - NotificationStreamClosed { - /// Node we closed the substream with. - remote: PeerId, - }, - - /// Notification sink was replaced. - NotificationSinkReplaced { - /// Node we closed the substream with. - remote: PeerId, - /// Notification sink. - sink: NotificationsSink, - }, - - /// Received one or more messages from the given node using the given protocol. - NotificationsReceived { - /// Node we received the message from. - remote: PeerId, - /// Concerned protocol and associated message. 
- messages: Vec, - }, -} diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index 4dc9bdb4cc1c7244fd562acc01c18aa3f6c43f21..4c39c57e8dfcad122f7a2ecf207bc0b877862b78 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -244,7 +244,6 @@ mod behaviour; mod protocol; -mod service; #[cfg(test)] mod mock; @@ -258,25 +257,30 @@ pub mod peer_info; pub mod peer_store; pub mod protocol_controller; pub mod request_responses; +pub mod service; pub mod transport; pub mod types; pub mod utils; -pub use event::{DhtEvent, Event, SyncEvent}; +pub use event::{DhtEvent, Event}; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use request_responses::{Config, IfDisconnected, RequestFailure}; -pub use sc_network_common::{role::ObservedRole, types::ReputationChange}; +pub use sc_network_common::{ + role::{ObservedRole, Roles}, + types::ReputationChange, +}; pub use service::{ signature::Signature, traits::{ - KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkEventStream, NetworkNotification, - NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo, NetworkStatus, - NetworkStatusProvider, NetworkSyncForkRequest, NotificationSender as NotificationSenderT, - NotificationSenderError, NotificationSenderReady, + KademliaKey, MessageSink, NetworkBlock, NetworkDHTProvider, NetworkEventStream, + NetworkNotification, NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo, + NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, + NotificationSender as NotificationSenderT, NotificationSenderError, + NotificationSenderReady, NotificationService, }, - DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, NotificationsSink, - OutboundFailure, PublicKey, + DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, OutboundFailure, + PublicKey, }; pub use types::ProtocolName; diff --git a/substrate/client/network/src/mock.rs b/substrate/client/network/src/mock.rs index bc596b0fa579e1bc356cabcd54b6ef94e7de416f..534b81189707163340f8887d16afb401a045dd24 100644 --- a/substrate/client/network/src/mock.rs +++ b/substrate/client/network/src/mock.rs @@ -20,6 +20,7 @@ use crate::{peer_store::PeerStoreProvider, protocol_controller::ProtocolHandle, ReputationChange}; use libp2p::PeerId; +use sc_network_common::role::ObservedRole; use std::collections::HashSet; /// No-op `PeerStore`. @@ -49,6 +50,14 @@ impl PeerStoreProvider for MockPeerStore { 0 } + fn peer_role(&self, _peer_id: &PeerId) -> Option { + None + } + + fn set_peer_role(&mut self, _peer_id: &PeerId, _role: ObservedRole) { + unimplemented!(); + } + fn outgoing_candidates(&self, _count: usize, _ignored: HashSet<&PeerId>) -> Vec { unimplemented!() } diff --git a/substrate/client/network/src/peer_store.rs b/substrate/client/network/src/peer_store.rs index 35d17e588cb962099b4656481276457c5f8e56f2..4b28b8e7544031139f2906df7d109b24fafe9230 100644 --- a/substrate/client/network/src/peer_store.rs +++ b/substrate/client/network/src/peer_store.rs @@ -23,7 +23,7 @@ use libp2p::PeerId; use log::trace; use parking_lot::Mutex; use partial_sort::PartialSort; -use sc_network_common::types::ReputationChange; +use sc_network_common::{role::ObservedRole, types::ReputationChange}; use std::{ cmp::{Ord, Ordering, PartialOrd}, collections::{hash_map::Entry, HashMap, HashSet}, @@ -66,9 +66,15 @@ pub trait PeerStoreProvider: Debug + Send { /// Adjust peer reputation. 
fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange); + /// Set peer role. + fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole); + /// Get peer reputation. fn peer_reputation(&self, peer_id: &PeerId) -> i32; + /// Get peer role, if available. + fn peer_role(&self, peer_id: &PeerId) -> Option; + /// Get candidates with highest reputations for initiating outgoing connections. fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec; } @@ -96,10 +102,18 @@ impl PeerStoreProvider for PeerStoreHandle { self.inner.lock().report_peer(peer_id, change) } + fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole) { + self.inner.lock().set_peer_role(peer_id, role) + } + fn peer_reputation(&self, peer_id: &PeerId) -> i32 { self.inner.lock().peer_reputation(peer_id) } + fn peer_role(&self, peer_id: &PeerId) -> Option { + self.inner.lock().peer_role(peer_id) + } + fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec { self.inner.lock().outgoing_candidates(count, ignored) } @@ -122,13 +136,19 @@ impl PeerStoreHandle { #[derive(Debug, Clone, Copy)] struct PeerInfo { + /// Reputation of the peer. reputation: i32, + + /// Instant when the peer was last updated. last_updated: Instant, + + /// Role of the peer, if known. + role: Option, } impl Default for PeerInfo { fn default() -> Self { - Self { reputation: 0, last_updated: Instant::now() } + Self { reputation: 0, last_updated: Instant::now(), role: None } } } @@ -242,10 +262,27 @@ impl PeerStoreInner { } } + fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole) { + log::trace!(target: LOG_TARGET, "Set {peer_id} role to {role:?}"); + + match self.peers.entry(*peer_id) { + Entry::Occupied(mut entry) => { + entry.get_mut().role = Some(role); + }, + Entry::Vacant(entry) => { + entry.insert(PeerInfo { role: Some(role), ..Default::default() }); + }, + } + } + fn peer_reputation(&self, peer_id: &PeerId) -> i32 { self.peers.get(peer_id).map_or(0, |info| info.reputation) } + fn peer_role(&self, peer_id: &PeerId) -> Option { + self.peers.get(peer_id).map_or(None, |info| info.role) + } + fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec { let mut candidates = self .peers diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index 9b94f288352841073840dc07bba4cbbab581b204..73b1cd97279637380a24ebdfddcd3378aad819cb 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -20,12 +20,11 @@ use crate::{ config, error, peer_store::{PeerStoreHandle, PeerStoreProvider}, protocol_controller::{self, SetId}, + service::traits::Direction, types::ProtocolName, }; -use bytes::Bytes; -use codec::{DecodeAll, Encode}; -use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; +use codec::Encode; use libp2p::{ core::Endpoint, swarm::{ @@ -34,24 +33,23 @@ use libp2p::{ }, Multiaddr, PeerId, }; -use log::{debug, error, warn}; +use log::warn; -use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake}; -use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender}; +use codec::DecodeAll; +use prometheus_endpoint::Registry; +use sc_network_common::role::Roles; +use sc_utils::mpsc::TracingUnboundedReceiver; use sp_runtime::traits::Block as BlockT; -use std::{ - collections::{HashMap, HashSet}, - future::Future, - iter, - pin::Pin, - task::Poll, -}; +use std::{collections::HashSet, iter, task::Poll}; + +use notifications::{metrics, 
Notifications, NotificationsOut}; -use message::{generic::Message as GenericMessage, Message}; -use notifications::{Notifications, NotificationsOut}; +pub(crate) use notifications::ProtocolHandle; -pub use notifications::{NotificationsSink, NotifsHandlerError, Ready}; +pub use notifications::{ + notification_service, NotificationsSink, NotifsHandlerError, ProtocolHandlePair, Ready, +}; mod notifications; @@ -64,85 +62,98 @@ pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * /// Identifier of the peerset for the block announces protocol. const HARDCODED_PEERSETS_SYNC: SetId = SetId::from(0); -mod rep { - use crate::ReputationChange as Rep; - /// We received a message that failed to decode. - pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); -} - -type PendingSyncSubstreamValidation = - Pin> + Send>>; - // Lock must always be taken in order declared here. pub struct Protocol { - /// Used to report reputation changes. - peer_store_handle: PeerStoreHandle, /// Handles opening the unique substream and sending and receiving raw messages. behaviour: Notifications, /// List of notifications protocols that have been registered. notification_protocols: Vec, - /// If we receive a new "substream open" event that contains an invalid handshake, we ask the - /// inner layer to force-close the substream. Force-closing the substream will generate a - /// "substream closed" event. This is a problem: since we can't propagate the "substream open" - /// event to the outer layers, we also shouldn't propagate this "substream closed" event. To - /// solve this, an entry is added to this map whenever an invalid handshake is received. - /// Entries are removed when the corresponding "substream closed" is later received. - bad_handshake_substreams: HashSet<(PeerId, SetId)>, - /// Connected peers on sync protocol. - peers: HashMap, - sync_substream_validations: FuturesUnordered, - tx: TracingUnboundedSender>, + /// Handle to `PeerStore`. + peer_store_handle: PeerStoreHandle, + /// Streams for peers whose handshake couldn't be determined. + bad_handshake_streams: HashSet, + sync_handle: ProtocolHandle, _marker: std::marker::PhantomData, } impl Protocol { /// Create a new instance. - pub fn new( + pub(crate) fn new( roles: Roles, + registry: &Option, notification_protocols: Vec, block_announces_protocol: config::NonDefaultSetConfig, peer_store_handle: PeerStoreHandle, protocol_controller_handles: Vec, from_protocol_controllers: TracingUnboundedReceiver, - tx: TracingUnboundedSender>, - ) -> error::Result { - let behaviour = { - Notifications::new( - protocol_controller_handles, - from_protocol_controllers, - // NOTE: Block announcement protocol is still very much hardcoded into `Protocol`. 
- // This protocol must be the first notification protocol given to - // `Notifications` - iter::once(notifications::ProtocolConfig { - name: block_announces_protocol.notifications_protocol.clone(), - fallback_names: block_announces_protocol.fallback_names.clone(), - handshake: block_announces_protocol.handshake.as_ref().unwrap().to_vec(), - max_notification_size: block_announces_protocol.max_notification_size, - }) - .chain(notification_protocols.iter().map(|s| notifications::ProtocolConfig { - name: s.notifications_protocol.clone(), - fallback_names: s.fallback_names.clone(), - handshake: s.handshake.as_ref().map_or(roles.encode(), |h| (*h).to_vec()), - max_notification_size: s.max_notification_size, - })), + ) -> error::Result<(Self, Vec)> { + let (behaviour, notification_protocols, handles) = { + let installed_protocols = iter::once(block_announces_protocol.protocol_name().clone()) + .chain(notification_protocols.iter().map(|p| p.protocol_name().clone())) + .collect::>(); + + // NOTE: Block announcement protocol is still very much hardcoded into + // `Protocol`. This protocol must be the first notification protocol given to + // `Notifications` + let (protocol_configs, mut handles): (Vec<_>, Vec<_>) = iter::once({ + let config = notifications::ProtocolConfig { + name: block_announces_protocol.protocol_name().clone(), + fallback_names: block_announces_protocol.fallback_names().cloned().collect(), + handshake: block_announces_protocol.handshake().as_ref().unwrap().to_vec(), + max_notification_size: block_announces_protocol.max_notification_size(), + }; + + let (handle, command_stream) = + block_announces_protocol.take_protocol_handle().split(); + + ((config, handle.clone(), command_stream), handle) + }) + .chain(notification_protocols.into_iter().map(|s| { + let config = notifications::ProtocolConfig { + name: s.protocol_name().clone(), + fallback_names: s.fallback_names().cloned().collect(), + handshake: s.handshake().as_ref().map_or(roles.encode(), |h| (*h).to_vec()), + max_notification_size: s.max_notification_size(), + }; + + let (handle, command_stream) = s.take_protocol_handle().split(); + + ((config, handle.clone(), command_stream), handle) + })) + .unzip(); + + let metrics = registry.as_ref().and_then(|registry| metrics::register(®istry).ok()); + handles.iter_mut().for_each(|handle| { + handle.set_metrics(metrics.clone()); + }); + + ( + Notifications::new( + protocol_controller_handles, + from_protocol_controllers, + metrics, + protocol_configs.into_iter(), + ), + installed_protocols, + handles, ) }; let protocol = Self { - peer_store_handle, behaviour, - notification_protocols: iter::once(block_announces_protocol.notifications_protocol) - .chain(notification_protocols.iter().map(|s| s.notifications_protocol.clone())) - .collect(), - bad_handshake_substreams: Default::default(), - peers: HashMap::new(), - sync_substream_validations: FuturesUnordered::new(), - tx, + sync_handle: handles[0].clone(), + peer_store_handle, + notification_protocols, + bad_handshake_streams: HashSet::new(), // TODO: remove when `BlockAnnouncesHandshake` is moved away from `Protocol` _marker: Default::default(), }; - Ok(protocol) + Ok((protocol, handles)) + } + + pub fn num_sync_peers(&self) -> usize { + self.sync_handle.num_peers() } /// Returns the list of all the peers we have an open channel to. @@ -163,21 +174,12 @@ impl Protocol { } } - /// Returns the number of peers we're connected to on sync protocol. 
- pub fn num_connected_peers(&self) -> usize { - self.peers.len() - } - - /// Set handshake for the notification protocol. - pub fn set_notification_handshake(&mut self, protocol: ProtocolName, handshake: Vec) { - if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.behaviour.set_notif_protocol_handshake(SetId::from(index), handshake); - } else { - error!( - target: "sub-libp2p", - "set_notification_handshake with unknown protocol: {}", - protocol - ); + /// Check if role is available for `peer_id` by attempt to decode the handshake to roles and if + /// that fails, check if the role has been registered to `PeerStore`. + fn role_available(&self, peer_id: &PeerId, handshake: &Vec) -> bool { + match Roles::decode_all(&mut &handshake[..]) { + Ok(_) => true, + Err(_) => self.peer_store_handle.peer_role(&peer_id).is_some(), } } } @@ -189,25 +191,42 @@ pub enum CustomMessageOutcome { /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, - protocol: ProtocolName, + // protocol: ProtocolName, + set_id: SetId, + /// Direction of the stream. + direction: Direction, /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. negotiated_fallback: Option, - roles: Roles, + /// Received handshake. received_handshake: Vec, + /// Notification sink. notifications_sink: NotificationsSink, }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { + // Peer ID. remote: PeerId, - protocol: ProtocolName, + /// Set ID. + set_id: SetId, + /// New notification sink. notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. - NotificationStreamClosed { remote: PeerId, protocol: ProtocolName }, + NotificationStreamClosed { + // Peer ID. + remote: PeerId, + /// Set ID. + set_id: SetId, + }, /// Messages have been received on one or more notifications protocols. - NotificationsReceived { remote: PeerId, messages: Vec<(ProtocolName, Bytes)> }, - /// Now connected to a new peer for syncing purposes. - None, + NotificationsReceived { + // Peer ID. + remote: PeerId, + /// Set ID. + set_id: SetId, + /// Received notification. + notification: Vec, + }, } impl NetworkBehaviour for Protocol { @@ -274,23 +293,6 @@ impl NetworkBehaviour for Protocol { cx: &mut std::task::Context, params: &mut impl PollParameters, ) -> Poll>> { - while let Poll::Ready(Some(validation_result)) = - self.sync_substream_validations.poll_next_unpin(cx) - { - match validation_result { - Ok((peer, roles)) => { - self.peers.insert(peer, roles); - }, - Err(peer) => { - log::debug!( - target: "sub-libp2p", - "`SyncingEngine` rejected stream" - ); - self.behaviour.disconnect_peer(&peer, HARDCODED_PEERSETS_SYNC); - }, - } - } - let event = match self.behaviour.poll(cx, params) { Poll::Pending => return Poll::Pending, Poll::Ready(ToSwarm::GenerateEvent(ev)) => ev, @@ -307,204 +309,86 @@ impl NetworkBehaviour for Protocol { NotificationsOut::CustomProtocolOpen { peer_id, set_id, + direction, received_handshake, notifications_sink, negotiated_fallback, - inbound, - } => { - // Set number 0 is hardcoded the default set of peers we sync from. + .. + } => if set_id == HARDCODED_PEERSETS_SYNC { - // `received_handshake` can be either a `Status` message if received from the - // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block - // announces substream. 
- match as DecodeAll>::decode_all(&mut &received_handshake[..]) { - Ok(GenericMessage::Status(handshake)) => { - let roles = handshake.roles; - let handshake = BlockAnnouncesHandshake:: { - roles: handshake.roles, - best_number: handshake.best_number, - best_hash: handshake.best_hash, - genesis_hash: handshake.genesis_hash, - }; - - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send( - crate::SyncEvent::NotificationStreamOpened { - inbound, - remote: peer_id, - received_handshake: handshake, - sink: notifications_sink, - tx, - }, - ); - self.sync_substream_validations.push(Box::pin(async move { - match rx.await { - Ok(accepted) => - if accepted { - Ok((peer_id, roles)) - } else { - Err(peer_id) - }, - Err(_) => Err(peer_id), - } - })); - - CustomMessageOutcome::None - }, - Ok(msg) => { - debug!( - target: "sync", - "Expected Status message from {}, but got {:?}", - peer_id, - msg, - ); - self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None - }, - Err(err) => { - match as DecodeAll>::decode_all( - &mut &received_handshake[..], - ) { - Ok(handshake) => { - let roles = handshake.roles; - - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send( - crate::SyncEvent::NotificationStreamOpened { - inbound, - remote: peer_id, - received_handshake: handshake, - sink: notifications_sink, - tx, - }, - ); - self.sync_substream_validations.push(Box::pin(async move { - match rx.await { - Ok(accepted) => - if accepted { - Ok((peer_id, roles)) - } else { - Err(peer_id) - }, - Err(_) => Err(peer_id), - } - })); - CustomMessageOutcome::None - }, - Err(err2) => { - log::debug!( - target: "sync", - "Couldn't decode handshake sent by {}: {:?}: {} & {}", - peer_id, - received_handshake, - err, - err2, - ); - self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None - }, - } - }, - } + let _ = self.sync_handle.report_substream_opened( + peer_id, + direction, + received_handshake, + negotiated_fallback, + notifications_sink, + ); + None } else { - match ( - Roles::decode_all(&mut &received_handshake[..]), - self.peers.get(&peer_id), - ) { - (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { + match self.role_available(&peer_id, &received_handshake) { + true => Some(CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), + set_id, + direction, negotiated_fallback, - roles, received_handshake, notifications_sink, - }, - (Err(_), Some(roles)) if received_handshake.is_empty() => { - // As a convenience, we allow opening substreams for "external" - // notification protocols with an empty handshake. This fetches the - // roles from the locally-known roles. 
- // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - negotiated_fallback, - roles: *roles, - received_handshake, - notifications_sink, - } - }, - (Err(err), _) => { - debug!(target: "sync", "Failed to parse remote handshake: {}", err); - self.bad_handshake_substreams.insert((peer_id, set_id)); - self.behaviour.disconnect_peer(&peer_id, set_id); - self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None + }), + false => { + self.bad_handshake_streams.insert(peer_id); + None }, } - } - }, + }, NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => - if self.bad_handshake_substreams.contains(&(peer_id, set_id)) { - CustomMessageOutcome::None - } else if set_id == HARDCODED_PEERSETS_SYNC { - let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationSinkReplaced { - remote: peer_id, - sink: notifications_sink, - }); - CustomMessageOutcome::None + if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self + .sync_handle + .report_notification_sink_replaced(peer_id, notifications_sink); + None } else { - CustomMessageOutcome::NotificationStreamReplaced { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - notifications_sink, - } + (!self.bad_handshake_streams.contains(&peer_id)).then_some( + CustomMessageOutcome::NotificationStreamReplaced { + remote: peer_id, + set_id, + notifications_sink, + }, + ) }, NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { - if self.bad_handshake_substreams.remove(&(peer_id, set_id)) { - // The substream that has just been closed had been opened with a bad - // handshake. The outer layers have never received an opening event about this - // substream, and consequently shouldn't receive a closing event either. 
- CustomMessageOutcome::None - } else if set_id == HARDCODED_PEERSETS_SYNC { - let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationStreamClosed { - remote: peer_id, - }); - self.peers.remove(&peer_id); - CustomMessageOutcome::None + if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self.sync_handle.report_substream_closed(peer_id); + None } else { - CustomMessageOutcome::NotificationStreamClosed { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - } + (!self.bad_handshake_streams.remove(&peer_id)).then_some( + CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, set_id }, + ) } }, NotificationsOut::Notification { peer_id, set_id, message } => { - if self.bad_handshake_substreams.contains(&(peer_id, set_id)) { - CustomMessageOutcome::None - } else if set_id == HARDCODED_PEERSETS_SYNC { - let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationsReceived { - remote: peer_id, - messages: vec![message.freeze()], - }); - CustomMessageOutcome::None + if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self + .sync_handle + .report_notification_received(peer_id, message.freeze().into()); + None } else { - let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); - CustomMessageOutcome::NotificationsReceived { - remote: peer_id, - messages: vec![(protocol_name, message.freeze())], - } + (!self.bad_handshake_streams.contains(&peer_id)).then_some( + CustomMessageOutcome::NotificationsReceived { + remote: peer_id, + set_id, + notification: message.freeze().into(), + }, + ) } }, }; - if !matches!(outcome, CustomMessageOutcome::None) { - return Poll::Ready(ToSwarm::GenerateEvent(outcome)) + match outcome { + Some(event) => Poll::Ready(ToSwarm::GenerateEvent(event)), + None => { + cx.waker().wake_by_ref(); + Poll::Pending + }, } - - // This block can only be reached if an event was pulled from the behaviour and that - // resulted in `CustomMessageOutcome::None`. Since there might be another pending - // message from the behaviour, the task is scheduled again. - cx.waker().wake_by_ref(); - Poll::Pending } } diff --git a/substrate/client/network/src/protocol/message.rs b/substrate/client/network/src/protocol/message.rs index 66dca2975375f9886b6f15513c2debf09883db03..5f2511fd6ddc93d4ef2018b43d7be9e3d138ba32 100644 --- a/substrate/client/network/src/protocol/message.rs +++ b/substrate/client/network/src/protocol/message.rs @@ -19,16 +19,13 @@ //! Network packet message types. These get serialized and put into the lower level protocol //! payload. -pub use self::generic::{ - RemoteCallRequest, RemoteChangesRequest, RemoteChangesResponse, RemoteHeaderRequest, - RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, -}; use codec::{Decode, Encode}; use sc_client_api::StorageProof; use sc_network_common::message::RequestId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; /// Type alias for using the message type using block type parameters. 
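+/// Marked `allow(unused)`: the legacy `Status` handshake decoding in `protocol.rs` that decoded this alias has been removed.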
+#[allow(unused)] pub type Message = generic::Message< ::Header, ::Hash, diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs index aa49cfcf9d44e230d7948e9ef251f81734e8ff07..8becc1390e7d479b8386d17c03ca73adfc0b127e 100644 --- a/substrate/client/network/src/protocol/notifications.rs +++ b/substrate/client/network/src/protocol/notifications.rs @@ -22,9 +22,13 @@ pub use self::{ behaviour::{Notifications, NotificationsOut, ProtocolConfig}, handler::{NotificationsSink, NotifsHandlerError, Ready}, + service::{notification_service, ProtocolHandlePair}, }; +pub(crate) use self::service::{metrics, ProtocolHandle}; + mod behaviour; mod handler; +mod service; mod tests; mod upgrade; diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index b78f15f8529c6e1ef7bb338a2d41811c47be8700..cdbf2a71b932fb1d0acd6b8489919d8e904d7805 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -17,16 +17,18 @@ // along with this program. If not, see . use crate::{ - protocol::notifications::handler::{ - self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut, + protocol::notifications::{ + handler::{self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut}, + service::{metrics, NotificationCommand, ProtocolHandle, ValidationCallResult}, }, protocol_controller::{self, IncomingIndex, Message, SetId}, + service::traits::{Direction, ValidationResult}, types::ProtocolName, }; use bytes::BytesMut; use fnv::FnvHashMap; -use futures::prelude::*; +use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ core::{ConnectedPoint, Endpoint, Multiaddr}, swarm::{ @@ -36,11 +38,14 @@ use libp2p::{ }, PeerId, }; -use log::{debug, error, info, trace, warn}; +use log::{debug, error, trace, warn}; use parking_lot::RwLock; use rand::distributions::{Distribution as _, Uniform}; use sc_utils::mpsc::TracingUnboundedReceiver; use smallvec::SmallVec; +use tokio::sync::oneshot::error::RecvError; +use tokio_stream::StreamMap; + use std::{ cmp, collections::{hash_map::Entry, VecDeque}, @@ -51,6 +56,13 @@ use std::{ time::{Duration, Instant}, }; +/// Type representing a pending substream validation. +type PendingInboundValidation = + BoxFuture<'static, (Result, IncomingIndex)>; + +/// Logging target for the file. +const LOG_TARGET: &str = "sub-libp2p"; + /// Network behaviour that handles opening substreams for custom protocols with other peers. /// /// # How it works @@ -106,6 +118,12 @@ pub struct Notifications { /// Notification protocols. Entries never change after initialization. notif_protocols: Vec, + /// Protocol handles. + protocol_handles: Vec, + + // Command streams. + command_streams: StreamMap + Send + Unpin>>, + /// Protocol controllers are responsible for peer connections management. protocol_controller_handles: Vec, @@ -138,6 +156,18 @@ pub struct Notifications { /// Events to produce from `poll()`. events: VecDeque>, + + /// Pending inbound substream validations. + // + // NOTE: it's possible to read a stale response from `pending_inbound_validations` + // as the substream may get closed by the remote peer before the protocol has had + // a chance to validate it. 
[`Notifications`] must compare the `crate::peerset::IncomingIndex` + // returned by the completed future against the `crate::peerset::IncomingIndex` stored in + // `PeerState::Incoming` to check whether the completed future is stale or not. + pending_inbound_validations: FuturesUnordered, + + /// Metrics for notifications. + metrics: Option, } /// Configuration for a notifications protocol. @@ -235,6 +265,9 @@ enum PeerState { /// Incoming index tracking this connection. incoming_index: IncomingIndex, + /// Peerset has signaled it wants the substream closed. + peerset_rejected: bool, + /// List of connections with this peer, and their state. connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, @@ -303,6 +336,8 @@ struct IncomingPeer { alive: bool, /// Id that the we sent to the peerset. incoming_id: IncomingIndex, + /// Received handshake. + handshake: Vec, } /// Event that can be emitted by the `Notifications`. @@ -314,6 +349,8 @@ pub enum NotificationsOut { peer_id: PeerId, /// Peerset set ID the substream is tied to. set_id: SetId, + /// Direction of the stream. + direction: Direction, /// If `Some`, a fallback protocol name has been used rather the main protocol name. /// Always matches one of the fallback names passed at initialization. negotiated_fallback: Option, @@ -364,24 +401,51 @@ pub enum NotificationsOut { impl Notifications { /// Creates a `CustomProtos`. - pub fn new( + pub(crate) fn new( protocol_controller_handles: Vec, from_protocol_controllers: TracingUnboundedReceiver, - notif_protocols: impl Iterator, + metrics: Option, + notif_protocols: impl Iterator< + Item = ( + ProtocolConfig, + ProtocolHandle, + Box + Send + Unpin>, + ), + >, ) -> Self { - let notif_protocols = notif_protocols - .map(|cfg| handler::ProtocolConfig { - name: cfg.name, - fallback_names: cfg.fallback_names, - handshake: Arc::new(RwLock::new(cfg.handshake)), - max_notification_size: cfg.max_notification_size, + let (notif_protocols, protocol_handles): (Vec<_>, Vec<_>) = notif_protocols + .map(|(cfg, protocol_handle, command_stream)| { + ( + handler::ProtocolConfig { + name: cfg.name, + fallback_names: cfg.fallback_names, + handshake: Arc::new(RwLock::new(cfg.handshake)), + max_notification_size: cfg.max_notification_size, + }, + (protocol_handle, command_stream), + ) }) - .collect::>(); - + .unzip(); assert!(!notif_protocols.is_empty()); + let (mut protocol_handles, command_streams): (Vec<_>, Vec<_>) = protocol_handles + .into_iter() + .enumerate() + .map(|(set_id, (mut protocol_handle, command_stream))| { + protocol_handle.set_metrics(metrics.clone()); + + (protocol_handle, (set_id, command_stream)) + }) + .unzip(); + + protocol_handles.iter_mut().skip(1).for_each(|handle| { + handle.delegate_to_peerset(true); + }); + Self { notif_protocols, + protocol_handles, + command_streams: StreamMap::from_iter(command_streams.into_iter()), protocol_controller_handles, from_protocol_controllers, peers: FnvHashMap::default(), @@ -390,6 +454,8 @@ impl Notifications { incoming: SmallVec::new(), next_incoming_index: IncomingIndex(0), events: VecDeque::new(), + pending_inbound_validations: FuturesUnordered::new(), + metrics, } } @@ -807,14 +873,21 @@ impl Notifications { *entry.into_mut() = PeerState::Backoff { timer, timer_deadline } }, - // Invalid state transitions. - st @ PeerState::Incoming { .. 
} => { - info!( + // `ProtocolController` disconnected peer while it was still being validated by the + // protocol, mark the connection as rejected and once the validation is received from + // the protocol, reject the substream + PeerState::Incoming { backoff_until, connections, incoming_index, .. } => { + debug!( target: "sub-libp2p", "PSM => Drop({}, {:?}): Ignoring obsolete disconnect, we are awaiting accept/reject.", entry.key().0, set_id, ); - *entry.into_mut() = st; + *entry.into_mut() = PeerState::Incoming { + backoff_until, + connections, + incoming_index, + peerset_rejected: true, + }; }, PeerState::Poisoned => { error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()); @@ -823,20 +896,71 @@ impl Notifications { } } + /// Substream has been accepted by the `ProtocolController` and must now be sent + /// to the protocol for validation. + fn peerset_report_preaccept(&mut self, index: IncomingIndex) { + let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) else { + error!(target: LOG_TARGET, "PSM => Preaccept({:?}): Invalid index", index); + return + }; + + trace!( + target: LOG_TARGET, + "PSM => Preaccept({:?}): Sent to protocol for validation", + index + ); + let incoming = &self.incoming[pos]; + + match self.protocol_handles[usize::from(incoming.set_id)] + .report_incoming_substream(incoming.peer_id, incoming.handshake.clone()) + { + Ok(ValidationCallResult::Delegated) => { + self.protocol_report_accept(index); + }, + Ok(ValidationCallResult::WaitForValidation(rx)) => { + self.pending_inbound_validations + .push(Box::pin(async move { (rx.await, index) })); + }, + Err(err) => { + // parachain collators enable the syncing protocol but `NotificationService` for + // `SyncingEngine` is not created which causes `report_incoming_substream()` to + // fail. This is not a fatal error and should be ignored even though in typical + // cases the `NotificationService` not existing is a fatal error and indicates that + // the protocol has exited. Until the parachain collator issue is fixed, just report + // and error and reject the peer. + debug!(target: LOG_TARGET, "protocol has exited: {err:?} {:?}", incoming.set_id); + + self.protocol_report_reject(index); + }, + } + } + /// Function that is called when the peerset wants us to accept a connection /// request from a peer. - fn peerset_report_accept(&mut self, index: IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) - { - self.incoming.remove(pos) - } else { - error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); - return + fn protocol_report_accept(&mut self, index: IncomingIndex) { + let (pos, incoming) = + if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { + (pos, self.incoming.get(pos)) + } else { + error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); + return + }; + + let Some(incoming) = incoming else { + error!(target: "sub-libp2p", "Incoming connection ({:?}) doesn't exist", index); + debug_assert!(false); + return; }; if !incoming.alive { - trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", - index, incoming.peer_id, incoming.set_id); + trace!( + target: "sub-libp2p", + "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", + index, + incoming.peer_id, + incoming.set_id, + ); + match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(PeerState::DisabledPendingEnable { .. }) | Some(PeerState::Enabled { .. 
}) => { }, @@ -847,26 +971,42 @@ impl Notifications { .dropped(incoming.peer_id); }, } + + self.incoming.remove(pos); return } let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(s) => s, None => { - debug_assert!(false); + log::debug!( + target: "sub-libp2p", + "Connection to {:?} closed, ({:?} {:?}), ignoring accept", + incoming.peer_id, + incoming.set_id, + index, + ); + self.incoming.remove(pos); return }, }; match mem::replace(state, PeerState::Poisoned) { // Incoming => Enabled - PeerState::Incoming { mut connections, incoming_index, .. } => { + PeerState::Incoming { + mut connections, + incoming_index, + peerset_rejected, + backoff_until, + } => { if index < incoming_index { warn!( target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.", index, incoming.peer_id, incoming.set_id, incoming_index ); + + self.incoming.remove(pos); return } else if index > incoming_index { error!( @@ -874,12 +1014,39 @@ impl Notifications { "PSM => Accept({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.", index, incoming.peer_id, incoming.set_id, incoming_index ); + + self.incoming.remove(pos); debug_assert!(false); return } - trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.", - index, incoming.peer_id, incoming.set_id); + // while the substream was being validated by the protocol, `Peerset` had request + // for the it to be closed so reject the substream now + if peerset_rejected { + trace!( + target: "sub-libp2p", + "Protocol accepted ({:?} {:?} {:?}) but Peerset had request disconnection, rejecting", + index, + incoming.peer_id, + incoming.set_id + ); + + *state = PeerState::Incoming { + connections, + backoff_until, + peerset_rejected, + incoming_index, + }; + return self.report_reject(index).map_or((), |_| ()); + } + + trace!( + target: "sub-libp2p", + "PSM => Accept({:?}, {}, {:?}): Enabling connections.", + index, + incoming.peer_id, + incoming.set_id + ); debug_assert!(connections .iter() @@ -898,53 +1065,85 @@ impl Notifications { *connec_state = ConnectionState::Opening; } + self.incoming.remove(pos); *state = PeerState::Enabled { connections }; }, - + st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => { + self.incoming.remove(pos); + *state = st; + }, // Any state other than `Incoming` is invalid. peer => { - error!(target: "sub-libp2p", + error!( + target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer); + peer + ); + + self.incoming.remove(pos); debug_assert!(false); }, } } - /// Function that is called when the peerset wants us to reject an incoming peer. + /// Function that is called when `ProtocolController` wants us to reject an incoming peer. fn peerset_report_reject(&mut self, index: IncomingIndex) { + let _ = self.report_reject(index); + } + + /// Function that is called when the protocol wants us to reject an incoming peer. + fn protocol_report_reject(&mut self, index: IncomingIndex) { + if let Some((set_id, peer_id)) = self.report_reject(index) { + self.protocol_controller_handles[usize::from(set_id)].dropped(peer_id) + } + } + + /// Function that is called when the peerset wants us to reject an incoming peer. 
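+    /// Returns `Some((set_id, peer_id))` when a live incoming substream was actually rejected, so that `protocol_report_reject` can report the peer as dropped to the `ProtocolController`.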
+ fn report_reject(&mut self, index: IncomingIndex) -> Option<(SetId, PeerId)> { let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { self.incoming.remove(pos) } else { error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); - return + return None }; if !incoming.alive { - trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \ - ignoring", index, incoming.peer_id, incoming.set_id); - return + trace!( + target: "sub-libp2p", + "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, ignoring", + index, + incoming.peer_id, + incoming.set_id, + ); + + return None } let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(s) => s, None => { - debug_assert!(false); - return + log::debug!( + target: "sub-libp2p", + "Connection to {:?} closed, ({:?} {:?}), ignoring accept", + incoming.peer_id, + incoming.set_id, + index, + ); + return None }, }; match mem::replace(state, PeerState::Poisoned) { // Incoming => Disabled - PeerState::Incoming { mut connections, backoff_until, incoming_index } => { + PeerState::Incoming { mut connections, backoff_until, incoming_index, .. } => { if index < incoming_index { warn!( target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.", index, incoming.peer_id, incoming.set_id, incoming_index ); - return + return None } else if index > incoming_index { error!( target: "sub-libp2p", @@ -952,7 +1151,7 @@ impl Notifications { index, incoming.peer_id, incoming.set_id, incoming_index ); debug_assert!(false); - return + return None } trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.", @@ -976,10 +1175,20 @@ impl Notifications { } *state = PeerState::Disabled { connections, backoff_until }; + Some((incoming.set_id, incoming.peer_id)) + }, + // connection to peer may have been closed already + st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => { + *state = st; + None + }, + peer => { + error!( + target: LOG_TARGET, + "State mismatch in libp2p: Expected alive incoming. Got {peer:?}.", + ); + None }, - peer => error!(target: "sub-libp2p", - "State mismatch in libp2p: Expected alive incoming. 
Got {:?}.", - peer), } } } @@ -1021,6 +1230,7 @@ impl NetworkBehaviour for Notifications { send_back_addr: remote_addr.clone(), }, self.notif_protocols.clone(), + self.metrics.clone(), )) } @@ -1035,6 +1245,7 @@ impl NetworkBehaviour for Notifications { peer, ConnectedPoint::Dialer { address: addr.clone(), role_override }, self.notif_protocols.clone(), + self.metrics.clone(), )) } @@ -1195,7 +1406,12 @@ impl NetworkBehaviour for Notifications { }, // Incoming => Incoming | Disabled | Backoff | Ø - PeerState::Incoming { mut connections, backoff_until, incoming_index } => { + PeerState::Incoming { + mut connections, + backoff_until, + incoming_index, + peerset_rejected, + } => { trace!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): OpenDesiredByRemote.", @@ -1274,6 +1490,7 @@ impl NetworkBehaviour for Notifications { connections, backoff_until, incoming_index, + peerset_rejected, }; } }, @@ -1313,7 +1530,7 @@ impl NetworkBehaviour for Notifications { let event = NotificationsOut::CustomProtocolReplaced { peer_id, set_id, - notifications_sink: replacement_sink, + notifications_sink: replacement_sink.clone(), }; self.events.push_back(ToSwarm::GenerateEvent(event)); } @@ -1474,7 +1691,7 @@ impl NetworkBehaviour for Notifications { event: THandlerOutEvent, ) { match event { - NotifsHandlerOut::OpenDesiredByRemote { protocol_index } => { + NotifsHandlerOut::OpenDesiredByRemote { protocol_index, handshake } => { let set_id = SetId::from(protocol_index); trace!(target: "sub-libp2p", @@ -1495,7 +1712,12 @@ impl NetworkBehaviour for Notifications { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Incoming => Incoming - PeerState::Incoming { mut connections, backoff_until, incoming_index } => { + PeerState::Incoming { + mut connections, + backoff_until, + incoming_index, + peerset_rejected, + } => { debug_assert!(connections .iter() .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); @@ -1523,8 +1745,12 @@ impl NetworkBehaviour for Notifications { debug_assert!(false); } - *entry.into_mut() = - PeerState::Incoming { connections, backoff_until, incoming_index }; + *entry.into_mut() = PeerState::Incoming { + connections, + backoff_until, + incoming_index, + peerset_rejected, + }; }, PeerState::Enabled { mut connections } => { @@ -1588,11 +1814,13 @@ impl NetworkBehaviour for Notifications { set_id, alive: true, incoming_id, + handshake, }); *entry.into_mut() = PeerState::Incoming { connections, backoff_until, + peerset_rejected: false, incoming_index: incoming_id, }; } else { @@ -1725,7 +1953,7 @@ impl NetworkBehaviour for Notifications { let event = NotificationsOut::CustomProtocolReplaced { peer_id, set_id, - notifications_sink: replacement_sink, + notifications_sink: replacement_sink.clone(), }; self.events.push_back(ToSwarm::GenerateEvent(event)); } @@ -1830,8 +2058,13 @@ impl NetworkBehaviour for Notifications { peer_id, set_id, inbound, - negotiated_fallback, - received_handshake, + direction: if inbound { + Direction::Inbound + } else { + Direction::Outbound + }, + received_handshake: received_handshake.clone(), + negotiated_fallback: negotiated_fallback.clone(), notifications_sink: notifications_sink.clone(), }; self.events.push_back(ToSwarm::GenerateEvent(event)); @@ -1979,8 +2212,11 @@ impl NetworkBehaviour for Notifications { peer_id, set_id, ); - let event = NotificationsOut::Notification { peer_id, set_id, message }; - + let event = NotificationsOut::Notification { + peer_id, + set_id, + message: message.clone(), + }; 
self.events.push_back(ToSwarm::GenerateEvent(event)); } else { trace!( @@ -2009,10 +2245,10 @@ impl NetworkBehaviour for Notifications { loop { match futures::Stream::poll_next(Pin::new(&mut self.from_protocol_controllers), cx) { Poll::Ready(Some(Message::Accept(index))) => { - self.peerset_report_accept(index); + self.peerset_report_preaccept(index); }, Poll::Ready(Some(Message::Reject(index))) => { - self.peerset_report_reject(index); + let _ = self.peerset_report_reject(index); }, Poll::Ready(Some(Message::Connect { peer_id, set_id, .. })) => { self.peerset_report_connect(peer_id, set_id); @@ -2031,6 +2267,43 @@ impl NetworkBehaviour for Notifications { } } + // poll commands from protocols + loop { + match futures::Stream::poll_next(Pin::new(&mut self.command_streams), cx) { + Poll::Ready(Some((set_id, command))) => match command { + NotificationCommand::SetHandshake(handshake) => { + self.set_notif_protocol_handshake(set_id.into(), handshake); + }, + NotificationCommand::OpenSubstream(_peer) | + NotificationCommand::CloseSubstream(_peer) => { + todo!("substream control not implemented"); + }, + }, + Poll::Ready(None) => { + error!(target: LOG_TARGET, "Protocol command streams have been shut down"); + break + }, + Poll::Pending => break, + } + } + + while let Poll::Ready(Some((result, index))) = + self.pending_inbound_validations.poll_next_unpin(cx) + { + match result { + Ok(ValidationResult::Accept) => { + self.protocol_report_accept(index); + }, + Ok(ValidationResult::Reject) => { + self.protocol_report_reject(index); + }, + Err(_) => { + error!(target: LOG_TARGET, "Protocol has shut down"); + break + }, + } + } + while let Poll::Ready(Some((delay_id, peer_id, set_id))) = Pin::new(&mut self.delays).poll_next(cx) { @@ -2153,7 +2426,10 @@ mod tests { } } - fn development_notifs() -> (Notifications, ProtocolController) { + fn development_notifs( + ) -> (Notifications, ProtocolController, Box) { + let (protocol_handle_pair, notif_service) = + crate::protocol::notifications::service::notification_service("/proto/1".into()); let (to_notifications, from_controller) = tracing_unbounded("test_controller_to_notifications", 10_000); @@ -2169,24 +2445,31 @@ mod tests { Box::new(MockPeerStore {}), ); + let (notif_handle, command_stream) = protocol_handle_pair.split(); ( Notifications::new( vec![handle], from_controller, - iter::once(ProtocolConfig { - name: "/foo".into(), - fallback_names: Vec::new(), - handshake: vec![1, 2, 3, 4], - max_notification_size: u64::MAX, - }), + None, + iter::once(( + ProtocolConfig { + name: "/foo".into(), + fallback_names: Vec::new(), + handshake: vec![1, 2, 3, 4], + max_notification_size: u64::MAX, + }, + notif_handle, + command_stream, + )), ), controller, + notif_service, ) } #[test] fn update_handshake() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let inner = notif.notif_protocols.get_mut(0).unwrap().handshake.read().clone(); assert_eq!(inner, vec![1, 2, 3, 4]); @@ -2201,14 +2484,14 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn update_unknown_handshake() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); notif.set_notif_protocol_handshake(1337.into(), vec![5, 6, 7, 8]); } #[test] fn disconnect_backoff_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( @@ 
-2225,7 +2508,7 @@ mod tests { #[test] fn disconnect_pending_request() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( @@ -2242,7 +2525,7 @@ mod tests { #[test] fn disconnect_requested_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); notif.peers.insert((peer, 0.into()), PeerState::Requested); @@ -2253,7 +2536,7 @@ mod tests { #[test] fn disconnect_disabled_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( (peer, 0.into()), @@ -2269,7 +2552,7 @@ mod tests { #[test] fn remote_opens_connection_and_substream() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2299,7 +2582,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); if let Some(&PeerState::Incoming { ref connections, backoff_until: None, .. }) = @@ -2319,7 +2605,7 @@ mod tests { #[tokio::test] async fn disconnect_remote_substream_before_handled_by_controller() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2339,7 +2625,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); notif.disconnect_peer(&peer, 0.into()); @@ -2355,7 +2644,7 @@ mod tests { #[test] fn peerset_report_connect_backoff() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -2393,7 +2682,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2420,7 +2709,7 @@ mod tests { #[test] fn peerset_connect_incoming() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2444,19 +2733,22 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); // attempt to connect to the peer and verify that the peer state is `Enabled`; // we rely on implementation detail that incoming indices are counted from 0 // to not mock the `Peerset` - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, 
set_id)), Some(&PeerState::Enabled { .. }))); } #[test] fn peerset_disconnect_disable_pending_enable() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -2503,7 +2795,7 @@ mod tests { #[test] fn peerset_disconnect_enabled() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2525,11 +2817,14 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); // we rely on the implementation detail that incoming indices are counted from 0 // to not mock the `Peerset` - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); // disconnect peer and verify that the state is `Disabled` @@ -2539,7 +2834,7 @@ mod tests { #[test] fn peerset_disconnect_requested() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); @@ -2554,7 +2849,7 @@ mod tests { #[test] fn peerset_disconnect_pending_request() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -2587,7 +2882,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2607,7 +2902,7 @@ mod tests { #[test] fn peerset_accept_peer_not_alive() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2631,7 +2926,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -2647,14 +2945,14 @@ mod tests { IncomingPeer { alive: false, incoming_id: IncomingIndex(0), .. }, )); - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert_eq!(notif.incoming.len(), 0); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(PeerState::Disabled { .. 
}))); } #[test] fn secondary_connection_peer_state_incoming() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -2678,7 +2976,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); if let Some(PeerState::Incoming { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections.len(), 1); @@ -2709,7 +3010,7 @@ mod tests { #[test] fn close_connection_for_disabled_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2734,7 +3035,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2743,7 +3044,7 @@ mod tests { #[test] fn close_connection_for_incoming_peer_one_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2766,7 +3067,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -2775,7 +3079,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2788,7 +3092,7 @@ mod tests { #[test] fn close_connection_for_incoming_peer_two_connections() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let conn1 = ConnectionId::new_unchecked(1); @@ -2815,7 +3119,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); @@ -2842,7 +3149,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2857,7 +3164,7 @@ mod tests { #[test] fn connection_and_substream_open() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2882,13 +3189,16 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); // We rely on the implementation detail that incoming indices are counted // from 0 to not mock the `Peerset`. - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); // open new substream @@ -2911,7 +3221,7 @@ mod tests { #[test] fn connection_closed_sink_replaced() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -2947,7 +3257,10 @@ mod tests { notif.on_connection_handler_event( peer, conn2, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); if let Some(PeerState::Enabled { connections, .. 
}) = notif.peers.get(&(peer, set_id)) { @@ -2984,7 +3297,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3005,7 +3318,7 @@ mod tests { #[test] fn dial_failure_for_requested_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); @@ -3028,7 +3341,7 @@ mod tests { #[tokio::test] async fn write_notification() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -3077,7 +3390,7 @@ mod tests { #[test] fn peerset_report_connect_backoff_expired() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3110,7 +3423,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3125,7 +3438,7 @@ mod tests { #[test] fn peerset_report_disconnect_disabled() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -3151,7 +3464,7 @@ mod tests { #[test] fn peerset_report_disconnect_backoff() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3184,7 +3497,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3197,7 +3510,7 @@ mod tests { #[test] fn peer_is_backed_off_if_both_connections_get_closed_while_peer_is_disabled_with_back_off() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); @@ -3247,7 +3560,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected.clone(), vec![]), + handler: NotifsHandler::new(peer, connected.clone(), vec![], None), remaining_established: 0usize, }, )); @@ -3261,7 +3574,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3270,7 +3583,7 @@ mod tests { #[test] fn inject_connection_closed_incoming_with_backoff() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -3294,7 +3607,10 
@@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); // manually add backoff for the entry @@ -3312,7 +3628,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3321,7 +3637,7 @@ mod tests { #[test] fn two_connections_inactive_connection_gets_closed_peer_state_is_still_incoming() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -3355,7 +3671,10 @@ mod tests { notif.on_connection_handler_event( peer, conn1, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!( notif.peers.get_mut(&(peer, 0.into())), @@ -3367,7 +3686,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3376,7 +3695,7 @@ mod tests { #[test] fn two_connections_active_connection_gets_closed_peer_state_is_disabled() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -3413,7 +3732,10 @@ mod tests { notif.on_connection_handler_event( peer, conn1, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!( notif.peers.get_mut(&(peer, 0.into())), @@ -3425,7 +3747,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3434,7 +3756,7 @@ mod tests { #[test] fn inject_connection_closed_for_active_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -3494,7 +3816,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3502,7 +3824,7 @@ mod tests { #[test] fn inject_dial_failure_for_pending_request() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3535,7 +3857,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); 
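For reference, a sketch of the wiring the updated `development_notifs()` helper performs, shown in isolation: the calls and struct fields come from this patch, while the standalone `let` bindings are illustrative and not part of the diff.

    // `notification_service()` returns the protocol-facing service handle and a
    // `ProtocolHandlePair`; `split()` yields the handle handed to `Notifications`
    // plus the command stream it polls in `Notifications::poll()`.
    let (protocol_handle_pair, notif_service) =
        crate::protocol::notifications::service::notification_service("/proto/1".into());
    let (notif_handle, command_stream) = protocol_handle_pair.split();

    // Each notification protocol is now registered as a
    // (ProtocolConfig, ProtocolHandle, command stream) triple rather than a bare
    // `ProtocolConfig`; the tests above keep `_notif_service` bound so the
    // protocol-side handle stays alive for the duration of the test.
    let protocols = std::iter::once((
        ProtocolConfig {
            name: "/foo".into(),
            fallback_names: Vec::new(),
            handshake: vec![1, 2, 3, 4],
            max_notification_size: u64::MAX,
        },
        notif_handle,
        command_stream,
    ));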
@@ -3565,7 +3887,7 @@ mod tests { #[test] fn peerstate_incoming_open_desired_by_remote() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn1 = ConnectionId::new_unchecked(0); @@ -3599,7 +3921,10 @@ mod tests { notif.on_connection_handler_event( peer, conn1, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -3607,7 +3932,10 @@ mod tests { notif.on_connection_handler_event( peer, conn2, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); if let Some(PeerState::Incoming { ref connections, .. }) = notif.peers.get(&(peer, set_id)) @@ -3619,7 +3947,7 @@ mod tests { #[tokio::test] async fn remove_backoff_peer_after_timeout() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -3652,7 +3980,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3697,7 +4025,7 @@ mod tests { #[tokio::test] async fn reschedule_disabled_pending_enable_when_connection_not_closed() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -3726,13 +4054,16 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); // we rely on the implementation detail that incoming indices are counted from 0 // to not mock the `Peerset` - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); @@ -3815,7 +4146,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn peerset_report_connect_with_enabled_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -3840,7 +4171,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); @@ -3865,7 +4199,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_connect_with_disabled_pending_enable_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3911,7 +4245,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_connect_with_requested_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); @@ -3927,7 +4261,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_connect_with_pending_requested() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3960,7 +4294,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3984,7 +4318,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_connect_with_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -4008,7 +4342,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -4019,7 +4356,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_disconnect_with_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -4043,7 +4380,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -4052,13 +4392,68 @@ mod tests { } #[test] - #[should_panic] #[cfg(debug_assertions)] - fn peerset_report_accept_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + fn peerset_report_disconnect_with_incoming_peer_protocol_accepts() { + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); + let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. 
}))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, + ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); + + // `Peerset` wants to disconnect the peer but since it's still under validation, + // it won't be disabled automatically + notif.peerset_report_disconnect(peer, set_id); + + let incoming_index = match notif.peers.get(&(peer, set_id)) { + Some(&PeerState::Incoming { peerset_rejected, incoming_index, .. }) => { + assert!(peerset_rejected); + incoming_index + }, + state => panic!("invalid state: {state:?}"), + }; + + // protocol accepted peer but since `Peerset` wanted to disconnect it, the peer will be + // disabled + notif.protocol_report_accept(incoming_index); + + match notif.peers.get(&(peer, set_id)) { + Some(&PeerState::Disabled { .. }) => {}, + state => panic!("invalid state: {state:?}"), + }; + } + + #[test] + #[cfg(debug_assertions)] + fn peer_disconnected_protocol_accepts() { + let (mut notif, _controller, _notif_service) = development_notifs(); + let peer = PeerId::random(); let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4079,24 +4474,188 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); - assert!(std::matches!( - notif.incoming[0], - IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. }, + assert!(notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0))); + notif.disconnect_peer(&peer, set_id); + + // since the connection was closed, nothing happens for the peer state because + // there is nothing actionable + notif.protocol_report_accept(IncomingIndex(0)); + + match notif.peers.get(&(peer, set_id)) { + Some(&PeerState::Disabled { .. }) => {}, + state => panic!("invalid state: {state:?}"), + }; + + assert!(!notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0))); + } + + #[test] + #[cfg(debug_assertions)] + fn connection_closed_protocol_accepts() { + let (mut notif, _controller, _notif_service) = development_notifs(); + let peer = PeerId::random(); + let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, + ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); + + notif.on_swarm_event(FromSwarm::ConnectionClosed( + libp2p::swarm::behaviour::ConnectionClosed { + peer_id: peer, + connection_id: ConnectionId::new_unchecked(0), + endpoint: &connected.clone(), + handler: NotifsHandler::new(peer, connected, vec![], None), + remaining_established: 0usize, + }, )); - notif.peers.remove(&(peer, set_id)); - notif.peerset_report_accept(IncomingIndex(0)); + // connection closed, nothing to do + notif.protocol_report_accept(IncomingIndex(0)); + + match notif.peers.get(&(peer, set_id)) { + None => {}, + state => panic!("invalid state: {state:?}"), + }; + } + + #[test] + #[cfg(debug_assertions)] + fn peer_disconnected_protocol_reject() { + let (mut notif, _controller, _notif_service) = development_notifs(); + let peer = PeerId::random(); + let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, + ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); + + assert!(notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0))); + notif.disconnect_peer(&peer, set_id); + + // since the connection was closed, nothing happens for the peer state because + // there is nothing actionable + notif.protocol_report_reject(IncomingIndex(0)); + + match notif.peers.get(&(peer, set_id)) { + Some(&PeerState::Disabled { .. }) => {}, + state => panic!("invalid state: {state:?}"), + }; + + assert!(!notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0))); + } + + #[test] + #[cfg(debug_assertions)] + fn connection_closed_protocol_rejects() { + let (mut notif, _controller, _notif_service) = development_notifs(); + let peer = PeerId::random(); + let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, + ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); + + notif.on_swarm_event(FromSwarm::ConnectionClosed( + libp2p::swarm::behaviour::ConnectionClosed { + peer_id: peer, + connection_id: ConnectionId::new_unchecked(0), + endpoint: &connected.clone(), + handler: NotifsHandler::new(peer, connected, vec![], None), + remaining_established: 0usize, + }, + )); + + // connection closed, nothing to do + notif.protocol_report_reject(IncomingIndex(0)); + + match notif.peers.get(&(peer, set_id)) { + None => {}, + state => panic!("invalid state: {state:?}"), + }; } #[test] #[should_panic] #[cfg(debug_assertions)] - fn peerset_report_accept_not_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + fn protocol_report_accept_not_incoming_peer() { + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4121,7 +4680,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -4138,14 +4700,14 @@ mod tests { assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); notif.incoming[0].alive = true; - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); } #[test] #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_non_existent_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let endpoint = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), @@ -4157,7 +4719,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &endpoint.clone(), - handler: NotifsHandler::new(peer, endpoint, vec![]), + handler: NotifsHandler::new(peer, endpoint, vec![], None), remaining_established: 0usize, }, )); @@ -4165,7 +4727,7 @@ mod tests { #[test] fn disconnect_non_existent_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); @@ -4177,9 +4739,9 @@ mod tests { #[test] fn accept_non_existent_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); - notif.peerset_report_accept(0.into()); + notif.protocol_report_accept(0.into()); assert!(notif.peers.is_empty()); assert!(notif.incoming.is_empty()); @@ -4187,9 +4749,9 @@ mod tests { #[test] fn reject_non_existent_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); - notif.peerset_report_reject(0.into()); + notif.protocol_report_reject(0.into()); assert!(notif.peers.is_empty()); assert!(notif.incoming.is_empty()); @@ -4197,7 +4759,7 @@ mod tests { #[test] fn reject_non_active_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4221,61 +4783,24 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + 
NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); notif.incoming[0].alive = false; - notif.peerset_report_reject(0.into()); - - assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); - } - - #[test] - #[should_panic] - #[cfg(debug_assertions)] - fn reject_non_existent_peer_but_alive_connection() { - let (mut notif, _controller) = development_notifs(); - let peer = PeerId::random(); - let conn = ConnectionId::new_unchecked(0); - let set_id = SetId::from(0); - let connected = ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }; - - notif.on_swarm_event(FromSwarm::ConnectionEstablished( - libp2p::swarm::behaviour::ConnectionEstablished { - peer_id: peer, - connection_id: conn, - endpoint: &connected, - failed_addresses: &[], - other_established: 0usize, - }, - )); - assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + notif.protocol_report_reject(0.into()); - // remote opens a substream, verify that peer state is updated to `Incoming` - notif.on_connection_handler_event( - peer, - conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, - ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); - assert!(std::matches!( - notif.incoming[0], - IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. }, - )); - - notif.peers.remove(&(peer, set_id)); - notif.peerset_report_reject(0.into()); } #[test] #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4299,7 +4824,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); @@ -4308,7 +4836,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4318,7 +4846,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_disabled_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -4343,7 +4871,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4353,7 +4881,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_disabled_pending_enable() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -4394,7 +4922,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4404,7 +4932,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_for_incoming_peer_state_mismatch() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4428,7 +4956,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); notif.incoming[0].alive = false; @@ -4438,7 +4969,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4448,7 +4979,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_for_enabled_state_mismatch() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4472,7 +5003,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); @@ -4485,7 +5019,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4495,7 +5029,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_for_backoff_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -4528,7 +5062,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected.clone(), vec![]), + handler: NotifsHandler::new(peer, connected.clone(), vec![], None), remaining_established: 0usize, }, )); @@ -4539,7 +5073,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4549,7 +5083,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn open_result_ok_non_existent_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index cffdec7d71ee4e11d3167e506e8ae71f70039e73..28662be29feede2ef67f150d6fa1b67ff313ae36 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -58,9 +58,12 @@ //! [`NotifsHandlerIn::Open`] has gotten an answer. use crate::{ - protocol::notifications::upgrade::{ - NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream, - UpgradeCollec, + protocol::notifications::{ + service::metrics, + upgrade::{ + NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream, + UpgradeCollec, + }, }, types::ProtocolName, }; @@ -92,7 +95,7 @@ use std::{ /// Number of pending notifications in asynchronous contexts. /// See [`NotificationsSink::reserve_notification`] for context. -const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; +pub(crate) const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; /// Number of pending notifications in synchronous contexts. const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; @@ -126,11 +129,19 @@ pub struct NotifsHandler { events_queue: VecDeque< ConnectionHandlerEvent, >, + + /// Metrics. + metrics: Option>, } impl NotifsHandler { /// Creates new [`NotifsHandler`]. - pub fn new(peer_id: PeerId, endpoint: ConnectedPoint, protocols: Vec) -> Self { + pub fn new( + peer_id: PeerId, + endpoint: ConnectedPoint, + protocols: Vec, + metrics: Option, + ) -> Self { Self { protocols: protocols .into_iter() @@ -148,6 +159,7 @@ impl NotifsHandler { endpoint, when_connection_open: Instant::now(), events_queue: VecDeque::with_capacity(16), + metrics: metrics.map_or(None, |metrics| Some(Arc::new(metrics))), } } } @@ -303,6 +315,8 @@ pub enum NotifsHandlerOut { OpenDesiredByRemote { /// Index of the protocol in the list of protocols passed at initialization. protocol_index: usize, + /// Received handshake. 
+ handshake: Vec, }, /// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in @@ -331,6 +345,36 @@ pub enum NotifsHandlerOut { #[derive(Debug, Clone)] pub struct NotificationsSink { inner: Arc, + metrics: Option>, +} + +impl NotificationsSink { + /// Create new [`NotificationsSink`]. + /// NOTE: only used for testing but must be `pub` as other crates in `client/network` use this. + pub fn new( + peer_id: PeerId, + ) -> (Self, mpsc::Receiver, mpsc::Receiver) + { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + ( + NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + peer_id, + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(Some(sync_tx)), + }), + metrics: None, + }, + async_rx, + sync_rx, + ) + } + + /// Get reference to metrics. + pub fn metrics(&self) -> &Option> { + &self.metrics + } } #[derive(Debug)] @@ -350,8 +394,8 @@ struct NotificationsSinkInner { /// Message emitted through the [`NotificationsSink`] and processed by the background task /// dedicated to the peer. -#[derive(Debug)] -enum NotificationsSinkMessage { +#[derive(Debug, PartialEq, Eq)] +pub enum NotificationsSinkMessage { /// Message emitted by [`NotificationsSink::reserve_notification`] and /// [`NotificationsSink::write_notification_now`]. Notification { message: Vec }, @@ -379,8 +423,8 @@ impl NotificationsSink { let mut lock = self.inner.sync_channel.lock(); if let Some(tx) = lock.as_mut() { - let result = - tx.try_send(NotificationsSinkMessage::Notification { message: message.into() }); + let message = message.into(); + let result = tx.try_send(NotificationsSinkMessage::Notification { message }); if result.is_err() { // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the @@ -476,7 +520,10 @@ impl ConnectionHandler for NotifsHandler { match protocol_info.state { State::Closed { pending_opening } => { self.events_queue.push_back(ConnectionHandlerEvent::Custom( - NotifsHandlerOut::OpenDesiredByRemote { protocol_index }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index, + handshake: in_substream_open.handshake, + }, )); protocol_info.state = State::OpenDesiredByRemote { @@ -531,6 +578,7 @@ impl ConnectionHandler for NotifsHandler { async_channel: FuturesMutex::new(async_tx), sync_channel: Mutex::new(Some(sync_tx)), }), + metrics: self.metrics.clone(), }; self.protocols[protocol_index].state = State::Open { @@ -881,6 +929,7 @@ pub mod tests { async_channel: FuturesMutex::new(async_tx), sync_channel: Mutex::new(Some(sync_tx)), }), + metrics: None, }; let (in_substream, out_substream) = MockSubstream::new(); @@ -1040,6 +1089,7 @@ pub mod tests { }, peer_id: PeerId::random(), events_queue: VecDeque::new(), + metrics: None, } } @@ -1545,6 +1595,7 @@ pub mod tests { async_channel: FuturesMutex::new(async_tx), sync_channel: Mutex::new(Some(sync_tx)), }), + metrics: None, }; handler.protocols[0].state = State::Open { @@ -1597,7 +1648,7 @@ pub mod tests { assert!(std::matches!( handler.poll(cx), Poll::Ready(ConnectionHandlerEvent::Custom( - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0, .. 
}, )) )); assert!(std::matches!( diff --git a/substrate/client/network/src/protocol/notifications/service/metrics.rs b/substrate/client/network/src/protocol/notifications/service/metrics.rs new file mode 100644 index 0000000000000000000000000000000000000000..2a57d57c17576fde27be047d747c94fdd19e64e5 --- /dev/null +++ b/substrate/client/network/src/protocol/notifications/service/metrics.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +use crate::types::ProtocolName; + +use prometheus_endpoint::{ + self as prometheus, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, + U64, +}; + +use std::sync::Arc; + +/// Notification metrics. +#[derive(Debug, Clone)] +pub struct Metrics { + /// Total number of opened substreams. + pub notifications_streams_opened_total: CounterVec<U64>, + + /// Total number of closed substreams. + pub notifications_streams_closed_total: CounterVec<U64>, + + /// In/outbound notification sizes. + pub notifications_sizes: HistogramVec, +} + +impl Metrics { + fn register(registry: &Registry) -> Result<Self, PrometheusError> { + Ok(Self { + notifications_sizes: prometheus::register( + HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "substrate_sub_libp2p_notifications_sizes", + "Sizes of the notifications sent to and received from all nodes", + ), + buckets: prometheus::exponential_buckets(64.0, 4.0, 8) + .expect("parameters are always valid values; qed"), + }, + &["direction", "protocol"], + )?, + registry, + )?, + notifications_streams_closed_total: prometheus::register( + CounterVec::new( + Opts::new( + "substrate_sub_libp2p_notifications_streams_closed_total", + "Total number of notification substreams that have been closed", + ), + &["protocol"], + )?, + registry, + )?, + notifications_streams_opened_total: prometheus::register( + CounterVec::new( + Opts::new( + "substrate_sub_libp2p_notifications_streams_opened_total", + "Total number of notification substreams that have been opened", + ), + &["protocol"], + )?, + registry, + )?, + }) + } +} + +/// Register metrics. +pub fn register(registry: &Registry) -> Result<Metrics, PrometheusError> { + Metrics::register(registry) +} + +/// Register opened substream to Prometheus. +pub fn register_substream_opened(metrics: &Option<Metrics>, protocol: &ProtocolName) { + if let Some(metrics) = metrics { + metrics.notifications_streams_opened_total.with_label_values(&[&protocol]).inc(); + } +} + +/// Register closed substream to Prometheus. +pub fn register_substream_closed(metrics: &Option<Metrics>, protocol: &ProtocolName) { + if let Some(metrics) = metrics { + metrics + .notifications_streams_closed_total + .with_label_values(&[&protocol[..]]) + .inc(); + } +} + +/// Register sent notification to Prometheus.
+pub fn register_notification_sent( + metrics: &Option<Arc<Metrics>>, + protocol: &ProtocolName, + size: usize, +) { + if let Some(metrics) = metrics { + metrics + .notifications_sizes + .with_label_values(&["out", protocol]) + .observe(size as f64); + } +} + +/// Register received notification to Prometheus. +pub fn register_notification_received( + metrics: &Option<Metrics>, + protocol: &ProtocolName, + size: usize, +) { + if let Some(metrics) = metrics { + metrics + .notifications_sizes + .with_label_values(&["in", protocol]) + .observe(size as f64); + } +} diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..62e6d88a3d5a6ae36b46728a37dc3ff5c31177db --- /dev/null +++ b/substrate/client/network/src/protocol/notifications/service/mod.rs @@ -0,0 +1,634 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//! Notification service implementation. + +use crate::{ + error, + protocol::notifications::handler::NotificationsSink, + service::traits::{ + Direction, MessageSink, NotificationEvent, NotificationService, ValidationResult, + }, + types::ProtocolName, +}; + +use futures::{ + stream::{FuturesUnordered, Stream}, + StreamExt, +}; +use libp2p::PeerId; +use parking_lot::Mutex; +use tokio::sync::{mpsc, oneshot}; +use tokio_stream::wrappers::ReceiverStream; + +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; + +use std::{collections::HashMap, fmt::Debug, sync::Arc}; + +pub(crate) mod metrics; + +#[cfg(test)] +mod tests; + +/// Logging target for the file. +const LOG_TARGET: &str = "sub-libp2p"; + +/// Default command queue size. +const COMMAND_QUEUE_SIZE: usize = 64; + +/// Type representing subscribers of a notification protocol. +type Subscribers = Arc<Mutex<Vec<TracingUnboundedSender<InnerNotificationEvent>>>>; + +/// Type representing a distributable message sink. +/// Detached message sink must carry the protocol name for registering metrics. +/// +/// See documentation for [`PeerContext`] for more details. +type NotificationSink = Arc<Mutex<(NotificationsSink, ProtocolName)>>; + +#[async_trait::async_trait] +impl MessageSink for NotificationSink { + /// Send synchronous `notification` to the peer associated with this [`MessageSink`]. + fn send_sync_notification(&self, notification: Vec<u8>) { + let sink = self.lock(); + + metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification.len()); + sink.0.send_sync_notification(notification); + } + + /// Send an asynchronous `notification` to the peer associated with this [`MessageSink`], + /// allowing sender to exercise backpressure. + /// + /// Returns an error if the peer does not exist.
+ async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), error::Error> { + // notification sink must be cloned because the lock cannot be held across `.await` + // this makes the implementation less efficient but not prohibitively so as the same + // method is also used by `NetworkService` when sending notifications. + let notification_len = notification.len(); + let sink = self.lock().clone(); + let permit = sink + .0 + .reserve_notification() + .await + .map_err(|_| error::Error::ConnectionClosed)?; + + permit.send(notification).map_err(|_| error::Error::ChannelClosed).map(|res| { + metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification_len); + res + }) + } +} + +/// Inner notification event to deal with `NotificationsSinks` without exposing that +/// implementation detail to [`NotificationService`] consumers. +#[derive(Debug)] +enum InnerNotificationEvent { + /// Validate inbound substream. + ValidateInboundSubstream { + /// Peer ID. + peer: PeerId, + + /// Received handshake. + handshake: Vec<u8>, + + /// `oneshot::Sender` for sending validation result back to `Notifications`. + result_tx: oneshot::Sender<ValidationResult>, + }, + + /// Notification substream open to `peer`. + NotificationStreamOpened { + /// Peer ID. + peer: PeerId, + + /// Direction of the substream. + direction: Direction, + + /// Received handshake. + handshake: Vec<u8>, + + /// Negotiated fallback. + negotiated_fallback: Option<ProtocolName>, + + /// Notification sink. + sink: NotificationsSink, + }, + + /// Substream was closed. + NotificationStreamClosed { + /// Peer ID. + peer: PeerId, + }, + + /// Notification was received from the substream. + NotificationReceived { + /// Peer ID. + peer: PeerId, + + /// Received notification. + notification: Vec<u8>, + }, + + /// Notification sink has been replaced. + NotificationSinkReplaced { + /// Peer ID. + peer: PeerId, + + /// Notification sink. + sink: NotificationsSink, + }, +} + +/// Notification commands. +/// +/// Sent by the installed protocols to `Notifications` to open/close/modify substreams. +#[derive(Debug)] +pub enum NotificationCommand { + /// Instruct `Notifications` to open a substream to peer. + #[allow(unused)] + OpenSubstream(PeerId), + + /// Instruct `Notifications` to close the substream to peer. + #[allow(unused)] + CloseSubstream(PeerId), + + /// Set handshake for the notifications protocol. + SetHandshake(Vec<u8>), +} + +/// Context assigned to each peer. +/// +/// Contains `NotificationsSink` used by [`NotificationService`] to send notifications +/// and an additional, distributable `NotificationsSink` which the protocol may acquire +/// if it wishes to send notifications through `NotificationsSink` directly. +/// +/// The distributable `NotificationsSink` is wrapped in an `Arc<Mutex<(NotificationsSink, ProtocolName)>>` to allow +/// `NotificationService` to swap the underlying sink in case it's replaced. +#[derive(Debug, Clone)] +struct PeerContext { + /// Sink for sending notifications. + sink: NotificationsSink, + + /// Distributable notification sink. + shared_sink: NotificationSink, +} + +/// Handle that is passed on to the notifications protocol. +#[derive(Debug)] +pub struct NotificationHandle { + /// Protocol name. + protocol: ProtocolName, + + /// TX channel for sending commands to `Notifications`. + tx: mpsc::Sender<NotificationCommand>, + + /// RX channel for receiving events from `Notifications`. + rx: TracingUnboundedReceiver<InnerNotificationEvent>, + + /// All subscribers of `NotificationEvent`s. + subscribers: Subscribers, + + /// Connected peers.
+ peers: HashMap, +} + +impl NotificationHandle { + /// Create new [`NotificationHandle`]. + fn new( + protocol: ProtocolName, + tx: mpsc::Sender, + rx: TracingUnboundedReceiver, + subscribers: Arc>>>, + ) -> Self { + Self { protocol, tx, rx, subscribers, peers: HashMap::new() } + } +} + +#[async_trait::async_trait] +impl NotificationService for NotificationHandle { + /// Instruct `Notifications` to open a new substream for `peer`. + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + todo!("support for opening substreams not implemented yet"); + } + + /// Instruct `Notifications` to close substream for `peer`. + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + todo!("support for closing substreams not implemented yet, call `NetworkService::disconnect_peer()` instead"); + } + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, peer: &PeerId, notification: Vec) { + if let Some(info) = self.peers.get(&peer) { + metrics::register_notification_sent( + &info.sink.metrics(), + &self.protocol, + notification.len(), + ); + + let _ = info.sink.send_sync_notification(notification); + } + } + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + async fn send_async_notification( + &self, + peer: &PeerId, + notification: Vec, + ) -> Result<(), error::Error> { + let notification_len = notification.len(); + let sink = &self.peers.get(&peer).ok_or_else(|| error::Error::PeerDoesntExist(*peer))?.sink; + + sink.reserve_notification() + .await + .map_err(|_| error::Error::ConnectionClosed)? + .send(notification) + .map_err(|_| error::Error::ChannelClosed) + .map(|res| { + metrics::register_notification_sent( + &sink.metrics(), + &self.protocol, + notification_len, + ); + res + }) + } + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, handshake: Vec) -> Result<(), ()> { + log::trace!(target: LOG_TARGET, "{}: set handshake to {handshake:?}", self.protocol); + + self.tx.send(NotificationCommand::SetHandshake(handshake)).await.map_err(|_| ()) + } + + /// Non-blocking variant of `set_handshake()` that attempts to update the handshake + /// and returns an error if the channel is blocked. + /// + /// Technically the function can return an error if the channel to `Notifications` is closed + /// but that doesn't happen under normal operation. + fn try_set_handshake(&mut self, handshake: Vec) -> Result<(), ()> { + self.tx.try_send(NotificationCommand::SetHandshake(handshake)).map_err(|_| ()) + } + + /// Get next event from the `Notifications` event stream. + async fn next_event(&mut self) -> Option { + loop { + match self.rx.next().await? 
{ + InnerNotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } => + return Some(NotificationEvent::ValidateInboundSubstream { + peer, + handshake, + result_tx, + }), + InnerNotificationEvent::NotificationStreamOpened { + peer, + handshake, + negotiated_fallback, + direction, + sink, + } => { + self.peers.insert( + peer, + PeerContext { + sink: sink.clone(), + shared_sink: Arc::new(Mutex::new((sink, self.protocol.clone()))), + }, + ); + return Some(NotificationEvent::NotificationStreamOpened { + peer, + handshake, + direction, + negotiated_fallback, + }) + }, + InnerNotificationEvent::NotificationStreamClosed { peer } => { + self.peers.remove(&peer); + return Some(NotificationEvent::NotificationStreamClosed { peer }) + }, + InnerNotificationEvent::NotificationReceived { peer, notification } => + return Some(NotificationEvent::NotificationReceived { peer, notification }), + InnerNotificationEvent::NotificationSinkReplaced { peer, sink } => { + match self.peers.get_mut(&peer) { + None => log::error!( + "{}: notification sink replaced for {peer} but peer does not exist", + self.protocol + ), + Some(context) => { + context.sink = sink.clone(); + *context.shared_sink.lock() = (sink.clone(), self.protocol.clone()); + }, + } + }, + } + } + } + + // Clone [`NotificationService`] + fn clone(&mut self) -> Result, ()> { + let mut subscribers = self.subscribers.lock(); + let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000); + subscribers.push(event_tx); + + Ok(Box::new(NotificationHandle { + protocol: self.protocol.clone(), + tx: self.tx.clone(), + rx: event_rx, + peers: self.peers.clone(), + subscribers: self.subscribers.clone(), + })) + } + + /// Get protocol name. + fn protocol(&self) -> &ProtocolName { + &self.protocol + } + + /// Get message sink of the peer. + fn message_sink(&self, peer: &PeerId) -> Option> { + match self.peers.get(peer) { + Some(context) => Some(Box::new(context.shared_sink.clone())), + None => None, + } + } +} + +/// Channel pair which allows `Notifications` to interact with a protocol. +#[derive(Debug)] +pub struct ProtocolHandlePair { + /// Protocol name. + protocol: ProtocolName, + + /// Subscribers of the notification protocol events. + subscribers: Subscribers, + + // Receiver for notification commands received from the protocol implementation. + rx: mpsc::Receiver, +} + +impl ProtocolHandlePair { + /// Create new [`ProtocolHandlePair`]. + fn new( + protocol: ProtocolName, + subscribers: Subscribers, + rx: mpsc::Receiver, + ) -> Self { + Self { protocol, subscribers, rx } + } + + /// Consume `self` and split [`ProtocolHandlePair`] into a handle which allows it to send events + /// to the protocol and a stream of commands received from the protocol. + pub(crate) fn split( + self, + ) -> (ProtocolHandle, Box + Send + Unpin>) { + ( + ProtocolHandle::new(self.protocol, self.subscribers), + Box::new(ReceiverStream::new(self.rx)), + ) + } +} + +/// Handle that is passed on to `Notifications` and allows it to directly communicate +/// with the protocol. +#[derive(Debug, Clone)] +pub(crate) struct ProtocolHandle { + /// Protocol name. + protocol: ProtocolName, + + /// Subscribers of the notification protocol. + subscribers: Subscribers, + + /// Number of connected peers. + num_peers: usize, + + /// Delegate validation to `Peerset`. + delegate_to_peerset: bool, + + /// Prometheus metrics. 
+ metrics: Option, +} + +pub(crate) enum ValidationCallResult { + WaitForValidation(oneshot::Receiver), + Delegated, +} + +impl ProtocolHandle { + /// Create new [`ProtocolHandle`]. + fn new(protocol: ProtocolName, subscribers: Subscribers) -> Self { + Self { protocol, subscribers, num_peers: 0usize, metrics: None, delegate_to_peerset: false } + } + + /// Set metrics. + pub fn set_metrics(&mut self, metrics: Option) { + self.metrics = metrics; + } + + /// Delegate validation to `Peerset`. + /// + /// Protocols that do not do any validation themselves and only rely on `Peerset` handling + /// validation can disable protocol-side validation entirely by delegating all validation to + /// `Peerset`. + pub fn delegate_to_peerset(&mut self, delegate: bool) { + self.delegate_to_peerset = delegate; + } + + /// Report to the protocol that a substream has been opened and it must be validated by the + /// protocol. + /// + /// Return `oneshot::Receiver` which allows `Notifications` to poll for the validation result + /// from protocol. + pub fn report_incoming_substream( + &self, + peer: PeerId, + handshake: Vec, + ) -> Result { + let subscribers = self.subscribers.lock(); + + log::trace!( + target: LOG_TARGET, + "{}: report incoming substream for {peer}, handshake {handshake:?}", + self.protocol + ); + + if self.delegate_to_peerset { + return Ok(ValidationCallResult::Delegated) + } + + // if there is only one subscriber, `Notifications` can wait directly on the + // `oneshot::channel()`'s RX half without indirection + if subscribers.len() == 1 { + let (result_tx, rx) = oneshot::channel(); + return subscribers[0] + .unbounded_send(InnerNotificationEvent::ValidateInboundSubstream { + peer, + handshake, + result_tx, + }) + .map(|_| ValidationCallResult::WaitForValidation(rx)) + .map_err(|_| ()) + } + + // if there are multiple subscribers, create a task which waits for all of the + // validations to finish and returns the combined result to `Notifications` + let mut results: FuturesUnordered<_> = subscribers + .iter() + .filter_map(|subscriber| { + let (result_tx, rx) = oneshot::channel(); + + subscriber + .unbounded_send(InnerNotificationEvent::ValidateInboundSubstream { + peer, + handshake: handshake.clone(), + result_tx, + }) + .is_ok() + .then_some(rx) + }) + .collect(); + + let (tx, rx) = oneshot::channel(); + tokio::spawn(async move { + while let Some(event) = results.next().await { + match event { + Err(_) | Ok(ValidationResult::Reject) => + return tx.send(ValidationResult::Reject), + Ok(ValidationResult::Accept) => {}, + } + } + + return tx.send(ValidationResult::Accept) + }); + + Ok(ValidationCallResult::WaitForValidation(rx)) + } + + /// Report to the protocol that a substream has been opened and that it can now use the handle + /// to send notifications to the remote peer. + pub fn report_substream_opened( + &mut self, + peer: PeerId, + direction: Direction, + handshake: Vec, + negotiated_fallback: Option, + sink: NotificationsSink, + ) -> Result<(), ()> { + metrics::register_substream_opened(&self.metrics, &self.protocol); + + let mut subscribers = self.subscribers.lock(); + log::trace!(target: LOG_TARGET, "{}: substream opened for {peer:?}", self.protocol); + + subscribers.retain(|subscriber| { + subscriber + .unbounded_send(InnerNotificationEvent::NotificationStreamOpened { + peer, + direction, + handshake: handshake.clone(), + negotiated_fallback: negotiated_fallback.clone(), + sink: sink.clone(), + }) + .is_ok() + }); + self.num_peers += 1; + + Ok(()) + } + + /// Substream was closed. 
+ pub fn report_substream_closed(&mut self, peer: PeerId) -> Result<(), ()> { + metrics::register_substream_closed(&self.metrics, &self.protocol); + + let mut subscribers = self.subscribers.lock(); + log::trace!(target: LOG_TARGET, "{}: substream closed for {peer:?}", self.protocol); + + subscribers.retain(|subscriber| { + subscriber + .unbounded_send(InnerNotificationEvent::NotificationStreamClosed { peer }) + .is_ok() + }); + self.num_peers -= 1; + + Ok(()) + } + + /// Notification was received from the substream. + pub fn report_notification_received( + &mut self, + peer: PeerId, + notification: Vec, + ) -> Result<(), ()> { + metrics::register_notification_received(&self.metrics, &self.protocol, notification.len()); + + let mut subscribers = self.subscribers.lock(); + log::trace!(target: LOG_TARGET, "{}: notification received from {peer:?}", self.protocol); + + subscribers.retain(|subscriber| { + subscriber + .unbounded_send(InnerNotificationEvent::NotificationReceived { + peer, + notification: notification.clone(), + }) + .is_ok() + }); + + Ok(()) + } + + /// Notification sink was replaced. + pub fn report_notification_sink_replaced( + &mut self, + peer: PeerId, + sink: NotificationsSink, + ) -> Result<(), ()> { + let mut subscribers = self.subscribers.lock(); + + log::trace!( + target: LOG_TARGET, + "{}: notification sink replaced for {peer:?}", + self.protocol + ); + + subscribers.retain(|subscriber| { + subscriber + .unbounded_send(InnerNotificationEvent::NotificationSinkReplaced { + peer, + sink: sink.clone(), + }) + .is_ok() + }); + + Ok(()) + } + + /// Get the number of connected peers. + pub fn num_peers(&self) -> usize { + self.num_peers + } +} + +/// Create new (protocol, notification) handle pair. +/// +/// Handle pair allows `Notifications` and the protocol to communicate with each other directly. +pub fn notification_service( + protocol: ProtocolName, +) -> (ProtocolHandlePair, Box) { + let (cmd_tx, cmd_rx) = mpsc::channel(COMMAND_QUEUE_SIZE); + let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000); + let subscribers = Arc::new(Mutex::new(vec![event_tx])); + + ( + ProtocolHandlePair::new(protocol.clone(), subscribers.clone(), cmd_rx), + Box::new(NotificationHandle::new(protocol.clone(), cmd_tx, event_rx, subscribers)), + ) +} diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..02ba9e1711c3856df095c8a31688e3388bf0bb00 --- /dev/null +++ b/substrate/client/network/src/protocol/notifications/service/tests.rs @@ -0,0 +1,839 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
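For orientation, here is a condensed, editorial sketch of the round trip the new tests below exercise; it is not part of the patch. `notification_service()` creates the handle pair, `ProtocolHandlePair::split()` yields the `ProtocolHandle` used by `Notifications`, the protocol accepts the inbound substream through the returned `oneshot` channel, and notifications then flow through the per-peer `NotificationsSink`. The sketch assumes it would live alongside these tests (so `use super::*` resolves the same names the real tests use); the test name, protocol name and payloads are arbitrary.

use super::*;
use crate::protocol::notifications::handler::NotificationsSinkMessage;

#[tokio::test]
async fn handle_pair_round_trip_sketch() {
    // create the handle pair: `proto` goes to `Notifications`, `notif` to the protocol
    let (proto, mut notif) = notification_service("/proto/1".into());
    let (mut handle, _command_stream) = proto.split();
    let (sink, mut async_rx, _sync_rx) = NotificationsSink::new(PeerId::random());
    let peer_id = PeerId::random();

    // `Notifications` reports an inbound substream and waits for the protocol's verdict
    let ValidationCallResult::WaitForValidation(result_rx) =
        handle.report_incoming_substream(peer_id, vec![1, 2, 3, 4]).unwrap()
    else {
        panic!("validation was delegated to `Peerset`");
    };

    // the protocol inspects the handshake and accepts the substream
    match notif.next_event().await {
        Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) =>
            result_tx.send(ValidationResult::Accept).unwrap(),
        event => panic!("unexpected event: {event:?}"),
    }
    assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);

    // `Notifications` opens the substream and hands over the notification sink
    handle
        .report_substream_opened(peer_id, Direction::Inbound, vec![1, 2, 3, 4], None, sink)
        .unwrap();
    assert!(matches!(
        notif.next_event().await,
        Some(NotificationEvent::NotificationStreamOpened { .. })
    ));

    // asynchronous sending goes through the per-peer `NotificationsSink`
    notif.send_async_notification(&peer_id, vec![5, 6, 7, 8]).await.unwrap();
    assert_eq!(
        async_rx.next().await,
        Some(NotificationsSinkMessage::Notification { message: vec![5, 6, 7, 8] })
    );
}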
+ +use super::*; +use crate::protocol::notifications::handler::{ + NotificationsSinkMessage, ASYNC_NOTIFICATIONS_BUFFER_SIZE, +}; + +use std::future::Future; + +#[tokio::test] +async fn validate_and_accept_substream() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (handle, _stream) = proto.split(); + + let peer_id = PeerId::random(); + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); +} + +#[tokio::test] +async fn substream_opened() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, _, _) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + + let peer_id = PeerId::random(); + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } +} + +#[tokio::test] +async fn send_sync_notification() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, _, mut sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]); + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }) + ); +} + +#[tokio::test] +async fn send_async_notification() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, mut async_rx, _) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let 
ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap(); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }) + ); +} + +#[tokio::test] +async fn send_sync_notification_to_non_existent_peer() { + let (proto, notif) = notification_service("/proto/1".into()); + let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random()); + let (_handle, _stream) = proto.split(); + let peer = PeerId::random(); + + // as per the original implementation, the call doesn't fail + notif.send_sync_notification(&peer, vec![1, 3, 3, 7]) +} + +#[tokio::test] +async fn send_async_notification_to_non_existent_peer() { + let (proto, notif) = notification_service("/proto/1".into()); + let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random()); + let (_handle, _stream) = proto.split(); + let peer = PeerId::random(); + + if let Err(error::Error::PeerDoesntExist(peer_id)) = + notif.send_async_notification(&peer, vec![1, 3, 3, 7]).await + { + assert_eq!(peer, peer_id); + } else { + panic!("invalid error received from `send_async_notification()`"); + } +} + +#[tokio::test] +async fn receive_notification() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, _, _sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid 
event received"); + } + + // notification is received + handle.report_notification_received(peer_id, vec![1, 3, 3, 8]).unwrap(); + + if let Some(NotificationEvent::NotificationReceived { peer, notification }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(notification, vec![1, 3, 3, 8]); + } else { + panic!("invalid event received"); + } +} + +#[tokio::test] +async fn backpressure_works() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, mut async_rx, _) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + // fill the message buffer with messages + for i in 0..=ASYNC_NOTIFICATIONS_BUFFER_SIZE { + assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, i as u8])) + .is_ready()); + } + + // try to send one more message and verify that the call blocks + assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_pending()); + + // release one slot from the buffer for new message + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 0] }) + ); + + // verify that a message can be sent + assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_ready()); +} + +#[tokio::test] +async fn peer_disconnects_then_sync_notification_is_sent() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, _, sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + 
negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + // report that a substream has been closed but don't poll `notif` to receive this + // information + handle.report_substream_closed(peer_id).unwrap(); + drop(sync_rx); + + // as per documentation, error is not reported but the notification is silently dropped + notif.send_sync_notification(&peer_id, vec![1, 3, 3, 7]); +} + +#[tokio::test] +async fn peer_disconnects_then_async_notification_is_sent() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, async_rx, _) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + // report that a substream has been closed but don't poll `notif` to receive this + // information + handle.report_substream_closed(peer_id).unwrap(); + drop(async_rx); + + // as per documentation, error is not reported but the notification is silently dropped + if let Err(error::Error::ConnectionClosed) = + notif.send_async_notification(&peer_id, vec![1, 3, 3, 7]).await + { + } else { + panic!("invalid state after calling `send_async_notificatio()` on closed connection") + } +} + +#[tokio::test] +async fn cloned_service_opening_substream_works() { + let (proto, mut notif1) = notification_service("/proto/1".into()); + let (_sink, _async_rx, _) = NotificationsSink::new(PeerId::random()); + let (handle, _stream) = proto.split(); + let mut notif2 = notif1.clone().unwrap(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(mut result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + // verify that `notif1` gets the event + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif1.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + + // verify that because only one listener has thus far send their result, the result is + // pending + assert!(result_rx.try_recv().is_err()); + 
+ // verify that `notif2` also gets the event + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif2.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); +} + +#[tokio::test] +async fn cloned_service_one_service_rejects_substream() { + let (proto, mut notif1) = notification_service("/proto/1".into()); + let (_sink, _async_rx, _) = NotificationsSink::new(PeerId::random()); + let (handle, _stream) = proto.split(); + let mut notif2 = notif1.clone().unwrap(); + let mut notif3 = notif2.clone().unwrap(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(mut result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + for notif in vec![&mut notif1, &mut notif2] { + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + } + + // `notif3` has not yet sent their validation result + assert!(result_rx.try_recv().is_err()); + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif3.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Reject).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Reject); +} + +#[tokio::test] +async fn cloned_service_opening_substream_sending_and_receiving_notifications_work() { + let (proto, mut notif1) = notification_service("/proto/1".into()); + let (sink, _, mut sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let mut notif2 = notif1.clone().unwrap(); + let mut notif3 = notif1.clone().unwrap(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + for notif in vec![&mut notif1, &mut notif2, &mut notif3] { + // accept the inbound substream for all services + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that then notification stream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + for notif in vec![&mut notif1, &mut notif2, &mut notif3] { + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event 
received"); + } + } + // receive a notification from peer and verify all services receive it + handle.report_notification_received(peer_id, vec![1, 3, 3, 8]).unwrap(); + + for notif in vec![&mut notif1, &mut notif2, &mut notif3] { + if let Some(NotificationEvent::NotificationReceived { peer, notification }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(notification, vec![1, 3, 3, 8]); + } else { + panic!("invalid event received"); + } + } + + for (i, notif) in vec![&mut notif1, &mut notif2, &mut notif3].iter().enumerate() { + // send notification from each service and verify peer receives it + notif.send_sync_notification(&peer_id, vec![1, 3, 3, i as u8]); + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, i as u8] }) + ); + } + + // close the substream for peer and verify all services receive the event + handle.report_substream_closed(peer_id).unwrap(); + + for notif in vec![&mut notif1, &mut notif2, &mut notif3] { + if let Some(NotificationEvent::NotificationStreamClosed { peer }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + } else { + panic!("invalid event received"); + } + } +} + +#[tokio::test] +async fn sending_notifications_using_notifications_sink_works() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, mut async_rx, mut sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + // get a copy of the notification sink and send a synchronous notification using. + let sink = notif.message_sink(&peer_id).unwrap(); + sink.send_sync_notification(vec![1, 3, 3, 6]); + + // send an asynchronous notification using the acquired notifications sink. + let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap(); + + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }), + ); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }), + ); + + // send notifications using the stored notification sink as well. 
+ notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]); + notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap(); + + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }), + ); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }), + ); +} + +#[test] +fn try_to_get_notifications_sink_for_non_existent_peer() { + let (_proto, notif) = notification_service("/proto/1".into()); + assert!(notif.message_sink(&PeerId::random()).is_none()); +} + +#[tokio::test] +async fn notification_sink_replaced() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, mut async_rx, mut sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + // get a copy of the notification sink and send a synchronous notification using. + let sink = notif.message_sink(&peer_id).unwrap(); + sink.send_sync_notification(vec![1, 3, 3, 6]); + + // send an asynchronous notification using the acquired notifications sink. + let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap(); + + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }), + ); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }), + ); + + // send notifications using the stored notification sink as well. + notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]); + notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap(); + + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }), + ); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }), + ); + + // the initial connection was closed and `Notifications` switched to secondary connection + // and emitted `CustomProtocolReplaced` which informs the local `NotificationService` that + // the notification sink was replaced. 
+ let (new_sink, mut new_async_rx, mut new_sync_rx) = NotificationsSink::new(PeerId::random()); + handle.report_notification_sink_replaced(peer_id, new_sink).unwrap(); + + // drop the old sinks and poll `notif` once to register the sink replacement + drop(sync_rx); + drop(async_rx); + + futures::future::poll_fn(|cx| { + let _ = std::pin::Pin::new(&mut notif.next_event()).poll(cx); + std::task::Poll::Ready(()) + }) + .await; + + // verify that using the `NotificationService` API automatically results in using the correct + // sink + notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]); + notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap(); + + assert_eq!( + new_sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }), + ); + assert_eq!( + new_async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }), + ); + + // now send two notifications using the acquired message sink and verify that + // it's also updated + sink.send_sync_notification(vec![1, 3, 3, 6]); + + // send an asynchronous notification using the acquired notifications sink. + let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap(); + + assert_eq!( + new_sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }), + ); + assert_eq!( + new_async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }), + ); +} + +#[tokio::test] +async fn set_handshake() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (_handle, mut stream) = proto.split(); + + assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_ok()); + + match stream.next().await { + Some(NotificationCommand::SetHandshake(handshake)) => { + assert_eq!(handshake, vec![1, 3, 3, 7]); + }, + _ => panic!("invalid event received"), + } + + for _ in 0..COMMAND_QUEUE_SIZE { + assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_ok()); + } + + assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_err()); +} diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs index d57c24144f571ee75981a5ebeb6dbdfd38d7ae1f..0178bd75e8b7984e90b4408275a35abe1a4d2acc 100644 --- a/substrate/client/network/src/protocol/notifications/tests.rs +++ b/substrate/client/network/src/protocol/notifications/tests.rs @@ -22,6 +22,7 @@ use crate::{ peer_store::PeerStore, protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig}, protocol_controller::{ProtoSetConfig, ProtocolController, SetId}, + service::traits::{NotificationEvent, ValidationResult}, }; use futures::{future::BoxFuture, prelude::*}; @@ -70,6 +71,8 @@ fn build_nodes() -> (Swarm, Swarm) { .timeout(Duration::from_secs(20)) .boxed(); + let (protocol_handle_pair, mut notif_service) = + crate::protocol::notifications::service::notification_service("/foo".into()); let peer_store = PeerStore::new(if index == 0 { keypairs.iter().skip(1).map(|keypair| keypair.public().to_peer_id()).collect() } else { @@ -91,16 +94,22 @@ fn build_nodes() -> (Swarm, Swarm) { Box::new(peer_store.handle()), ); + let (notif_handle, command_stream) = protocol_handle_pair.split(); let behaviour = CustomProtoWithAddr { inner: Notifications::new( vec![controller_handle], from_controller, - iter::once(ProtocolConfig { - name: "/foo".into(), - fallback_names: Vec::new(), - handshake: Vec::new(), - max_notification_size: 1024 * 1024, - }), + None, + iter::once(( + 
ProtocolConfig { + name: "/foo".into(), + fallback_names: Vec::new(), + handshake: Vec::new(), + max_notification_size: 1024 * 1024, + }, + notif_handle, + command_stream, + )), ), peer_store_future: peer_store.run().boxed(), protocol_controller_future: controller.run().boxed(), @@ -118,6 +127,16 @@ fn build_nodes() -> (Swarm, Swarm) { }; let runtime = tokio::runtime::Runtime::new().unwrap(); + runtime.spawn(async move { + loop { + if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = + notif_service.next_event().await.unwrap() + { + result_tx.send(ValidationResult::Accept).unwrap(); + } + } + }); + let mut swarm = SwarmBuilder::with_executor( transport, behaviour, diff --git a/substrate/client/network/src/protocol/notifications/upgrade.rs b/substrate/client/network/src/protocol/notifications/upgrade.rs index 70c6023623f51b2c0d97379298febc8e4826dfbe..8fd837f949d8a10d744513b9225ecf53d8a74d1e 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade.rs @@ -16,12 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#[cfg(test)] +pub(crate) use self::notifications::{ + NotificationsInOpen, NotificationsInSubstreamHandshake, NotificationsOutOpen, +}; pub use self::{ collec::UpgradeCollec, notifications::{ - NotificationsHandshakeError, NotificationsIn, NotificationsInOpen, - NotificationsInSubstream, NotificationsInSubstreamHandshake, NotificationsOut, - NotificationsOutError, NotificationsOutOpen, NotificationsOutSubstream, + NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream, }, }; diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index 3a305011ded0296c7f56711182b2ec520febe94a..4c8f119baa203b3942f4156e2f92efb72007c77f 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -847,6 +847,7 @@ mod tests { use super::*; use crate::{peer_store::PeerStoreProvider, ReputationChange}; use libp2p::PeerId; + use sc_network_common::role::ObservedRole; use sc_utils::mpsc::{tracing_unbounded, TryRecvError}; use std::collections::HashSet; @@ -858,8 +859,10 @@ mod tests { fn is_banned(&self, peer_id: &PeerId) -> bool; fn register_protocol(&self, protocol_handle: ProtocolHandle); fn report_disconnect(&mut self, peer_id: PeerId); + fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole); fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange); fn peer_reputation(&self, peer_id: &PeerId) -> i32; + fn peer_role(&self, peer_id: &PeerId) -> Option; fn outgoing_candidates<'a>(&self, count: usize, ignored: HashSet<&'a PeerId>) -> Vec; } } diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index c1df48ad7858d512eef38df27c1bab4066cf7c2d..06db23844d0d9d07a33e5ffb4303cdf1f9179ec8 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -54,6 +54,7 @@ use crate::{ ReputationChange, }; +use codec::DecodeAll; use either::Either; use futures::{channel::oneshot, prelude::*}; #[allow(deprecated)] @@ -71,10 +72,13 @@ use libp2p::{ Multiaddr, PeerId, }; use log::{debug, error, info, trace, warn}; -use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; +use metrics::{Histogram, MetricSources, Metrics}; use parking_lot::Mutex; -use sc_network_common::ExHashT; +use 
sc_network_common::{ + role::{ObservedRole, Roles}, + ExHashT, +}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; @@ -116,14 +120,10 @@ pub struct NetworkService { local_identity: Keypair, /// Bandwidth logging system. Can be queried to know the average bandwidth consumed. bandwidth: Arc, + /// Used to query and report reputation changes. + peer_store_handle: PeerStoreHandle, /// Channel that sends messages to the actual worker. to_worker: TracingUnboundedSender, - /// For each peer and protocol combination, an object that allows sending notifications to - /// that peer. Updated by the [`NetworkWorker`]. - peers_notifications_sinks: Arc>>, - /// Field extracted from the [`Metrics`] struct and necessary to report the - /// notifications-related metrics. - notifications_sizes_metric: Option, /// Protocol name -> `SetId` mapping for notification protocols. The map never changes after /// initialization. notification_protocol_ids: HashMap, @@ -199,7 +199,7 @@ where )?; for notification_protocol in ¬ification_protocols { ensure_addresses_consistent_with_transport( - notification_protocol.set_config.reserved_nodes.iter().map(|x| &x.multiaddr), + notification_protocol.set_config().reserved_nodes.iter().map(|x| &x.multiaddr), &network_config.transport, )?; } @@ -241,7 +241,7 @@ where .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX)); let notifs_max = notification_protocols .iter() - .map(|cfg| usize::try_from(cfg.max_notification_size).unwrap_or(usize::MAX)); + .map(|cfg| usize::try_from(cfg.max_notification_size()).unwrap_or(usize::MAX)); // A "default" max is added to cover all the other protocols: ping, identify, // kademlia, block announces, and transactions. @@ -273,7 +273,7 @@ where // We must prepend a hardcoded default peer set to notification protocols. let all_peer_sets_iter = iter::once(&network_config.default_peers_set) - .chain(notification_protocols.iter().map(|protocol| &protocol.set_config)); + .chain(notification_protocols.iter().map(|protocol| protocol.set_config())); let (protocol_handles, protocol_controllers): (Vec<_>, Vec<_>) = all_peer_sets_iter .enumerate() @@ -312,21 +312,9 @@ where iter::once(¶ms.block_announce_config) .chain(notification_protocols.iter()) .enumerate() - .map(|(index, protocol)| { - (protocol.notifications_protocol.clone(), SetId::from(index)) - }) + .map(|(index, protocol)| (protocol.protocol_name().clone(), SetId::from(index))) .collect(); - let protocol = Protocol::new( - From::from(¶ms.role), - notification_protocols.clone(), - params.block_announce_config, - params.peer_store.clone(), - protocol_handles.clone(), - from_protocol_controllers, - params.tx, - )?; - let known_addresses = { // Collect all reserved nodes and bootnodes addresses. 
let mut addresses: Vec<_> = network_config @@ -336,7 +324,7 @@ where .map(|reserved| (reserved.peer_id, reserved.multiaddr.clone())) .chain(notification_protocols.iter().flat_map(|protocol| { protocol - .set_config + .set_config() .reserved_nodes .iter() .map(|reserved| (reserved.peer_id, reserved.multiaddr.clone())) @@ -389,6 +377,16 @@ where let num_connected = Arc::new(AtomicUsize::new(0)); let external_addresses = Arc::new(Mutex::new(HashSet::new())); + let (protocol, notif_protocol_handles) = Protocol::new( + From::from(¶ms.role), + ¶ms.metrics_registry, + notification_protocols, + params.block_announce_config, + params.peer_store.clone(), + protocol_handles.clone(), + from_protocol_controllers, + )?; + // Build the swarm. let (mut swarm, bandwidth): (Swarm>, _) = { let user_agent = @@ -508,7 +506,6 @@ where } let listen_addresses = Arc::new(Mutex::new(HashSet::new())); - let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new())); let service = Arc::new(NetworkService { bandwidth, @@ -518,13 +515,10 @@ where local_peer_id, local_identity, to_worker, - peers_notifications_sinks: peers_notifications_sinks.clone(), - notifications_sizes_metric: metrics - .as_ref() - .map(|metrics| metrics.notifications_sizes.clone()), notification_protocol_ids, protocol_handles, sync_protocol_handle, + peer_store_handle: params.peer_store.clone(), _marker: PhantomData, _block: Default::default(), }); @@ -539,8 +533,8 @@ where metrics, boot_node_ids, reported_invalid_boot_nodes: Default::default(), - peers_notifications_sinks, peer_store_handle: params.peer_store, + notif_protocol_handles, _marker: Default::default(), _block: Default::default(), }) @@ -567,7 +561,7 @@ where /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { - self.network_service.behaviour().user_protocol().num_connected_peers() + self.network_service.behaviour().user_protocol().num_sync_peers() } /// Adds an address for a node. 
@@ -871,12 +865,18 @@ where .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); } - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ReportPeer(who, cost_benefit)); + fn report_peer(&self, peer_id: PeerId, cost_benefit: ReputationChange) { + self.peer_store_handle.clone().report_peer(peer_id, cost_benefit); + } + + fn peer_reputation(&self, peer_id: &PeerId) -> i32 { + self.peer_store_handle.peer_reputation(peer_id) } - fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol)); + fn disconnect_peer(&self, peer_id: PeerId, protocol: ProtocolName) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::DisconnectPeer(peer_id, protocol)); } fn accept_unreserved_peers(&self) { @@ -991,6 +991,16 @@ where fn sync_num_connected(&self) -> usize { self.num_connected.load(Ordering::Relaxed) } + + fn peer_role(&self, peer_id: PeerId, handshake: Vec) -> Option { + match Roles::decode_all(&mut &handshake[..]) { + Ok(role) => Some(role.into()), + Err(_) => { + log::debug!(target: "sub-libp2p", "handshake doesn't contain peer role: {handshake:?}"); + self.peer_store_handle.peer_role(&peer_id) + }, + } + } } impl NetworkEventStream for NetworkService @@ -1010,68 +1020,20 @@ where B: BlockT + 'static, H: ExHashT, { - fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec) { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - // Notification silently discarded, as documented. - debug!( - target: "sub-libp2p", - "Attempted to send notification on missing or closed substream: {}, {:?}", - target, protocol, - ); - return - } - }; - - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { - notifications_sizes_metric - .with_label_values(&["out", &protocol]) - .observe(message.len() as f64); - } - - // Sending is communicated to the `NotificationsSink`. - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?}, {} bytes)", - target, protocol, message.len() - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); - sink.send_sync_notification(message); + fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec) { + unimplemented!(); } fn notification_sender( &self, - target: PeerId, - protocol: ProtocolName, + _target: PeerId, + _protocol: ProtocolName, ) -> Result, NotificationSenderError> { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. 
- let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - return Err(NotificationSenderError::Closed) - } - }; - - let notification_size_metric = self - .notifications_sizes_metric - .as_ref() - .map(|histogram| histogram.with_label_values(&["out", &protocol])); - - Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric })) + unimplemented!(); } - fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake)); + fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec) { + unimplemented!(); } } @@ -1193,7 +1155,6 @@ enum ServiceToWorkerMsg { GetValue(KademliaKey), PutValue(KademliaKey, Vec), AddKnownAddress(PeerId, Multiaddr), - ReportPeer(PeerId, ReputationChange), EventStream(out_events::Sender), Request { target: PeerId, @@ -1209,7 +1170,6 @@ enum ServiceToWorkerMsg { pending_response: oneshot::Sender>, }, DisconnectPeer(PeerId, ProtocolName), - SetNotificationHandshake(ProtocolName, Vec), } /// Main network worker. Must be polled in order for the network to advance. @@ -1239,11 +1199,10 @@ where boot_node_ids: Arc>>, /// Boot nodes that we already have reported as invalid. reported_invalid_boot_nodes: HashSet, - /// For each peer and protocol combination, an object that allows sending notifications to - /// that peer. Shared with the [`NetworkService`]. - peers_notifications_sinks: Arc>>, /// Peer reputation store handle. peer_store_handle: PeerStoreHandle, + /// Notification protocol handles. + notif_protocol_handles: Vec, /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, @@ -1282,8 +1241,7 @@ where }; // Update the `num_connected` count shared with the `NetworkService`. 
- let num_connected_peers = - self.network_service.behaviour_mut().user_protocol_mut().num_connected_peers(); + let num_connected_peers = self.network_service.behaviour().user_protocol().num_sync_peers(); self.num_connected.store(num_connected_peers, Ordering::Relaxed); if let Some(metrics) = self.metrics.as_ref() { @@ -1324,8 +1282,6 @@ where self.network_service.behaviour_mut().put_value(key, value), ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => self.network_service.behaviour_mut().add_known_address(peer_id, addr), - ServiceToWorkerMsg::ReportPeer(peer_id, reputation_change) => - self.peer_store_handle.report_peer(peer_id, reputation_change), ServiceToWorkerMsg::EventStream(sender) => self.event_streams.push(sender), ServiceToWorkerMsg::Request { target, @@ -1353,11 +1309,6 @@ where .behaviour_mut() .user_protocol_mut() .disconnect_peer(&who, protocol_name), - ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .set_notification_handshake(protocol, handshake), } } @@ -1472,47 +1423,27 @@ where }, SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, - protocol, + set_id, + direction, negotiated_fallback, notifications_sink, - role, received_handshake, }) => { - if let Some(metrics) = self.metrics.as_ref() { - metrics - .notifications_streams_opened_total - .with_label_values(&[&protocol]) - .inc(); - } - { - let mut peers_notifications_sinks = self.peers_notifications_sinks.lock(); - let _previous_value = peers_notifications_sinks - .insert((remote, protocol.clone()), notifications_sink); - debug_assert!(_previous_value.is_none()); - } - self.event_streams.send(Event::NotificationStreamOpened { + let _ = self.notif_protocol_handles[usize::from(set_id)].report_substream_opened( remote, - protocol, - negotiated_fallback, - role, + direction, received_handshake, - }); + negotiated_fallback, + notifications_sink, + ); }, SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { remote, - protocol, + set_id, notifications_sink, }) => { - let mut peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(s) = peers_notifications_sinks.get_mut(&(remote, protocol)) { - *s = notifications_sink; - } else { - error!( - target: "sub-libp2p", - "NotificationStreamReplaced for non-existing substream" - ); - debug_assert!(false); - } + let _ = self.notif_protocol_handles[usize::from(set_id)] + .report_notification_sink_replaced(remote, notifications_sink); // TODO: Notifications might have been lost as a result of the previous // connection being dropped, and as a result it would be preferable to notify @@ -1535,31 +1466,17 @@ where // role, // }); }, - SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, protocol }) => { - if let Some(metrics) = self.metrics.as_ref() { - metrics - .notifications_streams_closed_total - .with_label_values(&[&protocol[..]]) - .inc(); - } - self.event_streams - .send(Event::NotificationStreamClosed { remote, protocol: protocol.clone() }); - { - let mut peers_notifications_sinks = self.peers_notifications_sinks.lock(); - let _previous_value = peers_notifications_sinks.remove(&(remote, protocol)); - debug_assert!(_previous_value.is_some()); - } + SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, set_id }) => { + let _ = self.notif_protocol_handles[usize::from(set_id)] + .report_substream_closed(remote); }, - SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages }) => { - if 
let Some(metrics) = self.metrics.as_ref() { - for (protocol, message) in &messages { - metrics - .notifications_sizes - .with_label_values(&["in", protocol]) - .observe(message.len() as f64); - } - } - self.event_streams.send(Event::NotificationsReceived { remote, messages }); + SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { + remote, + set_id, + notification, + }) => { + let _ = self.notif_protocol_handles[usize::from(set_id)] + .report_notification_received(remote, notification); }, SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration)) => { if let Some(metrics) = self.metrics.as_ref() { diff --git a/substrate/client/network/src/service/metrics.rs b/substrate/client/network/src/service/metrics.rs index 13bc4b4e7aff8bc948da3976882d011a9f81d867..c349fd98c76b02e9e7a60142c242061f8fec72fe 100644 --- a/substrate/client/network/src/service/metrics.rs +++ b/substrate/client/network/src/service/metrics.rs @@ -61,9 +61,6 @@ pub struct Metrics { pub kbuckets_num_nodes: GaugeVec, pub listeners_local_addresses: Gauge, pub listeners_errors_total: Counter, - pub notifications_sizes: HistogramVec, - pub notifications_streams_closed_total: CounterVec, - pub notifications_streams_opened_total: CounterVec, pub peerset_num_discovered: Gauge, pub pending_connections: Gauge, pub pending_connections_errors_total: CounterVec, @@ -153,31 +150,6 @@ impl Metrics { "substrate_sub_libp2p_listeners_errors_total", "Total number of non-fatal errors reported by a listener" )?, registry)?, - notifications_sizes: prometheus::register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "substrate_sub_libp2p_notifications_sizes", - "Sizes of the notifications send to and received from all nodes" - ), - buckets: prometheus::exponential_buckets(64.0, 4.0, 8) - .expect("parameters are always valid values; qed"), - }, - &["direction", "protocol"] - )?, registry)?, - notifications_streams_closed_total: prometheus::register(CounterVec::new( - Opts::new( - "substrate_sub_libp2p_notifications_streams_closed_total", - "Total number of notification substreams that have been closed" - ), - &["protocol"] - )?, registry)?, - notifications_streams_opened_total: prometheus::register(CounterVec::new( - Opts::new( - "substrate_sub_libp2p_notifications_streams_opened_total", - "Total number of notification substreams that have been opened" - ), - &["protocol"] - )?, registry)?, peerset_num_discovered: prometheus::register(Gauge::new( "substrate_sub_libp2p_peerset_num_discovered", "Number of nodes stored in the peerset manager", diff --git a/substrate/client/network/src/service/signature.rs b/substrate/client/network/src/service/signature.rs index 024f60e4c466bda1c1df8ce47b4222b69da28093..5b2ba6be8cf8dec5516af04375816a860f9abf9a 100644 --- a/substrate/client/network/src/service/signature.rs +++ b/substrate/client/network/src/service/signature.rs @@ -18,6 +18,8 @@ // // If you read this, you are very thorough, congratulations. +//! Signature-related code + use libp2p::{ identity::{Keypair, PublicKey}, PeerId, diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs index bed325ede4a85552510eeb0f25edb837d50ea397..d4d4a05a86f1dad6cc25120f86d8d267eb49220a 100644 --- a/substrate/client/network/src/service/traits.rs +++ b/substrate/client/network/src/service/traits.rs @@ -18,8 +18,11 @@ // // If you read this, you are very thorough, congratulations. +//! Traits defined by `sc-network`. 
+ use crate::{ config::MultiaddrWithPeerId, + error, event::Event, request_responses::{IfDisconnected, RequestFailure}, service::signature::Signature, @@ -30,7 +33,9 @@ use crate::{ use futures::{channel::oneshot, Stream}; use libp2p::{Multiaddr, PeerId}; -use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc}; +use sc_network_common::role::ObservedRole; + +use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc}; pub use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; @@ -150,12 +155,15 @@ pub trait NetworkPeers { /// Report a given peer as either beneficial (+) or costly (-) according to the /// given scalar. - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange); + fn report_peer(&self, peer_id: PeerId, cost_benefit: ReputationChange); + + /// Get peer reputation. + fn peer_reputation(&self, peer_id: &PeerId) -> i32; /// Disconnect from a node as soon as possible. /// /// This triggers the same effects as if the connection had closed itself spontaneously. - fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); + fn disconnect_peer(&self, peer_id: PeerId, protocol: ProtocolName); /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. fn accept_unreserved_peers(&self); @@ -221,6 +229,14 @@ pub trait NetworkPeers { /// Returns the number of peers in the sync peer set we're connected to. fn sync_num_connected(&self) -> usize; + + /// Attempt to get peer role. + /// + /// Right now the peer role is decoded from the received handshake for all protocols + /// (`/block-announces/1` has other information as well). If the handshake cannot be + /// decoded into a role, the role queried from `PeerStore` and if the role is not stored + /// there either, `None` is returned and the peer should be discarded. + fn peer_role(&self, peer_id: PeerId, handshake: Vec) -> Option; } // Manual implementation to avoid extra boxing here @@ -241,16 +257,16 @@ where T::add_known_address(self, peer_id, addr) } - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - // TODO: when we get rid of `Peerset`, we'll likely need to add some kind of async - // interface to `PeerStore`, otherwise we'll have trouble calling functions accepting - // `&mut self` via `Arc`. - // See https://github.com/paritytech/substrate/issues/14170. - T::report_peer(self, who, cost_benefit) + fn report_peer(&self, peer_id: PeerId, cost_benefit: ReputationChange) { + T::report_peer(self, peer_id, cost_benefit) } - fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - T::disconnect_peer(self, who, protocol) + fn peer_reputation(&self, peer_id: &PeerId) -> i32 { + T::peer_reputation(self, peer_id) + } + + fn disconnect_peer(&self, peer_id: PeerId, protocol: ProtocolName) { + T::disconnect_peer(self, peer_id, protocol) } fn accept_unreserved_peers(&self) { @@ -296,6 +312,10 @@ where fn sync_num_connected(&self) -> usize { T::sync_num_connected(self) } + + fn peer_role(&self, peer_id: PeerId, handshake: Vec) -> Option { + T::peer_role(self, peer_id, handshake) + } } /// Provides access to network-level event stream. @@ -611,3 +631,189 @@ where T::new_best_block_imported(self, hash, number) } } + +/// Substream acceptance result. +#[derive(Debug, PartialEq, Eq)] +pub enum ValidationResult { + /// Accept inbound substream. + Accept, + + /// Reject inbound substream. + Reject, +} + +/// Substream direction. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum Direction { + /// Substream opened by the remote node. + Inbound, + + /// Substream opened by the local node. + Outbound, +} + +impl Direction { + /// Is the direction inbound. + pub fn is_inbound(&self) -> bool { + std::matches!(self, Direction::Inbound) + } +} + +/// Events received by the protocol from `Notifications`. +#[derive(Debug)] +pub enum NotificationEvent { + /// Validate inbound substream. + ValidateInboundSubstream { + /// Peer ID. + peer: PeerId, + + /// Received handshake. + handshake: Vec, + + /// `oneshot::Sender` for sending validation result back to `Notifications` + result_tx: tokio::sync::oneshot::Sender, + }, + + /// Remote identified by `PeerId` opened a substream and sent `Handshake`. + /// Validate `Handshake` and report status (accept/reject) to `Notifications`. + NotificationStreamOpened { + /// Peer ID. + peer: PeerId, + + /// Is the substream inbound or outbound. + direction: Direction, + + /// Received handshake. + handshake: Vec, + + /// Negotiated fallback. + negotiated_fallback: Option, + }, + + /// Substream was closed. + NotificationStreamClosed { + /// Peer Id. + peer: PeerId, + }, + + /// Notification was received from the substream. + NotificationReceived { + /// Peer ID. + peer: PeerId, + + /// Received notification. + notification: Vec, + }, +} + +/// Notification service +/// +/// Defines behaviors that both the protocol implementations and `Notifications` can expect from +/// each other. +/// +/// `Notifications` can send two different kinds of information to protocol: +/// * substream-related information +/// * notification-related information +/// +/// When an unvalidated, inbound substream is received by `Notifications`, it sends the inbound +/// stream information (peer ID, handshake) to protocol for validation. Protocol must then verify +/// that the handshake is valid (and in the future that it has a slot it can allocate for the peer) +/// and then report back the `ValidationResult` which is either `Accept` or `Reject`. +/// +/// After the validation result has been received by `Notifications`, it prepares the +/// substream for communication by initializing the necessary sinks and emits +/// `NotificationStreamOpened` which informs the protocol that the remote peer is ready to receive +/// notifications. +/// +/// Two different flavors of sending options are provided: +/// * synchronous sending ([`NotificationService::send_sync_notification()`]) +/// * asynchronous sending ([`NotificationService::send_async_notification()`]) +/// +/// The former is used by the protocols not ready to exercise backpressure and the latter by the +/// protocols that can do it. +/// +/// Both local and remote peer can close the substream at any time. Local peer can do so by calling +/// [`NotificationService::close_substream()`] which instructs `Notifications` to close the +/// substream. Remote closing the substream is indicated to the local peer by receiving +/// [`NotificationEvent::NotificationStreamClosed`] event. +/// +/// In case the protocol must update its handshake while it's operating (such as updating the best +/// block information), it can do so by calling [`NotificationService::set_handshake()`] +/// which instructs `Notifications` to update the handshake it stored during protocol +/// initialization. 
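As an illustration of the flow described above, a protocol task might consume the event stream and answer validation requests roughly as follows. This is a hedged sketch, not code from this change: the loop, the accept-all policy and the logging are hypothetical, while `NotificationEvent`, `ValidationResult` and the trait methods are the ones added in this file.

    use sc_network::service::traits::{NotificationEvent, NotificationService, ValidationResult};

    /// Hypothetical protocol task driving a `NotificationService` (sketch only).
    async fn run_protocol(mut service: Box<dyn NotificationService>) {
        while let Some(event) = service.next_event().await {
            match event {
                NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
                    // A real protocol would inspect `peer` and `handshake` here;
                    // this sketch accepts every inbound substream.
                    let _ = result_tx.send(ValidationResult::Accept);
                },
                NotificationEvent::NotificationStreamOpened { peer, .. } => {
                    // The substream is ready: send a greeting without backpressure.
                    service.send_sync_notification(&peer, b"hello".to_vec());
                },
                NotificationEvent::NotificationReceived { peer, notification } => {
                    log::trace!("received {} bytes from {peer}", notification.len());
                },
                NotificationEvent::NotificationStreamClosed { peer } => {
                    log::trace!("substream to {peer} closed");
                },
            }
        }
    }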
+/// +/// All peer events are multiplexed on the same incoming event stream from `Notifications` and thus +/// each event carries a `PeerId` so the protocol knows whose information to update when receiving +/// an event. +#[async_trait::async_trait] +pub trait NotificationService: Debug + Send { + /// Instruct `Notifications` to open a new substream for `peer`. + /// + /// `dial_if_disconnected` informs `Notifications` whether to dial + // the peer if there is currently no active connection to it. + // + // NOTE: not offered by the current implementation + async fn open_substream(&mut self, peer: PeerId) -> Result<(), ()>; + + /// Instruct `Notifications` to close substream for `peer`. + // + // NOTE: not offered by the current implementation + async fn close_substream(&mut self, peer: PeerId) -> Result<(), ()>; + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, peer: &PeerId, notification: Vec); + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + /// + /// Returns an error if the peer doesn't exist. + async fn send_async_notification( + &self, + peer: &PeerId, + notification: Vec, + ) -> Result<(), error::Error>; + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, handshake: Vec) -> Result<(), ()>; + + /// Non-blocking variant of `set_handshake()` that attempts to update the handshake + /// and returns an error if the channel is blocked. + /// + /// Technically the function can return an error if the channel to `Notifications` is closed + /// but that doesn't happen under normal operation. + fn try_set_handshake(&mut self, handshake: Vec) -> Result<(), ()>; + + /// Get next event from the `Notifications` event stream. + async fn next_event(&mut self) -> Option; + + /// Make a copy of the object so it can be shared between protocol components + /// who wish to have access to the same underlying notification protocol. + fn clone(&mut self) -> Result, ()>; + + /// Get protocol name of the `NotificationService`. + fn protocol(&self) -> &ProtocolName; + + /// Get message sink of the peer. + fn message_sink(&self, peer: &PeerId) -> Option>; +} + +/// Message sink for peers. +/// +/// If protocol cannot use [`NotificationService`] to send notifications to peers and requires, +/// e.g., notifications to be sent in another task, the protocol may acquire a [`MessageSink`] +/// object for each peer by calling [`NotificationService::message_sink()`]. Calling this +/// function returns an object which allows the protocol to send notifications to the remote peer. +/// +/// Use of this API is discouraged as it's not as performant as sending notifications through +/// [`NotificationService`] due to synchronization required to keep the underlying notification +/// sink up to date with possible sink replacement events. +#[async_trait::async_trait] +pub trait MessageSink: Send + Sync { + /// Send synchronous `notification` to the peer associated with this [`MessageSink`]. + fn send_sync_notification(&self, notification: Vec); + + /// Send an asynchronous `notification` to to the peer associated with this [`MessageSink`], + /// allowing sender to exercise backpressure. + /// + /// Returns an error if the peer does not exist. 
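In the same illustrative spirit, the pattern the `MessageSink` documentation describes could look roughly like this: acquire a per-peer sink and let a separate task keep sending. The helper name, the payload and the use of `tokio::spawn` are assumptions, not part of this patch.

    use libp2p::PeerId;
    use sc_network::service::traits::NotificationService;

    /// Hypothetical helper handing a peer's message sink to another task (sketch only).
    fn spawn_sender(service: &dyn NotificationService, peer: PeerId) {
        if let Some(sink) = service.message_sink(&peer) {
            tokio::spawn(async move {
                // The asynchronous variant exercises backpressure; an error means the
                // substream to the peer no longer exists.
                if sink.send_async_notification(b"ping".to_vec()).await.is_err() {
                    log::debug!("substream to {peer} closed before the notification was sent");
                }
            });
        }
    }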
+ async fn send_async_notification(&self, notification: Vec) -> Result<(), error::Error>; +} diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index ef974b4f33f1931b632f8760cd79f975b3970941..d3ce2a63ef14ddca0be86867dabcbd5bf541ec05 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true documentation = "https://docs.rs/sc-network-statement" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,7 +20,7 @@ array-bytes = "6.1" async-channel = "1.8.0" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" -libp2p = "0.51.3" +libp2p = "0.51.4" log = "0.4.17" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-network-common = { path = "../common" } diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs index 69d4faa13ef287ac41427cd87e2c1e846e728bab..5187e681d83c48b09bea2476f8cfe8c8b6806a3c 100644 --- a/substrate/client/network/statement/src/lib.rs +++ b/substrate/client/network/statement/src/lib.rs @@ -21,12 +21,13 @@ //! Usage: //! //! - Use [`StatementHandlerPrototype::new`] to create a prototype. -//! - Pass the return value of [`StatementHandlerPrototype::set_config`] to the network -//! configuration as an extra peers set. +//! - Pass the `NonDefaultSetConfig` returned from [`StatementHandlerPrototype::new`] to the network +//! configuration as an extra peers set. //! - Use [`StatementHandlerPrototype::build`] then [`StatementHandler::run`] to obtain a //! `Future` that processes statements. use crate::config::*; + use codec::{Decode, Encode}; use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt}; use libp2p::{multiaddr, PeerId}; @@ -34,7 +35,7 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig}, error, - event::Event, + service::traits::{NotificationEvent, NotificationService, ValidationResult}, types::ProtocolName, utils::{interval, LruHashSet}, NetworkEventStream, NetworkNotification, NetworkPeers, @@ -101,35 +102,35 @@ impl Metrics { /// Prototype for a [`StatementHandler`]. pub struct StatementHandlerPrototype { protocol_name: ProtocolName, + notification_service: Box, } impl StatementHandlerPrototype { /// Create a new instance. - pub fn new>(genesis_hash: Hash, fork_id: Option<&str>) -> Self { + pub fn new>( + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> (Self, NonDefaultSetConfig) { let genesis_hash = genesis_hash.as_ref(); let protocol_name = if let Some(fork_id) = fork_id { format!("/{}/{}/statement/1", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { format!("/{}/statement/1", array_bytes::bytes2hex("", genesis_hash)) }; - - Self { protocol_name: protocol_name.into() } - } - - /// Returns the configuration of the set to put in the network configuration. 
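To make the API change documented at the top of this file concrete, a minimal sketch of the new caller-side wiring, assuming the caller holds a `FullNetworkConfiguration` and registers extra peer sets through its `add_notification_protocol` method; the helper itself is hypothetical.

    use sc_network::config::FullNetworkConfiguration;
    use sc_network_statement::StatementHandlerPrototype;

    /// Hypothetical wiring helper (sketch only).
    fn register_statement_protocol(
        genesis_hash: impl AsRef<[u8]>,
        fork_id: Option<&str>,
        net_config: &mut FullNetworkConfiguration,
    ) -> StatementHandlerPrototype {
        // `new` now returns the peers-set config together with the prototype ...
        let (proto, statement_config) = StatementHandlerPrototype::new(genesis_hash, fork_id);
        // ... and the caller registers it directly, replacing the removed `set_config()`.
        net_config.add_notification_protocol(statement_config);
        proto
    }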
- pub fn set_config(&self) -> NonDefaultSetConfig { - NonDefaultSetConfig { - notifications_protocol: self.protocol_name.clone(), - fallback_names: Vec::new(), - max_notification_size: MAX_STATEMENT_SIZE, - handshake: None, - set_config: SetConfig { + let (config, notification_service) = NonDefaultSetConfig::new( + protocol_name.clone().into(), + Vec::new(), + MAX_STATEMENT_SIZE, + None, + SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Deny, }, - } + ); + + (Self { protocol_name: protocol_name.into(), notification_service }, config) } /// Turns the prototype into the actual handler. @@ -147,7 +148,6 @@ impl StatementHandlerPrototype { metrics_registry: Option<&Registry>, executor: impl Fn(Pin + Send>>) + Send, ) -> error::Result> { - let net_event_stream = network.event_stream("statement-handler-net"); let sync_event_stream = sync.event_stream("statement-handler-sync"); let (queue_sender, mut queue_receiver) = async_channel::bounded(100_000); @@ -176,6 +176,7 @@ impl StatementHandlerPrototype { let handler = StatementHandler { protocol_name: self.protocol_name, + notification_service: self.notification_service, propagate_timeout: (Box::pin(interval(PROPAGATE_TIMEOUT)) as Pin + Send>>) .fuse(), @@ -183,7 +184,6 @@ impl StatementHandlerPrototype { pending_statements_peers: HashMap::new(), network, sync, - net_event_stream: net_event_stream.fuse(), sync_event_stream: sync_event_stream.fuse(), peers: HashMap::new(), statement_store, @@ -219,10 +219,10 @@ pub struct StatementHandler< network: N, /// Syncing service. sync: S, - /// Stream of networking events. - net_event_stream: stream::Fuse + Send>>>, /// Receiver for syncing-related events. sync_event_stream: stream::Fuse + Send>>>, + /// Notification service. + notification_service: Box, // All connected peers peers: HashMap, statement_store: Arc, @@ -261,14 +261,6 @@ where log::warn!(target: LOG_TARGET, "Inconsistent state, no peers for pending statement!"); } }, - network_event = self.net_event_stream.next() => { - if let Some(network_event) = network_event { - self.handle_network_event(network_event).await; - } else { - // Networking has seemingly closed. Closing as well. - return; - } - }, sync_event = self.sync_event_stream.next() => { if let Some(sync_event) = sync_event { self.handle_sync_event(sync_event); @@ -277,6 +269,14 @@ where return; } } + event = self.notification_service.next_event().fuse() => { + if let Some(event) = event { + self.handle_notification_event(event) + } else { + // `Notifications` has seemingly closed. Closing as well. + return + } + } } } } @@ -306,14 +306,24 @@ where } } - async fn handle_network_event(&mut self, event: Event) { + fn handle_notification_event(&mut self, event: NotificationEvent) { match event { - Event::Dht(_) => {}, - Event::NotificationStreamOpened { remote, protocol, role, .. } - if protocol == self.protocol_name => - { + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => { + // only accept peers whose role can be determined + let result = self + .network + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); + }, + NotificationEvent::NotificationStreamOpened { peer, handshake, .. 
} => { + let Some(role) = self.network.peer_role(peer, handshake) else { + log::debug!(target: LOG_TARGET, "role for {peer} couldn't be determined"); + return + }; + let _was_in = self.peers.insert( - remote, + peer, Peer { known_statements: LruHashSet::new( NonZeroUsize::new(MAX_KNOWN_STATEMENTS).expect("Constant is nonzero"), @@ -323,39 +333,26 @@ where ); debug_assert!(_was_in.is_none()); }, - Event::NotificationStreamClosed { remote, protocol } - if protocol == self.protocol_name => - { - let _peer = self.peers.remove(&remote); + NotificationEvent::NotificationStreamClosed { peer } => { + let _peer = self.peers.remove(&peer); debug_assert!(_peer.is_some()); }, + NotificationEvent::NotificationReceived { peer, notification } => { + // Accept statements only when node is not major syncing + if self.sync.is_major_syncing() { + log::trace!( + target: LOG_TARGET, + "{peer}: Ignoring statements while major syncing or offline" + ); + return + } - Event::NotificationsReceived { remote, messages } => { - for (protocol, message) in messages { - if protocol != self.protocol_name { - continue - } - // Accept statements only when node is not major syncing - if self.sync.is_major_syncing() { - log::trace!( - target: LOG_TARGET, - "{remote}: Ignoring statements while major syncing or offline" - ); - continue - } - if let Ok(statements) = ::decode(&mut message.as_ref()) { - self.on_statements(remote, statements); - } else { - log::debug!( - target: LOG_TARGET, - "Failed to decode statement list from {remote}" - ); - } + if let Ok(statements) = ::decode(&mut notification.as_ref()) { + self.on_statements(peer, statements); + } else { + log::debug!(target: LOG_TARGET, "Failed to decode statement list from {peer}"); } }, - - // Not our concern. - Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. 
} => {}, } } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index a1ea39a852fc18e087234cdffa4dcad7af663eb4..cb19e1adbe53a3b4ef9ec0b1d380d99fc6add835 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true documentation = "https://docs.rs/sc-network-sync" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,11 +21,11 @@ prost-build = "0.11" [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1.58" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" -libp2p = "0.51.3" +libp2p = "0.51.4" log = "0.4.17" mockall = "0.11.3" prost = "0.11" @@ -30,7 +33,7 @@ schnellru = "0.2.1" smallvec = "1.11.0" thiserror = "1.0" tokio-stream = "0.1.14" -tokio = { version = "1.32.0", features = ["time", "macros"] } +tokio = { version = "1.32.0", features = ["macros", "time"] } fork-tree = { path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } diff --git a/substrate/client/network/sync/src/chain_sync.rs b/substrate/client/network/sync/src/chain_sync.rs index 858125f93f1fd64834c87401a4063652359f69b8..3825cfa33f73bd4a77f51521d048ae4d512e621e 100644 --- a/substrate/client/network/sync/src/chain_sync.rs +++ b/substrate/client/network/sync/src/chain_sync.rs @@ -184,90 +184,30 @@ struct GapSync { target: NumberFor, } -/// Action that the parent of [`ChainSync`] should perform after reporting imported blocks with -/// [`ChainSync::on_blocks_processed`]. -pub enum BlockRequestAction { +/// Action that the parent of [`ChainSync`] should perform after reporting a network or block event. +#[derive(Debug)] +pub enum ChainSyncAction { /// Send block request to peer. Always implies dropping a stale block request to the same peer. - SendRequest { peer_id: PeerId, request: BlockRequest }, + SendBlockRequest { peer_id: PeerId, request: BlockRequest }, /// Drop stale block request. - RemoveStale { peer_id: PeerId }, -} - -/// Action that the parent of [`ChainSync`] should perform if we want to import blocks. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ImportBlocksAction { - pub origin: BlockOrigin, - pub blocks: Vec>, -} - -/// Action that the parent of [`ChainSync`] should perform if we want to import justifications. -pub struct ImportJustificationsAction { - pub peer_id: PeerId, - pub hash: B::Hash, - pub number: NumberFor, - pub justifications: Justifications, -} - -/// Result of [`ChainSync::on_block_data`]. -#[derive(Debug, Clone, PartialEq, Eq)] -enum OnBlockData { - /// The block should be imported. - Import(ImportBlocksAction), - /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest), - /// Continue processing events. - Continue, -} - -/// Result of [`ChainSync::on_block_justification`]. -#[derive(Debug, Clone, PartialEq, Eq)] -enum OnBlockJustification { - /// The justification needs no further handling. - Nothing, - /// The justification should be imported. - Import { + CancelBlockRequest { peer_id: PeerId }, + /// Send state request to peer. + SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest }, + /// Send warp proof request to peer. 
+ SendWarpProofRequest { peer_id: PeerId, request: WarpProofRequest }, + /// Peer misbehaved. Disconnect, report it and cancel the block request to it. + DropPeer(BadPeer), + /// Import blocks. + ImportBlocks { origin: BlockOrigin, blocks: Vec> }, + /// Import justifications. + ImportJustifications { peer_id: PeerId, - hash: Block::Hash, - number: NumberFor, + hash: B::Hash, + number: NumberFor, justifications: Justifications, }, } -// Result of [`ChainSync::on_state_data`]. -#[derive(Debug)] -enum OnStateData { - /// The block and state that should be imported. - Import(BlockOrigin, IncomingBlock), - /// A new state request needs to be made to the given peer. - Continue, -} - -/// Action that the parent of [`ChainSync`] should perform after reporting block response with -/// [`ChainSync::on_block_response`]. -pub enum OnBlockResponse { - /// Nothing to do. - Nothing, - /// Perform block request. - SendBlockRequest { peer_id: PeerId, request: BlockRequest }, - /// Import blocks. - ImportBlocks(ImportBlocksAction), - /// Import justifications. - ImportJustifications(ImportJustificationsAction), - /// Invalid block response, the peer should be disconnected and reported. - DisconnectPeer(BadPeer), -} - -/// Action that the parent of [`ChainSync`] should perform after reporting state response with -/// [`ChainSync::on_state_response`]. -pub enum OnStateResponse { - /// Nothing to do. - Nothing, - /// Import blocks. - ImportBlocks(ImportBlocksAction), - /// Invalid state response, the peer should be disconnected and reported. - DisconnectPeer(BadPeer), -} - /// The main data structure which contains all the state for a chains /// active syncing strategy. pub struct ChainSync { @@ -313,6 +253,8 @@ pub struct ChainSync { import_existing: bool, /// Gap download process. gap_sync: Option>, + /// Pending actions. + actions: Vec>, } /// All the data we have about a Peer that we are trying to sync with @@ -427,6 +369,7 @@ where gap_sync: None, warp_sync_config, warp_sync_target_block_header: None, + actions: Vec::new(), }; sync.reset_sync_start_point()?; @@ -509,8 +452,17 @@ where } /// Notify syncing state machine that a new sync peer has connected. + pub fn new_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { + match self.new_peer_inner(peer_id, best_hash, best_number) { + Ok(Some(request)) => + self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }), + Ok(None) => {}, + Err(bad_peer) => self.actions.push(ChainSyncAction::DropPeer(bad_peer)), + } + } + #[must_use] - pub fn new_peer( + fn new_peer_inner( &mut self, peer_id: PeerId, best_hash: B::Hash, @@ -727,7 +679,7 @@ where peer_id: &PeerId, request: Option>, response: BlockResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { self.downloaded_blocks += response.blocks.len(); let mut gap = false; let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(peer_id) { @@ -892,10 +844,12 @@ where start: *start, state: next_state, }; - return Ok(OnBlockData::Request( - *peer_id, - ancestry_request::(next_num), - )) + let request = ancestry_request::(next_num); + self.actions.push(ChainSyncAction::SendBlockRequest { + peer_id: *peer_id, + request, + }); + return Ok(()) } else { // Ancestry search is complete. Check if peer is on a stale fork unknown // to us and add it to sync targets if necessary. 
@@ -929,7 +883,7 @@ where .insert(*peer_id); } peer.state = PeerSyncState::Available; - Vec::new() + return Ok(()) } }, PeerSyncState::DownloadingWarpTargetBlock => { @@ -940,8 +894,7 @@ where match warp_sync.import_target_block( blocks.pop().expect("`blocks` len checked above."), ) { - warp::TargetBlockImportResult::Success => - return Ok(OnBlockData::Continue), + warp::TargetBlockImportResult::Success => return Ok(()), warp::TargetBlockImportResult::BadResponse => return Err(BadPeer(*peer_id, rep::VERIFICATION_FAIL)), } @@ -963,7 +916,7 @@ where "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", peer_id, ); - return Ok(OnBlockData::Continue) + return Ok(()) } }, PeerSyncState::Available | @@ -1000,7 +953,9 @@ where return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) }; - Ok(OnBlockData::Import(self.validate_and_queue_blocks(new_blocks, gap))) + self.validate_and_queue_blocks(new_blocks, gap); + + Ok(()) } /// Submit a justification response for processing. @@ -1009,7 +964,7 @@ where &mut self, peer_id: PeerId, response: BlockResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { peer } else { @@ -1017,7 +972,7 @@ where target: LOG_TARGET, "💔 Called on_block_justification with a peer ID of an unknown peer", ); - return Ok(OnBlockJustification::Nothing) + return Ok(()) }; self.allowed_requests.add(&peer_id); @@ -1054,11 +1009,17 @@ where if let Some((peer_id, hash, number, justifications)) = self.extra_justifications.on_response(peer_id, justification) { - return Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) + self.actions.push(ChainSyncAction::ImportJustifications { + peer_id, + hash, + number, + justifications, + }); + return Ok(()) } } - Ok(OnBlockJustification::Nothing) + Ok(()) } /// Report a justification import (successful or not). @@ -1196,8 +1157,7 @@ where } /// Notify that a sync peer has disconnected. - #[must_use] - pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Option> { + pub fn peer_disconnected(&mut self, peer_id: &PeerId) { self.blocks.clear_peer_download(peer_id); if let Some(gap_sync) = &mut self.gap_sync { gap_sync.blocks.clear_peer_download(peer_id) @@ -1212,7 +1172,9 @@ where let blocks = self.ready_blocks(); - (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + if !blocks.is_empty() { + self.validate_and_queue_blocks(blocks, false); + } } /// Get prometheus metrics. @@ -1259,11 +1221,7 @@ where } } - fn validate_and_queue_blocks( - &mut self, - mut new_blocks: Vec>, - gap: bool, - ) -> ImportBlocksAction { + fn validate_and_queue_blocks(&mut self, mut new_blocks: Vec>, gap: bool) { let orig_len = new_blocks.len(); new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); if new_blocks.len() != orig_len { @@ -1295,7 +1253,7 @@ where } self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - ImportBlocksAction { origin, blocks: new_blocks } + self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: new_blocks }) } fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { @@ -1346,7 +1304,7 @@ where /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. 
- fn restart(&mut self) -> impl Iterator, BadPeer>> + '_ { + fn restart(&mut self) { self.blocks.clear(); if let Err(e) = self.reset_sync_start_point() { warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}"); @@ -1360,7 +1318,7 @@ where ); let old_peers = std::mem::take(&mut self.peers); - old_peers.into_iter().filter_map(move |(peer_id, mut p)| { + old_peers.into_iter().for_each(|(peer_id, mut p)| { // peers that were downloading justifications // should be kept in that state. if let PeerSyncState::DownloadingJustification(_) = p.state { @@ -1374,19 +1332,21 @@ where ); p.common_number = self.best_queued_number; self.peers.insert(peer_id, p); - return None + return } // handle peers that were in other states. - match self.new_peer(peer_id, p.best_hash, p.best_number) { + let action = match self.new_peer_inner(peer_id, p.best_hash, p.best_number) { // since the request is not a justification, remove it from pending responses - Ok(None) => Some(Ok(BlockRequestAction::RemoveStale { peer_id })), + Ok(None) => ChainSyncAction::CancelBlockRequest { peer_id }, // update the request if the new one is available - Ok(Some(request)) => Some(Ok(BlockRequestAction::SendRequest { peer_id, request })), + Ok(Some(request)) => ChainSyncAction::SendBlockRequest { peer_id, request }, // this implies that we need to drop pending response from the peer - Err(e) => Some(Err(e)), - } - }) + Err(bad_peer) => ChainSyncAction::DropPeer(bad_peer), + }; + + self.actions.push(action); + }); } /// Find a block to start sync from. If we sync with state, that's the latest block we have @@ -1464,11 +1424,6 @@ where .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } - /// Check if the peer is known to the sync state machine. Used for sanity checks. - pub fn is_peer_known(&self, peer_id: &PeerId) -> bool { - self.peers.contains_key(peer_id) - } - /// Get the set of downloaded blocks that are ready to be queued for import. fn ready_blocks(&mut self) -> Vec> { self.blocks @@ -1534,13 +1489,12 @@ where } /// Submit blocks received in a response. - #[must_use] pub fn on_block_response( &mut self, peer_id: PeerId, request: BlockRequest, blocks: Vec>, - ) -> OnBlockResponse { + ) { let block_response = BlockResponse:: { id: request.id, blocks }; let blocks_range = || match ( @@ -1563,46 +1517,26 @@ where blocks_range(), ); - if request.fields == BlockAttributes::JUSTIFICATION { - match self.on_block_justification(peer_id, block_response) { - Ok(OnBlockJustification::Nothing) => OnBlockResponse::Nothing, - Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) => - OnBlockResponse::ImportJustifications(ImportJustificationsAction { - peer_id, - hash, - number, - justifications, - }), - Err(bad_peer) => OnBlockResponse::DisconnectPeer(bad_peer), - } + let res = if request.fields == BlockAttributes::JUSTIFICATION { + self.on_block_justification(peer_id, block_response) } else { - match self.on_block_data(&peer_id, Some(request), block_response) { - Ok(OnBlockData::Import(action)) => OnBlockResponse::ImportBlocks(action), - Ok(OnBlockData::Request(peer_id, request)) => - OnBlockResponse::SendBlockRequest { peer_id, request }, - Ok(OnBlockData::Continue) => OnBlockResponse::Nothing, - Err(bad_peer) => OnBlockResponse::DisconnectPeer(bad_peer), - } + self.on_block_data(&peer_id, Some(request), block_response) + }; + + if let Err(bad_peer) = res { + self.actions.push(ChainSyncAction::DropPeer(bad_peer)); } } /// Submit a state received in a response. 
- #[must_use] - pub fn on_state_response( - &mut self, - peer_id: PeerId, - response: OpaqueStateResponse, - ) -> OnStateResponse { - match self.on_state_data(&peer_id, response) { - Ok(OnStateData::Import(origin, block)) => - OnStateResponse::ImportBlocks(ImportBlocksAction { origin, blocks: vec![block] }), - Ok(OnStateData::Continue) => OnStateResponse::Nothing, - Err(bad_peer) => OnStateResponse::DisconnectPeer(bad_peer), + pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) { + if let Err(bad_peer) = self.on_state_data(&peer_id, response) { + self.actions.push(ChainSyncAction::DropPeer(bad_peer)); } } /// Get justification requests scheduled by sync to be sent out. - pub fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); std::iter::from_fn(move || { @@ -1629,7 +1563,7 @@ where } /// Get block requests scheduled by sync to be sent out. - pub fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { if self.mode == SyncMode::Warp { return self .warp_target_block_request() @@ -1756,7 +1690,7 @@ where } /// Get a state request scheduled by sync to be sent out (if any). - pub fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { + fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { if self.allowed_requests.is_empty() { return None } @@ -1802,7 +1736,7 @@ where } /// Get a warp proof request scheduled by sync to be sent out (if any). - pub fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { + fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { if let Some(sync) = &self.warp_sync { if self.allowed_requests.is_empty() || sync.is_complete() || @@ -1833,11 +1767,12 @@ where None } + #[must_use] fn on_state_data( &mut self, peer_id: &PeerId, response: OpaqueStateResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { let response: Box = response.0.downcast().map_err(|_error| { error!( target: LOG_TARGET, @@ -1892,9 +1827,10 @@ where state: Some(state), }; debug!(target: LOG_TARGET, "State download is complete. Import is queued"); - Ok(OnStateData::Import(origin, block)) + self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: vec![block] }); + Ok(()) }, - ImportResult::Continue => Ok(OnStateData::Continue), + ImportResult::Continue => Ok(()), ImportResult::BadResponse => { debug!(target: LOG_TARGET, "Bad state data received from {peer_id}"); Err(BadPeer(*peer_id, rep::BAD_BLOCK)) @@ -1903,12 +1839,7 @@ where } /// Submit a warp proof response received. 
- #[must_use] - pub fn on_warp_sync_response( - &mut self, - peer_id: &PeerId, - response: EncodedProof, - ) -> Result<(), BadPeer> { + pub fn on_warp_sync_response(&mut self, peer_id: &PeerId, response: EncodedProof) { if let Some(peer) = self.peers.get_mut(peer_id) { if let PeerSyncState::DownloadingWarpProof = peer.state { peer.state = PeerSyncState::Available; @@ -1925,14 +1856,16 @@ where sync.import_warp_proof(response) } else { debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::NOT_REQUESTED))); + return }; match import_result { - WarpProofImportResult::Success => Ok(()), + WarpProofImportResult::Success => {}, WarpProofImportResult::BadResponse => { debug!(target: LOG_TARGET, "Bad proof data received from {peer_id}"); - Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + self.actions.push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::BAD_BLOCK))); }, } } @@ -1942,17 +1875,14 @@ where /// Call this when a batch of blocks have been processed by the import /// queue, with or without errors. If an error is returned, the pending response /// from the peer must be dropped. - #[must_use] pub fn on_blocks_processed( &mut self, imported: usize, count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> Box, BadPeer>>> { + ) { trace!(target: LOG_TARGET, "Imported {imported} of {count}"); - let mut output = Vec::new(); - let mut has_error = false; for (_, hash) in &results { self.queue_blocks.remove(hash); @@ -1993,7 +1923,10 @@ where if aux.bad_justification { if let Some(ref peer) = peer_id { warn!("💔 Sent block with bad justification to import"); - output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); + self.actions.push(ChainSyncAction::DropPeer(BadPeer( + *peer, + rep::BAD_JUSTIFICATION, + ))); } } @@ -2010,7 +1943,7 @@ where ); self.state_sync = None; self.mode = SyncMode::Full; - output.extend(self.restart()); + self.restart(); } let warp_sync_complete = self .warp_sync @@ -2024,7 +1957,7 @@ where ); self.warp_sync = None; self.mode = SyncMode::Full; - output.extend(self.restart()); + self.restart(); } let gap_sync_complete = self.gap_sync.as_ref().map_or(false, |s| s.target == number); @@ -2042,8 +1975,9 @@ where target: LOG_TARGET, "💔 Peer sent block with incomplete header to import", ); - output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); - output.extend(self.restart()); + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::INCOMPLETE_HEADER))); + self.restart(); }, Err(BlockImportError::VerificationFailed(peer_id, e)) => { let extra_message = peer_id @@ -2055,10 +1989,11 @@ where ); if let Some(peer) = peer_id { - output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::VERIFICATION_FAIL))); } - output.extend(self.restart()); + self.restart(); }, Err(BlockImportError::BadBlock(peer_id)) => if let Some(peer) = peer_id { @@ -2066,7 +2001,7 @@ where target: LOG_TARGET, "💔 Block {hash:?} received from peer {peer} has been blacklisted", ); - output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); + self.actions.push(ChainSyncAction::DropPeer(BadPeer(peer, rep::BAD_BLOCK))); }, Err(BlockImportError::MissingState) => { // This may happen if the chain we were requesting upon has been discarded @@ -2078,14 +2013,50 @@ where warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err()); self.state_sync = None; self.warp_sync = 
None; - output.extend(self.restart()); + self.restart(); }, Err(BlockImportError::Cancelled) => {}, }; } self.allowed_requests.set_all(); - Box::new(output.into_iter()) + } + + /// Get pending actions to perform. + #[must_use] + pub fn actions(&mut self) -> impl Iterator> { + let block_requests = self + .block_requests() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); + self.actions.extend(block_requests); + + let justification_requests = self + .justification_requests() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); + self.actions.extend(justification_requests); + + let state_request = self + .state_request() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendStateRequest { peer_id, request }); + self.actions.extend(state_request); + + let warp_proof_request = self + .warp_sync_request() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendWarpProofRequest { peer_id, request }); + self.actions.extend(warp_proof_request); + + std::mem::take(&mut self.actions).into_iter() + } + + /// A version of `actions()` that doesn't schedule extra requests. For testing only. + #[cfg(test)] + #[must_use] + fn take_actions(&mut self) -> impl Iterator> { + std::mem::take(&mut self.actions).into_iter() } } diff --git a/substrate/client/network/sync/src/chain_sync/test.rs b/substrate/client/network/sync/src/chain_sync/test.rs index 2eefd2ad13ef8c95cb41426fb74ebc73c939818b..15b2a95a07c8739b3dca5d2e5c2e9eb89ccf50ef 100644 --- a/substrate/client/network/sync/src/chain_sync/test.rs +++ b/substrate/client/network/sync/src/chain_sync/test.rs @@ -53,7 +53,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { }; // add a new peer with the same best block - sync.new_peer(peer_id, a1_hash, a1_number).unwrap(); + sync.new_peer(peer_id, a1_hash, a1_number); // and request a justification for the block sync.request_justification(&a1_hash, a1_number); @@ -74,10 +74,8 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { // if the peer replies with an empty response (i.e. it doesn't know the block), // the active request should be cleared. 
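For context on the new control flow, a rough sketch (hypothetical, not from this patch) of how a caller such as `SyncingEngine` might drain the actions buffered by `ChainSync` and dispatch on them; the logging stands in for sending the requests and feeding the import queue.

    use crate::chain_sync::ChainSyncAction;
    use sp_runtime::traits::Block as BlockT;

    /// Hypothetical dispatcher for the actions returned by `ChainSync::actions()` (sketch only).
    fn dispatch_actions<B: BlockT>(actions: impl Iterator<Item = ChainSyncAction<B>>) {
        for action in actions {
            match action {
                ChainSyncAction::SendBlockRequest { peer_id, .. } =>
                    log::trace!("send block request to {peer_id}"),
                ChainSyncAction::CancelBlockRequest { peer_id } =>
                    log::trace!("cancel stale block request to {peer_id}"),
                ChainSyncAction::SendStateRequest { peer_id, .. } =>
                    log::trace!("send state request to {peer_id}"),
                ChainSyncAction::SendWarpProofRequest { peer_id, .. } =>
                    log::trace!("send warp proof request to {peer_id}"),
                ChainSyncAction::DropPeer(bad_peer) =>
                    log::trace!("disconnect and report peer: {bad_peer:?}"),
                ChainSyncAction::ImportBlocks { blocks, .. } =>
                    log::trace!("import {} block(s)", blocks.len()),
                ChainSyncAction::ImportJustifications { peer_id, number, .. } =>
                    log::trace!("import justification for #{number} from {peer_id}"),
            }
        }
    }

A caller would invoke this as, e.g., `dispatch_actions(chain_sync.actions())` after feeding an event such as `on_block_response` into `ChainSync`.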
- assert_eq!( - sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }), - Ok(OnBlockJustification::Nothing), - ); + sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }) + .unwrap(); // there should be no in-flight requests assert_eq!(sync.extra_justifications.active_requests().count(), 0); @@ -119,8 +117,8 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { let (b1_hash, b1_number) = new_blocks(50); // add 2 peers at blocks that we don't have locally - sync.new_peer(peer_id1, Hash::random(), 42).unwrap(); - sync.new_peer(peer_id2, Hash::random(), 10).unwrap(); + sync.new_peer(peer_id1, Hash::random(), 42); + sync.new_peer(peer_id2, Hash::random(), 10); // we wil send block requests to these peers // for these blocks we don't know about @@ -130,7 +128,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { .all(|(p, _)| { p == peer_id1 || p == peer_id2 })); // add a new peer at a known block - sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); + sync.new_peer(peer_id3, b1_hash, b1_number); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -148,14 +146,19 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { PeerSyncState::DownloadingJustification(b1_hash), ); + // clear old actions + let _ = sync.take_actions(); + // we restart the sync state - let block_requests = sync.restart(); + sync.restart(); + let actions = sync.take_actions().collect::>(); // which should make us send out block requests to the first two peers - assert!(block_requests.map(|r| r.unwrap()).all(|event| match event { - BlockRequestAction::SendRequest { peer_id, .. } => - peer_id == peer_id1 || peer_id == peer_id2, - BlockRequestAction::RemoveStale { .. } => false, + assert_eq!(actions.len(), 2); + assert!(actions.iter().all(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id, .. } => + peer_id == &peer_id1 || peer_id == &peer_id2, + _ => false, })); // peer 3 should be unaffected it was downloading finality data @@ -166,7 +169,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { // Set common block to something that we don't have (e.g. 
failed import) sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; - let _ = sync.restart().count(); + sync.restart(); assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); } @@ -280,9 +283,8 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { let best_block = blocks.last().unwrap().clone(); let max_blocks_to_request = sync.max_blocks_per_request; // Connect the node we will sync from - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); - sync.new_peer(peer_id2, info.best_hash, 0).unwrap(); + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()); + sync.new_peer(peer_id2, info.best_hash, 0); let mut best_block_num = 0; while best_block_num < MAX_DOWNLOAD_AHEAD { @@ -300,11 +302,17 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + // Clear old actions to not deal with them + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize, + )); best_block_num += max_blocks_to_request as u32; @@ -356,11 +364,14 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from); let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]); - let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty() - ),); + + // Clear old actions to not deal with them + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert!(actions.is_empty()); let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from); @@ -421,25 +432,34 @@ fn can_sync_huge_fork() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); let mut request = get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + // Discard old actions we are not interested in + let _ = sync.take_actions(); + // Do the ancestor search loop { let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; let response = create_block_response(vec![block.clone()]); - let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, request) = on_block_data { - request - } else { + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + + request = if actions.is_empty() { // We found the ancenstor break + } else { + assert_eq!(actions.len(), 1); + match &actions[0] { + 
ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + action @ _ => panic!("Unexpected action: {action:?}"), + } }; log::trace!(target: LOG_TARGET, "Request: {request:?}"); @@ -463,15 +483,18 @@ fn can_sync_huge_fork() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == sync.max_blocks_per_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == sync.max_blocks_per_request as usize + )); best_block_num += sync.max_blocks_per_request as u32; - let _ = sync.on_blocks_processed( + sync.on_blocks_processed( max_blocks_to_request as usize, max_blocks_to_request as usize, resp_blocks @@ -490,6 +513,9 @@ fn can_sync_huge_fork() { .collect(), ); + // Discard pending actions + let _ = sync.take_actions(); + resp_blocks .into_iter() .rev() @@ -539,25 +565,34 @@ fn syncs_fork_without_duplicate_requests() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); let mut request = get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + // Discard pending actions + let _ = sync.take_actions(); + // Do the ancestor search loop { let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; let response = create_block_response(vec![block.clone()]); - let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, request) = on_block_data { - request - } else { + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + + request = if actions.is_empty() { // We found the ancenstor break + } else { + assert_eq!(actions.len(), 1); + match &actions[0] { + ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + action @ _ => panic!("Unexpected action: {action:?}"), + } }; log::trace!(target: LOG_TARGET, "Request: {request:?}"); @@ -582,11 +617,17 @@ fn syncs_fork_without_duplicate_requests() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + // Discard old actions + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize + )); best_block_num += max_blocks_to_request as u32; @@ -653,8 +694,7 @@ fn removes_target_fork_on_disconnect() { let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), 
*common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); // Create a "new" header and announce it let mut header = blocks[0].header().clone(); @@ -678,8 +718,7 @@ fn can_import_response_with_missing_blocks() { let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()); sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; sync.peers.get_mut(&peer_id1).unwrap().common_number = 0; @@ -730,7 +769,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { let (b1_hash, b1_number) = new_blocks(50); // add new peer and request blocks from them - sync.new_peer(peers[0], Hash::random(), 42).unwrap(); + sync.new_peer(peers[0], Hash::random(), 42); // we don't actually perform any requests, just keep track of peers waiting for a response let mut pending_responses = HashSet::new(); @@ -743,7 +782,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { } // add a new peer at a known block - sync.new_peer(peers[1], b1_hash, b1_number).unwrap(); + sync.new_peer(peers[1], b1_hash, b1_number); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -766,24 +805,29 @@ fn sync_restart_removes_block_but_not_justification_requests() { ); assert_eq!(pending_responses.len(), 2); + // discard old actions + let _ = sync.take_actions(); + // restart sync - let request_events = sync.restart().collect::>(); - for event in request_events.iter() { - match event.as_ref().unwrap() { - BlockRequestAction::RemoveStale { peer_id } => { + sync.restart(); + let actions = sync.take_actions().collect::>(); + for action in actions.iter() { + match action { + ChainSyncAction::CancelBlockRequest { peer_id } => { pending_responses.remove(&peer_id); }, - BlockRequestAction::SendRequest { peer_id, .. } => { + ChainSyncAction::SendBlockRequest { peer_id, .. } => { // we drop obsolete response, but don't register a new request, it's checked in // the `assert!` below pending_responses.remove(&peer_id); }, + action @ _ => panic!("Unexpected action: {action:?}"), } } - assert!(request_events.iter().any(|event| { - match event.as_ref().unwrap() { - BlockRequestAction::RemoveStale { .. } => false, - BlockRequestAction::SendRequest { peer_id, .. } => peer_id == &peers[0], + assert!(actions.iter().any(|action| { + match action { + ChainSyncAction::SendBlockRequest { peer_id, .. } => peer_id == &peers[0], + _ => false, } })); @@ -848,11 +892,9 @@ fn request_across_forks() { // Add the peers, all at the common ancestor 100. let common_block = blocks.last().unwrap(); let peer_id1 = PeerId::random(); - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); let peer_id2 = PeerId::random(); - sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()); // Peer 1 announces 107 from fork 1, 100-107 get downloaded. 
{ @@ -864,11 +906,17 @@ fn request_across_forks() { let mut resp_blocks = fork_a_blocks[100_usize..107_usize].to_vec(); resp_blocks.reverse(); let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + + // Drop old actions + let _ = sync.take_actions(); + + sync.on_block_data(&peer, Some(request), response).unwrap(); + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 7_usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 7_usize + )); assert_eq!(sync.best_queued_number, 107); assert_eq!(sync.best_queued_hash, block.hash()); assert!(sync.is_known(&block.header.parent_hash())); @@ -903,11 +951,17 @@ fn request_across_forks() { // block is announced. let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 1, &peer); let response = create_block_response(vec![block.clone()]); - let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + + // Drop old actions we are not going to check + let _ = sync.take_actions(); + + sync.on_block_data(&peer, Some(request), response).unwrap(); + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 1_usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 1_usize + )); assert!(sync.is_known(&block.header.parent_hash())); } } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 560887132e3a85a8fa89cb5ef6d1d97eee084798..d7b024cd801c71064b959c55c1dd18e23714db39 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -25,15 +25,12 @@ use crate::{ }, block_relay_protocol::{BlockDownloader, BlockResponseError}, block_request_handler::MAX_BLOCKS_IN_RESPONSE, - chain_sync::{ - BlockRequestAction, ChainSync, ImportBlocksAction, ImportJustificationsAction, - OnBlockResponse, OnStateResponse, - }, + chain_sync::{ChainSync, ChainSyncAction}, pending_responses::{PendingResponses, ResponseEvent}, schema::v1::{StateRequest, StateResponse}, service::{ self, - chain_sync::{SyncingService, ToServiceCommand}, + syncing_service::{SyncingService, ToServiceCommand}, }, types::{ BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent, @@ -41,7 +38,7 @@ use crate::{ warp::{EncodedProof, WarpProofRequest, WarpSyncParams}, }; -use codec::{Decode, Encode}; +use codec::{Decode, DecodeAll, Encode}; use futures::{ channel::oneshot, future::{BoxFuture, Fuse}, @@ -58,15 +55,18 @@ use schnellru::{ByLength, LruMap}; use tokio::time::{Interval, MissedTickBehavior}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; -use sc_consensus::import_queue::ImportQueueService; +use sc_consensus::{import_queue::ImportQueueService, IncomingBlock}; use sc_network::{ config::{ FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, }, + peer_store::{PeerStoreHandle, PeerStoreProvider}, request_responses::{IfDisconnected, RequestFailure}, + service::traits::{Direction, NotificationEvent, ValidationResult}, + types::ProtocolName, utils::LruHashSet, - NotificationsSink, ProtocolName, ReputationChange, + NotificationService, 
ReputationChange, }; use sc_network_common::{ role::Roles, @@ -74,8 +74,11 @@ use sc_network_common::{ }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{Error as ClientError, HeaderMetadata}; -use sp_consensus::block_validation::BlockAnnounceValidator; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; +use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin}; +use sp_runtime::{ + traits::{Block as BlockT, Header, NumberFor, Zero}, + Justifications, +}; use std::{ collections::{HashMap, HashSet}, @@ -88,15 +91,15 @@ use std::{ time::{Duration, Instant}, }; -/// Log target for this file. -const LOG_TARGET: &'static str = "sync"; - /// Interval at which we perform time based maintenance const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100); /// Maximum number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead +/// Logging target for the file. +const LOG_TARGET: &str = "sync"; + /// If the block announces stream to peer has been inactive for 30 seconds meaning local node /// has not sent or received block announcements to/from the peer, report the node for inactivity, /// disconnect it and attempt to establish connection to some other peer. @@ -226,8 +229,6 @@ pub struct Peer { pub info: ExtendedPeerInfo, /// Holds a set of blocks known to this peer. pub known_blocks: LruHashSet, - /// Notification sink. - sink: NotificationsSink, /// Is the peer inbound. inbound: bool, } @@ -252,9 +253,6 @@ pub struct SyncingEngine { /// Channel for receiving service commands service_rx: TracingUnboundedReceiver>, - /// Channel for receiving inbound connections from `Protocol`. - sync_events_rx: sc_utils::mpsc::TracingUnboundedReceiver>, - /// Assigned roles. roles: Roles, @@ -312,12 +310,18 @@ pub struct SyncingEngine { /// Prometheus metrics. metrics: Option, + /// Handle that is used to communicate with `sc_network::Notifications`. + notification_service: Box, + /// When the syncing was started. /// /// Stored as an `Option` so once the initial wait has passed, `SyncingEngine` /// can reset the peer timers and continue with the normal eviction process. syncing_started: Option, + /// Handle to `PeerStore`. + peer_store_handle: PeerStoreHandle, + /// Instant when the last notification was sent or received. 
last_notification_io: Instant, @@ -362,7 +366,7 @@ where block_downloader: Arc>, state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, - sync_events_rx: sc_utils::mpsc::TracingUnboundedReceiver>, + peer_store_handle: PeerStoreHandle, ) -> Result<(Self, SyncingService, NonDefaultSetConfig), ClientError> { let mode = net_config.network_config.sync_mode; let max_parallel_downloads = net_config.network_config.max_parallel_downloads; @@ -387,7 +391,7 @@ where } for config in net_config.notification_protocols() { let peer_ids = config - .set_config + .set_config() .reserved_nodes .iter() .map(|info| info.peer_id) @@ -438,7 +442,7 @@ where let warp_sync_target_block_header_rx_fused = warp_sync_target_block_header_rx .map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse()); - let block_announce_config = Self::get_block_announce_proto_config( + let (block_announce_config, notification_service) = Self::get_block_announce_proto_config( protocol_id, fork_id, roles, @@ -450,7 +454,6 @@ where .flatten() .expect("Genesis block exists; qed"), ); - let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); let chain_sync = ChainSync::new( mode, @@ -460,6 +463,7 @@ where warp_sync_config, )?; + let block_announce_protocol_name = block_announce_config.protocol_name().clone(); let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000); let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); @@ -496,7 +500,6 @@ where num_connected: num_connected.clone(), is_major_syncing: is_major_syncing.clone(), service_rx, - sync_events_rx, genesis_hash, important_peers, default_peers_set_no_slot_connected_peers: HashSet::new(), @@ -508,8 +511,10 @@ where num_in_peers: 0usize, max_in_peers, event_streams: Vec::new(), + notification_service, tick_timeout, syncing_started: None, + peer_store_handle, last_notification_io: Instant::now(), metrics: if let Some(r) = metrics_registry { match Metrics::register(r, is_major_syncing.clone()) { @@ -673,23 +678,11 @@ where }; self.last_notification_io = Instant::now(); - peer.sink.send_sync_notification(message.encode()); + let _ = self.notification_service.send_sync_notification(peer_id, message.encode()); } } } - /// Inform sync about new best imported block. 
- pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - log::debug!(target: LOG_TARGET, "New best block imported {hash:?}/#{number}"); - - self.chain_sync.update_chain_info(&hash, number); - self.network_service.set_notification_handshake( - self.block_announce_protocol_name.clone(), - BlockAnnouncesHandshake::::build(self.roles, number, hash, self.genesis_hash) - .encode(), - ) - } - pub async fn run(mut self) { self.syncing_started = Some(Instant::now()); @@ -698,8 +691,10 @@ where _ = self.tick_timeout.tick() => self.perform_periodic_actions(), command = self.service_rx.select_next_some() => self.process_service_command(command), - sync_event = self.sync_events_rx.select_next_some() => - self.process_sync_event(sync_event), + notification_event = self.notification_service.next_event() => match notification_event { + Some(event) => self.process_notification_event(event), + None => return, + }, warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused => self.pass_warp_sync_target_block_header(warp_target_block_header), response_event = self.pending_responses.select_next_some() => @@ -713,11 +708,82 @@ where self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); - // Send outbound requests on `ChanSync`'s behalf. - self.send_chain_sync_requests(); + // Process actions requested by `ChainSync`. + self.process_chain_sync_actions(); } } + fn process_chain_sync_actions(&mut self) { + self.chain_sync.actions().for_each(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id, request } => { + // Sending block request implies dropping obsolete pending response as we are not + // interested in it anymore (see [`ChainSyncAction::SendBlockRequest`]). + // Furthermore, only one request at a time is allowed to any peer. 
+				let removed = self.pending_responses.remove(&peer_id);
+				self.send_block_request(peer_id, request.clone());
+
+				trace!(
+					target: LOG_TARGET,
+					"Processed `ChainSyncAction::SendBlockRequest` to {} with {:?}, stale response removed: {}.",
+					peer_id,
+					request,
+					removed,
+				)
+			},
+			ChainSyncAction::CancelBlockRequest { peer_id } => {
+				let removed = self.pending_responses.remove(&peer_id);
+
+				trace!(target: LOG_TARGET, "Processed {action:?}, response removed: {removed}.");
+			},
+			ChainSyncAction::SendStateRequest { peer_id, request } => {
+				self.send_state_request(peer_id, request);
+
+				trace!(
+					target: LOG_TARGET,
+					"Processed `ChainSyncAction::SendStateRequest` to {peer_id}.",
+				);
+			},
+			ChainSyncAction::SendWarpProofRequest { peer_id, request } => {
+				self.send_warp_proof_request(peer_id, request.clone());
+
+				trace!(
+					target: LOG_TARGET,
+					"Processed `ChainSyncAction::SendWarpProofRequest` to {}, request: {:?}.",
+					peer_id,
+					request,
+				);
+			},
+			ChainSyncAction::DropPeer(BadPeer(peer_id, rep)) => {
+				self.pending_responses.remove(&peer_id);
+				self.network_service
+					.disconnect_peer(peer_id, self.block_announce_protocol_name.clone());
+				self.network_service.report_peer(peer_id, rep);
+
+				trace!(target: LOG_TARGET, "Processed {action:?}.");
+			},
+			ChainSyncAction::ImportBlocks { origin, blocks } => {
+				let count = blocks.len();
+				self.import_blocks(origin, blocks);
+
+				trace!(
+					target: LOG_TARGET,
+					"Processed `ChainSyncAction::ImportBlocks` with {count} blocks.",
+				);
+			},
+			ChainSyncAction::ImportJustifications { peer_id, hash, number, justifications } => {
+				self.import_justifications(peer_id, hash, number, justifications);
+
+				trace!(
+					target: LOG_TARGET,
+					"Processed `ChainSyncAction::ImportJustifications` from peer {} for block {} ({}).",
+					peer_id,
+					hash,
+					number,
+				)
+			},
+		});
+	}
+
 	fn perform_periodic_actions(&mut self) {
 		self.report_metrics();
@@ -766,28 +832,7 @@ where
 			ToServiceCommand::ClearJustificationRequests =>
 				self.chain_sync.clear_justification_requests(),
 			ToServiceCommand::BlocksProcessed(imported, count, results) => {
-				for result in self.chain_sync.on_blocks_processed(imported, count, results) {
-					match result {
-						Ok(action) => match action {
-							BlockRequestAction::SendRequest { peer_id, request } => {
-								// drop obsolete pending response first
-								self.pending_responses.remove(&peer_id);
-								self.send_block_request(peer_id, request);
-							},
-							BlockRequestAction::RemoveStale { peer_id } => {
-								self.pending_responses.remove(&peer_id);
-							},
-						},
-						Err(BadPeer(peer_id, repu)) => {
-							self.pending_responses.remove(&peer_id);
-							self.network_service.disconnect_peer(
-								peer_id,
-								self.block_announce_protocol_name.clone(),
-							);
-							self.network_service.report_peer(peer_id, repu)
-						},
-					}
-				}
+				self.chain_sync.on_blocks_processed(imported, count, results);
 			},
 			ToServiceCommand::JustificationImported(peer_id, hash, number, success) => {
 				self.chain_sync.on_justification_import(hash, number, success);
@@ -803,8 +848,20 @@ where
 				}
 			},
 			ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data),
-			ToServiceCommand::NewBestBlockImported(hash, number) =>
-				self.new_best_block_imported(hash, number),
+			ToServiceCommand::NewBestBlockImported(hash, number) => {
+				log::debug!(target: "sync", "New best block imported {:?}/#{}", hash, number);
+
+				self.chain_sync.update_chain_info(&hash, number);
+				let _ = self.notification_service.try_set_handshake(
+					BlockAnnouncesHandshake::<B>::build(
+						self.roles,
+						number,
+						hash,
+						self.genesis_hash,
+					)
+					.encode(),
+				);
+			},
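The `NewBestBlockImported` arm above rebuilds the block-announces handshake and re-registers it with the notification service, and `validate_handshake()` further down strictly decodes the remote side's handshake and compares genesis hashes. A rough sketch of that encode / strict-decode round trip with `parity-scale-codec` (the field layout and names are simplified stand-ins for `BlockAnnouncesHandshake`, not the actual definition; this repository imports the crate as `codec`):

// Assumed dependency: parity-scale-codec = { version = "3", features = ["derive"] }
use parity_scale_codec::{Decode, DecodeAll, Encode};

/// Simplified stand-in for `BlockAnnouncesHandshake<B>`: roles, best block and genesis hash.
#[derive(Encode, Decode, Debug, PartialEq)]
struct Handshake {
    roles: u8,
    best_number: u64,
    best_hash: [u8; 32],
    genesis_hash: [u8; 32],
}

fn main() {
    let ours = Handshake { roles: 1, best_number: 42, best_hash: [1; 32], genesis_hash: [9; 32] };

    // On `NewBestBlockImported` the handshake is rebuilt and re-encoded so that
    // peers connecting later see the updated best block.
    let encoded = ours.encode();

    // `decode_all` rejects trailing bytes, which a plain `decode` would ignore;
    // that is why the handshake validation path uses it.
    let theirs = Handshake::decode_all(&mut &encoded[..]).expect("round-trips");
    assert_eq!(theirs, ours);

    // A mismatching genesis hash is the signal to reject a peer on another chain.
    if theirs.genesis_hash != ours.genesis_hash {
        println!("rejecting peer: wrong genesis");
    } else {
        println!("handshake accepted, peer best block: #{}", theirs.best_number);
    }
}
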
ToServiceCommand::Status(tx) => { let mut status = self.chain_sync.status(); status.num_connected_peers = self.peers.len() as u32; @@ -844,56 +901,60 @@ where } } - fn process_sync_event(&mut self, event: sc_network::SyncEvent) { + fn process_notification_event(&mut self, event: NotificationEvent) { match event { - sc_network::SyncEvent::NotificationStreamOpened { - remote, - received_handshake, - sink, - inbound, - tx, - } => match self.on_sync_peer_connected(remote, &received_handshake, sink, inbound) { - Ok(()) => { - let _ = tx.send(true); - }, - Err(()) => { - log::debug!( - target: LOG_TARGET, - "Failed to register peer {remote:?}: {received_handshake:?}", - ); - let _ = tx.send(false); - }, - }, - sc_network::SyncEvent::NotificationStreamClosed { remote } => { - if self.on_sync_peer_disconnected(remote).is_err() { - log::trace!( - target: LOG_TARGET, - "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", - remote - ); - } + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } => { + let validation_result = self + .validate_connection(&peer, handshake, Direction::Inbound) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + + let _ = result_tx.send(validation_result); }, - sc_network::SyncEvent::NotificationsReceived { remote, messages } => { - for message in messages { - if self.peers.contains_key(&remote) { - if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { - self.last_notification_io = Instant::now(); - self.push_block_announce_validation(remote, announce); - } else { - log::warn!(target: "sub-libp2p", "Failed to decode block announce"); + NotificationEvent::NotificationStreamOpened { peer, handshake, direction, .. } => { + log::debug!( + target: LOG_TARGET, + "Substream opened for {peer}, handshake {handshake:?}" + ); + + match self.validate_connection(&peer, handshake, direction) { + Ok(handshake) => { + if self.on_sync_peer_connected(peer, &handshake, direction).is_err() { + log::debug!(target: LOG_TARGET, "Failed to register peer {peer}"); + self.network_service + .disconnect_peer(peer, self.block_announce_protocol_name.clone()); } - } else { - log::trace!( - target: LOG_TARGET, - "Received sync for peer earlier refused by sync layer: {remote}", - ); - } + }, + Err(wrong_genesis) => { + log::debug!(target: LOG_TARGET, "`SyncingEngine` rejected {peer}"); + + if wrong_genesis { + self.peer_store_handle.report_peer(peer, rep::GENESIS_MISMATCH); + } + + self.network_service + .disconnect_peer(peer, self.block_announce_protocol_name.clone()); + }, } }, - sc_network::SyncEvent::NotificationSinkReplaced { remote, sink } => { - if let Some(peer) = self.peers.get_mut(&remote) { - peer.sink = sink; + NotificationEvent::NotificationStreamClosed { peer } => { + self.on_sync_peer_disconnected(peer); + }, + NotificationEvent::NotificationReceived { peer, notification } => { + if !self.peers.contains_key(&peer) { + log::error!( + target: LOG_TARGET, + "received notification from {peer} who had been earlier refused by `SyncingEngine`", + ); + return } + + let Ok(announce) = BlockAnnounce::decode(&mut notification.as_ref()) else { + log::warn!(target: LOG_TARGET, "failed to decode block announce"); + return + }; + + self.last_notification_io = Instant::now(); + self.push_block_announce_validation(peer, announce); }, } } @@ -915,131 +976,167 @@ where /// Called by peer when it is disconnecting. /// /// Returns a result if the handshake of this peer was indeed accepted. 
- fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) -> Result<(), ()> { - if let Some(info) = self.peers.remove(&peer_id) { - if self.important_peers.contains(&peer_id) { - log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected"); - } else { - log::debug!(target: LOG_TARGET, "{peer_id} disconnected"); - } - - if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) && - info.inbound && info.info.roles.is_full() - { - match self.num_in_peers.checked_sub(1) { - Some(value) => { - self.num_in_peers = value; - }, - None => { - log::error!( - target: LOG_TARGET, - "trying to disconnect an inbound node which is not counted as inbound" - ); - debug_assert!(false); - }, - } - } - - if let Some(import_blocks_action) = self.chain_sync.peer_disconnected(&peer_id) { - self.import_blocks(import_blocks_action) - } + fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) { + let Some(info) = self.peers.remove(&peer_id) else { + log::debug!(target: LOG_TARGET, "{peer_id} does not exist in `SyncingEngine`"); + return + }; - self.pending_responses.remove(&peer_id); - self.event_streams.retain(|stream| { - stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok() - }); - Ok(()) + if self.important_peers.contains(&peer_id) { + log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected"); } else { - Err(()) + log::debug!(target: LOG_TARGET, "{peer_id} disconnected"); } - } - /// Called on the first connection between two peers on the default set, after their exchange - /// of handshake. - /// - /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync - /// from. - fn on_sync_peer_connected( - &mut self, - peer_id: PeerId, - status: &BlockAnnouncesHandshake, - sink: NotificationsSink, - inbound: bool, - ) -> Result<(), ()> { - log::trace!(target: LOG_TARGET, "New peer {peer_id} {status:?}"); - - if self.peers.contains_key(&peer_id) { - log::error!( - target: LOG_TARGET, - "Called on_sync_peer_connected with already connected peer {peer_id}", - ); - debug_assert!(false); - return Err(()) + if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) && + info.inbound && info.info.roles.is_full() + { + match self.num_in_peers.checked_sub(1) { + Some(value) => { + self.num_in_peers = value; + }, + None => { + log::error!( + target: LOG_TARGET, + "trying to disconnect an inbound node which is not counted as inbound" + ); + debug_assert!(false); + }, + } } - if status.genesis_hash != self.genesis_hash { - self.network_service.report_peer(peer_id, rep::GENESIS_MISMATCH); + self.chain_sync.peer_disconnected(&peer_id); + self.pending_responses.remove(&peer_id); + self.event_streams + .retain(|stream| stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok()); + } + /// Validate received handshake. 
+	fn validate_handshake(
+		&mut self,
+		peer_id: &PeerId,
+		handshake: Vec<u8>,
+	) -> Result<BlockAnnouncesHandshake<B>, bool> {
+		log::trace!(target: LOG_TARGET, "Validate handshake for {peer_id}");
+
+		let handshake = <BlockAnnouncesHandshake<B> as DecodeAll>::decode_all(&mut &handshake[..])
+			.map_err(|error| {
+				log::debug!(target: LOG_TARGET, "Failed to decode handshake for {peer_id}: {error:?}");
+				false
+			})?;
+
+		if handshake.genesis_hash != self.genesis_hash {
 			if self.important_peers.contains(&peer_id) {
 				log::error!(
 					target: LOG_TARGET,
-					"Reserved peer id `{}` is on a different chain (our genesis: {} theirs: {})",
-					peer_id,
+					"Reserved peer id `{peer_id}` is on a different chain (our genesis: {} theirs: {})",
 					self.genesis_hash,
-					status.genesis_hash,
+					handshake.genesis_hash,
 				);
 			} else if self.boot_node_ids.contains(&peer_id) {
 				log::error!(
 					target: LOG_TARGET,
-					"Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})",
-					peer_id,
+					"Bootnode with peer id `{peer_id}` is on a different chain (our genesis: {} theirs: {})",
 					self.genesis_hash,
-					status.genesis_hash,
+					handshake.genesis_hash,
 				);
 			} else {
 				log::debug!(
 					target: LOG_TARGET,
 					"Peer is on different chain (our genesis: {} theirs: {})",
-					self.genesis_hash, status.genesis_hash
+					self.genesis_hash,
+					handshake.genesis_hash
 				);
 			}
-			return Err(())
+			return Err(true)
 		}
-		let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&peer_id);
-		let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 };
+		Ok(handshake)
+	}
-		// make sure to accept no more than `--in-peers` many full nodes
-		if !no_slot_peer &&
-			status.roles.is_full() &&
-			inbound && self.num_in_peers == self.max_in_peers
-		{
-			log::debug!(
+	/// Validate connection.
+	// NOTE Returning `Err(bool)` is a really ugly hack to work around the issue
+	// that `ProtocolController` thinks the peer is connected when in fact it can
+	// still be under validation. If the peer has different genesis than the
+	// local node the validation fails but the peer cannot be reported in
+	// `validate_connection()` as that is also called by
+	// `ValidateInboundSubstream` which means that the peer is still being
+	// validated and banning the peer when handling that event would
+	// result in peer getting dropped twice.
+ // + // The proper way to fix this is to integrate `ProtocolController` more + // tightly with `NotificationService` or add an additional API call for + // banning pre-accepted peers (which is not desirable) + fn validate_connection( + &mut self, + peer_id: &PeerId, + handshake: Vec, + direction: Direction, + ) -> Result, bool> { + log::trace!(target: LOG_TARGET, "New peer {peer_id} {handshake:?}"); + + let handshake = self.validate_handshake(peer_id, handshake)?; + + if self.peers.contains_key(&peer_id) { + log::error!( target: LOG_TARGET, - "All inbound slots have been consumed, rejecting {peer_id}", + "Called `validate_connection()` with already connected peer {peer_id}", ); - return Err(()) + debug_assert!(false); + return Err(false) } - if status.roles.is_full() && + let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&peer_id); + let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; + + if handshake.roles.is_full() && self.chain_sync.num_peers() >= self.default_peers_set_num_full + self.default_peers_set_no_slot_connected_peers.len() + this_peer_reserved_slot { log::debug!(target: LOG_TARGET, "Too many full nodes, rejecting {peer_id}"); - return Err(()) + return Err(false) } - if status.roles.is_light() && + // make sure to accept no more than `--in-peers` many full nodes + if !no_slot_peer && + handshake.roles.is_full() && + direction.is_inbound() && + self.num_in_peers == self.max_in_peers + { + log::debug!(target: LOG_TARGET, "All inbound slots have been consumed, rejecting {peer_id}"); + return Err(false) + } + + // make sure that all slots are not occupied by light peers + // + // `ChainSync` only accepts full peers whereas `SyncingEngine` accepts both full and light + // peers. Verify that there is a slot in `SyncingEngine` for the inbound light peer + if handshake.roles.is_light() && (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light { - // Make sure that not all slots are occupied by light clients. log::debug!(target: LOG_TARGET, "Too many light nodes, rejecting {peer_id}"); - return Err(()) + return Err(false) } + Ok(handshake) + } + + /// Called on the first connection between two peers on the default set, after their exchange + /// of handshake. + /// + /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync + /// from. 
+ fn on_sync_peer_connected( + &mut self, + peer_id: PeerId, + status: &BlockAnnouncesHandshake, + direction: Direction, + ) -> Result<(), ()> { + log::trace!(target: LOG_TARGET, "New peer {peer_id} {status:?}"); + let peer = Peer { info: ExtendedPeerInfo { roles: status.roles, @@ -1049,62 +1146,30 @@ where known_blocks: LruHashSet::new( NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), ), - sink, - inbound, + inbound: direction.is_inbound(), }; - let req = if peer.info.roles.is_full() { - match self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number) { - Ok(req) => req, - Err(BadPeer(id, repu)) => { - self.network_service.report_peer(id, repu); - return Err(()) - }, - } - } else { - None - }; + self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number); log::debug!(target: LOG_TARGET, "Connected {peer_id}"); self.peers.insert(peer_id, peer); + self.peer_store_handle.set_peer_role(&peer_id, status.roles.into()); - if no_slot_peer { + if self.default_peers_set_no_slot_peers.contains(&peer_id) { self.default_peers_set_no_slot_connected_peers.insert(peer_id); - } else if inbound && status.roles.is_full() { + } else if direction.is_inbound() && status.roles.is_full() { self.num_in_peers += 1; } - if let Some(req) = req { - self.send_block_request(peer_id, req); - } - self.event_streams .retain(|stream| stream.unbounded_send(SyncEvent::PeerConnected(peer_id)).is_ok()); Ok(()) } - fn send_chain_sync_requests(&mut self) { - for (peer_id, request) in self.chain_sync.block_requests() { - self.send_block_request(peer_id, request); - } - - if let Some((peer_id, request)) = self.chain_sync.state_request() { - self.send_state_request(peer_id, request); - } - - for (peer_id, request) in self.chain_sync.justification_requests() { - self.send_block_request(peer_id, request); - } - - if let Some((peer_id, request)) = self.chain_sync.warp_sync_request() { - self.send_warp_sync_request(peer_id, request); - } - } - fn send_block_request(&mut self, peer_id: PeerId, request: BlockRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send block request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1120,7 +1185,7 @@ where } fn send_state_request(&mut self, peer_id: PeerId, request: OpaqueStateRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send state request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1149,8 +1214,8 @@ where } } - fn send_warp_sync_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + fn send_warp_proof_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send warp proof request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1202,22 +1267,7 @@ where PeerRequest::Block(req) => { match self.block_downloader.block_response_into_blocks(&req, resp) { Ok(blocks) => { - match self.chain_sync.on_block_response(peer_id, req, blocks) { - OnBlockResponse::SendBlockRequest { peer_id, request } => - self.send_block_request(peer_id, request), - OnBlockResponse::ImportBlocks(import_blocks_action) => - self.import_blocks(import_blocks_action), - OnBlockResponse::ImportJustifications(action) => - self.import_justifications(action), - OnBlockResponse::Nothing => {}, - 
OnBlockResponse::DisconnectPeer(BadPeer(peer_id, rep)) => { - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, rep); - }, - } + self.chain_sync.on_block_response(peer_id, req, blocks); }, Err(BlockResponseError::DecodeFailed(e)) => { debug!( @@ -1262,27 +1312,10 @@ where }, }; - match self.chain_sync.on_state_response(peer_id, response) { - OnStateResponse::ImportBlocks(import_blocks_action) => - self.import_blocks(import_blocks_action), - OnStateResponse::DisconnectPeer(BadPeer(peer_id, rep)) => { - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, rep); - }, - OnStateResponse::Nothing => {}, - } + self.chain_sync.on_state_response(peer_id, response); }, PeerRequest::WarpProof => { - if let Err(BadPeer(peer_id, rep)) = - self.chain_sync.on_warp_sync_response(&peer_id, EncodedProof(resp)) - { - self.network_service - .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(peer_id, rep); - } + self.chain_sync.on_warp_sync_response(&peer_id, EncodedProof(resp)); }, }, Ok(Err(e)) => { @@ -1349,7 +1382,7 @@ where best_number: NumberFor, best_hash: B::Hash, genesis_hash: B::Hash, - ) -> NonDefaultSetConfig { + ) -> (NonDefaultSetConfig, Box) { let block_announces_protocol = { let genesis_hash = genesis_hash.as_ref(); if let Some(ref fork_id) = fork_id { @@ -1363,14 +1396,11 @@ where } }; - NonDefaultSetConfig { - notifications_protocol: block_announces_protocol.into(), - fallback_names: iter::once( - format!("/{}/block-announces/1", protocol_id.as_ref()).into(), - ) - .collect(), - max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + NonDefaultSetConfig::new( + block_announces_protocol.into(), + iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()).collect(), + MAX_BLOCK_ANNOUNCE_SIZE, + Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( roles, best_number, best_hash, @@ -1378,17 +1408,17 @@ where ))), // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement // protocol is still hardcoded into the peerset. - set_config: SetConfig { + SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Deny, }, - } + ) } /// Import blocks. - fn import_blocks(&mut self, ImportBlocksAction { origin, blocks }: ImportBlocksAction) { + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { if let Some(metrics) = &self.metrics { metrics.import_queue_blocks_submitted.inc(); } @@ -1397,13 +1427,17 @@ where } /// Import justifications. 
- fn import_justifications(&mut self, action: ImportJustificationsAction) { + fn import_justifications( + &mut self, + peer_id: PeerId, + hash: B::Hash, + number: NumberFor, + justifications: Justifications, + ) { if let Some(metrics) = &self.metrics { metrics.import_queue_justifications_submitted.inc(); } - let ImportJustificationsAction { peer_id, hash, number, justifications } = action; - self.import_queue.import_justifications(peer_id, hash, number, justifications); } } diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index c42b0601e659fb0d4a32f49cbb8fbcfe30a97dbe..1a7e773c95f7ad68f0831b4aeaf489f685996d7f 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -18,7 +18,7 @@ //! Blockchain syncing implementation in Substrate. -pub use service::chain_sync::SyncingService; +pub use service::syncing_service::SyncingService; pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusProvider}; mod block_announce_validator; diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs index 885eb1f8da593e1315b0c9ac97842b7adb265810..6e307d8698444b78ccd12a08e8b7b41681b8ca1b 100644 --- a/substrate/client/network/sync/src/service/mock.rs +++ b/substrate/client/network/sync/src/service/mock.rs @@ -27,6 +27,7 @@ use sc_network::{ NetworkNotification, NetworkPeers, NetworkRequest, NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT, ReputationChange, }; +use sc_network_common::role::ObservedRole; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::collections::HashSet; @@ -83,8 +84,9 @@ mockall::mock! { fn set_authorized_peers(&self, peers: HashSet); fn set_authorized_only(&self, reserved_only: bool); fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr); - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange); - fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); + fn report_peer(&self, peer_id: PeerId, cost_benefit: ReputationChange); + fn peer_reputation(&self, peer_id: &PeerId) -> i32; + fn disconnect_peer(&self, peer_id: PeerId, protocol: ProtocolName); fn accept_unreserved_peers(&self); fn deny_unreserved_peers(&self); fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String>; @@ -105,6 +107,7 @@ mockall::mock! { peers: Vec ) -> Result<(), String>; fn sync_num_connected(&self) -> usize; + fn peer_role(&self, peer_id: PeerId, handshake: Vec) -> Option; } #[async_trait::async_trait] diff --git a/substrate/client/network/sync/src/service/mod.rs b/substrate/client/network/sync/src/service/mod.rs index 18331d63ed29f76f383c928107b6753d7ca25d16..d045af26e70dea6da2d33ee6dcf10a6e50d44b55 100644 --- a/substrate/client/network/sync/src/service/mod.rs +++ b/substrate/client/network/sync/src/service/mod.rs @@ -16,8 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! `ChainSync`-related service code +//! 
`SyncingEngine`-related service code -pub mod chain_sync; pub mod mock; pub mod network; +pub mod syncing_service; diff --git a/substrate/client/network/sync/src/service/chain_sync.rs b/substrate/client/network/sync/src/service/syncing_service.rs similarity index 98% rename from substrate/client/network/sync/src/service/chain_sync.rs rename to substrate/client/network/sync/src/service/syncing_service.rs index 3d11880c511c215dd9a5b46dce10810ba4a4d69d..92d649d65dc3a5e5fac70c5a1cc16ed4c825d400 100644 --- a/substrate/client/network/sync/src/service/chain_sync.rs +++ b/substrate/client/network/sync/src/service/syncing_service.rs @@ -34,7 +34,7 @@ use std::{ }, }; -/// Commands send to `ChainSync` +/// Commands send to `SyncingEngine` pub enum ToServiceCommand { SetSyncForkRequest(Vec, B::Hash, NumberFor), RequestJustification(B::Hash, NumberFor), @@ -63,7 +63,7 @@ pub enum ToServiceCommand { // }, } -/// Handle for communicating with `ChainSync` asynchronously +/// Handle for communicating with `SyncingEngine` asynchronously #[derive(Clone)] pub struct SyncingService { tx: TracingUnboundedSender>, @@ -148,7 +148,7 @@ impl SyncingService { /// Get sync status /// - /// Returns an error if `ChainSync` has terminated. + /// Returns an error if `SyncingEngine` has terminated. pub async fn status(&self) -> Result, ()> { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::Status(tx)); diff --git a/substrate/client/network/sync/src/warp.rs b/substrate/client/network/sync/src/warp.rs index 2c0adc856c126568ca80181e6807090f35544341..169b3de35aa1bfe5f11098680b9243bc3213d263 100644 --- a/substrate/client/network/sync/src/warp.rs +++ b/substrate/client/network/sync/src/warp.rs @@ -42,7 +42,7 @@ const LOG_TARGET: &'static str = "sync"; pub struct EncodedProof(pub Vec); /// Warp sync request -#[derive(Encode, Decode, Debug)] +#[derive(Encode, Decode, Debug, Clone)] pub struct WarpProofRequest { /// Start collecting proofs from this block. 
pub begin: B::Hash, diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index 09f8f1fa9efbffd904c05c414ea9069c8410ebd2..dced6ed673057deb6959eb4b6fb001cff5be301a 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -9,15 +9,18 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] tokio = "1.22.0" -async-trait = "0.1.57" +async-trait = "0.1.74" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = "0.51.3" +libp2p = "0.51.4" log = "0.4.17" parking_lot = "0.12.1" rand = "0.8.5" @@ -29,7 +32,7 @@ sc-network-common = { path = "../common" } sc-utils = { path = "../../utils" } sc-network-light = { path = "../light" } sc-network-sync = { path = "../sync" } -sc-service = { path = "../../service", default-features = false, features = ["test-helpers"]} +sc-service = { path = "../../service", default-features = false, features = ["test-helpers"] } sp-blockchain = { path = "../../../primitives/blockchain" } sp-consensus = { path = "../../../primitives/consensus/common" } sp-core = { path = "../../../primitives/core" } diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index f869e3a171a3c23979b17767903fb748a9c5f467..71f13b74a5328e391e05f3f8187228c8da697444 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -58,13 +58,13 @@ use sc_network::{ request_responses::ProtocolConfig as RequestResponseConfig, types::ProtocolName, Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, - NetworkWorker, + NetworkWorker, NotificationService, }; use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, - service::{chain_sync::SyncingService, network::NetworkServiceProvider}, + service::{network::NetworkServiceProvider, syncing_service::SyncingService}, state_request_handler::StateRequestHandler, warp::{ AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncParams, WarpSyncProvider, @@ -239,6 +239,7 @@ pub struct Peer { imported_blocks_stream: Pin> + Send>>, finality_notification_stream: Pin> + Send>>, listen_addr: Multiaddr, + notification_services: HashMap>, } impl Peer @@ -263,8 +264,8 @@ where } /// Returns the number of peers we're connected to. - pub fn num_peers(&self) -> usize { - self.network.num_connected_peers() + pub async fn num_peers(&self) -> usize { + self.sync_service.status().await.unwrap().num_connected_peers as usize } /// Returns the number of downloaded blocks. @@ -502,10 +503,19 @@ where self.network.service() } + /// Get `SyncingService`. pub fn sync_service(&self) -> &Arc> { &self.sync_service } + /// Take notification handle for enabled protocol. + pub fn take_notification_service( + &mut self, + protocol: &ProtocolName, + ) -> Option> { + self.notification_services.remove(protocol) + } + /// Get a reference to the network worker. 
pub fn network(&self) -> &NetworkWorker::Hash> { &self.network @@ -778,6 +788,23 @@ pub trait TestNetFactory: Default + Sized + Send { network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; + + let (notif_configs, notif_handles): (Vec<_>, Vec<_>) = config + .notifications_protocols + .into_iter() + .map(|p| { + let (config, handle) = NonDefaultSetConfig::new( + p.clone(), + Vec::new(), + 1024 * 1024, + None, + Default::default(), + ); + + (config, (p, handle)) + }) + .unzip(); + if let Some(connect_to) = config.connect_to_peers { let addrs = connect_to .iter() @@ -849,11 +876,16 @@ pub trait TestNetFactory: Default + Sized + Send { protocol_config }; + let peer_store = PeerStore::new( + network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), + ); + let peer_store_handle = peer_store.handle(); + self.spawn_task(peer_store.run().boxed()); + let block_announce_validator = config .block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); - let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let (engine, sync_service, block_announce_config) = sc_network_sync::engine::SyncingEngine::new( Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), @@ -869,7 +901,7 @@ pub trait TestNetFactory: Default + Sized + Send { block_relay_params.downloader, state_request_protocol_config.name.clone(), Some(warp_protocol_config.name.clone()), - rx, + peer_store_handle.clone(), ) .unwrap(); let sync_service_import_queue = Box::new(sync_service.clone()); @@ -887,22 +919,10 @@ pub trait TestNetFactory: Default + Sized + Send { full_net_config.add_request_response_protocol(config); } - for protocol in config.notifications_protocols { - full_net_config.add_notification_protocol(NonDefaultSetConfig { - notifications_protocol: protocol, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }); + for config in notif_configs { + full_net_config.add_notification_protocol(config); } - let peer_store = PeerStore::new( - network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), - ); - let peer_store_handle = peer_store.handle(); - self.spawn_task(peer_store.run().boxed()); - let genesis_hash = client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); let network = NetworkWorker::new(sc_network::config::Params { @@ -917,7 +937,6 @@ pub trait TestNetFactory: Default + Sized + Send { fork_id, metrics_registry: None, block_announce_config, - tx, }) .unwrap(); @@ -953,6 +972,7 @@ pub trait TestNetFactory: Default + Sized + Send { backend: Some(backend), imported_blocks_stream, finality_notification_stream, + notification_services: HashMap::from_iter(notif_handles.into_iter()), block_import, verifier, network, @@ -967,20 +987,6 @@ pub trait TestNetFactory: Default + Sized + Send { tokio::spawn(f); } - /// Polls the testnet until all peers are connected to each other. - /// - /// Must be executed in a task context. 
- fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> { - self.poll(cx); - - let num_peers = self.peers().len(); - if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) { - return Poll::Ready(()) - } - - Poll::Pending - } - async fn is_in_sync(&mut self) -> bool { let mut highest = None; let peers = self.peers_mut(); @@ -1058,10 +1064,27 @@ pub trait TestNetFactory: Default + Sized + Send { } /// Run the network until all peers are connected to each other. - /// - /// Calls `poll_until_connected` repeatedly with the runtime passed as parameter. async fn run_until_connected(&mut self) { - futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)).await; + let num_peers = self.peers().len(); + let sync_services = + self.peers().iter().map(|info| info.sync_service.clone()).collect::>(); + + 'outer: loop { + for sync_service in &sync_services { + if sync_service.status().await.unwrap().num_connected_peers as usize != + num_peers - 1 + { + futures::future::poll_fn::<(), _>(|cx| { + self.poll(cx); + Poll::Ready(()) + }) + .await; + continue 'outer + } + } + + break + } } /// Polls the testnet. Processes all the pending actions. diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs index 62d7f9f9d1bb1839f3f1f12bc601ecefb270d6bf..800c0d4369c2cf3871e206af0321050375ba2ad9 100644 --- a/substrate/client/network/test/src/service.rs +++ b/substrate/client/network/test/src/service.rs @@ -24,8 +24,9 @@ use sc_network::{ config::{self, FullNetworkConfiguration, MultiaddrWithPeerId, ProtocolId, TransportConfig}, event::Event, peer_store::PeerStore, - NetworkEventStream, NetworkNotification, NetworkPeers, NetworkService, NetworkStateInfo, - NetworkWorker, + service::traits::{NotificationEvent, ValidationResult}, + NetworkEventStream, NetworkPeers, NetworkService, NetworkStateInfo, NetworkWorker, + NotificationService, }; use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -116,7 +117,7 @@ impl TestNetworkBuilder { self } - pub fn build(mut self) -> TestNetwork { + pub fn build(mut self) -> (TestNetwork, Option>) { let client = self.client.as_mut().map_or( Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), |v| v.clone(), @@ -183,7 +184,12 @@ impl TestNetworkBuilder { protocol_config }; - let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); + let peer_store = PeerStore::new( + network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), + ); + let peer_store_handle = peer_store.handle(); + tokio::spawn(peer_store.run().boxed()); + let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config::Role::Full), client.clone(), @@ -198,24 +204,27 @@ impl TestNetworkBuilder { block_relay_params.downloader, state_request_protocol_config.name.clone(), None, - rx, + peer_store_handle.clone(), ) .unwrap(); let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone())); - if !self.notification_protocols.is_empty() { + let handle = if !self.notification_protocols.is_empty() { for config in self.notification_protocols { full_net_config.add_notification_protocol(config); } + None } else { - full_net_config.add_notification_protocol(config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: 
self.set_config.unwrap_or_default(), - }); - } + let (config, handle) = config::NonDefaultSetConfig::new( + PROTOCOL_NAME.into(), + Vec::new(), + 1024 * 1024, + None, + self.set_config.unwrap_or_default(), + ); + full_net_config.add_notification_protocol(config); + Some(handle) + }; for config in [ block_relay_params.request_response_config, @@ -225,12 +234,6 @@ impl TestNetworkBuilder { full_net_config.add_request_response_protocol(config); } - let peer_store = PeerStore::new( - network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), - ); - let peer_store_handle = peer_store.handle(); - tokio::spawn(peer_store.run().boxed()); - let genesis_hash = client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); let worker = NetworkWorker::< @@ -248,7 +251,6 @@ impl TestNetworkBuilder { protocol_id, fork_id, metrics_registry: None, - tx, }) .unwrap(); @@ -268,7 +270,7 @@ impl TestNetworkBuilder { }); tokio::spawn(engine.run()); - TestNetwork::new(worker) + (TestNetwork::new(worker), handle) } } @@ -276,18 +278,18 @@ impl TestNetworkBuilder { /// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. fn build_nodes_one_proto() -> ( Arc, - impl Stream, + Option>, Arc, - impl Stream, + Option>, ) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (node1, events_stream1) = TestNetworkBuilder::new() + let (network1, handle1) = TestNetworkBuilder::new() .with_listen_addresses(vec![listen_addr.clone()]) - .build() - .start_network(); + .build(); + let (node1, _) = network1.start_network(); - let (node2, events_stream2) = TestNetworkBuilder::new() + let (network2, handle2) = TestNetworkBuilder::new() .with_set_config(config::SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, @@ -295,10 +297,11 @@ fn build_nodes_one_proto() -> ( }], ..Default::default() }) - .build() - .start_network(); + .build(); + + let (node2, _) = network2.start_network(); - (node1, events_stream1, node2, events_stream2) + (node1, handle1, node2, handle2) } #[tokio::test] @@ -306,22 +309,15 @@ async fn notifications_state_consistent() { // Runs two nodes and ensures that events are propagated out of the API in a consistent // correct order, which means no notification received on a closed substream. - let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let (node1, handle1, node2, handle2) = build_nodes_one_proto(); + let (mut handle1, mut handle2) = (handle1.unwrap(), handle2.unwrap()); // Write some initial notifications that shouldn't get through. for _ in 0..(rand::random::() % 5) { - node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle1.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec()); } for _ in 0..(rand::random::() % 5) { - node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle2.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec()); } // True if we have an active substream from node1 to node2. @@ -343,18 +339,10 @@ async fn notifications_state_consistent() { // Start by sending a notification from node1 to node2 and vice-versa. Part of the // test consists in ensuring that notifications get ignored if the stream isn't open. 
if rand::random::() % 5 >= 3 { - node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle1.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec()); } if rand::random::() % 5 >= 3 { - node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle2.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec()); } // Also randomly disconnect the two nodes from time to time. @@ -367,8 +355,8 @@ async fn notifications_state_consistent() { // Grab next event from either `events_stream1` or `events_stream2`. let next_event = { - let next1 = events_stream1.next(); - let next2 = events_stream2.next(); + let next1 = handle1.next_event(); + let next2 = handle2.next_event(); // We also await on a small timer, otherwise it is possible for the test to wait // forever while nothing at all happens on the network. let continue_test = futures_timer::Delay::new(Duration::from_millis(20)); @@ -383,58 +371,55 @@ async fn notifications_state_consistent() { }; match next_event { - future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { - something_happened = true; - assert!(!node1_to_node2_open); - node1_to_node2_open = true; - assert_eq!(remote, node2.local_peer_id()); - }, - future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { - something_happened = true; - assert!(!node2_to_node1_open); - node2_to_node1_open = true; - assert_eq!(remote, node1.local_peer_id()); - }, - future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { - assert!(node1_to_node2_open); - node1_to_node2_open = false; - assert_eq!(remote, node2.local_peer_id()); - }, - future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { - assert!(node2_to_node1_open); - node2_to_node1_open = false; - assert_eq!(remote, node1.local_peer_id()); - }, - future::Either::Left(Event::NotificationsReceived { remote, .. }) => { + future::Either::Left(NotificationEvent::ValidateInboundSubstream { + result_tx, .. + }) => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + future::Either::Right(NotificationEvent::ValidateInboundSubstream { + result_tx, + .. + }) => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + future::Either::Left(NotificationEvent::NotificationStreamOpened { peer, .. }) => { + something_happened = true; + assert!(!node1_to_node2_open); + node1_to_node2_open = true; + assert_eq!(peer, node2.local_peer_id()); + }, + future::Either::Right(NotificationEvent::NotificationStreamOpened { peer, .. }) => { + something_happened = true; + assert!(!node2_to_node1_open); + node2_to_node1_open = true; + assert_eq!(peer, node1.local_peer_id()); + }, + future::Either::Left(NotificationEvent::NotificationStreamClosed { peer, .. }) => { assert!(node1_to_node2_open); - assert_eq!(remote, node2.local_peer_id()); + node1_to_node2_open = false; + assert_eq!(peer, node2.local_peer_id()); + }, + future::Either::Right(NotificationEvent::NotificationStreamClosed { peer, .. }) => { + assert!(node2_to_node1_open); + node2_to_node1_open = false; + assert_eq!(peer, node1.local_peer_id()); + }, + future::Either::Left(NotificationEvent::NotificationReceived { peer, .. 
}) => { + assert!(node1_to_node2_open); + assert_eq!(peer, node2.local_peer_id()); if rand::random::() % 5 >= 4 { - node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle1 + .send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec()); } }, - future::Either::Right(Event::NotificationsReceived { remote, .. }) => { + future::Either::Right(NotificationEvent::NotificationReceived { peer, .. }) => { assert!(node2_to_node1_open); - assert_eq!(remote, node1.local_peer_id()); + assert_eq!(peer, node1.local_peer_id()); if rand::random::() % 5 >= 4 { - node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle2 + .send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec()); } }, - - // Add new events here. - future::Either::Left(Event::Dht(_)) => {}, - future::Either::Right(Event::Dht(_)) => {}, }; } } @@ -444,20 +429,29 @@ async fn lots_of_incoming_peers_works() { sp_tracing::try_init_simple(); let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (main_node, _) = TestNetworkBuilder::new() + let (main_node, handle1) = TestNetworkBuilder::new() .with_listen_addresses(vec![listen_addr.clone()]) .with_set_config(config::SetConfig { in_peers: u32::MAX, ..Default::default() }) - .build() - .start_network(); + .build(); + let mut handle1 = handle1.unwrap(); + let (main_node, _) = main_node.start_network(); let main_node_peer_id = main_node.local_peer_id(); + tokio::spawn(async move { + while let Some(event) = handle1.next_event().await { + if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event { + result_tx.send(ValidationResult::Accept).unwrap(); + } + } + }); + // We spawn background tasks and push them in this `Vec`. They will all be waited upon before // this test ends. let mut background_tasks_to_wait = Vec::new(); for _ in 0..32 { - let (_dialing_node, event_stream) = TestNetworkBuilder::new() + let (dialing_node, handle) = TestNetworkBuilder::new() .with_set_config(config::SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), @@ -465,8 +459,9 @@ async fn lots_of_incoming_peers_works() { }], ..Default::default() }) - .build() - .start_network(); + .build(); + let mut handle = handle.unwrap(); + let (_, _) = dialing_node.start_network(); background_tasks_to_wait.push(tokio::spawn(async move { // Create a dummy timer that will "never" fire, and that will be overwritten when we @@ -474,34 +469,23 @@ async fn lots_of_incoming_peers_works() { // make the code below way more complicated. let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse(); - let mut event_stream = event_stream.fuse(); - let mut sync_protocol_name = None; loop { futures::select! { _ = timer => { // Test succeeds when timer fires. return; } - ev = event_stream.next() => { - match ev.unwrap() { - Event::NotificationStreamOpened { protocol, remote, .. } => { - if let None = sync_protocol_name { - sync_protocol_name = Some(protocol.clone()); - } - - assert_eq!(remote, main_node_peer_id); - // Test succeeds after 5 seconds. This timer is here in order to - // detect a potential problem after opening. - timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); - } - Event::NotificationStreamClosed { protocol, .. } => { - if Some(protocol) != sync_protocol_name { - // Test failed. 
- panic!(); - } - } - _ => {} + ev = handle.next_event().fuse() => match ev.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); } + NotificationEvent::NotificationStreamOpened { peer, .. } => { + assert_eq!(peer, main_node_peer_id); + // Test succeeds after 5 seconds. This timer is here in order to + // detect a potential problem after opening. + timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); + } + _ => {} } } } @@ -518,33 +502,27 @@ async fn notifications_back_pressure() { const TOTAL_NOTIFS: usize = 10_000; - let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let (_node1, handle1, node2, handle2) = build_nodes_one_proto(); + let (mut handle1, mut handle2) = (handle1.unwrap(), handle2.unwrap()); let node2_id = node2.local_peer_id(); let receiver = tokio::spawn(async move { let mut received_notifications = 0; - let mut sync_protocol_name = None; while received_notifications < TOTAL_NOTIFS { - match events_stream2.next().await.unwrap() { - Event::NotificationStreamOpened { protocol, .. } => { - if let None = sync_protocol_name { - sync_protocol_name = Some(protocol); - } + match handle2.next_event().await.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); }, - Event::NotificationStreamClosed { protocol, .. } => { - if Some(&protocol) != sync_protocol_name.as_ref() { - panic!() - } + NotificationEvent::NotificationReceived { notification, .. } => { + assert_eq!( + notification, + format!("hello #{}", received_notifications).into_bytes() + ); + received_notifications += 1; }, - Event::NotificationsReceived { messages, .. } => - for message in messages { - assert_eq!(message.0, PROTOCOL_NAME.into()); - assert_eq!(message.1, format!("hello #{}", received_notifications)); - received_notifications += 1; - }, _ => {}, - }; + } if rand::random::() < 2 { tokio::time::sleep(Duration::from_millis(rand::random::() % 750)).await; @@ -554,20 +532,20 @@ async fn notifications_back_pressure() { // Wait for the `NotificationStreamOpened`. loop { - match events_stream1.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => break, + match handle1.next_event().await.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + NotificationEvent::NotificationStreamOpened { .. } => break, _ => {}, }; } // Sending! for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id, PROTOCOL_NAME.into()).unwrap(); - notif - .ready() + handle1 + .send_async_notification(&node2_id, format!("hello #{}", num).into_bytes()) .await - .unwrap() - .send(format!("hello #{}", num).into_bytes()) .unwrap(); } @@ -576,28 +554,31 @@ async fn notifications_back_pressure() { #[tokio::test] async fn fallback_name_working() { + sp_tracing::try_init_simple(); // Node 1 supports the protocols "new" and "old". Node 2 only supports "old". Checks whether // they can connect. 
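These tests all answer `ValidateInboundSubstream` by sending `Accept` back on the supplied channel before the substream is reported as open. A small, self-contained sketch of that request/reply-over-channel pattern using std channels; all names here are illustrative, not `sc-network` types.

use std::{sync::mpsc, thread};

/// Illustrative stand-ins for the notification validation handshake: the network
/// side asks "may this peer open a substream?" and waits for the reply on a
/// dedicated channel, much like `result_tx.send(ValidationResult::Accept)` in the tests.
#[allow(dead_code)]
enum ValidationResult {
    Accept,
    Reject,
}

enum Event {
    ValidateInbound { peer: u64, result_tx: mpsc::Sender<ValidationResult> },
    StreamOpened { peer: u64 },
}

fn main() {
    let (event_tx, event_rx) = mpsc::channel::<Event>();

    // "Network" side: requests validation, then reports the opened substream.
    let net = thread::spawn(move || {
        let (result_tx, result_rx) = mpsc::channel();
        event_tx.send(Event::ValidateInbound { peer: 7, result_tx }).unwrap();
        if matches!(result_rx.recv().unwrap(), ValidationResult::Accept) {
            event_tx.send(Event::StreamOpened { peer: 7 }).unwrap();
        }
    });

    // "Protocol" side: accepts every inbound substream, as the tests do.
    while let Ok(event) = event_rx.recv() {
        match event {
            Event::ValidateInbound { peer, result_tx } => {
                println!("validating inbound substream from peer {peer}");
                result_tx.send(ValidationResult::Accept).unwrap();
            },
            Event::StreamOpened { peer } => {
                println!("substream opened for peer {peer}");
                break
            },
        }
    }

    net.join().unwrap();
}
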
const NEW_PROTOCOL_NAME: &str = "/new-shiny-protocol-that-isnt-PROTOCOL_NAME"; let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (node1, mut events_stream1) = TestNetworkBuilder::new() - .with_notification_protocol(config::NonDefaultSetConfig { - notifications_protocol: NEW_PROTOCOL_NAME.into(), - fallback_names: vec![PROTOCOL_NAME.into()], - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }) + let (config, mut handle1) = config::NonDefaultSetConfig::new( + NEW_PROTOCOL_NAME.into(), + vec![PROTOCOL_NAME.into()], + 1024 * 1024, + None, + Default::default(), + ); + let (network1, _) = TestNetworkBuilder::new() + .with_notification_protocol(config) .with_config(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }) - .build() - .start_network(); + .build(); - let (_, mut events_stream2) = TestNetworkBuilder::new() + let (node1, _) = network1.start_network(); + + let (network2, handle2) = TestNetworkBuilder::new() .with_set_config(config::SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, @@ -605,34 +586,38 @@ async fn fallback_name_working() { }], ..Default::default() }) - .build() - .start_network(); + .build(); + let mut handle2 = handle2.unwrap(); + let _ = network2.start_network(); let receiver = tokio::spawn(async move { // Wait for the `NotificationStreamOpened`. loop { - match events_stream2.next().await.unwrap() { - Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => { - assert_eq!(protocol, PROTOCOL_NAME.into()); + match handle2.next_event().await.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + NotificationEvent::NotificationStreamOpened { negotiated_fallback, .. } => { assert_eq!(negotiated_fallback, None); break }, _ => {}, - }; + } } }); // Wait for the `NotificationStreamOpened`. loop { - match events_stream1.next().await.unwrap() { - Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } - if protocol == NEW_PROTOCOL_NAME.into() => - { + match handle1.next_event().await.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + NotificationEvent::NotificationStreamOpened { negotiated_fallback, .. 
} => { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME.into())); break }, _ => {}, - }; + } } receiver.await.unwrap(); @@ -655,6 +640,7 @@ async fn ensure_listen_addresses_consistent_with_transport_memory() { ) }) .build() + .0 .start_network(); } @@ -674,6 +660,7 @@ async fn ensure_listen_addresses_consistent_with_transport_not_memory() { ) }) .build() + .0 .start_network(); } @@ -699,6 +686,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_memory() { ) }) .build() + .0 .start_network(); } @@ -723,6 +711,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { ) }) .build() + .0 .start_network(); } @@ -751,6 +740,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_memory() { ) }) .build() + .0 .start_network(); } @@ -778,6 +768,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { ) }) .build() + .0 .start_network(); } @@ -800,6 +791,7 @@ async fn ensure_public_addresses_consistent_with_transport_memory() { ) }) .build() + .0 .start_network(); } @@ -821,5 +813,6 @@ async fn ensure_public_addresses_consistent_with_transport_not_memory() { ) }) .build() + .0 .start_network(); } diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs index 389177b4aaf1bc1ffb6219c598b508c8fa83963e..f2be662ada164aa5f44f2e338d7ad3d9c89368ec 100644 --- a/substrate/client/network/test/src/sync.rs +++ b/substrate/client/network/test/src/sync.rs @@ -44,16 +44,16 @@ async fn sync_peers_works() { sp_tracing::try_init_simple(); let mut net = TestNet::new(3); - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - for peer in 0..3 { - if net.peer(peer).num_peers() != 2 { - return Poll::Pending - } - } - Poll::Ready(()) - }) - .await; + while net.peer(0).num_peers().await != 2 && + net.peer(1).num_peers().await != 2 && + net.peer(2).num_peers().await != 2 + { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Poll::Ready(()) + }) + .await; + } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -412,15 +412,13 @@ async fn can_sync_small_non_best_forks() { assert!(net.peer(1).client().header(small_hash).unwrap().is_none()); // poll until the two nodes connect, otherwise announcing the block will not work - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { + while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); Poll::Ready(()) - } - }) - .await; + }) + .await; + } // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. 
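These sync-test hunks replace the single `poll_fn` future with a loop: `num_peers()` is now async, so the tests drive the network once per iteration and then re-check the peer counts. A hypothetical helper capturing that pattern, assuming the `TestNet` type and the `peer()`, `num_peers()` and `poll()` methods used in the tests above:

    // Illustrative helper, not part of the diff: wait until peers 0 and 1 report
    // at least one connection, polling the network between checks.
    async fn wait_until_connected(net: &mut TestNet) {
        use std::task::Poll;

        while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 {
            futures::future::poll_fn::<(), _>(|cx| {
                net.poll(cx);
                Poll::Ready(())
            })
            .await;
        }
    }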
@@ -465,6 +463,7 @@ async fn can_sync_forks_ahead_of_the_best_chain() { net.peer(1).push_blocks(1, false); net.run_until_connected().await; + // Peer 0 is on 2-block fork which is announced with is_best=false let fork_hash = net .peer(0) @@ -516,15 +515,13 @@ async fn can_sync_explicit_forks() { assert!(net.peer(1).client().header(small_hash).unwrap().is_none()); // poll until the two nodes connect, otherwise announcing the block will not work - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { + while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); Poll::Ready(()) - } - }) - .await; + }) + .await; + } // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. @@ -613,15 +610,14 @@ async fn full_sync_requires_block_body() { net.peer(0).push_headers(1); // Wait for nodes to connect - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { + while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); Poll::Ready(()) - } - }) - .await; + }) + .await; + } + net.run_until_idle().await; assert_eq!(net.peer(1).client.info().best_number, 0); } @@ -917,18 +913,16 @@ async fn block_announce_data_is_propagated() { }); // Wait until peer 1 is connected to both nodes. - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).num_peers() == 2 && - net.peer(0).num_peers() == 1 && - net.peer(2).num_peers() == 1 - { + while net.peer(1).num_peers().await != 2 || + net.peer(0).num_peers().await != 1 || + net.peer(2).num_peers().await != 1 + { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); Poll::Ready(()) - } else { - Poll::Pending - } - }) - .await; + }) + .await; + } let block_hash = net .peer(0) @@ -1010,7 +1004,7 @@ async fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; net.peer(0).push_blocks(1, false); net.run_until_sync().await; - assert_eq!(1, net.peer(0).num_peers()); + assert_eq!(1, net.peer(0).num_peers().await); } let hashof10 = hashes[9]; diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index 2a6aa4b3a40aee615ea1cce248b2681b33339610..24b5087af1f42fab1e351d8eaa0a15bb5d2805bd 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true documentation = "https://docs.rs/sc-network-transactions" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" -libp2p = "0.51.3" +libp2p = "0.51.4" log = "0.4.17" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-network = { path = ".." 
} diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs index 1b97d4b96c9764f6fc18d2de2dfaeee7c530a2c6..9758ea4c4fcba72a0dfdc38cdbe1bdb718fd6323 100644 --- a/substrate/client/network/transactions/src/lib.rs +++ b/substrate/client/network/transactions/src/lib.rs @@ -21,8 +21,8 @@ //! Usage: //! //! - Use [`TransactionsHandlerPrototype::new`] to create a prototype. -//! - Pass the return value of [`TransactionsHandlerPrototype::set_config`] to the network -//! configuration as an extra peers set. +//! - Pass the `NonDefaultSetConfig` returned from [`TransactionsHandlerPrototype::new`] to the +//! network configuration as an extra peers set. //! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a //! `Future` that processes transactions. @@ -37,7 +37,7 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, error, - event::Event, + service::traits::{NotificationEvent, NotificationService, ValidationResult}, types::ProtocolName, utils::{interval, LruHashSet}, NetworkEventStream, NetworkNotification, NetworkPeers, @@ -115,8 +115,11 @@ impl Future for PendingTransaction { /// Prototype for a [`TransactionsHandler`]. pub struct TransactionsHandlerPrototype { + /// Name of the transaction protocol. protocol_name: ProtocolName, - fallback_protocol_names: Vec, + + /// Handle that is used to communicate with `sc_network::Notifications`. + notification_service: Box, } impl TransactionsHandlerPrototype { @@ -125,35 +128,28 @@ impl TransactionsHandlerPrototype { protocol_id: ProtocolId, genesis_hash: Hash, fork_id: Option<&str>, - ) -> Self { + ) -> (Self, NonDefaultSetConfig) { let genesis_hash = genesis_hash.as_ref(); - let protocol_name = if let Some(fork_id) = fork_id { + let protocol_name: ProtocolName = if let Some(fork_id) = fork_id { format!("/{}/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { format!("/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash)) - }; - let legacy_protocol_name = format!("/{}/transactions/1", protocol_id.as_ref()); - - Self { - protocol_name: protocol_name.into(), - fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(), } - } - - /// Returns the configuration of the set to put in the network configuration. - pub fn set_config(&self) -> NonDefaultSetConfig { - NonDefaultSetConfig { - notifications_protocol: self.protocol_name.clone(), - fallback_names: self.fallback_protocol_names.clone(), - max_notification_size: MAX_TRANSACTIONS_SIZE, - handshake: None, - set_config: SetConfig { + .into(); + let (config, notification_service) = NonDefaultSetConfig::new( + protocol_name.clone(), + vec![format!("/{}/transactions/1", protocol_id.as_ref()).into()], + MAX_TRANSACTIONS_SIZE, + None, + SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Deny, }, - } + ); + + (Self { protocol_name, notification_service }, config) } /// Turns the prototype into the actual handler. 
Returns a controller that allows controlling @@ -173,12 +169,12 @@ impl TransactionsHandlerPrototype { transaction_pool: Arc>, metrics_registry: Option<&Registry>, ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { - let net_event_stream = network.event_stream("transactions-handler-net"); let sync_event_stream = sync.event_stream("transactions-handler-sync"); let (to_handler, from_controller) = tracing_unbounded("mpsc_transactions_handler", 100_000); let handler = TransactionsHandler { protocol_name: self.protocol_name, + notification_service: self.notification_service, propagate_timeout: (Box::pin(interval(PROPAGATE_TIMEOUT)) as Pin + Send>>) .fuse(), @@ -186,7 +182,6 @@ impl TransactionsHandlerPrototype { pending_transactions_peers: HashMap::new(), network, sync, - net_event_stream: net_event_stream.fuse(), sync_event_stream: sync_event_stream.fuse(), peers: HashMap::new(), transaction_pool, @@ -253,8 +248,6 @@ pub struct TransactionsHandler< network: N, /// Syncing service. sync: S, - /// Stream of networking events. - net_event_stream: stream::Fuse + Send>>>, /// Receiver for syncing-related events. sync_event_stream: stream::Fuse + Send>>>, // All connected peers @@ -263,6 +256,8 @@ pub struct TransactionsHandler< from_controller: TracingUnboundedReceiver>, /// Prometheus metrics. metrics: Option, + /// Handle that is used to communicate with `sc_network::Notifications`. + notification_service: Box, } /// Peer information @@ -295,14 +290,6 @@ where warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); } }, - network_event = self.net_event_stream.next() => { - if let Some(network_event) = network_event { - self.handle_network_event(network_event).await; - } else { - // Networking has seemingly closed. Closing as well. - return; - } - }, sync_event = self.sync_event_stream.next() => { if let Some(sync_event) = sync_event { self.handle_sync_event(sync_event); @@ -317,10 +304,61 @@ where ToHandler::PropagateTransactions => self.propagate_transactions(), } }, + event = self.notification_service.next_event().fuse() => { + if let Some(event) = event { + self.handle_notification_event(event) + } else { + // `Notifications` has seemingly closed. Closing as well. + return + } + } } } } + fn handle_notification_event(&mut self, event: NotificationEvent) { + match event { + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => { + // only accept peers whose role can be determined + let result = self + .network + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); + }, + NotificationEvent::NotificationStreamOpened { peer, handshake, .. 
} => { + let Some(role) = self.network.peer_role(peer, handshake) else { + log::debug!(target: "sub-libp2p", "role for {peer} couldn't be determined"); + return + }; + + let _was_in = self.peers.insert( + peer, + Peer { + known_transactions: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS).expect("Constant is nonzero"), + ), + role, + }, + ); + debug_assert!(_was_in.is_none()); + }, + NotificationEvent::NotificationStreamClosed { peer } => { + let _peer = self.peers.remove(&peer); + debug_assert!(_peer.is_some()); + }, + NotificationEvent::NotificationReceived { peer, notification } => { + if let Ok(m) = + as Decode>::decode(&mut notification.as_ref()) + { + self.on_transactions(peer, m); + } else { + warn!(target: "sub-libp2p", "Failed to decode transactions list"); + } + }, + } + } + fn handle_sync_event(&mut self, event: SyncEvent) { match event { SyncEvent::PeerConnected(remote) => { @@ -346,51 +384,6 @@ where } } - async fn handle_network_event(&mut self, event: Event) { - match event { - Event::Dht(_) => {}, - Event::NotificationStreamOpened { remote, protocol, role, .. } - if protocol == self.protocol_name => - { - let _was_in = self.peers.insert( - remote, - Peer { - known_transactions: LruHashSet::new( - NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS).expect("Constant is nonzero"), - ), - role, - }, - ); - debug_assert!(_was_in.is_none()); - }, - Event::NotificationStreamClosed { remote, protocol } - if protocol == self.protocol_name => - { - let _peer = self.peers.remove(&remote); - debug_assert!(_peer.is_some()); - }, - - Event::NotificationsReceived { remote, messages } => { - for (protocol, message) in messages { - if protocol != self.protocol_name { - continue - } - - if let Ok(m) = - as Decode>::decode(&mut message.as_ref()) - { - self.on_transactions(remote, m); - } else { - warn!(target: "sub-libp2p", "Failed to decode transactions list"); - } - } - }, - - // Not our concern. - Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. 
} => {}, - } - } - /// Called when peer sends us new transactions fn on_transactions(&mut self, who: PeerId, transactions: Transactions) { // Accept transactions only when node is not major syncing @@ -482,8 +475,7 @@ where propagated_to.entry(hash).or_default().push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - self.network - .write_notification(*who, self.protocol_name.clone(), to_send.encode()); + let _ = self.notification_service.send_sync_notification(who, to_send.encode()); } } diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index 83397f528797b322ca8f4bdaf2bbf6fc0ee547ae..b049ba0a3d89b5f7ecbca0980b4cbd7e2a258161 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,9 +22,9 @@ codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" -hyper = { version = "0.14.16", features = ["stream", "http2"] } +hyper = { version = "0.14.16", features = ["http2", "stream"] } hyper-rustls = { version = "0.24.0", features = ["http2"] } -libp2p = "0.51.3" +libp2p = "0.51.4" num_cpus = "1.13" once_cell = "1.8" parking_lot = "0.12.1" @@ -45,7 +48,7 @@ log = "0.4.17" lazy_static = "1.4.0" tokio = "1.22.0" sc-block-builder = { path = "../block-builder" } -sc-client-db = { path = "../db", default-features = true} +sc-client-db = { path = "../db", default-features = true } sc-transaction-pool = { path = "../transaction-pool" } sc-transaction-pool-api = { path = "../transaction-pool/api" } sp-consensus = { path = "../../primitives/consensus/common" } diff --git a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs index c7df5784d329eecd299619e89f1f4f0eb6f0d319..65e2f3ba64dbec30f5d5a944d5cb4f357c76e756 100644 --- a/substrate/client/offchain/src/api.rs +++ b/substrate/client/offchain/src/api.rs @@ -30,7 +30,6 @@ use sp_core::{ }, OpaquePeerId, }; -pub use sp_offchain::STORAGE_PREFIX; mod http; @@ -223,7 +222,7 @@ mod tests { use sc_client_db::offchain::LocalStorage; use sc_network::{ config::MultiaddrWithPeerId, types::ProtocolName, NetworkPeers, NetworkStateInfo, - ReputationChange, + ObservedRole, ReputationChange, }; use sp_core::offchain::{storage::OffchainDb, DbExternalities, Externalities, StorageKind}; use std::time::SystemTime; @@ -243,11 +242,15 @@ mod tests { unimplemented!(); } - fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { + fn report_peer(&self, _peer_id: PeerId, _cost_benefit: ReputationChange) { unimplemented!(); } - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { + fn peer_reputation(&self, _peer_id: &PeerId) -> i32 { + unimplemented!() + } + + fn disconnect_peer(&self, _peer_id: PeerId, _protocol: ProtocolName) { unimplemented!(); } @@ -294,6 +297,10 @@ mod tests { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, _handshake: Vec) -> Option { + None + } } impl NetworkStateInfo for TestNetwork { diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs index 756ab77ff94eb2cfd85765ecb40d5ed68ea5438b..eb3436432f342f4de97a5325c7a14ea4ce167581 100644 --- a/substrate/client/offchain/src/lib.rs +++ b/substrate/client/offchain/src/lib.rs @@ 
-330,7 +330,9 @@ mod tests { use libp2p::{Multiaddr, PeerId}; use sc_block_builder::BlockBuilderBuilder; use sc_client_api::Backend as _; - use sc_network::{config::MultiaddrWithPeerId, types::ProtocolName, ReputationChange}; + use sc_network::{ + config::MultiaddrWithPeerId, types::ProtocolName, ObservedRole, ReputationChange, + }; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; @@ -372,11 +374,15 @@ mod tests { unimplemented!(); } - fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { + fn report_peer(&self, _peer_id: PeerId, _cost_benefit: ReputationChange) { unimplemented!(); } - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { + fn peer_reputation(&self, _peer_id: &PeerId) -> i32 { + unimplemented!() + } + + fn disconnect_peer(&self, _peer_id: PeerId, _protocol: ProtocolName) { unimplemented!(); } @@ -423,6 +429,10 @@ mod tests { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, _handshake: Vec) -> Option { + None + } } #[test] diff --git a/substrate/client/proposer-metrics/Cargo.toml b/substrate/client/proposer-metrics/Cargo.toml index b6b4452ecc64edc466606d82f355b33c8bd7667c..664b72764a3b8b31b81cf437aa69f93c45f97b35 100644 --- a/substrate/client/proposer-metrics/Cargo.toml +++ b/substrate/client/proposer-metrics/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Basic metrics for block production." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index e72bbe48ee3e01349796b57c558a0765665068e9..6b1270fc370985f5f8d0428bafc77077734e4a74 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -9,13 +9,16 @@ repository.workspace = true description = "Substrate RPC interfaces." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0" sc-chain-spec = { path = "../chain-spec" } @@ -25,4 +28,4 @@ sp-core = { path = "../../primitives/core" } sp-rpc = { path = "../../primitives/rpc" } sp-runtime = { path = "../../primitives/runtime" } sp-version = { path = "../../primitives/version" } -jsonrpsee = { version = "0.16.2", features = ["server", "client-core", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index a7cc374f97a1e011b26f58d16850eabfde57fd65..5bb7317264c54f6d4953f185549d39513b4ab9b6 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Substrate RPC servers." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index cfe7f8a117ddc09736238c701c6a13f5e2e47a60..b5fb8b5b20464a53e463a5fc3584891f7beef17f 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -9,11 +9,14 @@ repository.workspace = true description = "Substrate RPC interface v2." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } # Internal chain structures for "chain_spec". sc-chain-spec = { path = "../chain-spec" } # Pool for submitting extrinsics required by "transaction" @@ -21,6 +24,7 @@ sc-transaction-pool-api = { path = "../transaction-pool/api" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } sp-api = { path = "../../primitives/api" } +sp-rpc = { path = "../../primitives/rpc" } sp-blockchain = { path = "../../primitives/blockchain" } sp-version = { path = "../../primitives/version" } sc-client-api = { path = "../api" } @@ -36,14 +40,16 @@ tokio = { version = "1.22.0", features = ["sync"] } array-bytes = "6.1" log = "0.4.17" futures-util = { version = "0.3.19", default-features = false } + [dev-dependencies] serde_json = "1.0.108" tokio = { version = "1.22.0", features = ["macros"] } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } substrate-test-runtime = { path = "../../test-utils/runtime" } sp-consensus = { path = "../../primitives/consensus/common" } +sp-externalities = { path = "../../primitives/externalities" } sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } sc-block-builder = { path = "../block-builder" } -sc-service = { path = "../service", features = ["test-helpers"]} +sc-service = { path = "../service", features = ["test-helpers"] } assert_matches = "1.3.0" pretty_assertions = "1.2.1" diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index bded842d8fd0de408b0d233af12010d41a9a7df7..269962cfd7482356b56ac202f11adbb824737518 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -29,13 +29,13 @@ use jsonrpsee::core::{async_trait, RpcResult}; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, StorageProvider, }; -use sp_api::{CallApiAt, CallContext, NumberFor}; +use sp_api::{CallApiAt, CallContext}; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, }; use sp_core::Bytes; use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT}, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, SaturatedConversion, }; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; @@ -124,7 +124,7 @@ where let finalized_num = self.client.info().finalized_number; if finalized_num >= height { - let Ok(Some(hash)) = self.client.block_hash(height.into()) else { return Ok(vec![]) }; + let Ok(Some(hash)) = self.client.block_hash(height) else { return Ok(vec![]) }; return Ok(vec![hex_string(&hash.as_ref())]) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs 
b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index d93c4018b60faee0928204f1a6ecdfb75dab845c..9ae801379559409ea8ad72e1a29c3a95f0e9919a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -21,6 +21,7 @@ //! API trait of the chain head. use crate::chain_head::event::{FollowEvent, MethodResponse, StorageQuery}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use sp_rpc::list::ListOrValue; #[rpc(client, server)] pub trait ChainHeadApi { @@ -73,14 +74,6 @@ pub trait ChainHeadApi { hash: Hash, ) -> RpcResult>; - /// Get the chain's genesis hash. - /// - /// # Unstable - /// - /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_genesisHash", blocking)] - fn chain_head_unstable_genesis_hash(&self) -> RpcResult; - /// Returns storage entries at a specific block's state. /// /// # Unstable @@ -109,16 +102,22 @@ pub trait ChainHeadApi { call_parameters: String, ) -> RpcResult; - /// Unpin a block reported by the `follow` method. + /// Unpin a block or multiple blocks reported by the `follow` method. /// /// Ongoing operations that require the provided block /// will continue normally. /// + /// When this method returns an error, it is guaranteed that no blocks have been unpinned. + /// /// # Unstable /// /// This method is unstable and subject to change in the future. #[method(name = "chainHead_unstable_unpin", blocking)] - fn chain_head_unstable_unpin(&self, follow_subscription: String, hash: Hash) -> RpcResult<()>; + fn chain_head_unstable_unpin( + &self, + follow_subscription: String, + hash_or_hashes: ListOrValue, + ) -> RpcResult<()>; /// Resumes a storage fetch started with `chainHead_storage` after it has generated an /// `operationWaitingForContinue` event. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index a8c1c4f7e083d86b19906d8845a5ee84b3b5ca6e..8e04ac7b1778ebf2aff81dd34bd495af9e0766fe 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -48,6 +48,7 @@ use sc_client_api::{ use sp_api::CallApiAt; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_core::{traits::CallContext, Bytes}; +use sp_rpc::list::ListOrValue; use sp_runtime::traits::Block as BlockT; use std::{marker::PhantomData, sync::Arc, time::Duration}; @@ -106,8 +107,6 @@ pub struct ChainHead, Block: BlockT, Client> { executor: SubscriptionTaskExecutor, /// Keep track of the pinned blocks for each subscription. subscriptions: Arc>, - /// The hexadecimal encoded hash of the genesis block. - genesis_hash: String, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. operation_max_storage_items: usize, @@ -117,14 +116,12 @@ pub struct ChainHead, Block: BlockT, Client> { impl, Block: BlockT, Client> ChainHead { /// Create a new [`ChainHead`]. 
- pub fn new>( + pub fn new( client: Arc, backend: Arc, executor: SubscriptionTaskExecutor, - genesis_hash: GenesisHash, config: ChainHeadConfig, ) -> Self { - let genesis_hash = hex_string(&genesis_hash.as_ref()); Self { client, backend: backend.clone(), @@ -136,7 +133,6 @@ impl, Block: BlockT, Client> ChainHead { backend, )), operation_max_storage_items: config.operation_max_storage_items, - genesis_hash, _phantom: PhantomData, } } @@ -202,7 +198,9 @@ where let sub_id = match self.accept_subscription(&mut sink) { Ok(sub_id) => sub_id, Err(err) => { - sink.close(ChainHeadRpcError::InvalidSubscriptionID); + sink.close(ChainHeadRpcError::InternalError( + "Cannot generate subscription ID".into(), + )); return Err(err) }, }; @@ -310,14 +308,10 @@ where self.client .header(hash) .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) - .map_err(ChainHeadRpcError::FetchBlockHeader) + .map_err(|err| ChainHeadRpcError::InternalError(err.to_string())) .map_err(Into::into) } - fn chain_head_unstable_genesis_hash(&self) -> RpcResult { - Ok(self.genesis_hash.clone()) - } - fn chain_head_unstable_storage( &self, follow_subscription: String, @@ -401,7 +395,7 @@ where // Reject subscription if with_runtime is false. if !block_guard.has_runtime() { - return Err(ChainHeadRpcError::InvalidParam( + return Err(ChainHeadRpcError::InvalidRuntimeCall( "The runtime updates flag must be set".to_string(), ) .into()) @@ -432,9 +426,16 @@ where fn chain_head_unstable_unpin( &self, follow_subscription: String, - hash: Block::Hash, + hash_or_hashes: ListOrValue, ) -> RpcResult<()> { - match self.subscriptions.unpin_block(&follow_subscription, hash) { + let result = match hash_or_hashes { + ListOrValue::Value(hash) => + self.subscriptions.unpin_blocks(&follow_subscription, [hash]), + ListOrValue::List(hashes) => + self.subscriptions.unpin_blocks(&follow_subscription, hashes), + }; + + match result { Ok(()) => Ok(()), Err(SubscriptionManagementError::SubscriptionAbsent) => { // Invalid invalid subscription ID. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs index 6e19f59a5d68747dce1c9a9506dc4f36e662a5d3..c23489a050e52b5d9dba6a327082664049d4a413 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -22,8 +22,8 @@ use std::{collections::VecDeque, marker::PhantomData, sync::Arc}; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sc_utils::mpsc::TracingUnboundedSender; -use sp_api::BlockT; use sp_core::storage::well_known_keys; +use sp_runtime::traits::Block as BlockT; use crate::chain_head::event::OperationStorageItems; diff --git a/substrate/client/rpc-spec-v2/src/chain_head/error.rs b/substrate/client/rpc-spec-v2/src/chain_head/error.rs index 811666428c5a56479ede3af21f9fadcbd305dce2..a9b7d7f96e49b662cac843f9427c667ef8e8e72b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/error.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/error.rs @@ -22,7 +22,6 @@ use jsonrpsee::{ core::Error as RpcError, types::error::{CallError, ErrorObject}, }; -use sp_blockchain::Error as BlockchainError; /// ChainHead RPC errors. #[derive(Debug, thiserror::Error)] @@ -30,44 +29,55 @@ pub enum Error { /// The provided block hash is invalid. #[error("Invalid block hash")] InvalidBlock, - /// Fetch block header error. 
- #[error("Could not fetch block header: {0}")] - FetchBlockHeader(BlockchainError), + /// The follow subscription was started with `withRuntime` set to `false`. + #[error("The `chainHead_follow` subscription was started with `withRuntime` set to `false`")] + InvalidRuntimeCall(String), + /// Wait-for-continue event not generated. + #[error("Wait for continue event was not generated for the subscription")] + InvalidContinue, /// Invalid parameter provided to the RPC method. #[error("Invalid parameter: {0}")] InvalidParam(String), - /// Invalid subscription ID provided by the RPC server. - #[error("Invalid subscription ID")] - InvalidSubscriptionID, + /// Internal error. + #[error("Internal error: {0}")] + InternalError(String), +} + +/// Errors for `chainHead` RPC module, as defined in +/// . +pub mod rpc_spec_v2 { + /// The provided block hash is invalid. + pub const INVALID_BLOCK_ERROR: i32 = -32801; + /// The follow subscription was started with `withRuntime` set to `false`. + pub const INVALID_RUNTIME_CALL: i32 = -32802; /// Wait-for-continue event not generated. - #[error("Wait for continue event was not generated for the subscription")] - InvalidContinue, + pub const INVALID_CONTINUE: i32 = -32803; } -// Base code for all `chainHead` errors. -const BASE_ERROR: i32 = 2000; -/// The provided block hash is invalid. -const INVALID_BLOCK_ERROR: i32 = BASE_ERROR + 1; -/// Fetch block header error. -const FETCH_BLOCK_HEADER_ERROR: i32 = BASE_ERROR + 2; -/// Invalid parameter error. -const INVALID_PARAM_ERROR: i32 = BASE_ERROR + 3; -/// Invalid subscription ID. -const INVALID_SUB_ID: i32 = BASE_ERROR + 4; -/// Wait-for-continue event not generated. -const INVALID_CONTINUE: i32 = BASE_ERROR + 5; +/// General purpose errors, as defined in +/// . +pub mod json_rpc_spec { + /// Invalid parameter error. + pub const INVALID_PARAM_ERROR: i32 = -32602; + /// Internal error. 
+	pub const INTERNAL_ERROR: i32 = -32603;
+}

 impl From<Error> for ErrorObject<'static> {
 	fn from(e: Error) -> Self {
 		let msg = e.to_string();

 		match e {
-			Error::InvalidBlock => ErrorObject::owned(INVALID_BLOCK_ERROR, msg, None::<()>),
-			Error::FetchBlockHeader(_) =>
-				ErrorObject::owned(FETCH_BLOCK_HEADER_ERROR, msg, None::<()>),
-			Error::InvalidParam(_) => ErrorObject::owned(INVALID_PARAM_ERROR, msg, None::<()>),
-			Error::InvalidSubscriptionID => ErrorObject::owned(INVALID_SUB_ID, msg, None::<()>),
-			Error::InvalidContinue => ErrorObject::owned(INVALID_CONTINUE, msg, None::<()>),
+			Error::InvalidBlock =>
+				ErrorObject::owned(rpc_spec_v2::INVALID_BLOCK_ERROR, msg, None::<()>),
+			Error::InvalidRuntimeCall(_) =>
+				ErrorObject::owned(rpc_spec_v2::INVALID_RUNTIME_CALL, msg, None::<()>),
+			Error::InvalidContinue =>
+				ErrorObject::owned(rpc_spec_v2::INVALID_CONTINUE, msg, None::<()>),
+			Error::InvalidParam(_) =>
+				ErrorObject::owned(json_rpc_spec::INVALID_PARAM_ERROR, msg, None::<()>),
+			Error::InternalError(_) =>
+				ErrorObject::owned(json_rpc_spec::INTERNAL_ERROR, msg, None::<()>),
 		}
 	}
 }
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
index 8a75029a99436efbe87e54407a5199ee8fc4e4bd..2b250f3dc2cf2d0b52a222256b314eefc035a310 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
@@ -750,22 +750,36 @@ impl> SubscriptionsInner {
 		}
 	}

-	pub fn unpin_block(
+	pub fn unpin_blocks(
 		&mut self,
 		sub_id: &str,
-		hash: Block::Hash,
+		hashes: impl IntoIterator<Item = Block::Hash> + Clone,
 	) -> Result<(), SubscriptionManagementError> {
 		let Some(sub) = self.subs.get_mut(sub_id) else {
 			return Err(SubscriptionManagementError::SubscriptionAbsent)
 		};

-		// Check that unpin was not called before and the block was pinned
-		// for this subscription.
-		if !sub.unregister_block(hash) {
-			return Err(SubscriptionManagementError::BlockHashAbsent)
+		// Ensure that all blocks are part of the subscription before removing individual
+		// blocks.
+		for hash in hashes.clone() {
+			if !sub.contains_block(hash) {
+				return Err(SubscriptionManagementError::BlockHashAbsent)
+			}
+		}
+
+		// Note: this needs to be separate from the global mappings to keep the borrow checker
+		// from thinking we borrow `&mut self` twice: once from `self.subs.get_mut` and once from
+		// `self.global_unregister_block`. The borrowing is in fact correct, since different
+		// fields of the structure are borrowed, one at a time.
+		for hash in hashes.clone() {
+			sub.unregister_block(hash);
+		}
+
+		// Blocks have been removed from the subscription. Remove them from the global tracking.
+		for hash in hashes {
+			self.global_unregister_block(hash);
 		}
-		self.global_unregister_block(hash);
 		Ok(())
 	}

@@ -1029,11 +1043,11 @@ mod tests {
 		assert_eq!(block.has_runtime(), true);

 		let invalid_id = "abc-invalid".to_string();
-		let err = subs.unpin_block(&invalid_id, hash).unwrap_err();
+		let err = subs.unpin_blocks(&invalid_id, vec![hash]).unwrap_err();
 		assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent);

 		// Unpin the block.
-		subs.unpin_block(&id, hash).unwrap();
+		subs.unpin_blocks(&id, vec![hash]).unwrap();
 		let err = subs.lock_block(&id, hash, 1).unwrap_err();
 		assert_eq!(err, SubscriptionManagementError::BlockHashAbsent);
 	}
@@ -1077,13 +1091,13 @@ mod tests {
 		// Ensure the block propagated to the subscription.
 		subs.subs.get(&id_second).unwrap().blocks.get(&hash).unwrap();

-		subs.unpin_block(&id, hash).unwrap();
+		subs.unpin_blocks(&id, vec![hash]).unwrap();
 		assert_eq!(*subs.global_blocks.get(&hash).unwrap(), 1);

 		// Cannot unpin a block twice for the same subscription.
-		let err = subs.unpin_block(&id, hash).unwrap_err();
+		let err = subs.unpin_blocks(&id, vec![hash]).unwrap_err();
 		assert_eq!(err, SubscriptionManagementError::BlockHashAbsent);

-		subs.unpin_block(&id_second, hash).unwrap();
+		subs.unpin_blocks(&id_second, vec![hash]).unwrap();
 		// Block unregistered from the memory.
 		assert!(subs.global_blocks.get(&hash).is_none());
 	}
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs
index b25b1a4913b49f52035b17190f77df99f701c504..c830e662da2e5c3499f86ae1ddc40bd3b6f4e40a 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs
@@ -94,22 +94,23 @@ impl> SubscriptionManagement {
 		inner.pin_block(sub_id, hash)
 	}

-	/// Unpin the block from the subscription.
+	/// Unpin the blocks from the subscription.
 	///
-	/// The last subscription that unpins the block is also unpinning the block
-	/// from the backend.
+	/// Blocks are reference counted, and when the last subscription unpins a given block, the
+	/// block is also unpinned from the backend.
 	///
 	/// This method is called only once per subscription.
 	///
-	/// Returns an error if the block is not pinned for the subscription or
-	/// the subscription ID is invalid.
-	pub fn unpin_block(
+	/// Returns an error if the subscription ID is invalid, or any of the blocks are not pinned
+	/// for the subscription. When an error is returned, it is guaranteed that no blocks have
+	/// been unpinned.
+	pub fn unpin_blocks(
 		&self,
 		sub_id: &str,
-		hash: Block::Hash,
+		hashes: impl IntoIterator<Item = Block::Hash> + Clone,
 	) -> Result<(), SubscriptionManagementError> {
 		let mut inner = self.inner.write();
-		inner.unpin_block(sub_id, hash)
+		inner.unpin_blocks(sub_id, hashes)
 	}

 	/// Ensure the block remains pinned until the return object is dropped.
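On the RPC surface this is what lets `chainHead_unstable_unpin` take either one hash or an array of hashes. A sketch of the two call shapes, using the same `jsonrpsee` test helpers that appear later in this diff; `api`, `sub_id` and the block hashes are placeholders:

    // Single hash: the previous call shape keeps working.
    let _: () = api
        .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash])
        .await
        .unwrap();

    // Several hashes at once: unpinning is all-or-nothing. If any hash is not
    // pinned for this subscription, the call fails with "Invalid block hash"
    // (-32801) and nothing is unpinned.
    let _: () = api
        .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_1_hash, &block_2_hash]])
        .await
        .unwrap();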
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index a901f3039ffeabb8b4e9999e1bba28d1aa10d687..d63a98a5cb0d93b3633864bd3405ad38c2b6e799 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -24,14 +24,15 @@ use sc_client_api::{ StorageData, StorageEventStream, StorageKey, StorageProvider, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sp_api::{CallApiAt, CallApiAtParams, NumberFor, RuntimeVersion}; +use sp_api::{CallApiAt, CallApiAtParams}; use sp_blockchain::{BlockStatus, CachedHeaderMetadata, HeaderBackend, HeaderMetadata, Info}; use sp_consensus::BlockOrigin; use sp_runtime::{ generic::SignedBlock, - traits::{Block as BlockT, Header as HeaderT}, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, Justifications, }; +use sp_version::RuntimeVersion; use std::sync::Arc; use substrate_test_runtime::{Block, Hash, Header}; @@ -235,7 +236,7 @@ impl> CallApiAt for ChainHeadMock fn initialize_extensions( &self, at: ::Hash, - extensions: &mut sp_api::Extensions, + extensions: &mut sp_externalities::Extensions, ) -> Result<(), sp_api::ApiError> { self.client.initialize_extensions(at, extensions) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c3f5564ebc4e768969ce8f715c9346c3db52277b..c8f2362b9ebbf6821ed4e44d70478efbf97e6c33 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -28,13 +28,12 @@ use futures::Future; use jsonrpsee::{ core::{error::Error, server::rpc_module::Subscription as RpcSubscription}, rpc_params, - types::{error::CallError, EmptyServerParams as EmptyParams}, + types::error::CallError, RpcModule, }; use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_service::client::new_in_mem; -use sp_api::BlockT; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ @@ -42,6 +41,7 @@ use sp_core::{ testing::TaskExecutor, Blake2Hasher, Hasher, }; +use sp_runtime::traits::Block as BlockT; use sp_version::RuntimeVersion; use std::{ collections::{HashMap, HashSet}, @@ -61,7 +61,6 @@ const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; const MAX_PAGINATION_LIMIT: usize = 5; -const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; @@ -111,7 +110,6 @@ async fn setup_api() -> ( client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -162,7 +160,6 @@ async fn follow_subscription_produces_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -231,7 +228,6 @@ async fn follow_with_runtime() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -345,31 +341,6 @@ async fn follow_with_runtime() { assert_eq!(event, expected); } -#[tokio::test] -async fn get_genesis() { - let builder 
= TestClientBuilder::new(); - let backend = builder.backend(); - let client = Arc::new(builder.build()); - - let api = ChainHead::new( - client.clone(), - backend, - Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, - ChainHeadConfig { - global_max_pinned_blocks: MAX_PINNED_BLOCKS, - subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), - subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - }, - ) - .into_rpc(); - - let genesis: String = - api.call("chainHead_unstable_genesisHash", EmptyParams::new()).await.unwrap(); - assert_eq!(genesis, hex_string(&CHAIN_GENESIS)); -} - #[tokio::test] async fn get_header() { let (_client, api, _sub, sub_id, block) = setup_api().await; @@ -389,7 +360,7 @@ async fn get_header() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Obtain the valid header. @@ -418,7 +389,7 @@ async fn get_body() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Valid call. @@ -503,7 +474,7 @@ async fn call_runtime() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Pass an invalid parameters that cannot be decode. @@ -516,7 +487,7 @@ async fn call_runtime() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2003 && err.message().contains("Invalid parameter") + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message().contains("Invalid parameter") ); // Valid call. @@ -569,7 +540,6 @@ async fn call_runtime_without_flag() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -620,7 +590,7 @@ async fn call_runtime_without_flag() { .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2003 && err.message().contains("The runtime updates flag must be set") + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_RUNTIME_CALL && err.message().contains("subscription was started with `withRuntime` set to `false`") ); } @@ -658,7 +628,7 @@ async fn get_storage_hash() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Valid call without storage at the key. 
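The assertions above now check the spec-defined error codes instead of the old `2000`-based ones. A small hypothetical helper showing how a client might map the codes introduced in `error.rs` back to their meaning:

    // Sketch only: discriminate chainHead errors by the codes defined above.
    fn describe_chain_head_error(code: i32) -> &'static str {
        match code {
            -32801 => "invalid block hash",                       // rpc_spec_v2::INVALID_BLOCK_ERROR
            -32802 => "follow started with withRuntime = false",  // rpc_spec_v2::INVALID_RUNTIME_CALL
            -32803 => "no operationWaitingForContinue generated", // rpc_spec_v2::INVALID_CONTINUE
            -32602 => "invalid parameter",                        // json_rpc_spec::INVALID_PARAM_ERROR
            -32603 => "internal error",                           // json_rpc_spec::INTERNAL_ERROR
            _ => "unknown error code",
        }
    }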
@@ -926,7 +896,7 @@ async fn get_storage_value() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Valid call without storage at the key. @@ -1228,7 +1198,6 @@ async fn separate_operation_ids_for_subscriptions() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1316,7 +1285,6 @@ async fn follow_generates_initial_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1472,7 +1440,6 @@ async fn follow_exceeding_pinned_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1549,7 +1516,6 @@ async fn follow_with_unpin() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1591,22 +1557,28 @@ async fn follow_with_unpin() { // Unpin an invalid subscription ID must return Ok(()). let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api - .call("chainHead_unstable_unpin", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); let err = api - .call::<_, serde_json::Value>("chainHead_unstable_unpin", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &invalid_hash], + ) .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // To not exceed the number of pinned blocks, we need to unpin before the next import. 
- let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &block_hash]).await.unwrap(); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) + .await + .unwrap(); // Block tree: // finalized_block -> block -> block2 @@ -1645,6 +1617,159 @@ async fn follow_with_unpin() { assert!(sub.next::>().await.is_none()); } +#[tokio::test] +async fn follow_with_multiple_unpin_hashes() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); + let sub_id = sub.subscription_id(); + let sub_id = serde_json::to_string(&sub_id).unwrap(); + + // Import 3 blocks. + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_1_hash = block_1.header.hash(); + client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); + + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_2_hash = block_2.header.hash(); + client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); + + let block_3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_3_hash = block_3.header.hash(); + client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + // Unpin an invalid subscription ID must return Ok(()). + let invalid_hash = hex_string(&INVALID_HASH); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) + .await + .unwrap(); + + // Valid subscription with invalid block hash. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &invalid_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" + ); + + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_1_hash]) + .await + .unwrap(); + + // One block hash is invalid. Block 1 is already unpinned. 
+ let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, vec![&block_1_hash, &block_2_hash, &block_3_hash]], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" + ); + + // Unpin multiple blocks. + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]]) + .await + .unwrap(); + + // Check block 2 and 3 are unpinned. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &block_2_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" + ); + + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &block_3_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" + ); +} + #[tokio::test] async fn follow_prune_best_block() { let builder = TestClientBuilder::new(); @@ -1655,7 +1780,6 @@ async fn follow_prune_best_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1828,7 +1952,7 @@ async fn follow_prune_best_block() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); let hash = format!("{:?}", block_2_hash); - let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &hash]).await.unwrap(); + let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); } #[tokio::test] @@ -1841,7 +1965,6 @@ async fn follow_forks_pruned_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1993,7 +2116,6 @@ async fn follow_report_multiple_pruned_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2239,7 +2361,6 @@ async fn pin_block_references() { client.clone(), backend.clone(), Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 3, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2305,7 +2426,10 @@ async fn pin_block_references() { wait_pinned_references(&backend, &hash, 1).await; // To not exceed the number of pinned blocks, we need to unpin before the next import. - let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &block_hash]).await.unwrap(); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) + .await + .unwrap(); // Make sure unpin clears out the reference. 
let refs = backend.pin_refs(&hash).unwrap(); @@ -2374,7 +2498,6 @@ async fn follow_finalized_before_new_block() { client_mock.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2489,7 +2612,6 @@ async fn ensure_operation_limits_works() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2594,7 +2716,6 @@ async fn check_continue_operation() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2777,7 +2898,6 @@ async fn stop_storage_operation() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index 1cedcb3a6d08c1650919550d648fe63fefc3d5c7..361d98a6b10e937c8382cf8a23977783710822f8 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Substrate Client RPC" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index de69c50702ac4810f8e5fa9184d7077ccc432d48..0dd6db753327bb91118a02ef97b363a1ea5b5e31 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -9,14 +9,17 @@ repository.workspace = true description = "Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. Manages communication between them." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [features] -default = [ "rocksdb" ] +default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. 
-rocksdb = [ "sc-client-db/rocksdb" ] +rocksdb = ["sc-client-db/rocksdb"] # exposes the client type test-helpers = [] runtime-benchmarks = [ @@ -34,7 +37,7 @@ log = "0.4.17" futures-timer = "3.0.1" exit-future = "0.2.0" pin-project = "1.0.12" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" sc-keystore = { path = "../keystore" } sp-runtime = { path = "../../primitives/runtime" } @@ -59,7 +62,7 @@ sc-network-transactions = { path = "../network/transactions" } sc-chain-spec = { path = "../chain-spec" } sc-client-api = { path = "../api" } sp-api = { path = "../../primitives/api" } -sc-client-db = { path = "../db", default-features = false} +sc-client-db = { path = "../db", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1" } sc-executor = { path = "../executor" } sc-transaction-pool = { path = "../transaction-pool" } @@ -76,8 +79,8 @@ sc-tracing = { path = "../tracing" } sc-sysinfo = { path = "../sysinfo" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } -async-trait = "0.1.57" -tokio = { version = "1.22.0", features = ["time", "rt-multi-thread", "parking_lot"] } +async-trait = "0.1.74" +tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "time"] } tempfile = "3.1.0" directories = "5.0.1" static_init = "1.0.3" diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 3838accde02393b053943e944e86f1f02ba8944e..1a3a679c519add2c6eeb0b3ce2aed3e754fca693 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -130,10 +130,11 @@ where } /// Create the initial parts of a full node with the default genesis block builder. -pub fn new_full_parts( +pub fn new_full_parts_record_import( config: &Configuration, telemetry: Option, executor: TExec, + enable_import_proof_recording: bool, ) -> Result, Error> where TBl: BlockT, @@ -148,7 +149,26 @@ where executor.clone(), )?; - new_full_parts_with_genesis_builder(config, telemetry, executor, backend, genesis_block_builder) + new_full_parts_with_genesis_builder( + config, + telemetry, + executor, + backend, + genesis_block_builder, + enable_import_proof_recording, + ) +} +/// Create the initial parts of a full node with the default genesis block builder. +pub fn new_full_parts( + config: &Configuration, + telemetry: Option, + executor: TExec, +) -> Result, Error> +where + TBl: BlockT, + TExec: CodeExecutor + RuntimeVersionOf + Clone, +{ + new_full_parts_record_import(config, telemetry, executor, false) } /// Create the initial parts of a full node. @@ -158,6 +178,7 @@ pub fn new_full_parts_with_genesis_builder>, genesis_block_builder: TBuildGenesisBlock, + enable_import_proof_recording: bool, ) -> Result, Error> where TBl: BlockT, @@ -225,6 +246,7 @@ where SyncMode::LightState { .. } | SyncMode::Warp { .. } ), wasm_runtime_substitutes, + enable_import_proof_recording, }, )?; @@ -637,7 +659,6 @@ where client.clone(), backend.clone(), task_executor.clone(), - client.info().genesis_hash, // Defaults to sensible limits for the `ChainHead`. sc_rpc_spec_v2::chain_head::ChainHeadConfig::default(), ) @@ -754,6 +775,11 @@ where } let protocol_id = config.protocol_id(); + let genesis_hash = client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"); let block_announce_validator = if let Some(f) = block_announce_validator_builder { f(client.clone()) @@ -803,11 +829,7 @@ where // Allow both outgoing and incoming requests. 
let (handler, protocol_config) = WarpSyncRequestHandler::new( protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), + genesis_hash, config.chain_spec.fork_id(), warp_with_provider.clone(), ); @@ -846,17 +868,13 @@ where } // create transactions protocol and add it to the list of supported protocols of - // `network_params` - let transactions_handler_proto = sc_network_transactions::TransactionsHandlerPrototype::new( - protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - config.chain_spec.fork_id(), - ); - net_config.add_notification_protocol(transactions_handler_proto.set_config()); + let (transactions_handler_proto, transactions_config) = + sc_network_transactions::TransactionsHandlerPrototype::new( + protocol_id.clone(), + genesis_hash, + config.chain_spec.fork_id(), + ); + net_config.add_notification_protocol(transactions_config); // Create `PeerStore` and initialize it with bootnode peer ids. let peer_store = PeerStore::new( @@ -870,7 +888,6 @@ where let peer_store_handle = peer_store.handle(); spawn_handle.spawn("peer-store", Some("networking"), peer_store.run()); - let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let (engine, sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config.role), client.clone(), @@ -885,7 +902,7 @@ where block_downloader, state_request_protocol_name, warp_request_protocol_name, - rx, + peer_store_handle.clone(), )?; let sync_service_import_queue = sync_service.clone(); let sync_service = Arc::new(sync_service); @@ -906,7 +923,6 @@ where fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, - tx, }; let has_bootnodes = !network_params.network_config.network_config.boot_nodes.is_empty(); diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 9d51aae55b20d131841b16cabde6c4f3bc6c4e5a..aa9c1b80a29a95bd77efbda35620c132b624bd9b 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -77,7 +77,7 @@ use sp_state_machine::{ ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection, MAX_NESTED_TRIE_DEPTH, }; -use sp_trie::{CompactProof, MerkleValue, StorageProof}; +use sp_trie::{proof_size_extension::ProofSizeExt, CompactProof, MerkleValue, StorageProof}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -184,7 +184,7 @@ where ) } -/// Relevant client configuration items relevant for the client. +/// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig { /// Enable the offchain worker db. @@ -198,6 +198,8 @@ pub struct ClientConfig { /// Map of WASM runtime substitute starting at the child of the given block until the runtime /// version doesn't match anymore. 
pub wasm_runtime_substitutes: HashMap, Vec>, + /// Enable recording of storage proofs during block import + pub enable_import_proof_recording: bool, } impl Default for ClientConfig { @@ -208,6 +210,7 @@ impl Default for ClientConfig { wasm_runtime_overrides: None, no_genesis: false, wasm_runtime_substitutes: HashMap::new(), + enable_import_proof_recording: false, } } } @@ -858,6 +861,14 @@ where runtime_api.set_call_context(CallContext::Onchain); + if self.config.enable_import_proof_recording { + runtime_api.record_proof(); + let recorder = runtime_api + .proof_recorder() + .expect("Proof recording is enabled in the line above; qed."); + runtime_api.register_extension(ProofSizeExt::new(recorder)); + } + runtime_api.execute_block( *parent_hash, Block::new(import_block.header.clone(), body.clone()), diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index ff9eb982b862f9ebb36132c730835dc2087ef291..0c7e138ce905bd60bef7ba07896605873a3ab3a5 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -53,9 +53,10 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; pub use self::{ builder::{ build_network, new_client, new_db_backend, new_full_client, new_full_parts, - new_full_parts_with_genesis_builder, new_native_or_wasm_executor, new_wasm_executor, - spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams, - TFullBackend, TFullCallExecutor, TFullClient, + new_full_parts_record_import, new_full_parts_with_genesis_builder, + new_native_or_wasm_executor, new_wasm_executor, spawn_tasks, BuildNetworkParams, + KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor, + TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 670312e4161aa18e56597997b7e13c65fc2d209c..625d8286396e7778dd7271db772d143550dc6b5e 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -8,13 +8,16 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-channel = "1.8.0" array-bytes = "6.1" -fdlimit = "0.2.1" +fdlimit = "0.3.0" futures = "0.3.21" log = "0.4.17" parity-scale-codec = "3.6.1" @@ -23,12 +26,12 @@ tempfile = "3.1.0" tokio = { version = "1.22.0", features = ["time"] } sc-block-builder = { path = "../../block-builder" } sc-client-api = { path = "../../api" } -sc-client-db = { path = "../../db", default-features = false} +sc-client-db = { path = "../../db", default-features = false } sc-consensus = { path = "../../consensus/common" } sc-executor = { path = "../../executor" } sc-network = { path = "../../network" } sc-network-sync = { path = "../../network/sync" } -sc-service = { path = "..", features = ["test-helpers"]} +sc-service = { path = "..", features = ["test-helpers"] } sc-transaction-pool-api = { path = "../../transaction-pool/api" } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 9700c7643c48514de6189fe619255516636a737a..456df73459a31f2a92561b8ee0e57761eb03e5ae 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -285,7 +285,7 @@ where base_port: u16, ) -> 
TestNet { sp_tracing::try_init_simple(); - fdlimit::raise_fd_limit(); + fdlimit::raise_fd_limit().unwrap(); let runtime = Runtime::new().expect("Error creating tokio runtime"); let mut net = TestNet { runtime, diff --git a/substrate/client/state-db/Cargo.toml b/substrate/client/state-db/Cargo.toml index c5e8272637d4cadca5fdefc51048ef6b4c6d994a..001ada02ef2f8140dd4619e451517e95f91a136c 100644 --- a/substrate/client/state-db/Cargo.toml +++ b/substrate/client/state-db/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "State database maintenance. Handles canonicalization and pruning in the database." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs index c656f126ae6eba9c070d137d234ed1af73214158..41c231c31aafbc6453fa4343640c27b733174c28 100644 --- a/substrate/client/state-db/src/lib.rs +++ b/substrate/client/state-db/src/lib.rs @@ -474,7 +474,7 @@ impl StateDbSync { if have_block { let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { - trace!(target: "state-db-pin", "Pinned block: {:?}", hash); + trace!(target: LOG_TARGET_PIN, "Pinned block: {:?}", hash); self.non_canonical.pin(hash); } *refs += 1; @@ -491,11 +491,11 @@ impl StateDbSync { Entry::Occupied(mut entry) => { *entry.get_mut() -= 1; if *entry.get() == 0 { - trace!(target: "state-db-pin", "Unpinned block: {:?}", hash); + trace!(target: LOG_TARGET_PIN, "Unpinned block: {:?}", hash); entry.remove(); self.non_canonical.unpin(hash); } else { - trace!(target: "state-db-pin", "Releasing reference for {:?}", hash); + trace!(target: LOG_TARGET_PIN, "Releasing reference for {:?}", hash); } }, Entry::Vacant(_) => {}, diff --git a/substrate/client/state-db/src/pruning.rs b/substrate/client/state-db/src/pruning.rs index 623d30b098b62a41545d40b7c7f63096051378ba..ae8a9a12490990f5bcecec1967091652262bd0d0 100644 --- a/substrate/client/state-db/src/pruning.rs +++ b/substrate/client/state-db/src/pruning.rs @@ -385,7 +385,7 @@ impl RefWindow { /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. pub fn prune_one(&mut self, commit: &mut CommitSet) -> Result<(), Error> { if let Some(pruned) = self.queue.pop_front(self.base)? 
{ - trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); + trace!(target: LOG_TARGET, "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.base; commit.data.deleted.extend(pruned.deleted.into_iter()); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); @@ -393,7 +393,7 @@ impl RefWindow { self.base += 1; Ok(()) } else { - trace!(target: "state-db", "Trying to prune when there's nothing to prune"); + trace!(target: LOG_TARGET, "Trying to prune when there's nothing to prune"); Err(Error::StateDb(StateDbError::BlockUnavailable)) } } @@ -418,7 +418,7 @@ impl RefWindow { return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) } trace!( - target: "state-db", + target: LOG_TARGET, "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index 371d67369166b8f8448146bd6d3dfb982d0e0dc9..adfd27a1705ad3e3ade840c81af60b23a0d02af1 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -9,13 +9,16 @@ repository.workspace = true description = "Substrate statement store." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.17" parking_lot = "0.12.1" -parity-db = "0.4.8" +parity-db = "0.4.12" tokio = { version = "1.22.0", features = ["time"] } sp-statement-store = { path = "../../primitives/statement-store" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index 021ee76240b980221602f0516afde1549e4e84b5..1c4a136ade6b1134841f2003bfaa9e1806558641 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -8,11 +8,14 @@ repository.workspace = true description = "Storage monitor service for substrate" homepage = "https://substrate.io" +[lints] +workspace = true + [dependencies] -clap = { version = "4.4.6", features = ["derive", "string"] } +clap = { version = "4.4.11", features = ["derive", "string"] } log = "0.4.17" -fs4 = "0.6.3" -sc-client-db = { path = "../db", default-features = false} +fs4 = "0.7.0" +sc-client-db = { path = "../db", default-features = false } sp-core = { path = "../../primitives/core" } tokio = "1.22.0" thiserror = "1.0.48" diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index 569cd067f27d7f33122ac58b06d05f0b547a10cc..c839a4210e4709ee19c39eb2726875cdef2eb3f5 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -8,13 +8,16 @@ license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -serde = { version = "1.0.188", features = ["derive"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0.48" sc-chain-spec = { path = "../chain-spec" } diff --git a/substrate/client/sysinfo/Cargo.toml 
b/substrate/client/sysinfo/Cargo.toml index 86f03050c44e9caf9f4acdd92b0666e4dbec4eed..e5d5987c90ee15ce8a87bf64dbdf35be38e24d92 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -10,6 +10,9 @@ description = "A crate that provides basic hardware and software telemetry infor documentation = "https://docs.rs/sc-sysinfo" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -21,7 +24,7 @@ rand = "0.8.5" rand_pcg = "0.3.1" derive_more = "0.99" regex = "1" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" sc-telemetry = { path = "../telemetry" } sp-core = { path = "../../primitives/core" } diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index a693a2884b547dec53f6aa1f04341774e256bbeb..f4c73dec67e82925457cb8074499c3e73785a970 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -10,19 +10,22 @@ repository.workspace = true documentation = "https://docs.rs/sc-telemetry" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -chrono = "0.4.27" +chrono = "0.4.31" futures = "0.3.21" -libp2p = { version = "0.51.3", features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"] } +libp2p = { version = "0.51.4", features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"] } log = "0.4.17" parking_lot = "0.12.1" pin-project = "1.0.12" sc-utils = { path = "../utils" } rand = "0.8.5" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0.48" wasm-timer = "0.2.5" diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index ffcbf07490836318cd17ccd983823bbde2f56f78..c5d5783460462c7876337a29c0ccbe04723d8b5f 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -9,20 +9,24 @@ repository.workspace = true description = "Instrumentation implementation for substrate." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -atty = "0.2.13" -chrono = "0.4.27" +is-terminal = "0.4.9" +chrono = "0.4.31" +codec = { package = "parity-scale-codec", version = "3.6.1" } lazy_static = "1.4.0" libc = "0.2.121" log = { version = "0.4.17" } parking_lot = "0.12.1" regex = "1.6.0" rustc-hash = "1.1.0" -serde = "1.0.188" +serde = "1.0.193" thiserror = "1.0.48" tracing = "0.1.29" tracing-log = "0.1.3" diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index b134cbce3ccf4d4472680787838fd4ad6ebcd567..3d862a021b31f019e53a951b12abb03297957059 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Helper macros for Substrate's client CLI" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.1" proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.38", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.41", features = ["extra-traits", "full", "parsing", "proc-macro"] } diff --git a/substrate/client/tracing/src/block/mod.rs b/substrate/client/tracing/src/block/mod.rs index 9ebf8e55c94d05f007c6fda363409e110a61c4c2..01744cd5563bd696196b41340d93dc7d3a606367 100644 --- a/substrate/client/tracing/src/block/mod.rs +++ b/substrate/client/tracing/src/block/mod.rs @@ -25,6 +25,7 @@ use std::{ time::Instant, }; +use codec::Encode; use parking_lot::Mutex; use tracing::{ dispatcher, @@ -34,7 +35,7 @@ use tracing::{ use crate::{SpanDatum, TraceEvent, Values}; use sc_client_api::BlockBackend; -use sp_api::{Core, Encode, Metadata, ProvideRuntimeApi}; +use sp_api::{Core, Metadata, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::hexdisplay::HexDisplay; use sp_rpc::tracing::{BlockTrace, Span, TraceBlockResponse}; diff --git a/substrate/client/tracing/src/logging/mod.rs b/substrate/client/tracing/src/logging/mod.rs index a3cf277fbd5010ff636679d3a43249e52746725e..403839390d655714e03dc794ad92cbed4044b6db 100644 --- a/substrate/client/tracing/src/logging/mod.rs +++ b/substrate/client/tracing/src/logging/mod.rs @@ -33,6 +33,7 @@ pub(crate) type DefaultLogger = stderr_writer::MakeStderrWriter; pub use directives::*; pub use sc_tracing_proc_macro::*; +use is_terminal::IsTerminal; use std::io; use tracing::Subscriber; use tracing_subscriber::{ @@ -170,7 +171,7 @@ where _ => true, } || detailed_output; - let enable_color = force_colors.unwrap_or_else(|| atty::is(atty::Stream::Stderr)); + let enable_color = force_colors.unwrap_or_else(|| io::stderr().is_terminal()); let timer = fast_local_time::FastLocalTime { with_fractional: detailed_output }; let event_format = EventFormat { @@ -179,7 +180,7 @@ where display_level: detailed_output, display_thread_name: detailed_output, enable_color, - dup_to_stdout: !atty::is(atty::Stream::Stderr) && atty::is(atty::Stream::Stdout), + dup_to_stdout: !io::stderr().is_terminal() && io::stdout().is_terminal(), }; let builder = FmtSubscriber::builder().with_env_filter(env_filter); diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml 
index b893dc839edd4c8a5a6d179b616d857bcd4d2e3c..2eeeb69d5cf470218d3dd5ab69f829f14157e43e 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -9,18 +9,21 @@ repository.workspace = true description = "Substrate transaction pool implementation." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = "0.4.17" parking_lot = "0.12.1" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0.48" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-client-api = { path = "../api" } diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index f5760c271ad7de8a9924759bb78568290559c77e..c7325dc2bb63bc26fd5ecef9d4508f5c5b1ddcd3 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -8,16 +8,19 @@ homepage = "https://substrate.io" repository.workspace = true description = "Transaction pool client facing API." +[lints] +workspace = true + [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" log = "0.4.17" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0.48" sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } [dev-dependencies] serde_json = "1.0.108" diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 5afdddb7402d163284cfe3279b280c0fecb026a7..484a6d6cf9f07787a51cbd3a2ae6d339e157ce43 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -39,9 +39,6 @@ pub mod watcher; pub use self::{ base_pool::Transaction, - pool::{ - BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, - TransactionFor, - }, + pool::{BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool}, }; pub use validated_pool::{IsValidator, ValidatedTransaction}; diff --git a/substrate/client/utils/Cargo.toml b/substrate/client/utils/Cargo.toml index 885b1d26a8e03585a25356847d277acc5d60635c..a19457ac3d077f548652ee78858023f00303bff1 100644 --- a/substrate/client/utils/Cargo.toml +++ b/substrate/client/utils/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "I/O for Substrate runtimes" readme = "README.md" +[lints] +workspace = true + [dependencies] async-channel = "1.8.0" futures = "0.3.21" @@ -17,10 +20,10 @@ lazy_static = "1.4.0" log = "0.4" parking_lot = "0.12.1" prometheus = { version = "0.13.0", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } [features] 
-default = [ "metered" ] +default = ["metered"] metered = [] [dev-dependencies] diff --git a/substrate/docs/STYLE_GUIDE.md b/substrate/docs/STYLE_GUIDE.md index 6ea0755d0807d4452b6f2b1347bb13999ca37fe5..d5e703b3fdf25c00f5fe6123a53debc4f0c7f3a7 100644 --- a/substrate/docs/STYLE_GUIDE.md +++ b/substrate/docs/STYLE_GUIDE.md @@ -157,7 +157,7 @@ format looks like this: - The feature is written as a single line if it fits within 80 chars: ```toml [features] -default = [ "std" ] +default = ["std"] ``` - Otherwise the feature is broken down into multiple lines with one entry per line. Each line is padded with one tab and diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 9f2f73ffb151ad8e5936dd23d743f4a93fd37594..083d098b22a4de15aa45f79d2657d6e3d6f04eb0 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "The single package to get you started with building frame pallets and runtimes" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] # enable `experimental` feature for docs features = ["experimental"] @@ -17,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # external deps parity-scale-codec = { version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } # primitive deps, used for developing FRAME pallets. sp-runtime = { default-features = false, path = "../primitives/runtime" } @@ -27,8 +30,8 @@ sp-core = { default-features = false, path = "../primitives/core" } sp-arithmetic = { default-features = false, path = "../primitives/arithmetic" } # frame deps, for developing FRAME pallets. -frame-support = { default-features = false, path = "support" } -frame-system = { default-features = false, path = "system" } +frame-support = { default-features = false, path = "support" } +frame-system = { default-features = false, path = "system" } # primitive types used for developing FRAME runtimes. sp-version = { default-features = false, path = "../primitives/version", optional = true } @@ -52,8 +55,8 @@ log = { version = "0.4.20", default-features = false } pallet-examples = { path = "./examples" } [features] -default = [ "runtime", "std" ] -experimental = [ "frame-support/experimental", "frame-system/experimental" ] +default = ["runtime", "std"] +experimental = ["frame-support/experimental"] runtime = [ "frame-executive", "frame-system-rpc-runtime-api", diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index d7d7352975aeafbbb78b72e93c861a0d9df5dbbe..39f5a6ceb756c4659d70c9a18e4b4f33d26258af 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "The Alliance pallet provides a collective for standard-setting industry collaboration." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,27 +22,27 @@ log = { version = "0.4.14", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } sp-core-hashing = { path = "../../primitives/core/hashing", default-features = false, optional = true } -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } pallet-identity = { path = "../identity", default-features = false } pallet-collective = { path = "../collective", default-features = false, optional = true } [dev-dependencies] array-bytes = "6.1" -sp-core-hashing = { path = "../../primitives/core/hashing", default-features = false} +sp-core-hashing = { path = "../../primitives/core/hashing", default-features = false } pallet-balances = { path = "../balances" } pallet-collective = { path = "../collective" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index 37cc3314037e5175039802fc2c39b225910ac912..cb2a04f17c57f47611da5d4a63ec869c6be34c27 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -183,7 +183,7 @@ mod benchmarks { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), + last_hash, index, true, )?; @@ -191,12 +191,7 @@ mod benchmarks { let voter = members[m as usize - 3].clone(); // Voter votes aye without resolving the vote. - Alliance::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), - index, - true, - )?; + Alliance::::vote(SystemOrigin::Signed(voter.clone()).into(), last_hash, index, true)?; // Voter switches vote to nay, but does not kill the vote, just updates + inserts let approve = false; @@ -206,7 +201,7 @@ mod benchmarks { frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); #[extrinsic_call] - _(SystemOrigin::Signed(voter), last_hash.clone(), index, approve); + _(SystemOrigin::Signed(voter), last_hash, index, approve); //nothing to verify Ok(()) @@ -255,24 +250,19 @@ mod benchmarks { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), + last_hash, index, true, )?; } // Voter votes aye without resolving the vote. 
- Alliance::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), - index, - true, - )?; + Alliance::::vote(SystemOrigin::Signed(voter.clone()).into(), last_hash, index, true)?; // Voter switches vote to nay, which kills the vote Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), + last_hash, index, false, )?; @@ -282,7 +272,7 @@ mod benchmarks { frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); #[extrinsic_call] - close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage); + close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage); assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); Ok(()) @@ -330,7 +320,7 @@ mod benchmarks { // approval vote Alliance::::vote( SystemOrigin::Signed(proposer.clone()).into(), - last_hash.clone(), + last_hash, index, false, )?; @@ -340,7 +330,7 @@ mod benchmarks { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), + last_hash, index, false, )?; @@ -349,22 +339,17 @@ mod benchmarks { // Member zero is the first aye Alliance::::vote( SystemOrigin::Signed(members[0].clone()).into(), - last_hash.clone(), + last_hash, index, true, )?; let voter = members[1].clone(); // Caller switches vote to aye, which passes the vote - Alliance::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), - index, - true, - )?; + Alliance::::vote(SystemOrigin::Signed(voter.clone()).into(), last_hash, index, true)?; #[extrinsic_call] - close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage); + close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage); assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); Ok(()) @@ -414,7 +399,7 @@ mod benchmarks { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), + last_hash, index, true, )?; @@ -422,7 +407,7 @@ mod benchmarks { Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), + last_hash, index, false, )?; @@ -430,7 +415,7 @@ mod benchmarks { System::::set_block_number(BlockNumberFor::::max_value()); #[extrinsic_call] - close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage); + close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage); // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -477,7 +462,7 @@ mod benchmarks { // The prime member votes aye, so abstentions default to aye. Alliance::::vote( SystemOrigin::Signed(proposer.clone()).into(), - last_hash.clone(), + last_hash, p - 1, true, // Vote aye. 
)?; @@ -489,7 +474,7 @@ mod benchmarks { let voter = &members[j as usize]; Alliance::::vote( SystemOrigin::Signed(voter.clone()).into(), - last_hash.clone(), + last_hash, index, false, )?; @@ -499,13 +484,7 @@ mod benchmarks { System::::set_block_number(BlockNumberFor::::max_value()); #[extrinsic_call] - close( - SystemOrigin::Signed(proposer), - last_hash.clone(), - index, - Weight::MAX, - bytes_in_storage, - ); + close(SystemOrigin::Signed(proposer), last_hash, index, Weight::MAX, bytes_in_storage); assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); Ok(()) diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index ace5214f145f7683b22aa9a1ec8f9689cd97221c..01e0e01fe7ec46c4c002a3afb6bafd940617d92b 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -19,16 +19,12 @@ pub use sp_core::H256; use sp_runtime::traits::Hash; -pub use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, -}; +pub use sp_runtime::{traits::BlakeTwo256, BuildStorage}; use sp_std::convert::{TryFrom, TryInto}; pub use frame_support::{ assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, - traits::{EitherOfDiverse, SortedMembers}, - BoundedVec, + traits::EitherOfDiverse, BoundedVec, }; use frame_system::{EnsureRoot, EnsureSignedBy}; use pallet_identity::{ diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index de898d4ccde14c1c9fa783cf98dabf4d63e756bf..0c7b06abf55d01c68dcf16f08bcdb00dcc04b0ea 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -9,29 +9,32 @@ repository.workspace = true description = "FRAME asset conversion pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../../primitives/api", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} +sp-api = { path = "../../primitives/api", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } pallet-assets = { path = "../assets" } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info", "num-traits"] } +primitive-types = { version = 
"0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/asset-conversion/src/benchmarking.rs b/substrate/frame/asset-conversion/src/benchmarking.rs index 87b541cd4744d1be32cd0908492b470c86d0f604..f0e02c802ad8e5a923cb9801c33c5539e9bd58a6 100644 --- a/substrate/frame/asset-conversion/src/benchmarking.rs +++ b/substrate/frame/asset-conversion/src/benchmarking.rs @@ -18,74 +18,142 @@ //! Asset Conversion pallet benchmarking. use super::*; -use frame_benchmarking::{benchmarks, whitelisted_caller}; +use crate::Pallet as AssetConversion; +use frame_benchmarking::{v2::*, whitelisted_caller}; use frame_support::{ assert_ok, - storage::bounded_vec::BoundedVec, traits::{ - fungible::{Inspect as InspectFungible, Mutate as MutateFungible, Unbalanced}, + fungible::NativeOrWithId, fungibles::{Create, Inspect, Mutate}, }, }; use frame_system::RawOrigin as SystemOrigin; use sp_core::Get; -use sp_runtime::traits::{Bounded, StaticLookup}; -use sp_std::{ops::Div, prelude::*}; +use sp_std::{marker::PhantomData, prelude::*}; -use crate::Pallet as AssetConversion; +/// Benchmark Helper +pub trait BenchmarkHelper { + /// Returns a valid assets pair for the pool creation. + /// + /// When a specific asset, such as the native asset, is required in every pool, it should be + /// returned for each odd-numbered seed. + fn create_pair(seed1: u32, seed2: u32) -> (AssetKind, AssetKind); +} + +impl BenchmarkHelper for () +where + AssetKind: From, +{ + fn create_pair(seed1: u32, seed2: u32) -> (AssetKind, AssetKind) { + (seed1.into(), seed2.into()) + } +} + +/// Factory for creating a valid asset pairs with [`NativeOrWithId::Native`] always leading in the +/// pair. +pub struct NativeOrWithIdFactory(PhantomData); +impl + Ord> BenchmarkHelper> + for NativeOrWithIdFactory +{ + fn create_pair(seed1: u32, seed2: u32) -> (NativeOrWithId, NativeOrWithId) { + if seed1 % 2 == 0 { + (NativeOrWithId::WithId(seed2.into()), NativeOrWithId::Native) + } else { + (NativeOrWithId::Native, NativeOrWithId::WithId(seed2.into())) + } + } +} -const INITIAL_ASSET_BALANCE: u128 = 1_000_000_000_000; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -type BalanceOf = - <::Currency as InspectFungible<::AccountId>>::Balance; +/// Provides a pair of amounts expected to serve as sufficient initial liquidity for a pool. +fn valid_liquidity_amount(ed1: T::Balance, ed2: T::Balance) -> (T::Balance, T::Balance) +where + T::Assets: Inspect, +{ + let l = + ed1.max(ed2) + T::MintMinLiquidity::get() + T::MintMinLiquidity::get() + T::Balance::one(); + (l, l) +} -fn get_lp_token_id() -> T::PoolAssetId +/// Create the `asset` and mint the `amount` for the `caller`. +fn create_asset(caller: &T::AccountId, asset: &T::AssetKind, amount: T::Balance) where - T::PoolAssetId: Into, + T::Assets: Create + Mutate, { - let next_id: u32 = AssetConversion::::get_next_pool_asset_id().into(); - (next_id - 1).into() + if !T::Assets::asset_exists(asset.clone()) { + assert_ok!(T::Assets::create(asset.clone(), caller.clone(), true, T::Balance::one())); + } + assert_ok!(T::Assets::mint_into( + asset.clone(), + &caller, + amount + T::Assets::minimum_balance(asset.clone()) + )); } -fn create_asset(asset: &T::MultiAssetId) -> (T::AccountId, AccountIdLookupOf) +/// Create the designated fee asset for pool creation. 
+fn create_fee_asset(caller: &T::AccountId) where - T::AssetBalance: From, - T::Currency: Unbalanced, T::Assets: Create + Mutate, { - let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); - if let MultiAssetIdConversionResult::Converted(asset_id) = - T::MultiAssetIdConverter::try_convert(asset) - { - T::Currency::set_balance(&caller, BalanceOf::::max_value().div(1000u32.into())); - assert_ok!(T::Assets::create(asset_id.clone(), caller.clone(), true, 1.into())); - assert_ok!(T::Assets::mint_into(asset_id, &caller, INITIAL_ASSET_BALANCE.into())); + let fee_asset = T::PoolSetupFeeAsset::get(); + if !T::Assets::asset_exists(fee_asset.clone()) { + assert_ok!(T::Assets::create(fee_asset.clone(), caller.clone(), true, T::Balance::one())); } - (caller, caller_lookup) + assert_ok!(T::Assets::mint_into( + fee_asset.clone(), + &caller, + T::Assets::minimum_balance(fee_asset) + )); } +/// Mint the fee asset for the `caller` sufficient to cover the fee for creating a new pool. +fn mint_setup_fee_asset( + caller: &T::AccountId, + asset1: &T::AssetKind, + asset2: &T::AssetKind, + lp_token: &T::PoolAssetId, +) where + T::Assets: Create + Mutate, +{ + assert_ok!(T::Assets::mint_into( + T::PoolSetupFeeAsset::get(), + &caller, + T::PoolSetupFee::get() + + T::Assets::deposit_required(asset1.clone()) + + T::Assets::deposit_required(asset2.clone()) + + T::PoolAssets::deposit_required(lp_token.clone()) + )); +} + +/// Creates a pool for a given asset pair. +/// +/// This action mints the necessary amounts of the given assets for the `caller` to provide initial +/// liquidity. It returns the LP token ID along with a pair of amounts sufficient for the pool's +/// initial liquidity. fn create_asset_and_pool( - asset1: &T::MultiAssetId, - asset2: &T::MultiAssetId, -) -> (T::PoolAssetId, T::AccountId, AccountIdLookupOf) + caller: &T::AccountId, + asset1: &T::AssetKind, + asset2: &T::AssetKind, +) -> (T::PoolAssetId, T::Balance, T::Balance) where - T::AssetBalance: From, - T::Currency: Unbalanced, T::Assets: Create + Mutate, - T::PoolAssetId: Into, { - let (_, _) = create_asset::(asset1); - let (caller, caller_lookup) = create_asset::(asset2); + let (liquidity1, liquidity2) = valid_liquidity_amount::( + T::Assets::minimum_balance(asset1.clone()), + T::Assets::minimum_balance(asset2.clone()), + ); + create_asset::(caller, asset1, liquidity1); + create_asset::(caller, asset2, liquidity2); + let lp_token = AssetConversion::::get_next_pool_asset_id(); + + mint_setup_fee_asset::(caller, asset1, asset2, &lp_token); assert_ok!(AssetConversion::::create_pool( SystemOrigin::Signed(caller.clone()).into(), - asset1.clone(), - asset2.clone() + Box::new(asset1.clone()), + Box::new(asset2.clone()) )); - let lp_token = get_lp_token_id::(); - (lp_token, caller, caller_lookup) + (lp_token, liquidity1, liquidity2) } fn assert_last_event(generic_event: ::RuntimeEvent) { @@ -96,242 +164,198 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { assert_eq!(event, &system_event); } -benchmarks! 
{ - where_clause { - where - T::AssetBalance: From + Into, - T::Currency: Unbalanced, - T::Balance: From + Into, - T::Assets: Create + Mutate, - T::PoolAssetId: Into, - } +#[benchmarks(where T::Assets: Create + Mutate, T::PoolAssetId: Into,)] +mod benchmarks { + use super::*; - create_pool { - let asset1 = T::MultiAssetIdConverter::get_native(); - let asset2 = T::BenchmarkHelper::multiasset_id(0); - let (caller, _) = create_asset::(&asset2); - }: _(SystemOrigin::Signed(caller.clone()), asset1.clone(), asset2.clone()) - verify { - let lp_token = get_lp_token_id::(); - let pool_id = (asset1.clone(), asset2.clone()); - assert_last_event::(Event::PoolCreated { - creator: caller.clone(), - pool_account: AssetConversion::::get_pool_account(&pool_id), - pool_id, - lp_token, - }.into()); - } + #[benchmark] + fn create_pool() { + let caller: T::AccountId = whitelisted_caller(); + let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); + create_asset::(&caller, &asset1, T::Assets::minimum_balance(asset1.clone())); + create_asset::(&caller, &asset2, T::Assets::minimum_balance(asset2.clone())); - add_liquidity { - let asset1 = T::MultiAssetIdConverter::get_native(); - let asset2 = T::BenchmarkHelper::multiasset_id(0); - let (lp_token, caller, _) = create_asset_and_pool::(&asset1, &asset2); - let ed: u128 = T::Currency::minimum_balance().into(); - let add_amount = 1000 + ed; - }: _(SystemOrigin::Signed(caller.clone()), asset1.clone(), asset2.clone(), add_amount.into(), 1000.into(), 0.into(), 0.into(), caller.clone()) - verify { - let pool_id = (asset1.clone(), asset2.clone()); - let lp_minted = AssetConversion::::calc_lp_amount_for_zero_supply(&add_amount.into(), &1000.into()).unwrap().into(); - assert_eq!( - T::PoolAssets::balance(lp_token, &caller), - lp_minted.into() - ); - assert_eq!( - T::Currency::balance(&AssetConversion::::get_pool_account(&pool_id)), - add_amount.into() - ); - assert_eq!( - T::Assets::balance(T::BenchmarkHelper::asset_id(0), &AssetConversion::::get_pool_account(&pool_id)), - 1000.into() + let lp_token = AssetConversion::::get_next_pool_asset_id(); + create_fee_asset::(&caller); + mint_setup_fee_asset::(&caller, &asset1, &asset2, &lp_token); + + #[extrinsic_call] + _(SystemOrigin::Signed(caller.clone()), Box::new(asset1.clone()), Box::new(asset2.clone())); + + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2).unwrap(); + let pool_account = T::PoolLocator::address(&pool_id).unwrap(); + assert_last_event::( + Event::PoolCreated { creator: caller, pool_account, pool_id, lp_token }.into(), ); } - remove_liquidity { - let asset1 = T::MultiAssetIdConverter::get_native(); - let asset2 = T::BenchmarkHelper::multiasset_id(0); - let (lp_token, caller, _) = create_asset_and_pool::(&asset1, &asset2); - let ed: u128 = T::Currency::minimum_balance().into(); - let add_amount = 100 * ed; - let lp_minted = AssetConversion::::calc_lp_amount_for_zero_supply(&add_amount.into(), &1000.into()).unwrap().into(); - let remove_lp_amount = lp_minted.checked_div(10).unwrap(); + #[benchmark] + fn add_liquidity() { + let caller: T::AccountId = whitelisted_caller(); + let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); - AssetConversion::::add_liquidity( - SystemOrigin::Signed(caller.clone()).into(), - asset1.clone(), - asset2.clone(), - add_amount.into(), - 1000.into(), - 0.into(), - 0.into(), + create_fee_asset::(&caller); + let (lp_token, liquidity1, liquidity2) = + create_asset_and_pool::(&caller, &asset1, &asset2); + + #[extrinsic_call] + _( + SystemOrigin::Signed(caller.clone()), 
+ Box::new(asset1.clone()), + Box::new(asset2.clone()), + liquidity1, + liquidity2, + T::Balance::one(), + T::Balance::zero(), caller.clone(), - )?; - let total_supply = >::total_issuance(lp_token.clone()); - }: _(SystemOrigin::Signed(caller.clone()), asset1, asset2, remove_lp_amount.into(), 0.into(), 0.into(), caller.clone()) - verify { - let new_total_supply = >::total_issuance(lp_token.clone()); - assert_eq!( - new_total_supply, - total_supply - remove_lp_amount.into() ); + + let pool_account = T::PoolLocator::pool_address(&asset1, &asset2).unwrap(); + let lp_minted = + AssetConversion::::calc_lp_amount_for_zero_supply(&liquidity1, &liquidity2).unwrap(); + assert_eq!(T::PoolAssets::balance(lp_token, &caller), lp_minted); + assert_eq!(T::Assets::balance(asset1, &pool_account), liquidity1); + assert_eq!(T::Assets::balance(asset2, &pool_account), liquidity2); } - swap_exact_tokens_for_tokens { - let native = T::MultiAssetIdConverter::get_native(); - let asset1 = T::BenchmarkHelper::multiasset_id(1); - let asset2 = T::BenchmarkHelper::multiasset_id(2); - let (_, caller, _) = create_asset_and_pool::(&native, &asset1); - let (_, _) = create_asset::(&asset2); - let ed: u128 = T::Currency::minimum_balance().into(); + #[benchmark] + fn remove_liquidity() { + let caller: T::AccountId = whitelisted_caller(); + let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); - AssetConversion::::add_liquidity( + create_fee_asset::(&caller); + let (lp_token, liquidity1, liquidity2) = + create_asset_and_pool::(&caller, &asset1, &asset2); + + let remove_lp_amount = T::Balance::one(); + + assert_ok!(AssetConversion::::add_liquidity( SystemOrigin::Signed(caller.clone()).into(), - native.clone(), - asset1.clone(), - (100 * ed).into(), - 200.into(), - 0.into(), - 0.into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()), + liquidity1, + liquidity2, + T::Balance::one(), + T::Balance::zero(), caller.clone(), - )?; - - let path; - let swap_amount; - // if we only allow the native-asset pools, then the worst case scenario would be to swap - // asset1-native-asset2 - if !T::AllowMultiAssetPools::get() { - AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), native.clone(), asset2.clone())?; - AssetConversion::::add_liquidity( - SystemOrigin::Signed(caller.clone()).into(), - native.clone(), - asset2.clone(), - (500 * ed).into(), - 1000.into(), - 0.into(), - 0.into(), - caller.clone(), - )?; - path = vec![asset1.clone(), native.clone(), asset2.clone()]; - swap_amount = 100.into(); - } else { - let asset3 = T::BenchmarkHelper::multiasset_id(3); - AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), asset1.clone(), asset2.clone())?; - let (_, _) = create_asset::(&asset3); - AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), asset2.clone(), asset3.clone())?; + )); + let total_supply = + >::total_issuance(lp_token.clone()); - AssetConversion::::add_liquidity( - SystemOrigin::Signed(caller.clone()).into(), - asset1.clone(), - asset2.clone(), - 200.into(), - 2000.into(), - 0.into(), - 0.into(), - caller.clone(), - )?; - AssetConversion::::add_liquidity( + #[extrinsic_call] + _( + SystemOrigin::Signed(caller.clone()), + Box::new(asset1), + Box::new(asset2), + remove_lp_amount, + T::Balance::zero(), + T::Balance::zero(), + caller.clone(), + ); + + let new_total_supply = >::total_issuance(lp_token); + assert_eq!(new_total_supply, total_supply - remove_lp_amount); + } + + #[benchmark] + fn swap_exact_tokens_for_tokens(n: Linear<2, { 
T::MaxSwapPathLength::get() }>) { + let mut swap_amount = T::Balance::one(); + let mut path = vec![]; + + let caller: T::AccountId = whitelisted_caller(); + create_fee_asset::(&caller); + for n in 1..n { + let (asset1, asset2) = T::BenchmarkHelper::create_pair(n - 1, n); + swap_amount = swap_amount + T::Balance::one(); + if path.len() == 0 { + path = vec![Box::new(asset1.clone()), Box::new(asset2.clone())]; + } else { + path.push(Box::new(asset2.clone())); + } + + let (_, liquidity1, liquidity2) = create_asset_and_pool::(&caller, &asset1, &asset2); + + assert_ok!(AssetConversion::::add_liquidity( SystemOrigin::Signed(caller.clone()).into(), - asset2.clone(), - asset3.clone(), - 2000.into(), - 2000.into(), - 0.into(), - 0.into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()), + liquidity1, + liquidity2, + T::Balance::one(), + T::Balance::zero(), caller.clone(), - )?; - path = vec![native.clone(), asset1.clone(), asset2.clone(), asset3.clone()]; - swap_amount = ed.into(); + )); } - let path: BoundedVec<_, T::MaxSwapPathLength> = BoundedVec::try_from(path).unwrap(); - let native_balance = T::Currency::balance(&caller); - let asset1_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(1), &caller); - }: _(SystemOrigin::Signed(caller.clone()), path, swap_amount, 1.into(), caller.clone(), false) - verify { - if !T::AllowMultiAssetPools::get() { - let new_asset1_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(1), &caller); - assert_eq!(new_asset1_balance, asset1_balance - 100.into()); - } else { - let new_native_balance = T::Currency::balance(&caller); - assert_eq!(new_native_balance, native_balance - ed.into()); - } + let asset_in = *path.first().unwrap().clone(); + assert_ok!(T::Assets::mint_into( + asset_in.clone(), + &caller, + swap_amount + T::Balance::one() + )); + let init_caller_balance = T::Assets::balance(asset_in.clone(), &caller); + + #[extrinsic_call] + _( + SystemOrigin::Signed(caller.clone()), + path, + swap_amount, + T::Balance::one(), + caller.clone(), + true, + ); + + let actual_balance = T::Assets::balance(asset_in, &caller); + assert_eq!(actual_balance, init_caller_balance - swap_amount); } - swap_tokens_for_exact_tokens { - let native = T::MultiAssetIdConverter::get_native(); - let asset1 = T::BenchmarkHelper::multiasset_id(1); - let asset2 = T::BenchmarkHelper::multiasset_id(2); - let (_, caller, _) = create_asset_and_pool::(&native, &asset1); - let (_, _) = create_asset::(&asset2); - let ed: u128 = T::Currency::minimum_balance().into(); + #[benchmark] + fn swap_tokens_for_exact_tokens(n: Linear<2, { T::MaxSwapPathLength::get() }>) { + let mut max_swap_amount = T::Balance::one(); + let mut path = vec![]; - AssetConversion::::add_liquidity( - SystemOrigin::Signed(caller.clone()).into(), - native.clone(), - asset1.clone(), - (1000 * ed).into(), - 500.into(), - 0.into(), - 0.into(), - caller.clone(), - )?; - - let path; - // if we only allow the native-asset pools, then the worst case scenario would be to swap - // asset1-native-asset2 - if !T::AllowMultiAssetPools::get() { - AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), native.clone(), asset2.clone())?; - AssetConversion::::add_liquidity( - SystemOrigin::Signed(caller.clone()).into(), - native.clone(), - asset2.clone(), - (500 * ed).into(), - 1000.into(), - 0.into(), - 0.into(), - caller.clone(), - )?; - path = vec![asset1.clone(), native.clone(), asset2.clone()]; - } else { - AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), asset1.clone(), 
asset2.clone())?; - let asset3 = T::BenchmarkHelper::multiasset_id(3); - let (_, _) = create_asset::(&asset3); - AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), asset2.clone(), asset3.clone())?; + let caller: T::AccountId = whitelisted_caller(); + create_fee_asset::(&caller); + for n in 1..n { + let (asset1, asset2) = T::BenchmarkHelper::create_pair(n - 1, n); + max_swap_amount = max_swap_amount + T::Balance::one() + T::Balance::one(); + if path.len() == 0 { + path = vec![Box::new(asset1.clone()), Box::new(asset2.clone())]; + } else { + path.push(Box::new(asset2.clone())); + } - AssetConversion::::add_liquidity( - SystemOrigin::Signed(caller.clone()).into(), - asset1.clone(), - asset2.clone(), - 2000.into(), - 2000.into(), - 0.into(), - 0.into(), - caller.clone(), - )?; - AssetConversion::::add_liquidity( + let (_, liquidity1, liquidity2) = create_asset_and_pool::(&caller, &asset1, &asset2); + + assert_ok!(AssetConversion::::add_liquidity( SystemOrigin::Signed(caller.clone()).into(), - asset2.clone(), - asset3.clone(), - 2000.into(), - 2000.into(), - 0.into(), - 0.into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()), + liquidity1, + liquidity2, + T::Balance::one(), + T::Balance::zero(), caller.clone(), - )?; - path = vec![native.clone(), asset1.clone(), asset2.clone(), asset3.clone()]; + )); } - let path: BoundedVec<_, T::MaxSwapPathLength> = BoundedVec::try_from(path).unwrap(); - let asset2_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(2), &caller); - let asset3_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(3), &caller); - }: _(SystemOrigin::Signed(caller.clone()), path.clone(), 100.into(), (1000 * ed).into(), caller.clone(), false) - verify { - if !T::AllowMultiAssetPools::get() { - let new_asset2_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(2), &caller); - assert_eq!(new_asset2_balance, asset2_balance + 100.into()); - } else { - let new_asset3_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(3), &caller); - assert_eq!(new_asset3_balance, asset3_balance + 100.into()); - } + let asset_in = *path.first().unwrap().clone(); + let asset_out = *path.last().unwrap().clone(); + assert_ok!(T::Assets::mint_into(asset_in, &caller, max_swap_amount)); + let init_caller_balance = T::Assets::balance(asset_out.clone(), &caller); + + #[extrinsic_call] + _( + SystemOrigin::Signed(caller.clone()), + path, + T::Balance::one(), + max_swap_amount, + caller.clone(), + true, + ); + + let actual_balance = T::Assets::balance(asset_out, &caller); + assert_eq!(actual_balance, init_caller_balance + T::Balance::one()); } impl_benchmark_test_suite!(AssetConversion, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs index 8d811473e861f10b65c3b957afbb199a377875d3..f0695678fbddf07a4943c4ad0982ac95c4634866 100644 --- a/substrate/frame/asset-conversion/src/lib.rs +++ b/substrate/frame/asset-conversion/src/lib.rs @@ -53,63 +53,54 @@ //! (This can be run against the kitchen sync node in the `node` folder of this repo.) 
#![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::traits::{DefensiveOption, Incrementable}; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; - -mod types; -pub mod weights; - -#[cfg(test)] -mod tests; - #[cfg(test)] mod mock; +mod swap; +#[cfg(test)] +mod tests; +mod types; +pub mod weights; +#[cfg(feature = "runtime-benchmarks")] +pub use benchmarking::{BenchmarkHelper, NativeOrWithIdFactory}; +pub use pallet::*; +pub use swap::*; +pub use types::*; +pub use weights::WeightInfo; use codec::Codec; use frame_support::{ - ensure, - traits::tokens::{AssetId, Balance}, -}; -use frame_system::{ - ensure_signed, - pallet_prelude::{BlockNumberFor, OriginFor}, + storage::{with_storage_layer, with_transaction}, + traits::{ + fungibles::{Balanced, Create, Credit, Inspect, Mutate}, + tokens::{ + AssetId, Balance, + Fortitude::Polite, + Precision::Exact, + Preservation::{Expendable, Preserve}, + }, + AccountTouch, Incrementable, OnUnbalanced, + }, + PalletId, }; -pub use pallet::*; -use sp_arithmetic::traits::Unsigned; +use sp_core::Get; use sp_runtime::{ traits::{ - CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Ensure, MaybeDisplay, TrailingZeroInput, + CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Ensure, IntegerSquareRoot, MaybeDisplay, + One, TrailingZeroInput, Zero, }, - DispatchError, + DispatchError, Saturating, TokenError, TransactionOutcome, }; -use sp_std::prelude::*; -pub use types::*; -pub use weights::WeightInfo; +use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{ - pallet_prelude::*, - traits::{ - fungible::{Inspect as InspectFungible, Mutate as MutateFungible}, - fungibles::{Create, Inspect, Mutate}, - tokens::{ - Fortitude::Polite, - Precision::Exact, - Preservation::{Expendable, Preserve}, - }, - AccountTouch, ContainsPair, - }, - BoundedBTreeSet, PalletId, - }; - use sp_arithmetic::Permill; - use sp_runtime::{ - traits::{IntegerSquareRoot, One, Zero}, - Saturating, - }; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use sp_arithmetic::{traits::Unsigned, Permill}; #[pallet::pallet] pub struct Pallet(_); @@ -119,57 +110,46 @@ pub mod pallet { /// Overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// Currency type that this works on. - type Currency: InspectFungible - + MutateFungible; - - /// The `Currency::Balance` type of the native currency. + /// The type in which the assets for swapping are measured. type Balance: Balance; - /// The type used to describe the amount of fractions converted into assets. - type AssetBalance: Balance; - - /// A type used for conversions between `Balance` and `AssetBalance`. + /// A type used for calculations concerning the `Balance` type to avoid possible overflows. type HigherPrecisionBalance: IntegerSquareRoot + One + Ensure + Unsigned + From - + From + From - + TryInto + TryInto; - /// Identifier for the class of non-native asset. - /// Note: A `From` bound here would prevent `MultiLocation` from being used as an - /// `AssetId`. - type AssetId: AssetId; + /// Type of asset class, sourced from [`Config::Assets`], utilized to offer liquidity to a + /// pool. + type AssetKind: Parameter + MaxEncodedLen; - /// Type that identifies either the native currency or a token class from `Assets`. - /// `Ord` is added because of `get_pool_id`. - /// - /// The pool's `AccountId` is derived from this type. 
Any changes to the type may - /// necessitate a migration. - type MultiAssetId: AssetId + Ord + From; + /// Registry of assets utilized for providing liquidity to pools. + type Assets: Inspect + + Mutate + + AccountTouch + + Balanced; - /// Type to convert an `AssetId` into `MultiAssetId`. - type MultiAssetIdConverter: MultiAssetIdConverter; + /// Liquidity pool identifier. + type PoolId: Parameter + MaxEncodedLen + Ord; - /// `AssetId` to address the lp tokens by. - type PoolAssetId: AssetId + PartialOrd + Incrementable + From; + /// Provides means to resolve the [`Config::PoolId`] and it's `AccountId` from a pair + /// of [`Config::AssetKind`]s. + /// + /// Examples: [`crate::types::WithFirstAsset`], [`crate::types::Ascending`]. + type PoolLocator: PoolLocator; - /// Registry for the assets. - type Assets: Inspect - + Mutate - + AccountTouch - + ContainsPair; + /// Asset class for the lp tokens from [`Self::PoolAssets`]. + type PoolAssetId: AssetId + PartialOrd + Incrementable + From; /// Registry for the lp tokens. Ideally only this pallet should have create permissions on /// the assets. - type PoolAssets: Inspect + type PoolAssets: Inspect + Create + Mutate - + AccountTouch; + + AccountTouch; /// A % the liquidity providers will take of every swap. Represents 10ths of a percent. #[pallet::constant] @@ -179,8 +159,12 @@ pub mod pallet { #[pallet::constant] type PoolSetupFee: Get; - /// An account that receives the pool setup fee. - type PoolSetupFeeReceiver: Get; + /// Asset class from [`Config::Assets`] used to pay the [`Config::PoolSetupFee`]. + #[pallet::constant] + type PoolSetupFeeAsset: Get; + + /// Handler for the [`Config::PoolSetupFee`]. + type PoolSetupFeeTarget: OnUnbalanced>; /// A fee to withdraw the liquidity. #[pallet::constant] @@ -188,7 +172,7 @@ pub mod pallet { /// The minimum LP token amount that could be minted. Ameliorates rounding errors. #[pallet::constant] - type MintMinLiquidity: Get; + type MintMinLiquidity: Get; /// The max number of hops in a swap. #[pallet::constant] @@ -198,23 +182,19 @@ pub mod pallet { #[pallet::constant] type PalletId: Get; - /// A setting to allow creating pools with both non-native assets. - #[pallet::constant] - type AllowMultiAssetPools: Get; - /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The benchmarks need a way to create asset ids from u32s. #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper: BenchmarkHelper; + type BenchmarkHelper: BenchmarkHelper; } /// Map from `PoolAssetId` to `PoolInfo`. This establishes whether a pool has been officially /// created rather than people sending tokens directly to a pool's public account. #[pallet::storage] pub type Pools = - StorageMap<_, Blake2_128Concat, PoolIdOf, PoolInfo, OptionQuery>; + StorageMap<_, Blake2_128Concat, T::PoolId, PoolInfo, OptionQuery>; /// Stores the `PoolAssetId` that is going to be used for the next lp token. /// This gets incremented whenever a new lp pool is created. @@ -231,7 +211,7 @@ pub mod pallet { creator: T::AccountId, /// The pool id associated with the pool. Note that the order of the assets may not be /// the same as the order specified in the create pool extrinsic. - pool_id: PoolIdOf, + pool_id: T::PoolId, /// The account ID of the pool. pool_account: T::AccountId, /// The id of the liquidity tokens that will be minted when assets are added to this @@ -246,15 +226,15 @@ pub mod pallet { /// The account that the liquidity tokens were minted to. 
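Editor's note: `PoolLocator` replaces the old `get_pool_id`/`MultiAssetIdConverter` pairing — it decides whether two `AssetKind`s may form a pool, assigns the pair a canonical `PoolId`, and derives the pool's `AccountId`. A minimal sketch of the `Ascending` strategy referenced above, assuming nothing beyond `Ord` on the asset type (the crate ships its own `Ascending` and `WithFirstAsset` implementations in `types`):

    use core::cmp::Ordering;

    /// Canonical pool id for an ascending locator: the smaller asset comes
    /// first, and pairing an asset with itself is rejected.
    fn ascending_pool_id<A: Ord + Clone>(asset1: &A, asset2: &A) -> Result<(A, A), ()> {
        match asset1.cmp(asset2) {
            Ordering::Less => Ok((asset1.clone(), asset2.clone())),
            Ordering::Greater => Ok((asset2.clone(), asset1.clone())),
            Ordering::Equal => Err(()),
        }
    }

    fn main() {
        // (2, 1) and (1, 2) resolve to the same pool, so `create_pool`,
        // `add_liquidity` and the swap path validation all agree on one pool
        // per unordered pair.
        assert_eq!(ascending_pool_id(&2u32, &1u32), Ok((1, 2)));
        assert_eq!(ascending_pool_id(&1u32, &2u32), Ok((1, 2)));
        assert_eq!(ascending_pool_id(&1u32, &1u32), Err(()));
    }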
mint_to: T::AccountId, /// The pool id of the pool that the liquidity was added to. - pool_id: PoolIdOf, + pool_id: T::PoolId, /// The amount of the first asset that was added to the pool. - amount1_provided: T::AssetBalance, + amount1_provided: T::Balance, /// The amount of the second asset that was added to the pool. - amount2_provided: T::AssetBalance, + amount2_provided: T::Balance, /// The id of the lp token that was minted. lp_token: T::PoolAssetId, /// The amount of lp tokens that were minted of that id. - lp_token_minted: T::AssetBalance, + lp_token_minted: T::Balance, }, /// A successful call of the `RemoveLiquidity` extrinsic will create this event. @@ -264,15 +244,15 @@ pub mod pallet { /// The account that the assets were transferred to. withdraw_to: T::AccountId, /// The pool id that the liquidity was removed from. - pool_id: PoolIdOf, + pool_id: T::PoolId, /// The amount of the first asset that was removed from the pool. - amount1: T::AssetBalance, + amount1: T::Balance, /// The amount of the second asset that was removed from the pool. - amount2: T::AssetBalance, + amount2: T::Balance, /// The id of the lp token that was burned. lp_token: T::PoolAssetId, /// The amount of lp tokens that were burned of that id. - lp_token_burned: T::AssetBalance, + lp_token_burned: T::Balance, /// Liquidity withdrawal fee (%). withdrawal_fee: Permill, }, @@ -283,33 +263,30 @@ pub mod pallet { who: T::AccountId, /// The account that the assets were transferred to. send_to: T::AccountId, - /// The route of asset ids that the swap went through. - /// E.g. A -> Dot -> B - path: BoundedVec, /// The amount of the first asset that was swapped. - amount_in: T::AssetBalance, + amount_in: T::Balance, /// The amount of the second asset that was received. - amount_out: T::AssetBalance, + amount_out: T::Balance, + /// The route of asset IDs with amounts that the swap went through. + /// E.g. (A, amount_in) -> (Dot, amount_out) -> (B, amount_out) + path: BalancePath, }, - /// An amount has been transferred from one account to another. - Transfer { - /// The account that the assets were transferred from. - from: T::AccountId, - /// The account that the assets were transferred to. - to: T::AccountId, - /// The asset that was transferred. - asset: T::MultiAssetId, - /// The amount of the asset that was transferred. - amount: T::AssetBalance, + /// Assets have been converted from one to another. + SwapCreditExecuted { + /// The amount of the first asset that was swapped. + amount_in: T::Balance, + /// The amount of the second asset that was received. + amount_out: T::Balance, + /// The route of asset IDs with amounts that the swap went through. + /// E.g. (A, amount_in) -> (Dot, amount_out) -> (B, amount_out) + path: BalancePath, }, } #[pallet::error] pub enum Error { - /// Provided assets are equal. - EqualAssets, - /// Provided asset is not supported for pool. - UnsupportedAsset, + /// Provided asset pair is not supported for pool. + InvalidAssetPair, /// Pool already exists. PoolExists, /// Desired amount can't be zero. @@ -345,26 +322,18 @@ pub mod pallet { ZeroLiquidity, /// Amount can't be zero. ZeroAmount, - /// Insufficient liquidity in the pool. - InsufficientLiquidity, /// Calculated amount out is less than provided minimum amount. ProvidedMinimumNotSufficientForSwap, /// Provided maximum amount is not sufficient for swap. ProvidedMaximumNotSufficientForSwap, - /// Only pools with native on one side are valid. - PoolMustContainNativeCurrency, /// The provided path must consists of 2 assets at least. 
InvalidPath, - /// It was not possible to calculate path data. - PathError, /// The provided path must consists of unique assets. NonUniquePath, /// It was not possible to get or increment the Id of the pool. IncorrectPoolAssetId, - /// Unable to find an element in an array/vec that should have one-to-one correspondence - /// with another. For example, an array of assets constituting a `path` should have a - /// corresponding array of `amounts` along the path. - CorrespondenceError, + /// The destination account cannot exist with the swapped funds. + BelowMinimum, } #[pallet::hooks] @@ -388,48 +357,32 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::create_pool())] pub fn create_pool( origin: OriginFor, - asset1: T::MultiAssetId, - asset2: T::MultiAssetId, + asset1: Box, + asset2: Box, ) -> DispatchResult { let sender = ensure_signed(origin)?; - ensure!(asset1 != asset2, Error::::EqualAssets); + ensure!(asset1 != asset2, Error::::InvalidAssetPair); // prepare pool_id - let pool_id = Self::get_pool_id(asset1, asset2); + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2) + .map_err(|_| Error::::InvalidAssetPair)?; ensure!(!Pools::::contains_key(&pool_id), Error::::PoolExists); - let (asset1, asset2) = &pool_id; - if !T::AllowMultiAssetPools::get() && !T::MultiAssetIdConverter::is_native(asset1) { - Err(Error::::PoolMustContainNativeCurrency)?; - } - let pool_account = Self::get_pool_account(&pool_id); - frame_system::Pallet::::inc_providers(&pool_account); + let pool_account = + T::PoolLocator::address(&pool_id).map_err(|_| Error::::InvalidAssetPair)?; // pay the setup fee - T::Currency::transfer( - &sender, - &T::PoolSetupFeeReceiver::get(), - T::PoolSetupFee::get(), - Preserve, - )?; + let fee = + Self::withdraw(T::PoolSetupFeeAsset::get(), &sender, T::PoolSetupFee::get(), true)?; + T::PoolSetupFeeTarget::on_unbalanced(fee); - // try to convert both assets - match T::MultiAssetIdConverter::try_convert(asset1) { - MultiAssetIdConversionResult::Converted(asset) => - if !T::Assets::contains(&asset, &pool_account) { - T::Assets::touch(asset, pool_account.clone(), sender.clone())? - }, - MultiAssetIdConversionResult::Unsupported(_) => Err(Error::::UnsupportedAsset)?, - MultiAssetIdConversionResult::Native => (), - } - match T::MultiAssetIdConverter::try_convert(asset2) { - MultiAssetIdConversionResult::Converted(asset) => - if !T::Assets::contains(&asset, &pool_account) { - T::Assets::touch(asset, pool_account.clone(), sender.clone())? - }, - MultiAssetIdConversionResult::Unsupported(_) => Err(Error::::UnsupportedAsset)?, - MultiAssetIdConversionResult::Native => (), - } + if T::Assets::should_touch(*asset1.clone(), &pool_account) { + T::Assets::touch(*asset1, &pool_account, &sender)? + }; + + if T::Assets::should_touch(*asset2.clone(), &pool_account) { + T::Assets::touch(*asset2, &pool_account, &sender)? 
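Editor's note: `create_pool` no longer transfers the setup fee to a configured receiver account; it withdraws the fee as a fungibles credit and hands it to `Config::PoolSetupFeeTarget`, leaving the policy to the runtime (the mock further down resolves it into the pallet's own account via `ResolveAssetTo`). A hedged sketch of the simplest possible target, assuming the `CreditOf<T>` alias used throughout this diff is exported by the pallet (it appears in the public `SwapCredit` impl); the struct name is illustrative:

    use core::marker::PhantomData;
    use frame_support::traits::OnUnbalanced;
    use pallet_asset_conversion::{Config, CreditOf};

    /// Hypothetical `PoolSetupFeeTarget` that simply burns the setup fee.
    pub struct BurnPoolSetupFee<T>(PhantomData<T>);

    impl<T: Config> OnUnbalanced<CreditOf<T>> for BurnPoolSetupFee<T> {
        fn on_nonzero_unbalanced(fee: CreditOf<T>) {
            // Dropping the credit leaves it unresolved, so the imbalance
            // machinery reduces the fee asset's total issuance, i.e. burns it.
            drop(fee);
        }
    }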
+ }; let lp_token = NextPoolAssetId::::get() .or(T::PoolAssetId::initial_value()) @@ -438,7 +391,7 @@ pub mod pallet { NextPoolAssetId::::set(Some(next_lp_token_id)); T::PoolAssets::create(lp_token.clone(), pool_account.clone(), false, 1u32.into())?; - T::PoolAssets::touch(lp_token.clone(), pool_account.clone(), sender.clone())?; + T::PoolAssets::touch(lp_token.clone(), &pool_account, &sender)?; let pool_info = PoolInfo { lp_token: lp_token.clone() }; Pools::::insert(pool_id.clone(), pool_info); @@ -466,39 +419,33 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_liquidity())] pub fn add_liquidity( origin: OriginFor, - asset1: T::MultiAssetId, - asset2: T::MultiAssetId, - amount1_desired: T::AssetBalance, - amount2_desired: T::AssetBalance, - amount1_min: T::AssetBalance, - amount2_min: T::AssetBalance, + asset1: Box, + asset2: Box, + amount1_desired: T::Balance, + amount2_desired: T::Balance, + amount1_min: T::Balance, + amount2_min: T::Balance, mint_to: T::AccountId, ) -> DispatchResult { let sender = ensure_signed(origin)?; - let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); - // swap params if needed - let (amount1_desired, amount2_desired, amount1_min, amount2_min) = - if pool_id.0 == asset1 { - (amount1_desired, amount2_desired, amount1_min, amount2_min) - } else { - (amount2_desired, amount1_desired, amount2_min, amount1_min) - }; + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2) + .map_err(|_| Error::::InvalidAssetPair)?; + ensure!( amount1_desired > Zero::zero() && amount2_desired > Zero::zero(), Error::::WrongDesiredAmount ); - let maybe_pool = Pools::::get(&pool_id); - let pool = maybe_pool.as_ref().ok_or(Error::::PoolNotFound)?; - let pool_account = Self::get_pool_account(&pool_id); + let pool = Pools::::get(&pool_id).ok_or(Error::::PoolNotFound)?; + let pool_account = + T::PoolLocator::address(&pool_id).map_err(|_| Error::::InvalidAssetPair)?; - let (asset1, asset2) = &pool_id; - let reserve1 = Self::get_balance(&pool_account, asset1)?; - let reserve2 = Self::get_balance(&pool_account, asset2)?; + let reserve1 = Self::get_balance(&pool_account, *asset1.clone()); + let reserve2 = Self::get_balance(&pool_account, *asset2.clone()); - let amount1: T::AssetBalance; - let amount2: T::AssetBalance; + let amount1: T::Balance; + let amount2: T::Balance; if reserve1.is_zero() || reserve2.is_zero() { amount1 = amount1_desired; amount2 = amount2_desired; @@ -527,17 +474,21 @@ pub mod pallet { } } - Self::validate_minimal_amount(amount1.saturating_add(reserve1), asset1) - .map_err(|_| Error::::AmountOneLessThanMinimal)?; - Self::validate_minimal_amount(amount2.saturating_add(reserve2), asset2) - .map_err(|_| Error::::AmountTwoLessThanMinimal)?; + ensure!( + amount1.saturating_add(reserve1) >= T::Assets::minimum_balance(*asset1.clone()), + Error::::AmountOneLessThanMinimal + ); + ensure!( + amount2.saturating_add(reserve2) >= T::Assets::minimum_balance(*asset2.clone()), + Error::::AmountTwoLessThanMinimal + ); - Self::transfer(asset1, &sender, &pool_account, amount1, true)?; - Self::transfer(asset2, &sender, &pool_account, amount2, true)?; + T::Assets::transfer(*asset1, &sender, &pool_account, amount1, Preserve)?; + T::Assets::transfer(*asset2, &sender, &pool_account, amount2, Preserve)?; let total_supply = T::PoolAssets::total_issuance(pool.lp_token.clone()); - let lp_token_amount: T::AssetBalance; + let lp_token_amount: T::Balance; if total_supply.is_zero() { lp_token_amount = Self::calc_lp_amount_for_zero_supply(&amount1, &amount2)?; T::PoolAssets::mint_into( 
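Editor's note: for context on the amounts chosen here, an empty pool accepts both desired amounts as-is and mints `sqrt(amount1 * amount2)` LP tokens on first deposit, while an existing pool quotes the counterpart amount pro rata from its reserves (see `quote` and `calc_lp_amount_for_zero_supply` later in this hunk). A small self-contained sketch of that arithmetic over `u128`, ignoring the `MintMinLiquidity` adjustment and the overflow-safe widening through `HigherPrecisionBalance`:

    /// Pro-rata quote for an existing pool: amount * reserve2 / reserve1.
    fn quote(amount: u128, reserve1: u128, reserve2: u128) -> u128 {
        amount * reserve2 / reserve1
    }

    /// First LP mint for an empty pool: integer part of sqrt(amount1 * amount2).
    fn lp_for_zero_supply(amount1: u128, amount2: u128) -> u128 {
        let x = amount1 * amount2;
        if x == 0 {
            return 0;
        }
        // Newton's method for the integer square root.
        let mut guess = x;
        let mut next = (guess + x / guess) / 2;
        while next < guess {
            guess = next;
            next = (guess + x / guess) / 2;
        }
        guess
    }

    fn main() {
        // Reserves 1_000 / 2_000: providing 10 of asset1 needs 20 of asset2
        // to keep the pool price unchanged.
        assert_eq!(quote(10, 1_000, 2_000), 20);
        // Empty pool seeded with 1_000 / 4_000 mints sqrt(4_000_000) = 2_000.
        assert_eq!(lp_for_zero_supply(1_000, 4_000), 2_000);
    }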
@@ -564,7 +515,7 @@ pub mod pallet { pool_id, amount1_provided: amount1, amount2_provided: amount2, - lp_token: pool.lp_token.clone(), + lp_token: pool.lp_token, lp_token_minted: lp_token_amount, }); @@ -578,32 +529,26 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_liquidity())] pub fn remove_liquidity( origin: OriginFor, - asset1: T::MultiAssetId, - asset2: T::MultiAssetId, - lp_token_burn: T::AssetBalance, - amount1_min_receive: T::AssetBalance, - amount2_min_receive: T::AssetBalance, + asset1: Box, + asset2: Box, + lp_token_burn: T::Balance, + amount1_min_receive: T::Balance, + amount2_min_receive: T::Balance, withdraw_to: T::AccountId, ) -> DispatchResult { let sender = ensure_signed(origin)?; - let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); - // swap params if needed - let (amount1_min_receive, amount2_min_receive) = if pool_id.0 == asset1 { - (amount1_min_receive, amount2_min_receive) - } else { - (amount2_min_receive, amount1_min_receive) - }; - let (asset1, asset2) = pool_id.clone(); + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2) + .map_err(|_| Error::::InvalidAssetPair)?; ensure!(lp_token_burn > Zero::zero(), Error::::ZeroLiquidity); - let maybe_pool = Pools::::get(&pool_id); - let pool = maybe_pool.as_ref().ok_or(Error::::PoolNotFound)?; + let pool = Pools::::get(&pool_id).ok_or(Error::::PoolNotFound)?; - let pool_account = Self::get_pool_account(&pool_id); - let reserve1 = Self::get_balance(&pool_account, &asset1)?; - let reserve2 = Self::get_balance(&pool_account, &asset2)?; + let pool_account = + T::PoolLocator::address(&pool_id).map_err(|_| Error::::InvalidAssetPair)?; + let reserve1 = Self::get_balance(&pool_account, *asset1.clone()); + let reserve2 = Self::get_balance(&pool_account, *asset2.clone()); let total_supply = T::PoolAssets::total_issuance(pool.lp_token.clone()); let withdrawal_fee_amount = T::LiquidityWithdrawalFee::get() * lp_token_burn; @@ -622,16 +567,20 @@ pub mod pallet { ); let reserve1_left = reserve1.saturating_sub(amount1); let reserve2_left = reserve2.saturating_sub(amount2); - Self::validate_minimal_amount(reserve1_left, &asset1) - .map_err(|_| Error::::ReserveLeftLessThanMinimal)?; - Self::validate_minimal_amount(reserve2_left, &asset2) - .map_err(|_| Error::::ReserveLeftLessThanMinimal)?; + ensure!( + reserve1_left >= T::Assets::minimum_balance(*asset1.clone()), + Error::::ReserveLeftLessThanMinimal + ); + ensure!( + reserve2_left >= T::Assets::minimum_balance(*asset2.clone()), + Error::::ReserveLeftLessThanMinimal + ); // burn the provided lp token amount that includes the fee T::PoolAssets::burn_from(pool.lp_token.clone(), &sender, lp_token_burn, Exact, Polite)?; - Self::transfer(&asset1, &pool_account, &withdraw_to, amount1, false)?; - Self::transfer(&asset2, &pool_account, &withdraw_to, amount2, false)?; + T::Assets::transfer(*asset1, &pool_account, &withdraw_to, amount1, Expendable)?; + T::Assets::transfer(*asset2, &pool_account, &withdraw_to, amount2, Expendable)?; Self::deposit_event(Event::LiquidityRemoved { who: sender, @@ -639,7 +588,7 @@ pub mod pallet { pool_id, amount1, amount2, - lp_token: pool.lp_token.clone(), + lp_token: pool.lp_token, lp_token_burned: lp_token_burn, withdrawal_fee: T::LiquidityWithdrawalFee::get(), }); @@ -654,19 +603,19 @@ pub mod pallet { /// [`AssetConversionApi::quote_price_exact_tokens_for_tokens`] runtime call can be called /// for a quote. 
#[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::swap_exact_tokens_for_tokens())] + #[pallet::weight(T::WeightInfo::swap_exact_tokens_for_tokens(path.len() as u32))] pub fn swap_exact_tokens_for_tokens( origin: OriginFor, - path: BoundedVec, - amount_in: T::AssetBalance, - amount_out_min: T::AssetBalance, + path: Vec>, + amount_in: T::Balance, + amount_out_min: T::Balance, send_to: T::AccountId, keep_alive: bool, ) -> DispatchResult { let sender = ensure_signed(origin)?; Self::do_swap_exact_tokens_for_tokens( sender, - path, + path.into_iter().map(|a| *a).collect(), amount_in, Some(amount_out_min), send_to, @@ -682,19 +631,19 @@ pub mod pallet { /// [`AssetConversionApi::quote_price_tokens_for_exact_tokens`] runtime call can be called /// for a quote. #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::swap_tokens_for_exact_tokens())] + #[pallet::weight(T::WeightInfo::swap_tokens_for_exact_tokens(path.len() as u32))] pub fn swap_tokens_for_exact_tokens( origin: OriginFor, - path: BoundedVec, - amount_out: T::AssetBalance, - amount_in_max: T::AssetBalance, + path: Vec>, + amount_out: T::Balance, + amount_in_max: T::Balance, send_to: T::AccountId, keep_alive: bool, ) -> DispatchResult { let sender = ensure_signed(origin)?; Self::do_swap_tokens_for_exact_tokens( sender, - path, + path.into_iter().map(|a| *a).collect(), amount_out, Some(amount_in_max), send_to, @@ -713,25 +662,27 @@ pub mod pallet { /// respecting `keep_alive`. /// /// If successful, returns the amount of `path[1]` acquired for the `amount_in`. - pub fn do_swap_exact_tokens_for_tokens( + /// + /// WARNING: This may return an error after a partial storage mutation. It should be used + /// only inside a transactional storage context and an Err result must imply a storage + /// rollback. + pub(crate) fn do_swap_exact_tokens_for_tokens( sender: T::AccountId, - path: BoundedVec, - amount_in: T::AssetBalance, - amount_out_min: Option, + path: Vec, + amount_in: T::Balance, + amount_out_min: Option, send_to: T::AccountId, keep_alive: bool, - ) -> Result { + ) -> Result { ensure!(amount_in > Zero::zero(), Error::::ZeroAmount); if let Some(amount_out_min) = amount_out_min { ensure!(amount_out_min > Zero::zero(), Error::::ZeroAmount); } Self::validate_swap_path(&path)?; + let path = Self::balance_path_from_amount_in(amount_in, path)?; - let amounts = Self::get_amounts_out(&amount_in, &path)?; - let amount_out = - *amounts.last().defensive_ok_or("get_amounts_out() returned an empty result")?; - + let amount_out = path.last().map(|(_, a)| *a).ok_or(Error::::InvalidPath)?; if let Some(amount_out_min) = amount_out_min { ensure!( amount_out >= amount_out_min, @@ -739,7 +690,15 @@ pub mod pallet { ); } - Self::do_swap(sender, &amounts, path, send_to, keep_alive)?; + Self::swap(&sender, &path, &send_to, keep_alive)?; + + Self::deposit_event(Event::SwapExecuted { + who: sender, + send_to, + amount_in, + amount_out, + path, + }); Ok(amount_out) } @@ -751,25 +710,27 @@ pub mod pallet { /// respecting `keep_alive`. /// /// If successful returns the amount of the `path[0]` taken to provide `path[1]`. - pub fn do_swap_tokens_for_exact_tokens( + /// + /// WARNING: This may return an error after a partial storage mutation. It should be used + /// only inside a transactional storage context and an Err result must imply a storage + /// rollback. 
+ pub(crate) fn do_swap_tokens_for_exact_tokens( sender: T::AccountId, - path: BoundedVec, - amount_out: T::AssetBalance, - amount_in_max: Option, + path: Vec, + amount_out: T::Balance, + amount_in_max: Option, send_to: T::AccountId, keep_alive: bool, - ) -> Result { + ) -> Result { ensure!(amount_out > Zero::zero(), Error::::ZeroAmount); if let Some(amount_in_max) = amount_in_max { ensure!(amount_in_max > Zero::zero(), Error::::ZeroAmount); } Self::validate_swap_path(&path)?; + let path = Self::balance_path_from_amount_out(amount_out, path)?; - let amounts = Self::get_amounts_in(&amount_out, &path)?; - let amount_in = - *amounts.first().defensive_ok_or("get_amounts_in() returned an empty result")?; - + let amount_in = path.first().map(|(_, a)| *a).ok_or(Error::::InvalidPath)?; if let Some(amount_in_max) = amount_in_max { ensure!( amount_in <= amount_in_max, @@ -777,198 +738,236 @@ pub mod pallet { ); } - Self::do_swap(sender, &amounts, path, send_to, keep_alive)?; + Self::swap(&sender, &path, &send_to, keep_alive)?; + + Self::deposit_event(Event::SwapExecuted { + who: sender, + send_to, + amount_in, + amount_out, + path, + }); + Ok(amount_in) } - /// Transfer an `amount` of `asset_id`, respecting the `keep_alive` requirements. - fn transfer( - asset_id: &T::MultiAssetId, - from: &T::AccountId, - to: &T::AccountId, - amount: T::AssetBalance, - keep_alive: bool, - ) -> Result { - let result = match T::MultiAssetIdConverter::try_convert(asset_id) { - MultiAssetIdConversionResult::Converted(asset_id) => - T::Assets::transfer(asset_id, from, to, amount, Expendable), - MultiAssetIdConversionResult::Native => { - let preservation = match keep_alive { - true => Preserve, - false => Expendable, - }; - let amount = Self::convert_asset_balance_to_native_balance(amount)?; - Ok(Self::convert_native_balance_to_asset_balance(T::Currency::transfer( - from, - to, - amount, - preservation, - )?)?) - }, - MultiAssetIdConversionResult::Unsupported(_) => - Err(Error::::UnsupportedAsset.into()), + /// Swap exactly `credit_in` of asset `path[0]` for asset `path[last]`. If `amount_out_min` + /// is provided and the swap can't achieve at least this amount, an error is returned. + /// + /// On a successful swap, the function returns the `credit_out` of `path[last]` obtained + /// from the `credit_in`. On failure, it returns an `Err` containing the original + /// `credit_in` and the associated error code. + /// + /// WARNING: This may return an error after a partial storage mutation. It should be used + /// only inside a transactional storage context and an Err result must imply a storage + /// rollback. 
+ pub(crate) fn do_swap_exact_credit_tokens_for_tokens( + path: Vec, + credit_in: CreditOf, + amount_out_min: Option, + ) -> Result, (CreditOf, DispatchError)> { + let amount_in = credit_in.peek(); + let inspect_path = |credit_asset| { + ensure!( + path.first().map_or(false, |a| *a == credit_asset), + Error::::InvalidPath + ); + ensure!(!amount_in.is_zero(), Error::::ZeroAmount); + ensure!(amount_out_min.map_or(true, |a| !a.is_zero()), Error::::ZeroAmount); + + Self::validate_swap_path(&path)?; + let path = Self::balance_path_from_amount_in(amount_in, path)?; + + let amount_out = path.last().map(|(_, a)| *a).ok_or(Error::::InvalidPath)?; + ensure!( + amount_out_min.map_or(true, |a| amount_out >= a), + Error::::ProvidedMinimumNotSufficientForSwap + ); + Ok((path, amount_out)) + }; + let (path, amount_out) = match inspect_path(credit_in.asset()) { + Ok((p, a)) => (p, a), + Err(e) => return Err((credit_in, e)), }; - if result.is_ok() { - Self::deposit_event(Event::Transfer { - from: from.clone(), - to: to.clone(), - asset: (*asset_id).clone(), - amount, - }); - } - result - } + let credit_out = Self::credit_swap(credit_in, &path)?; - /// Convert a `Balance` type to an `AssetBalance`. - pub(crate) fn convert_native_balance_to_asset_balance( - amount: T::Balance, - ) -> Result> { - T::HigherPrecisionBalance::from(amount) - .try_into() - .map_err(|_| Error::::Overflow) - } + Self::deposit_event(Event::SwapCreditExecuted { amount_in, amount_out, path }); - /// Convert an `AssetBalance` type to a `Balance`. - pub(crate) fn convert_asset_balance_to_native_balance( - amount: T::AssetBalance, - ) -> Result> { - T::HigherPrecisionBalance::from(amount) - .try_into() - .map_err(|_| Error::::Overflow) + Ok(credit_out) } - /// Convert a `HigherPrecisionBalance` type to an `AssetBalance`. - pub(crate) fn convert_hpb_to_asset_balance( - amount: T::HigherPrecisionBalance, - ) -> Result> { - amount.try_into().map_err(|_| Error::::Overflow) + /// Swaps a portion of `credit_in` of `path[0]` asset to obtain the desired `amount_out` of + /// the `path[last]` asset. The provided `credit_in` must be adequate to achieve the target + /// `amount_out`, or an error will occur. + /// + /// On success, the function returns a (`credit_out`, `credit_change`) tuple, where + /// `credit_out` represents the acquired amount of the `path[last]` asset, and + /// `credit_change` is the remaining portion from the `credit_in`. On failure, an `Err` with + /// the initial `credit_in` and error code is returned. + /// + /// WARNING: This may return an error after a partial storage mutation. It should be used + /// only inside a transactional storage context and an Err result must imply a storage + /// rollback. 
+ pub(crate) fn do_swap_credit_tokens_for_exact_tokens( + path: Vec, + credit_in: CreditOf, + amount_out: T::Balance, + ) -> Result<(CreditOf, CreditOf), (CreditOf, DispatchError)> { + let amount_in_max = credit_in.peek(); + let inspect_path = |credit_asset| { + ensure!( + path.first().map_or(false, |a| a == &credit_asset), + Error::::InvalidPath + ); + ensure!(amount_in_max > Zero::zero(), Error::::ZeroAmount); + ensure!(amount_out > Zero::zero(), Error::::ZeroAmount); + + Self::validate_swap_path(&path)?; + let path = Self::balance_path_from_amount_out(amount_out, path)?; + + let amount_in = path.first().map(|(_, a)| *a).ok_or(Error::::InvalidPath)?; + ensure!( + amount_in <= amount_in_max, + Error::::ProvidedMaximumNotSufficientForSwap + ); + + Ok((path, amount_in)) + }; + let (path, amount_in) = match inspect_path(credit_in.asset()) { + Ok((p, a)) => (p, a), + Err(e) => return Err((credit_in, e)), + }; + + let (credit_in, credit_change) = credit_in.split(amount_in); + let credit_out = Self::credit_swap(credit_in, &path)?; + + Self::deposit_event(Event::SwapCreditExecuted { amount_in, amount_out, path }); + + Ok((credit_out, credit_change)) } - /// Swap assets along a `path`, depositing in `send_to`. - pub(crate) fn do_swap( - sender: T::AccountId, - amounts: &Vec, - path: BoundedVec, - send_to: T::AccountId, + /// Swap assets along the `path`, withdrawing from `sender` and depositing in `send_to`. + /// + /// Note: It's assumed that the provided `path` is valid. + /// + /// WARNING: This may return an error after a partial storage mutation. It should be used + /// only inside a transactional storage context and an Err result must imply a storage + /// rollback. + fn swap( + sender: &T::AccountId, + path: &BalancePath, + send_to: &T::AccountId, keep_alive: bool, ) -> Result<(), DispatchError> { - ensure!(amounts.len() > 1, Error::::CorrespondenceError); - if let Some([asset1, asset2]) = &path.get(0..2) { - let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); - let pool_account = Self::get_pool_account(&pool_id); - // amounts should always contain a corresponding element to path. - let first_amount = amounts.first().ok_or(Error::::CorrespondenceError)?; - - Self::transfer(asset1, &sender, &pool_account, *first_amount, keep_alive)?; - - let mut i = 0; - let path_len = path.len() as u32; - for assets_pair in path.windows(2) { - if let [asset1, asset2] = assets_pair { - let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); - let pool_account = Self::get_pool_account(&pool_id); - - let amount_out = - amounts.get((i + 1) as usize).ok_or(Error::::CorrespondenceError)?; - - let to = if i < path_len - 2 { - let asset3 = path.get((i + 2) as usize).ok_or(Error::::PathError)?; - Self::get_pool_account(&Self::get_pool_id( + let (asset_in, amount_in) = path.first().ok_or(Error::::InvalidPath)?; + let credit_in = Self::withdraw(asset_in.clone(), sender, *amount_in, keep_alive)?; + + let credit_out = Self::credit_swap(credit_in, path).map_err(|(_, e)| e)?; + T::Assets::resolve(send_to, credit_out).map_err(|_| Error::::BelowMinimum)?; + + Ok(()) + } + + /// Swap assets along the specified `path`, consuming `credit_in` and producing + /// `credit_out`. + /// + /// If an error occurs, `credit_in` is returned back. + /// + /// Note: It's assumed that the provided `path` is valid and `credit_in` corresponds to the + /// first asset in the `path`. + /// + /// WARNING: This may return an error after a partial storage mutation. 
It should be used + /// only inside a transactional storage context and an Err result must imply a storage + /// rollback. + fn credit_swap( + credit_in: CreditOf, + path: &BalancePath, + ) -> Result, (CreditOf, DispatchError)> { + let resolve_path = || -> Result, DispatchError> { + for pos in 0..=path.len() { + if let Some([(asset1, _), (asset2, amount_out)]) = path.get(pos..=pos + 1) { + let pool_from = T::PoolLocator::pool_address(asset1, asset2) + .map_err(|_| Error::::InvalidAssetPair)?; + + if let Some((asset3, _)) = path.get(pos + 2) { + let pool_to = T::PoolLocator::pool_address(asset2, asset3) + .map_err(|_| Error::::InvalidAssetPair)?; + + T::Assets::transfer( asset2.clone(), - asset3.clone(), - )) + &pool_from, + &pool_to, + *amount_out, + Preserve, + )?; } else { - send_to.clone() - }; + let credit_out = + Self::withdraw(asset2.clone(), &pool_from, *amount_out, true)?; + return Ok(credit_out) + } + } + } + Err(Error::::InvalidPath.into()) + }; - let reserve = Self::get_balance(&pool_account, asset2)?; - let reserve_left = reserve.saturating_sub(*amount_out); - Self::validate_minimal_amount(reserve_left, asset2) - .map_err(|_| Error::::ReserveLeftLessThanMinimal)?; + let credit_out = match resolve_path() { + Ok(c) => c, + Err(e) => return Err((credit_in, e)), + }; - Self::transfer(asset2, &pool_account, &to, *amount_out, true)?; - } - i.saturating_inc(); + let pool_to = if let Some([(asset1, _), (asset2, _)]) = path.get(0..2) { + match T::PoolLocator::pool_address(asset1, asset2) { + Ok(address) => address, + Err(_) => return Err((credit_in, Error::::InvalidAssetPair.into())), } - Self::deposit_event(Event::SwapExecuted { - who: sender, - send_to, - path, - amount_in: *first_amount, - amount_out: *amounts.last().expect("Always has more than 1 element"), - }); } else { - return Err(Error::::InvalidPath.into()) - } - Ok(()) - } + return Err((credit_in, Error::::InvalidPath.into())) + }; - /// The account ID of the pool. - /// - /// This actually does computation. If you need to keep using it, then make sure you cache - /// the value and only call this once. - pub fn get_pool_account(pool_id: &PoolIdOf) -> T::AccountId { - let encoded_pool_id = sp_io::hashing::blake2_256(&Encode::encode(pool_id)[..]); + T::Assets::resolve(&pool_to, credit_in) + .map_err(|c| (c, Error::::BelowMinimum.into()))?; - Decode::decode(&mut TrailingZeroInput::new(encoded_pool_id.as_ref())) - .expect("infinite length input; no invalid inputs for type; qed") + Ok(credit_out) } - /// Get the `owner`'s balance of `asset`, which could be the chain's native asset or another - /// fungible. Returns a value in the form of an `AssetBalance`. - fn get_balance( - owner: &T::AccountId, - asset: &T::MultiAssetId, - ) -> Result> { - match T::MultiAssetIdConverter::try_convert(asset) { - MultiAssetIdConversionResult::Converted(asset_id) => Ok( - <::Assets>::reducible_balance(asset_id, owner, Expendable, Polite), - ), - MultiAssetIdConversionResult::Native => - Self::convert_native_balance_to_asset_balance( - <::Currency>::reducible_balance(owner, Expendable, Polite), - ), - MultiAssetIdConversionResult::Unsupported(_) => - Err(Error::::UnsupportedAsset.into()), + /// Removes `value` balance of `asset` from `who` account if possible. + fn withdraw( + asset: T::AssetKind, + who: &T::AccountId, + value: T::Balance, + keep_alive: bool, + ) -> Result, DispatchError> { + let preservation = match keep_alive { + true => Preserve, + false => Expendable, + }; + if preservation == Preserve { + // TODO drop the ensure! 
when this issue addressed + // https://github.com/paritytech/polkadot-sdk/issues/1698 + let free = T::Assets::reducible_balance(asset.clone(), who, preservation, Polite); + ensure!(free >= value, TokenError::NotExpendable); } + T::Assets::withdraw(asset, who, value, Exact, preservation, Polite) } - /// Returns a pool id constructed from 2 assets. - /// 1. Native asset should be lower than the other asset ids. - /// 2. Two native or two non-native assets are compared by their `Ord` implementation. - /// - /// We expect deterministic order, so (asset1, asset2) or (asset2, asset1) returns the same - /// result. - pub fn get_pool_id(asset1: T::MultiAssetId, asset2: T::MultiAssetId) -> PoolIdOf { - match ( - T::MultiAssetIdConverter::is_native(&asset1), - T::MultiAssetIdConverter::is_native(&asset2), - ) { - (true, false) => return (asset1, asset2), - (false, true) => return (asset2, asset1), - _ => { - // else we want to be deterministic based on `Ord` implementation - if asset1 <= asset2 { - (asset1, asset2) - } else { - (asset2, asset1) - } - }, - } + /// Get the `owner`'s balance of `asset`, which could be the chain's native asset or another + /// fungible. Returns a value in the form of an `Balance`. + fn get_balance(owner: &T::AccountId, asset: T::AssetKind) -> T::Balance { + T::Assets::reducible_balance(asset, owner, Expendable, Polite) } /// Returns the balance of each asset in the pool. /// The tuple result is in the order requested (not necessarily the same as pool order). pub fn get_reserves( - asset1: &T::MultiAssetId, - asset2: &T::MultiAssetId, - ) -> Result<(T::AssetBalance, T::AssetBalance), Error> { - let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); - let pool_account = Self::get_pool_account(&pool_id); + asset1: T::AssetKind, + asset2: T::AssetKind, + ) -> Result<(T::Balance, T::Balance), Error> { + let pool_account = T::PoolLocator::pool_address(&asset1, &asset2) + .map_err(|_| Error::::InvalidAssetPair)?; - let balance1 = Self::get_balance(&pool_account, asset1)?; - let balance2 = Self::get_balance(&pool_account, asset2)?; + let balance1 = Self::get_balance(&pool_account, asset1); + let balance2 = Self::get_balance(&pool_account, asset2); if balance1.is_zero() || balance2.is_zero() { Err(Error::::PoolNotFound)?; @@ -978,56 +977,66 @@ pub mod pallet { } /// Leading to an amount at the end of a `path`, get the required amounts in. 
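Editor's note: the removed `get_pool_account` above is the idea that now sits behind `PoolLocator::address` — hash the SCALE-encoded pool id and decode the digest into an `AccountId`. A sketch of that derivation for a 32-byte account id, assuming the usual `codec`, `sp_io` and `sp_runtime` crates:

    use codec::{Decode, Encode};
    use sp_runtime::traits::TrailingZeroInput;

    /// Deterministic account for an encoded pool id, mirroring the removed
    /// blake2_256-based `get_pool_account`.
    fn pool_address<PoolId: Encode, AccountId: Decode>(pool_id: &PoolId) -> AccountId {
        let hash = sp_io::hashing::blake2_256(&pool_id.encode());
        AccountId::decode(&mut TrailingZeroInput::new(&hash))
            .expect("infinite length input; no invalid inputs for type; qed")
    }

    fn main() {
        let a: [u8; 32] = pool_address(&(0u32, 2u32));
        let b: [u8; 32] = pool_address(&(0u32, 2u32));
        let c: [u8; 32] = pool_address(&(2u32, 0u32));
        assert_eq!(a, b); // deterministic per pool id
        assert_ne!(a, c); // order matters, hence the canonical pool id from the locator
    }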
- pub(crate) fn get_amounts_in( - amount_out: &T::AssetBalance, - path: &BoundedVec, - ) -> Result, DispatchError> { - let mut amounts: Vec = vec![*amount_out]; - - for assets_pair in path.windows(2).rev() { - if let [asset1, asset2] = assets_pair { - let (reserve_in, reserve_out) = Self::get_reserves(asset1, asset2)?; - let prev_amount = amounts.last().expect("Always has at least one element"); - let amount_in = Self::get_amount_in(prev_amount, &reserve_in, &reserve_out)?; - amounts.push(amount_in); - } + pub(crate) fn balance_path_from_amount_out( + amount_out: T::Balance, + path: Vec, + ) -> Result, DispatchError> { + let mut balance_path: BalancePath = Vec::with_capacity(path.len()); + let mut amount_in: T::Balance = amount_out; + + let mut iter = path.into_iter().rev().peekable(); + while let Some(asset2) = iter.next() { + let asset1 = match iter.peek() { + Some(a) => a, + None => { + balance_path.push((asset2, amount_in)); + break + }, + }; + let (reserve_in, reserve_out) = Self::get_reserves(asset1.clone(), asset2.clone())?; + balance_path.push((asset2, amount_in)); + amount_in = Self::get_amount_in(&amount_in, &reserve_in, &reserve_out)?; } + balance_path.reverse(); - amounts.reverse(); - Ok(amounts) + Ok(balance_path) } /// Following an amount into a `path`, get the corresponding amounts out. - pub(crate) fn get_amounts_out( - amount_in: &T::AssetBalance, - path: &BoundedVec, - ) -> Result, DispatchError> { - let mut amounts: Vec = vec![*amount_in]; - - for assets_pair in path.windows(2) { - if let [asset1, asset2] = assets_pair { - let (reserve_in, reserve_out) = Self::get_reserves(asset1, asset2)?; - let prev_amount = amounts.last().expect("Always has at least one element"); - let amount_out = Self::get_amount_out(prev_amount, &reserve_in, &reserve_out)?; - amounts.push(amount_out); - } + pub(crate) fn balance_path_from_amount_in( + amount_in: T::Balance, + path: Vec, + ) -> Result, DispatchError> { + let mut balance_path: BalancePath = Vec::with_capacity(path.len()); + let mut amount_out: T::Balance = amount_in; + + let mut iter = path.into_iter().peekable(); + while let Some(asset1) = iter.next() { + let asset2 = match iter.peek() { + Some(a) => a, + None => { + balance_path.push((asset1, amount_out)); + break + }, + }; + let (reserve_in, reserve_out) = Self::get_reserves(asset1.clone(), asset2.clone())?; + balance_path.push((asset1, amount_out)); + amount_out = Self::get_amount_out(&amount_out, &reserve_in, &reserve_out)?; } - - Ok(amounts) + Ok(balance_path) } /// Used by the RPC service to provide current prices. pub fn quote_price_exact_tokens_for_tokens( - asset1: T::MultiAssetId, - asset2: T::MultiAssetId, - amount: T::AssetBalance, + asset1: T::AssetKind, + asset2: T::AssetKind, + amount: T::Balance, include_fee: bool, - ) -> Option { - let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); - let pool_account = Self::get_pool_account(&pool_id); + ) -> Option { + let pool_account = T::PoolLocator::pool_address(&asset1, &asset2).ok()?; - let balance1 = Self::get_balance(&pool_account, &asset1).ok()?; - let balance2 = Self::get_balance(&pool_account, &asset2).ok()?; + let balance1 = Self::get_balance(&pool_account, asset1); + let balance2 = Self::get_balance(&pool_account, asset2); if !balance1.is_zero() { if include_fee { Self::get_amount_out(&amount, &balance1, &balance2).ok() @@ -1041,16 +1050,15 @@ pub mod pallet { /// Used by the RPC service to provide current prices. 
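Editor's note: `balance_path_from_amount_in`/`_out` replace the old `get_amounts_out`/`get_amounts_in`. Instead of a separate `Vec` of amounts that had to stay in lockstep with the path (the old `CorrespondenceError`), they return the `BalancePath`, pairing every hop's asset with the amount flowing through it. A plain `u128` sketch of the forward fold, using the constant-product-with-fee formula from `get_amount_out` below with the 0.3% fee the mock configures:

    /// Constant-product output for a fee given in tenths of a percent (3 = 0.3%).
    fn amount_out(amount_in: u128, reserve_in: u128, reserve_out: u128, fee: u128) -> u128 {
        let in_with_fee = amount_in * (1000 - fee);
        (in_with_fee * reserve_out) / (reserve_in * 1000 + in_with_fee)
    }

    /// Forward fold: pair each asset in the path with the amount entering that hop.
    fn balance_path_from_amount_in(
        amount_in: u128,
        path: &[u32],
        reserves: impl Fn(u32, u32) -> (u128, u128),
    ) -> Vec<(u32, u128)> {
        let mut out = Vec::with_capacity(path.len());
        let mut amount = amount_in;
        for pair in path.windows(2) {
            out.push((pair[0], amount));
            let (r_in, r_out) = reserves(pair[0], pair[1]);
            amount = amount_out(amount, r_in, r_out, 3);
        }
        if let Some(last) = path.last() {
            out.push((*last, amount));
        }
        out
    }

    fn main() {
        // Two hops A -> B -> C over balanced 1_000_000 / 1_000_000 pools.
        let path = balance_path_from_amount_in(10_000, &[1, 2, 3], |_, _| (1_000_000, 1_000_000));
        assert_eq!(path[0], (1, 10_000));
        // Every hop pays the fee plus price impact, so amounts shrink along the path.
        assert!(path[1].1 < 10_000 && path[2].1 < path[1].1);
    }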
pub fn quote_price_tokens_for_exact_tokens( - asset1: T::MultiAssetId, - asset2: T::MultiAssetId, - amount: T::AssetBalance, + asset1: T::AssetKind, + asset2: T::AssetKind, + amount: T::Balance, include_fee: bool, - ) -> Option { - let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); - let pool_account = Self::get_pool_account(&pool_id); + ) -> Option { + let pool_account = T::PoolLocator::pool_address(&asset1, &asset2).ok()?; - let balance1 = Self::get_balance(&pool_account, &asset1).ok()?; - let balance2 = Self::get_balance(&pool_account, &asset2).ok()?; + let balance1 = Self::get_balance(&pool_account, asset1); + let balance2 = Self::get_balance(&pool_account, asset2); if !balance1.is_zero() { if include_fee { Self::get_amount_in(&amount, &balance1, &balance2).ok() @@ -1064,18 +1072,18 @@ pub mod pallet { /// Calculates the optimal amount from the reserves. pub fn quote( - amount: &T::AssetBalance, - reserve1: &T::AssetBalance, - reserve2: &T::AssetBalance, - ) -> Result> { - // amount * reserve2 / reserve1 + amount: &T::Balance, + reserve1: &T::Balance, + reserve2: &T::Balance, + ) -> Result> { + // (amount * reserve2) / reserve1 Self::mul_div(amount, reserve2, reserve1) } pub(super) fn calc_lp_amount_for_zero_supply( - amount1: &T::AssetBalance, - amount2: &T::AssetBalance, - ) -> Result> { + amount1: &T::Balance, + amount2: &T::Balance, + ) -> Result> { let amount1 = T::HigherPrecisionBalance::from(*amount1); let amount2 = T::HigherPrecisionBalance::from(*amount2); @@ -1089,11 +1097,7 @@ pub mod pallet { result.try_into().map_err(|_| Error::::Overflow) } - fn mul_div( - a: &T::AssetBalance, - b: &T::AssetBalance, - c: &T::AssetBalance, - ) -> Result> { + fn mul_div(a: &T::Balance, b: &T::Balance, c: &T::Balance) -> Result> { let a = T::HigherPrecisionBalance::from(*a); let b = T::HigherPrecisionBalance::from(*b); let c = T::HigherPrecisionBalance::from(*c); @@ -1112,16 +1116,16 @@ pub mod pallet { /// Given an input amount of an asset and pair reserves, returns the maximum output amount /// of the other asset. pub fn get_amount_out( - amount_in: &T::AssetBalance, - reserve_in: &T::AssetBalance, - reserve_out: &T::AssetBalance, - ) -> Result> { + amount_in: &T::Balance, + reserve_in: &T::Balance, + reserve_out: &T::Balance, + ) -> Result> { let amount_in = T::HigherPrecisionBalance::from(*amount_in); let reserve_in = T::HigherPrecisionBalance::from(*reserve_in); let reserve_out = T::HigherPrecisionBalance::from(*reserve_out); if reserve_in.is_zero() || reserve_out.is_zero() { - return Err(Error::::ZeroLiquidity.into()) + return Err(Error::::ZeroLiquidity) } let amount_in_with_fee = amount_in @@ -1147,20 +1151,20 @@ pub mod pallet { /// Given an output amount of an asset and pair reserves, returns a required input amount /// of the other asset. pub fn get_amount_in( - amount_out: &T::AssetBalance, - reserve_in: &T::AssetBalance, - reserve_out: &T::AssetBalance, - ) -> Result> { + amount_out: &T::Balance, + reserve_in: &T::Balance, + reserve_out: &T::Balance, + ) -> Result> { let amount_out = T::HigherPrecisionBalance::from(*amount_out); let reserve_in = T::HigherPrecisionBalance::from(*reserve_in); let reserve_out = T::HigherPrecisionBalance::from(*reserve_out); if reserve_in.is_zero() || reserve_out.is_zero() { - Err(Error::::ZeroLiquidity.into())? + Err(Error::::ZeroLiquidity)? } if amount_out >= reserve_out { - Err(Error::::AmountOutTooHigh.into())? + Err(Error::::AmountOutTooHigh)? 
} let numerator = reserve_in @@ -1184,42 +1188,19 @@ pub mod pallet { result.try_into().map_err(|_| Error::::Overflow) } - /// Ensure that a `value` meets the minimum balance requirements of an `asset` class. - fn validate_minimal_amount( - value: T::AssetBalance, - asset: &T::MultiAssetId, - ) -> Result<(), ()> { - if T::MultiAssetIdConverter::is_native(asset) { - let ed = T::Currency::minimum_balance(); - ensure!( - T::HigherPrecisionBalance::from(value) >= T::HigherPrecisionBalance::from(ed), - () - ); - } else { - let MultiAssetIdConversionResult::Converted(asset_id) = - T::MultiAssetIdConverter::try_convert(asset) - else { - return Err(()) - }; - let minimal = T::Assets::minimum_balance(asset_id); - ensure!(value >= minimal, ()); - } - Ok(()) - } - /// Ensure that a path is valid. - fn validate_swap_path( - path: &BoundedVec, - ) -> Result<(), DispatchError> { + fn validate_swap_path(path: &Vec) -> Result<(), DispatchError> { ensure!(path.len() >= 2, Error::::InvalidPath); + ensure!(path.len() as u32 <= T::MaxSwapPathLength::get(), Error::::InvalidPath); // validate all the pools in the path are unique - let mut pools = BoundedBTreeSet::, T::MaxSwapPathLength>::new(); + let mut pools = BTreeSet::::new(); for assets_pair in path.windows(2) { if let [asset1, asset2] = assets_pair { - let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); - let new_element = - pools.try_insert(pool_id).map_err(|_| Error::::Overflow)?; + let pool_id = T::PoolLocator::pool_id(asset1, asset2) + .map_err(|_| Error::::InvalidAssetPair)?; + + let new_element = pools.insert(pool_id); if !new_element { return Err(Error::::NonUniquePath.into()) } @@ -1238,69 +1219,35 @@ pub mod pallet { } } -impl Swap for Pallet { - fn swap_exact_tokens_for_tokens( - sender: T::AccountId, - path: Vec, - amount_in: T::HigherPrecisionBalance, - amount_out_min: Option, - send_to: T::AccountId, - keep_alive: bool, - ) -> Result { - let path = path.try_into().map_err(|_| Error::::PathError)?; - let amount_out_min = amount_out_min.map(Self::convert_hpb_to_asset_balance).transpose()?; - let amount_out = Self::do_swap_exact_tokens_for_tokens( - sender, - path, - Self::convert_hpb_to_asset_balance(amount_in)?, - amount_out_min, - send_to, - keep_alive, - )?; - Ok(amount_out.into()) - } - - fn swap_tokens_for_exact_tokens( - sender: T::AccountId, - path: Vec, - amount_out: T::HigherPrecisionBalance, - amount_in_max: Option, - send_to: T::AccountId, - keep_alive: bool, - ) -> Result { - let path = path.try_into().map_err(|_| Error::::PathError)?; - let amount_in_max = amount_in_max.map(Self::convert_hpb_to_asset_balance).transpose()?; - let amount_in = Self::do_swap_tokens_for_exact_tokens( - sender, - path, - Self::convert_hpb_to_asset_balance(amount_out)?, - amount_in_max, - send_to, - keep_alive, - )?; - Ok(amount_in.into()) - } -} - sp_api::decl_runtime_apis! { /// This runtime api allows people to query the size of the liquidity pools /// and quote prices for swaps. - pub trait AssetConversionApi where - Balance: Codec + MaybeDisplay, - AssetBalance: frame_support::traits::tokens::Balance, - AssetId: Codec + pub trait AssetConversionApi + where + Balance: frame_support::traits::tokens::Balance + MaybeDisplay, + AssetId: Codec, { /// Provides a quote for [`Pallet::swap_tokens_for_exact_tokens`]. /// /// Note that the price may have changed by the time the transaction is executed. /// (Use `amount_in_max` to control slippage.) 
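Editor's note: for reference, `get_amount_in` above is the inverse of `get_amount_out` under the same fee, rounded up (as in the Uniswap-V2-style formula this mirrors) so a swap cannot underpay the pool through integer truncation. A plain `u128` sketch of the pair, with a round-trip check (fee in tenths of a percent, matching `LPFee`):

    /// Output for a given input:
    /// (in * (1000 - fee) * reserve_out) / (reserve_in * 1000 + in * (1000 - fee)).
    fn amount_out(amount_in: u128, reserve_in: u128, reserve_out: u128, fee: u128) -> u128 {
        let in_with_fee = amount_in * (1000 - fee);
        (in_with_fee * reserve_out) / (reserve_in * 1000 + in_with_fee)
    }

    /// Required input for a desired output, rounded up by one.
    fn amount_in(amount_out: u128, reserve_in: u128, reserve_out: u128, fee: u128) -> u128 {
        (reserve_in * amount_out * 1000) / ((reserve_out - amount_out) * (1000 - fee)) + 1
    }

    fn main() {
        let (r_in, r_out, fee) = (1_000_000u128, 1_000_000u128, 3u128);
        let need = amount_in(9_871, r_in, r_out, fee);
        // Spending the quoted input always yields at least the requested output.
        assert!(amount_out(need, r_in, r_out, fee) >= 9_871);
    }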
- fn quote_price_tokens_for_exact_tokens(asset1: AssetId, asset2: AssetId, amount: AssetBalance, include_fee: bool) -> Option; + fn quote_price_tokens_for_exact_tokens( + asset1: AssetId, + asset2: AssetId, + amount: Balance, + include_fee: bool, + ) -> Option; /// Provides a quote for [`Pallet::swap_exact_tokens_for_tokens`]. /// /// Note that the price may have changed by the time the transaction is executed. /// (Use `amount_out_min` to control slippage.) - fn quote_price_exact_tokens_for_tokens(asset1: AssetId, asset2: AssetId, amount: AssetBalance, include_fee: bool) -> Option; + fn quote_price_exact_tokens_for_tokens( + asset1: AssetId, + asset2: AssetId, + amount: Balance, + include_fee: bool, + ) -> Option; /// Returns the size of the liquidity pool for the given asset pair. fn get_reserves(asset1: AssetId, asset2: AssetId) -> Option<(Balance, Balance)>; diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs index 4eee701f193e11e077e805593cd67ed1a1d043fd..12c8fe2eb42cb4ae0a67dddce9bab40a1b121ebb 100644 --- a/substrate/frame/asset-conversion/src/mock.rs +++ b/substrate/frame/asset-conversion/src/mock.rs @@ -19,12 +19,17 @@ use super::*; use crate as pallet_asset_conversion; - use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, instances::{Instance1, Instance2}, ord_parameter_types, parameter_types, - traits::{AsEnsureOriginWithArg, ConstU128, ConstU32, ConstU64}, + traits::{ + tokens::{ + fungible::{NativeFromLeft, NativeOrWithId, UnionOf}, + imbalance::ResolveAssetTo, + }, + AsEnsureOriginWithArg, ConstU128, ConstU32, ConstU64, + }, PalletId, }; use frame_system::{EnsureSigned, EnsureSignedBy}; @@ -34,6 +39,7 @@ use sp_runtime::{ traits::{AccountIdConversion, BlakeTwo256, IdentityLookup}, BuildStorage, }; +use sp_std::default::Default; type Block = frame_system::mocking::MockBlock; @@ -48,6 +54,7 @@ construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -142,38 +149,37 @@ impl pallet_assets::Config for Test { parameter_types! { pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); - pub storage AllowMultiAssetPools: bool = true; - pub storage LiquidityWithdrawalFee: Permill = Permill::from_percent(0); // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero + pub const Native: NativeOrWithId = NativeOrWithId::Native; + pub storage LiquidityWithdrawalFee: Permill = Permill::from_percent(0); } ord_parameter_types! 
{ pub const AssetConversionOrigin: u128 = AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); } +pub type NativeAndAssets = UnionOf, u128>; +pub type AscendingLocator = Ascending>; +pub type WithFirstAssetLocator = WithFirstAsset>; + impl Config for Test { type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type AssetBalance = ::Balance; - type AssetId = u32; + type Balance = ::Balance; + type HigherPrecisionBalance = sp_core::U256; + type AssetKind = NativeOrWithId; + type Assets = NativeAndAssets; + type PoolId = (Self::AssetKind, Self::AssetKind); + type PoolLocator = Chain; type PoolAssetId = u32; - type Assets = Assets; type PoolAssets = PoolAssets; + type PoolSetupFee = ConstU128<100>; // should be more or equal to the existential deposit + type PoolSetupFeeAsset = Native; + type PoolSetupFeeTarget = ResolveAssetTo; type PalletId = AssetConversionPalletId; type WeightInfo = (); type LPFee = ConstU32<3>; // means 0.3% - type PoolSetupFee = ConstU128<100>; // should be more or equal to the existential deposit - type PoolSetupFeeReceiver = AssetConversionOrigin; type LiquidityWithdrawalFee = LiquidityWithdrawalFee; - type AllowMultiAssetPools = AllowMultiAssetPools; type MaxSwapPathLength = ConstU32<4>; type MintMinLiquidity = ConstU128<100>; // 100 is good enough when the main currency has 12 decimals. - - type Balance = u128; - type HigherPrecisionBalance = sp_core::U256; - - type MultiAssetId = NativeOrAssetId; - type MultiAssetIdConverter = NativeOrAssetIdConverter; - #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } diff --git a/substrate/frame/asset-conversion/src/swap.rs b/substrate/frame/asset-conversion/src/swap.rs new file mode 100644 index 0000000000000000000000000000000000000000..a6154e29414767550106544585b592903c3a6f2a --- /dev/null +++ b/substrate/frame/asset-conversion/src/swap.rs @@ -0,0 +1,212 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and implementations for swap between the various asset classes. + +use super::*; + +/// Trait for providing methods to swap between the various asset classes. +pub trait Swap { + /// Measure units of the asset classes for swapping. + type Balance: Balance; + /// Kind of assets that are going to be swapped. + type AssetKind; + + /// Returns the upper limit on the length of the swap path. + fn max_path_len() -> u32; + + /// Swap exactly `amount_in` of asset `path[0]` for asset `path[last]`. + /// If an `amount_out_min` is specified, it will return an error if it is unable to acquire + /// the amount desired. + /// + /// Withdraws the `path[0]` asset from `sender`, deposits the `path[last]` asset to `send_to`, + /// respecting `keep_alive`. + /// + /// If successful, returns the amount of `path[last]` acquired for the `amount_in`. + /// + /// This operation is expected to be atomic. 
+ fn swap_exact_tokens_for_tokens( + sender: AccountId, + path: Vec, + amount_in: Self::Balance, + amount_out_min: Option, + send_to: AccountId, + keep_alive: bool, + ) -> Result; + + /// Take the `path[0]` asset and swap some amount for `amount_out` of the `path[last]`. If an + /// `amount_in_max` is specified, it will return an error if acquiring `amount_out` would be + /// too costly. + /// + /// Withdraws `path[0]` asset from `sender`, deposits `path[last]` asset to `send_to`, + /// respecting `keep_alive`. + /// + /// If successful returns the amount of the `path[0]` taken to provide `path[last]`. + /// + /// This operation is expected to be atomic. + fn swap_tokens_for_exact_tokens( + sender: AccountId, + path: Vec, + amount_out: Self::Balance, + amount_in_max: Option, + send_to: AccountId, + keep_alive: bool, + ) -> Result; +} + +/// Trait providing methods to swap between the various asset classes. +pub trait SwapCredit { + /// Measure units of the asset classes for swapping. + type Balance: Balance; + /// Kind of assets that are going to be swapped. + type AssetKind; + /// Credit implying a negative imbalance in the system that can be placed into an account or + /// alter the total supply. + type Credit; + + /// Returns the upper limit on the length of the swap path. + fn max_path_len() -> u32; + + /// Swap exactly `credit_in` of asset `path[0]` for asset `path[last]`. If `amount_out_min` is + /// provided and the swap can't achieve at least this amount, an error is returned. + /// + /// On a successful swap, the function returns the `credit_out` of `path[last]` obtained from + /// the `credit_in`. On failure, it returns an `Err` containing the original `credit_in` and the + /// associated error code. + /// + /// This operation is expected to be atomic. + fn swap_exact_tokens_for_tokens( + path: Vec, + credit_in: Self::Credit, + amount_out_min: Option, + ) -> Result; + + /// Swaps a portion of `credit_in` of `path[0]` asset to obtain the desired `amount_out` of + /// the `path[last]` asset. The provided `credit_in` must be adequate to achieve the target + /// `amount_out`, or an error will occur. + /// + /// On success, the function returns a (`credit_out`, `credit_change`) tuple, where `credit_out` + /// represents the acquired amount of the `path[last]` asset, and `credit_change` is the + /// remaining portion from the `credit_in`. On failure, an `Err` with the initial `credit_in` + /// and error code is returned. + /// + /// This operation is expected to be atomic. 
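Editor's note: the account-based `Swap` trait above is the surface other pallets are meant to consume instead of reaching for the pallet's inherent functions. A hedged usage sketch, generic over any implementation; the helper name and the fee-payment scenario are illustrative, not part of the patch:

    use pallet_asset_conversion::Swap;
    use sp_runtime::DispatchError;

    /// Convert `who`'s `asset` into exactly `fee` units of `native`, depositing
    /// the proceeds into `fee_pot`. Returns how much of `asset` was spent.
    fn pay_fee_in_asset<AccountId, S: Swap<AccountId>>(
        who: AccountId,
        fee_pot: AccountId,
        asset: S::AssetKind,
        native: S::AssetKind,
        fee: S::Balance,
    ) -> Result<S::Balance, DispatchError> {
        // `None` means "any price is acceptable"; a real integration would pass
        // a slippage bound as `amount_in_max`.
        S::swap_tokens_for_exact_tokens(who, vec![asset, native], fee, None, fee_pot, true)
    }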
+ fn swap_tokens_for_exact_tokens( + path: Vec, + credit_in: Self::Credit, + amount_out: Self::Balance, + ) -> Result<(Self::Credit, Self::Credit), (Self::Credit, DispatchError)>; +} + +impl Swap for Pallet { + type Balance = T::Balance; + type AssetKind = T::AssetKind; + + fn max_path_len() -> u32 { + T::MaxSwapPathLength::get() + } + + fn swap_exact_tokens_for_tokens( + sender: T::AccountId, + path: Vec, + amount_in: Self::Balance, + amount_out_min: Option, + send_to: T::AccountId, + keep_alive: bool, + ) -> Result { + let amount_out = with_storage_layer(|| { + Self::do_swap_exact_tokens_for_tokens( + sender, + path, + amount_in, + amount_out_min, + send_to, + keep_alive, + ) + })?; + Ok(amount_out) + } + + fn swap_tokens_for_exact_tokens( + sender: T::AccountId, + path: Vec, + amount_out: Self::Balance, + amount_in_max: Option, + send_to: T::AccountId, + keep_alive: bool, + ) -> Result { + let amount_in = with_storage_layer(|| { + Self::do_swap_tokens_for_exact_tokens( + sender, + path, + amount_out, + amount_in_max, + send_to, + keep_alive, + ) + })?; + Ok(amount_in) + } +} + +impl SwapCredit for Pallet { + type Balance = T::Balance; + type AssetKind = T::AssetKind; + type Credit = CreditOf; + + fn max_path_len() -> u32 { + T::MaxSwapPathLength::get() + } + + fn swap_exact_tokens_for_tokens( + path: Vec, + credit_in: Self::Credit, + amount_out_min: Option, + ) -> Result { + let credit_asset = credit_in.asset(); + with_transaction(|| -> TransactionOutcome> { + let res = Self::do_swap_exact_credit_tokens_for_tokens(path, credit_in, amount_out_min); + match &res { + Ok(_) => TransactionOutcome::Commit(Ok(res)), + // wrapping `res` with `Ok`, since our `Err` doesn't satisfy the + // `From` bound of the `with_transaction` function. + Err(_) => TransactionOutcome::Rollback(Ok(res)), + } + }) + // should never map an error since `with_transaction` above never returns it. + .map_err(|_| (Self::Credit::zero(credit_asset), DispatchError::Corruption))? + } + + fn swap_tokens_for_exact_tokens( + path: Vec, + credit_in: Self::Credit, + amount_out: Self::Balance, + ) -> Result<(Self::Credit, Self::Credit), (Self::Credit, DispatchError)> { + let credit_asset = credit_in.asset(); + with_transaction(|| -> TransactionOutcome> { + let res = Self::do_swap_credit_tokens_for_exact_tokens(path, credit_in, amount_out); + match &res { + Ok(_) => TransactionOutcome::Commit(Ok(res)), + // wrapping `res` with `Ok`, since our `Err` doesn't satisfy the + // `From` bound of the `with_transaction` function. + Err(_) => TransactionOutcome::Rollback(Ok(res)), + } + }) + // should never map an error since `with_transaction` above never returns it. + .map_err(|_| (Self::Credit::zero(credit_asset), DispatchError::Corruption))? 
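Editor's note: `SwapCredit` is the imbalance-based counterpart — the input is a `Credit` that has already been withdrawn, both failure arms hand that credit back, and the implementation above wraps every swap in `with_transaction` so partially executed hops are rolled back. A sketch of the intended call pattern, assuming a runtime `T` whose `Config::Assets` provides `fungibles::Balanced` as the new trait bounds require:

    use frame_support::traits::{
        fungibles::Balanced,
        tokens::{Fortitude::Polite, Precision::Exact, Preservation::Expendable},
    };
    use pallet_asset_conversion::{Pallet as AssetConversion, SwapCredit};
    use sp_runtime::DispatchError;
    use sp_std::vec::Vec;

    /// Withdraw `amount` of `path[0]` from `who`, swap it along `path`, and
    /// deposit whatever comes out into `beneficiary`.
    fn swap_and_deposit<T: pallet_asset_conversion::Config>(
        who: &T::AccountId,
        beneficiary: &T::AccountId,
        path: Vec<T::AssetKind>,
        amount: T::Balance,
    ) -> Result<(), DispatchError> {
        let asset_in = path.first().cloned().ok_or(DispatchError::Other("empty path"))?;
        let credit_in = T::Assets::withdraw(asset_in, who, amount, Exact, Expendable, Polite)?;

        match <AssetConversion<T> as SwapCredit<T::AccountId>>::swap_exact_tokens_for_tokens(
            path, credit_in, None,
        ) {
            Ok(credit_out) => {
                // Place the acquired asset into the beneficiary's account.
                let _ = T::Assets::resolve(beneficiary, credit_out);
                Ok(())
            },
            Err((credit_in, err)) => {
                // The failed swap returns the original credit; give it back to `who`.
                let _ = T::Assets::resolve(who, credit_in);
                Err(err)
            },
        }
    }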
+ } +} diff --git a/substrate/frame/asset-conversion/src/tests.rs b/substrate/frame/asset-conversion/src/tests.rs index 1c1267ab87b3fad874f6a06a3a917a81ac629d2d..e69d14fcb3c4098282d229309da69c576c983865 100644 --- a/substrate/frame/asset-conversion/src/tests.rs +++ b/substrate/frame/asset-conversion/src/tests.rs @@ -17,9 +17,15 @@ use crate::{mock::*, *}; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, assert_storage_noop, instances::Instance1, - traits::{fungible::Inspect, fungibles::InspectEnumerable, Get}, + traits::{ + fungible, + fungible::{Inspect as FungibleInspect, NativeOrWithId}, + fungibles, + fungibles::{Inspect, InspectEnumerable}, + Get, + }, }; use sp_arithmetic::Permill; use sp_runtime::{DispatchError, TokenError}; @@ -42,18 +48,14 @@ fn events() -> Vec> { result } -fn pools() -> Vec> { +fn pools() -> Vec<::PoolId> { let mut s: Vec<_> = Pools::::iter().map(|x| x.0).collect(); s.sort(); s } -fn assets() -> Vec> { - // if the storage would be public: - // let mut s: Vec<_> = pallet_assets::pallet::Asset::::iter().map(|x| x.0).collect(); - let mut s: Vec<_> = <::Assets>::asset_ids() - .map(|id| NativeOrAssetId::Asset(id)) - .collect(); +fn assets() -> Vec> { + let mut s: Vec<_> = Assets::asset_ids().map(|id| NativeOrWithId::WithId(id)).collect(); s.sort(); s } @@ -64,36 +66,71 @@ fn pool_assets() -> Vec { s } -fn create_tokens(owner: u128, tokens: Vec>) { +fn create_tokens(owner: u128, tokens: Vec>) { + create_tokens_with_ed(owner, tokens, 1) +} + +fn create_tokens_with_ed(owner: u128, tokens: Vec>, ed: u128) { for token_id in tokens { - let MultiAssetIdConversionResult::Converted(asset_id) = - NativeOrAssetIdConverter::try_convert(&token_id) - else { - unreachable!("invalid token") + let asset_id = match token_id { + NativeOrWithId::WithId(id) => id, + _ => unreachable!("invalid token"), }; - assert_ok!(Assets::force_create(RuntimeOrigin::root(), asset_id, owner, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), asset_id, owner, false, ed)); } } -fn balance(owner: u128, token_id: NativeOrAssetId) -> u128 { - match token_id { - NativeOrAssetId::Native => <::Currency>::free_balance(owner), - NativeOrAssetId::Asset(token_id) => <::Assets>::balance(token_id, owner), - } +fn balance(owner: u128, token_id: NativeOrWithId) -> u128 { + <::Assets>::balance(token_id, &owner) } fn pool_balance(owner: u128, token_id: u32) -> u128 { <::PoolAssets>::balance(token_id, owner) } -fn get_ed() -> u128 { - <::Currency>::minimum_balance() +fn get_native_ed() -> u128 { + <::Assets>::minimum_balance(NativeOrWithId::Native) } macro_rules! bvec { - ($( $x:tt )*) => { - vec![$( $x )*].try_into().unwrap() - } + ($($x:expr),+ $(,)?) 
=> ( + vec![$( Box::new( $x ), )*] + ) +} + +#[test] +fn validate_with_first_asset_pool_id_locator() { + new_test_ext().execute_with(|| { + use NativeOrWithId::{Native, WithId}; + assert_eq!(WithFirstAssetLocator::pool_id(&Native, &WithId(2)), Ok((Native, WithId(2)))); + assert_eq!(WithFirstAssetLocator::pool_id(&WithId(2), &Native), Ok((Native, WithId(2)))); + assert_noop!(WithFirstAssetLocator::pool_id(&Native, &Native), ()); + assert_noop!(WithFirstAssetLocator::pool_id(&WithId(2), &WithId(1)), ()); + }); +} + +#[test] +fn validate_ascending_pool_id_locator() { + new_test_ext().execute_with(|| { + use NativeOrWithId::{Native, WithId}; + assert_eq!(AscendingLocator::pool_id(&Native, &WithId(2)), Ok((Native, WithId(2)))); + assert_eq!(AscendingLocator::pool_id(&WithId(2), &Native), Ok((Native, WithId(2)))); + assert_eq!(AscendingLocator::pool_id(&WithId(2), &WithId(1)), Ok((WithId(1), WithId(2)))); + assert_eq!(AscendingLocator::pool_id(&Native, &Native), Err(())); + assert_eq!(AscendingLocator::pool_id(&WithId(1), &WithId(1)), Err(())); + }); +} + +#[test] +fn validate_native_or_with_id_sorting() { + new_test_ext().execute_with(|| { + use NativeOrWithId::{Native, WithId}; + assert!(WithId(2) > WithId(1)); + assert!(WithId(1) <= WithId(1)); + assert_eq!(WithId(1), WithId(1)); + assert_eq!(Native::, Native::); + assert!(Native < WithId(1)); + }); } #[test] @@ -102,10 +139,11 @@ fn check_pool_accounts_dont_collide() { let mut map = HashSet::new(); for i in 0..1_000_000u32 { - let account = AssetConversion::get_pool_account(&( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(i), - )); + let account: u128 = ::PoolLocator::address(&( + NativeOrWithId::Native, + NativeOrWithId::WithId(i), + )) + .unwrap(); if map.contains(&account) { panic!("Collision at {}", i); } @@ -137,59 +175,67 @@ fn can_create_pool() { let asset_account_deposit: u128 = >::AssetAccountDeposit::get(); let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let pool_id = (token_1, token_2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let pool_id = (token_1.clone(), token_2.clone()); - create_tokens(user, vec![token_2]); + create_tokens(user, vec![token_2.clone()]); let lp_token = AssetConversion::get_next_pool_asset_id(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 1000)); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_1)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_2.clone()), + Box::new(token_1.clone()) + )); let setup_fee = <::PoolSetupFee as Get<::Balance>>::get(); - let pool_account = <::PoolSetupFeeReceiver as Get>::get(); + let pool_account = AssetConversionOrigin::get(); assert_eq!( - balance(user, NativeOrAssetId::Native), + balance(user, NativeOrWithId::Native), 1000 - (setup_fee + asset_account_deposit) ); - assert_eq!(balance(pool_account, NativeOrAssetId::Native), setup_fee); + assert_eq!(balance(pool_account, NativeOrWithId::Native), setup_fee); assert_eq!(lp_token + 1, AssetConversion::get_next_pool_asset_id()); assert_eq!( events(), [Event::::PoolCreated { creator: user, - pool_id, - pool_account: AssetConversion::get_pool_account(&pool_id), + pool_id: pool_id.clone(), + pool_account: ::PoolLocator::address(&pool_id).unwrap(), lp_token }] ); assert_eq!(pools(), vec![pool_id]); - assert_eq!(assets(), vec![token_2]); + assert_eq!(assets(), vec![token_2.clone()]); assert_eq!(pool_assets(), vec![lp_token]); 
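// The duplicate-pair checks below now surface as `InvalidAssetPair` (formerly
// `EqualAssets`): the configured `PoolLocator` simply refuses to derive a pool id for
// such a pair, exactly as exercised above, e.g.
// `AscendingLocator::pool_id(&Native, &Native) == Err(())`.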
assert_noop!( - AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_1), - Error::::EqualAssets + AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_1.clone()) + ), + Error::::InvalidAssetPair ); assert_noop!( - AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_2), - Error::::EqualAssets + AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_2.clone()), + Box::new(token_2.clone()) + ), + Error::::InvalidAssetPair ); - // validate we can create Asset(1)/Asset(2) pool - let token_1 = NativeOrAssetId::Asset(1); - create_tokens(user, vec![token_1]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); - - // validate we can force the first asset to be the Native currency only - AllowMultiAssetPools::set(&false); - let token_1 = NativeOrAssetId::Asset(3); - assert_noop!( - AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2), - Error::::PoolMustContainNativeCurrency - ); + // validate we cannot create WithId(1)/WithId(2) pool + let token_1 = NativeOrWithId::WithId(1); + create_tokens(user, vec![token_1.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); }); } @@ -197,25 +243,37 @@ fn can_create_pool() { fn create_same_pool_twice_should_fail() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user, vec![token_2]); + create_tokens(user, vec![token_2.clone()]); let lp_token = AssetConversion::get_next_pool_asset_id(); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_1)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_2.clone()), + Box::new(token_1.clone()) + )); let expected_free = lp_token + 1; assert_eq!(expected_free, AssetConversion::get_next_pool_asset_id()); assert_noop!( - AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_1), + AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_2.clone()), + Box::new(token_1.clone()) + ), Error::::PoolExists ); assert_eq!(expected_free, AssetConversion::get_next_pool_asset_id()); // Try switching the same tokens around: assert_noop!( - AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2), + AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + ), Error::::PoolExists ); assert_eq!(expected_free, AssetConversion::get_next_pool_asset_id()); @@ -226,35 +284,43 @@ fn create_same_pool_twice_should_fail() { fn different_pools_should_have_different_lp_tokens() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let token_3 = NativeOrAssetId::Asset(3); - let pool_id_1_2 = (token_1, token_2); - let pool_id_1_3 = (token_1, token_3); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let token_3 = NativeOrWithId::WithId(3); + let pool_id_1_2 = (token_1.clone(), token_2.clone()); + let pool_id_1_3 = (token_1.clone(), token_3.clone()); - create_tokens(user, vec![token_2, token_3]); + create_tokens(user, vec![token_2.clone(), token_3.clone()]); let lp_token2_1 = 
AssetConversion::get_next_pool_asset_id(); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_1)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_2.clone()), + Box::new(token_1.clone()) + )); let lp_token3_1 = AssetConversion::get_next_pool_asset_id(); assert_eq!( events(), [Event::::PoolCreated { creator: user, - pool_id: pool_id_1_2, - pool_account: AssetConversion::get_pool_account(&pool_id_1_2), + pool_id: pool_id_1_2.clone(), + pool_account: ::PoolLocator::address(&pool_id_1_2).unwrap(), lp_token: lp_token2_1 }] ); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_3, token_1)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_3.clone()), + Box::new(token_1.clone()) + )); assert_eq!( events(), [Event::::PoolCreated { creator: user, - pool_id: pool_id_1_3, - pool_account: AssetConversion::get_pool_account(&pool_id_1_3), + pool_id: pool_id_1_3.clone(), + pool_account: ::PoolLocator::address(&pool_id_1_3).unwrap(), lp_token: lp_token3_1, }] ); @@ -267,25 +333,33 @@ fn different_pools_should_have_different_lp_tokens() { fn can_add_liquidity() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let token_3 = NativeOrAssetId::Asset(3); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let token_3 = NativeOrWithId::WithId(3); - create_tokens(user, vec![token_2, token_3]); + create_tokens(user, vec![token_2.clone(), token_3.clone()]); let lp_token1 = AssetConversion::get_next_pool_asset_id(); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); let lp_token2 = AssetConversion::get_next_pool_asset_id(); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_3)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_3.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 * 2 + ed)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 3, user, 1000)); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 10000, 10, 10000, @@ -293,28 +367,28 @@ fn can_add_liquidity() { user, )); - let pool_id = (token_1, token_2); + let pool_id = (token_1.clone(), token_2.clone()); assert!(events().contains(&Event::::LiquidityAdded { who: user, mint_to: user, - pool_id, + pool_id: pool_id.clone(), amount1_provided: 10000, amount2_provided: 10, lp_token: lp_token1, lp_token_minted: 216, })); - let pallet_account = AssetConversion::get_pool_account(&pool_id); - assert_eq!(balance(pallet_account, token_1), 10000); - assert_eq!(balance(pallet_account, token_2), 10); - assert_eq!(balance(user, token_1), 10000 + ed); - assert_eq!(balance(user, token_2), 1000 - 10); + let pallet_account = ::PoolLocator::address(&pool_id).unwrap(); + assert_eq!(balance(pallet_account, token_1.clone()), 10000); + assert_eq!(balance(pallet_account, token_2.clone()), 10); + assert_eq!(balance(user, token_1.clone()), 10000 + ed); + assert_eq!(balance(user, 
token_2.clone()), 1000 - 10); assert_eq!(pool_balance(user, lp_token1), 216); // try to pass the non-native - native assets, the result should be the same assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_3, - token_1, + Box::new(token_3.clone()), + Box::new(token_1.clone()), 10, 10000, 10, @@ -322,21 +396,21 @@ fn can_add_liquidity() { user, )); - let pool_id = (token_1, token_3); + let pool_id = (token_1.clone(), token_3.clone()); assert!(events().contains(&Event::::LiquidityAdded { who: user, mint_to: user, - pool_id, - amount1_provided: 10000, - amount2_provided: 10, + pool_id: pool_id.clone(), + amount1_provided: 10, + amount2_provided: 10000, lp_token: lp_token2, lp_token_minted: 216, })); - let pallet_account = AssetConversion::get_pool_account(&pool_id); - assert_eq!(balance(pallet_account, token_1), 10000); - assert_eq!(balance(pallet_account, token_3), 10); - assert_eq!(balance(user, token_1), ed); - assert_eq!(balance(user, token_3), 1000 - 10); + let pallet_account = ::PoolLocator::address(&pool_id).unwrap(); + assert_eq!(balance(pallet_account, token_1.clone()), 10000); + assert_eq!(balance(pallet_account, token_3.clone()), 10); + assert_eq!(balance(user, token_1.clone()), ed); + assert_eq!(balance(user, token_3.clone()), 1000 - 10); assert_eq!(pool_balance(user, lp_token2), 216); }); } @@ -345,11 +419,15 @@ fn can_add_liquidity() { fn add_tiny_liquidity_leads_to_insufficient_liquidity_minted_error() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 1000)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); @@ -357,8 +435,8 @@ fn add_tiny_liquidity_leads_to_insufficient_liquidity_minted_error() { assert_noop!( AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 1, 1, 1, @@ -371,9 +449,9 @@ fn add_tiny_liquidity_leads_to_insufficient_liquidity_minted_error() { assert_noop!( AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, - get_ed(), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + get_native_ed(), 1, 1, 1, @@ -388,27 +466,37 @@ fn add_tiny_liquidity_leads_to_insufficient_liquidity_minted_error() { fn add_tiny_liquidity_directly_to_pool_address() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let token_3 = NativeOrAssetId::Asset(3); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let token_3 = NativeOrWithId::WithId(3); - create_tokens(user, vec![token_2, token_3]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_3)); + create_tokens(user, vec![token_2.clone(), token_3.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); 
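// The `ed` fetched just below via `get_native_ed()` now comes from the unioned
// `T::Assets` type rather than the `Currency` trait; a sketch of the equivalent direct
// query (assuming the mock's `Test` runtime and `u128` accounts):
//
//     let native_ed: u128 =
//         <<Test as Config>::Assets as fungibles::Inspect<u128>>::minimum_balance(
//             NativeOrWithId::Native,
//         );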
+ assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_3.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 * 2 + ed)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 3, user, 1000)); - // check we're still able to add the liquidity even when the pool already has some token_1 - let pallet_account = AssetConversion::get_pool_account(&(token_1, token_2)); + // check we're still able to add the liquidity even when the pool already has some + // token_1.clone() + let pallet_account = + ::PoolLocator::address(&(token_1.clone(), token_2.clone())).unwrap(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), pallet_account, 1000)); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 10000, 10, 10000, @@ -416,13 +504,11 @@ fn add_tiny_liquidity_directly_to_pool_address() { user, )); - // check the same but for token_3 (non-native token) - let pallet_account = AssetConversion::get_pool_account(&(token_1, token_3)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, pallet_account, 1)); + // check the same but for token_3.clone() (non-native token) assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_3, + Box::new(token_1.clone()), + Box::new(token_3.clone()), 10000, 10, 10000, @@ -436,21 +522,31 @@ fn add_tiny_liquidity_directly_to_pool_address() { fn can_remove_liquidity() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let pool_id = (token_1, token_2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let pool_id = (token_1.clone(), token_2.clone()); - create_tokens(user, vec![token_2]); + create_tokens(user, vec![token_2.clone()]); let lp_token = AssetConversion::get_next_pool_asset_id(); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000000000)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 100000)); + let ed_token_1 = >::minimum_balance(); + let ed_token_2 = >::minimum_balance(2); + assert_ok!(Balances::force_set_balance( + RuntimeOrigin::root(), + user, + 10000000000 + ed_token_1 + )); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 100000 + ed_token_2)); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 1000000000, 100000, 1000000000, @@ -463,8 +559,8 @@ fn can_remove_liquidity() { assert_ok!(AssetConversion::remove_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), total_lp_received, 0, 0, @@ -474,7 +570,7 @@ fn can_remove_liquidity() { assert!(events().contains(&Event::::LiquidityRemoved { who: user, withdraw_to: user, - pool_id, + pool_id: pool_id.clone(), amount1: 899991000, amount2: 89999, lp_token, @@ -482,13 +578,16 @@ fn can_remove_liquidity() { withdrawal_fee: ::LiquidityWithdrawalFee::get() })); - let pool_account = 
AssetConversion::get_pool_account(&pool_id); - assert_eq!(balance(pool_account, token_1), 100009000); - assert_eq!(balance(pool_account, token_2), 10001); + let pool_account = ::PoolLocator::address(&pool_id).unwrap(); + assert_eq!(balance(pool_account, token_1.clone()), 100009000); + assert_eq!(balance(pool_account, token_2.clone()), 10001); assert_eq!(pool_balance(pool_account, lp_token), 100); - assert_eq!(balance(user, token_1), 10000000000 - 1000000000 + 899991000); - assert_eq!(balance(user, token_2), 89999); + assert_eq!( + balance(user, token_1.clone()), + 10000000000 - 1000000000 + 899991000 + ed_token_1 + ); + assert_eq!(balance(user, token_2.clone()), 89999 + ed_token_2); assert_eq!(pool_balance(user, lp_token), 0); }); } @@ -497,20 +596,28 @@ fn can_remove_liquidity() { fn can_not_redeem_more_lp_tokens_than_were_minted() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); let lp_token = AssetConversion::get_next_pool_asset_id(); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + get_ed())); + assert_ok!(Balances::force_set_balance( + RuntimeOrigin::root(), + user, + 10000 + get_native_ed() + )); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 10000, 10, 10000, @@ -524,8 +631,8 @@ fn can_not_redeem_more_lp_tokens_than_were_minted() { assert_noop!( AssetConversion::remove_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 216 + 1, // Try and redeem 10 lp tokens while only 9 minted. 0, 0, @@ -540,19 +647,23 @@ fn can_not_redeem_more_lp_tokens_than_were_minted() { fn can_quote_price() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 100000)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 10000, 200, 1, @@ -562,8 +673,8 @@ fn can_quote_price() { assert_eq!( AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), 3000, false, ), @@ -572,8 +683,8 @@ fn can_quote_price() { // including fee so should get less out... 
assert_eq!( AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), 3000, true, ), @@ -583,8 +694,8 @@ fn can_quote_price() { // (if the above accidentally exchanged then it would not give same quote as before) assert_eq!( AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), 3000, false, ), @@ -593,8 +704,8 @@ fn can_quote_price() { // including fee so should get less out... assert_eq!( AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), 3000, true, ), @@ -604,8 +715,8 @@ fn can_quote_price() { // Check inverse: assert_eq!( AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Asset(2), - NativeOrAssetId::Native, + NativeOrWithId::WithId(2), + NativeOrWithId::Native, 60, false, ), @@ -614,8 +725,8 @@ fn can_quote_price() { // including fee so should get less out... assert_eq!( AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Asset(2), - NativeOrAssetId::Native, + NativeOrWithId::WithId(2), + NativeOrWithId::Native, 60, true, ), @@ -627,8 +738,8 @@ fn can_quote_price() { // assert_eq!( AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), 60, false, ), @@ -637,8 +748,8 @@ fn can_quote_price() { // including fee so should need to put more in... assert_eq!( AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), 60, true, ), @@ -648,8 +759,8 @@ fn can_quote_price() { // (if the above accidentally exchanged then it would not give same quote as before) assert_eq!( AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), 60, false, ), @@ -658,8 +769,8 @@ fn can_quote_price() { // including fee so should need to put more in... assert_eq!( AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), 60, true, ), @@ -669,8 +780,8 @@ fn can_quote_price() { // Check inverse: assert_eq!( AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(2), - NativeOrAssetId::Native, + NativeOrWithId::WithId(2), + NativeOrWithId::Native, 3000, false, ), @@ -679,8 +790,8 @@ fn can_quote_price() { // including fee so should need to put more in... 
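// Concretely, both quote directions in this test follow the constant-product rule with
// the LP fee charged on the input side; a sketch (the 3-per-mille fee mirrors the
// mock's `LPFee` and, like the function names, is an assumption here):
//
//     fn quote_out(amount_in: u128, reserve_in: u128, reserve_out: u128) -> u128 {
//         let amount_in_with_fee = amount_in * (1000 - 3); // 0.3% LP fee
//         (amount_in_with_fee * reserve_out) / (reserve_in * 1000 + amount_in_with_fee)
//     }
//
//     fn quote_in(amount_out: u128, reserve_in: u128, reserve_out: u128) -> u128 {
//         // the exact-output direction rounds against the caller by adding one unit
//         (reserve_in * amount_out * 1000) / ((reserve_out - amount_out) * (1000 - 3)) + 1
//     }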
assert_eq!( AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(2), - NativeOrAssetId::Native, + NativeOrWithId::WithId(2), + NativeOrWithId::Native, 3000, true, ), @@ -694,14 +805,14 @@ fn can_quote_price() { assert_eq!( AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Asset(2), - NativeOrAssetId::Native, + NativeOrWithId::WithId(2), + NativeOrWithId::Native, amount_in, false, ) .and_then(|amount| AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), amount, false, )), @@ -709,14 +820,14 @@ fn can_quote_price() { ); assert_eq!( AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), amount_in, false, ) .and_then(|amount| AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Asset(2), - NativeOrAssetId::Native, + NativeOrWithId::WithId(2), + NativeOrWithId::Native, amount, false, )), @@ -725,14 +836,14 @@ fn can_quote_price() { assert_eq!( AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(2), - NativeOrAssetId::Native, + NativeOrWithId::WithId(2), + NativeOrWithId::Native, amount_in, false, ) .and_then(|amount| AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), amount, false, )), @@ -740,14 +851,14 @@ fn can_quote_price() { ); assert_eq!( AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(2), + NativeOrWithId::Native, + NativeOrWithId::WithId(2), amount_in, false, ) .and_then(|amount| AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(2), - NativeOrAssetId::Native, + NativeOrWithId::WithId(2), + NativeOrWithId::Native, amount, false, )), @@ -761,19 +872,23 @@ fn quote_price_exact_tokens_for_tokens_matches_execution() { new_test_ext().execute_with(|| { let user = 1; let user2 = 2; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 100000)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 10000, 200, 1, @@ -784,23 +899,28 @@ fn quote_price_exact_tokens_for_tokens_matches_execution() { let amount = 1; let quoted_price = 49; assert_eq!( - AssetConversion::quote_price_exact_tokens_for_tokens(token_2, token_1, amount, true,), + AssetConversion::quote_price_exact_tokens_for_tokens( + token_2.clone(), + token_1.clone(), + amount, + true, + ), Some(quoted_price) ); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user2, amount)); let prior_dot_balance = 20000; - assert_eq!(prior_dot_balance, balance(user2, token_1)); + assert_eq!(prior_dot_balance, balance(user2, token_1.clone())); 
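// Worked figure for `quoted_price` above, using the same constant-product rule with the
// fee on the input side (reserves after `add_liquidity`: 200 of the asset and 10000
// native; 0.3% fee assumed from the mock):
// (1 * 997 * 10000) / (200 * 1000 + 1 * 997) = 9_970_000 / 200_997, which truncates to
// 49, exactly what the swap below credits to `user2`.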
assert_ok!(AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user2), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], amount, 1, user2, false, )); - assert_eq!(prior_dot_balance + quoted_price, balance(user2, token_1)); + assert_eq!(prior_dot_balance + quoted_price, balance(user2, token_1.clone())); }); } @@ -809,19 +929,23 @@ fn quote_price_tokens_for_exact_tokens_matches_execution() { new_test_ext().execute_with(|| { let user = 1; let user2 = 2; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 100000)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 10000, 200, 1, @@ -832,26 +956,31 @@ fn quote_price_tokens_for_exact_tokens_matches_execution() { let amount = 49; let quoted_price = 1; assert_eq!( - AssetConversion::quote_price_tokens_for_exact_tokens(token_2, token_1, amount, true,), + AssetConversion::quote_price_tokens_for_exact_tokens( + token_2.clone(), + token_1.clone(), + amount, + true, + ), Some(quoted_price) ); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user2, amount)); let prior_dot_balance = 20000; - assert_eq!(prior_dot_balance, balance(user2, token_1)); + assert_eq!(prior_dot_balance, balance(user2, token_1.clone())); let prior_asset_balance = 49; - assert_eq!(prior_asset_balance, balance(user2, token_2)); + assert_eq!(prior_asset_balance, balance(user2, token_2.clone())); assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user2), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], amount, 1, user2, false, )); - assert_eq!(prior_dot_balance + amount, balance(user2, token_1)); - assert_eq!(prior_asset_balance - quoted_price, balance(user2, token_2)); + assert_eq!(prior_dot_balance + amount, balance(user2, token_1.clone())); + assert_eq!(prior_asset_balance - quoted_price, balance(user2, token_2.clone())); }); } @@ -859,14 +988,18 @@ fn quote_price_tokens_for_exact_tokens_matches_execution() { fn can_swap_with_native() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let pool_id = (token_1, token_2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let pool_id = (token_1.clone(), token_2.clone()); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + ed)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); @@ -875,8 +1008,8 @@ fn can_swap_with_native() { assert_ok!(AssetConversion::add_liquidity( 
RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), liquidity1, liquidity2, 1, @@ -892,18 +1025,18 @@ fn can_swap_with_native() { assert_ok!(AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], input_amount, 1, user, false, )); - let pallet_account = AssetConversion::get_pool_account(&pool_id); - assert_eq!(balance(user, token_1), expect_receive + ed); - assert_eq!(balance(user, token_2), 1000 - liquidity2 - input_amount); - assert_eq!(balance(pallet_account, token_1), liquidity1 - expect_receive); - assert_eq!(balance(pallet_account, token_2), liquidity2 + input_amount); + let pallet_account = ::PoolLocator::address(&pool_id).unwrap(); + assert_eq!(balance(user, token_1.clone()), expect_receive + ed); + assert_eq!(balance(user, token_2.clone()), 1000 - liquidity2 - input_amount); + assert_eq!(balance(pallet_account, token_1.clone()), liquidity1 - expect_receive); + assert_eq!(balance(pallet_account, token_2.clone()), liquidity2 + input_amount); }); } @@ -911,10 +1044,14 @@ fn can_swap_with_native() { fn can_swap_with_realistic_values() { new_test_ext().execute_with(|| { let user = 1; - let dot = NativeOrAssetId::Native; - let usd = NativeOrAssetId::Asset(2); - create_tokens(user, vec![usd]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), dot, usd)); + let dot = NativeOrWithId::Native; + let usd = NativeOrWithId::WithId(2); + create_tokens(user, vec![usd.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(dot.clone()), + Box::new(usd.clone()) + )); const UNIT: u128 = 1_000_000_000; @@ -925,8 +1062,8 @@ fn can_swap_with_realistic_values() { let liquidity_usd = 1_000_000 * UNIT; assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - dot, - usd, + Box::new(dot.clone()), + Box::new(usd.clone()), liquidity_dot, liquidity_usd, 1, @@ -938,7 +1075,7 @@ fn can_swap_with_realistic_values() { assert_ok!(AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![usd, dot], + bvec![usd.clone(), dot.clone()], input_amount, 1, user, @@ -948,9 +1085,9 @@ fn can_swap_with_realistic_values() { assert!(events().contains(&Event::::SwapExecuted { who: user, send_to: user, - path: bvec![usd, dot], amount_in: 10 * UNIT, // usd amount_out: 1_993_980_120, // About 2 dot after div by UNIT. 
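// The `path` field below now reports each hop as an `(asset, amount)` pair, so its
// first entry mirrors `amount_in` and its last entry mirrors `amount_out`.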
+ path: vec![(usd, 10 * UNIT), (dot, 1_993_980_120)], })); }); } @@ -959,17 +1096,21 @@ fn can_swap_with_realistic_values() { fn can_not_swap_in_pool_with_no_liquidity_added_yet() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); // Check can't swap an empty pool assert_noop!( AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], 10, 1, user, @@ -984,15 +1125,19 @@ fn can_not_swap_in_pool_with_no_liquidity_added_yet() { fn check_no_panic_when_try_swap_close_to_empty_pool() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let pool_id = (token_1, token_2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let pool_id = (token_1.clone(), token_2.clone()); let lp_token = AssetConversion::get_next_pool_asset_id(); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + ed)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); @@ -1001,8 +1146,8 @@ fn check_no_panic_when_try_swap_close_to_empty_pool() { assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), liquidity1, liquidity2, 1, @@ -1014,21 +1159,21 @@ fn check_no_panic_when_try_swap_close_to_empty_pool() { assert!(events().contains(&Event::::LiquidityAdded { who: user, mint_to: user, - pool_id, + pool_id: pool_id.clone(), amount1_provided: liquidity1, amount2_provided: liquidity2, lp_token, lp_token_minted, })); - let pallet_account = AssetConversion::get_pool_account(&pool_id); - assert_eq!(balance(pallet_account, token_1), liquidity1); - assert_eq!(balance(pallet_account, token_2), liquidity2); + let pallet_account = ::PoolLocator::address(&pool_id).unwrap(); + assert_eq!(balance(pallet_account, token_1.clone()), liquidity1); + assert_eq!(balance(pallet_account, token_2.clone()), liquidity2); assert_ok!(AssetConversion::remove_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), lp_token_minted, 1, 1, @@ -1037,33 +1182,33 @@ fn check_no_panic_when_try_swap_close_to_empty_pool() { // Now, the pool should exist but be almost empty. // Let's try and drain it. 
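// With pool accounts now kept alive by the fungible/fungibles implementations
// themselves, over-draining shows up as `TokenError::NotExpendable` from the token
// layer instead of a pallet-specific error: at most
// `balance(pallet_account, Native) - get_native_ed()` of the native reserve may leave
// the pool, which is why the `708 - ed + 1` attempt below fails while the 608 swap
// goes through.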
- assert_eq!(balance(pallet_account, token_1), 708); - assert_eq!(balance(pallet_account, token_2), 15); + assert_eq!(balance(pallet_account, token_1.clone()), 708); + assert_eq!(balance(pallet_account, token_2.clone()), 15); // validate the reserve should always stay above the ED assert_noop!( AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], 708 - ed + 1, // amount_out 500, // amount_in_max user, false, ), - Error::::ReserveLeftLessThanMinimal + TokenError::NotExpendable, ); assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], 608, // amount_out 500, // amount_in_max user, false, )); - let token_1_left = balance(pallet_account, token_1); - let token_2_left = balance(pallet_account, token_2); + let token_1_left = balance(pallet_account, token_1.clone()); + let token_2_left = balance(pallet_account, token_2.clone()); assert_eq!(token_1_left, 708 - 608); // The price for the last tokens should be very high @@ -1077,7 +1222,7 @@ fn check_no_panic_when_try_swap_close_to_empty_pool() { assert_noop!( AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], token_1_left - 1, // amount_out 1000, // amount_in_max user, @@ -1090,7 +1235,7 @@ fn check_no_panic_when_try_swap_close_to_empty_pool() { assert_noop!( AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], token_1_left, // amount_out 1000, // amount_in_max user, @@ -1105,13 +1250,21 @@ fn check_no_panic_when_try_swap_close_to_empty_pool() { fn swap_should_not_work_if_too_much_slippage() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + get_ed())); + assert_ok!(Balances::force_set_balance( + RuntimeOrigin::root(), + user, + 10000 + get_native_ed() + )); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); let liquidity1 = 10000; @@ -1119,8 +1272,8 @@ fn swap_should_not_work_if_too_much_slippage() { assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), liquidity1, liquidity2, 1, @@ -1133,7 +1286,7 @@ fn swap_should_not_work_if_too_much_slippage() { assert_noop!( AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![token_2, token_1], + bvec![token_2.clone(), token_1.clone()], exchange_amount, // amount_in 4000, // amount_out_min user, @@ -1148,28 +1301,32 @@ fn swap_should_not_work_if_too_much_slippage() { fn can_swap_tokens_for_exact_tokens() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let pool_id = (token_1, token_2); + let token_1 = NativeOrWithId::Native; + let token_2 = 
NativeOrWithId::WithId(2); + let pool_id = (token_1.clone(), token_2.clone()); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + ed)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); - let pallet_account = AssetConversion::get_pool_account(&pool_id); - let before1 = balance(pallet_account, token_1) + balance(user, token_1); - let before2 = balance(pallet_account, token_2) + balance(user, token_2); + let pallet_account = ::PoolLocator::address(&pool_id).unwrap(); + let before1 = balance(pallet_account, token_1.clone()) + balance(user, token_1.clone()); + let before2 = balance(pallet_account, token_2.clone()) + balance(user, token_2.clone()); let liquidity1 = 10000; let liquidity2 = 200; assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), liquidity1, liquidity2, 1, @@ -1184,23 +1341,29 @@ fn can_swap_tokens_for_exact_tokens() { assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_2], + bvec![token_1.clone(), token_2.clone()], exchange_out, // amount_out 3500, // amount_in_max user, true, )); - assert_eq!(balance(user, token_1), 10000 + ed - expect_in); - assert_eq!(balance(user, token_2), 1000 - liquidity2 + exchange_out); - assert_eq!(balance(pallet_account, token_1), liquidity1 + expect_in); - assert_eq!(balance(pallet_account, token_2), liquidity2 - exchange_out); + assert_eq!(balance(user, token_1.clone()), 10000 + ed - expect_in); + assert_eq!(balance(user, token_2.clone()), 1000 - liquidity2 + exchange_out); + assert_eq!(balance(pallet_account, token_1.clone()), liquidity1 + expect_in); + assert_eq!(balance(pallet_account, token_2.clone()), liquidity2 - exchange_out); // check invariants: // native and asset totals should be preserved. 
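// These sums hold because a swap only moves balances between the trader and the pool
// account; nothing is minted or burned along the way, so per-asset totals are conserved.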
- assert_eq!(before1, balance(pallet_account, token_1) + balance(user, token_1)); - assert_eq!(before2, balance(pallet_account, token_2) + balance(user, token_2)); + assert_eq!( + before1, + balance(pallet_account, token_1.clone()) + balance(user, token_1.clone()) + ); + assert_eq!( + before2, + balance(pallet_account, token_2.clone()) + balance(user, token_2.clone()) + ); }); } @@ -1209,34 +1372,40 @@ fn can_swap_tokens_for_exact_tokens_when_not_liquidity_provider() { new_test_ext().execute_with(|| { let user = 1; let user2 = 2; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let pool_id = (token_1, token_2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let pool_id = (token_1.clone(), token_2.clone()); let lp_token = AssetConversion::get_next_pool_asset_id(); - create_tokens(user2, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user2), token_1, token_2)); + create_tokens(user2, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user2), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); let base1 = 10000; let base2 = 1000; assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, base1 + ed)); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, base1 + ed)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 2, user2, base2)); - let pallet_account = AssetConversion::get_pool_account(&pool_id); - let before1 = - balance(pallet_account, token_1) + balance(user, token_1) + balance(user2, token_1); - let before2 = - balance(pallet_account, token_2) + balance(user, token_2) + balance(user2, token_2); + let pallet_account = ::PoolLocator::address(&pool_id).unwrap(); + let before1 = balance(pallet_account, token_1.clone()) + + balance(user, token_1.clone()) + + balance(user2, token_1.clone()); + let before2 = balance(pallet_account, token_2.clone()) + + balance(user, token_2.clone()) + + balance(user2, token_2.clone()); let liquidity1 = 10000; let liquidity2 = 200; assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user2), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), liquidity1, liquidity2, 1, @@ -1244,8 +1413,8 @@ fn can_swap_tokens_for_exact_tokens_when_not_liquidity_provider() { user2, )); - assert_eq!(balance(user, token_1), base1 + ed); - assert_eq!(balance(user, token_2), 0); + assert_eq!(balance(user, token_1.clone()), base1 + ed); + assert_eq!(balance(user, token_2.clone()), 0); let exchange_out = 50; let expect_in = AssetConversion::get_amount_in(&exchange_out, &liquidity1, &liquidity2) @@ -1254,28 +1423,32 @@ fn can_swap_tokens_for_exact_tokens_when_not_liquidity_provider() { assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_2], + bvec![token_1.clone(), token_2.clone()], exchange_out, // amount_out 3500, // amount_in_max user, true, )); - assert_eq!(balance(user, token_1), base1 + ed - expect_in); - assert_eq!(balance(pallet_account, token_1), liquidity1 + expect_in); - assert_eq!(balance(user, token_2), exchange_out); - assert_eq!(balance(pallet_account, token_2), liquidity2 - exchange_out); + assert_eq!(balance(user, token_1.clone()), base1 + ed - expect_in); + assert_eq!(balance(pallet_account, token_1.clone()), liquidity1 + expect_in); + assert_eq!(balance(user, token_2.clone()), exchange_out); + assert_eq!(balance(pallet_account, 
token_2.clone()), liquidity2 - exchange_out); // check invariants: // native and asset totals should be preserved. assert_eq!( before1, - balance(pallet_account, token_1) + balance(user, token_1) + balance(user2, token_1) + balance(pallet_account, token_1.clone()) + + balance(user, token_1.clone()) + + balance(user2, token_1.clone()) ); assert_eq!( before2, - balance(pallet_account, token_2) + balance(user, token_2) + balance(user2, token_2) + balance(pallet_account, token_2.clone()) + + balance(user, token_2.clone()) + + balance(user2, token_2.clone()) ); let lp_token_minted = pool_balance(user2, lp_token); @@ -1283,8 +1456,8 @@ fn can_swap_tokens_for_exact_tokens_when_not_liquidity_provider() { assert_ok!(AssetConversion::remove_liquidity( RuntimeOrigin::signed(user2), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), lp_token_minted, 0, 0, @@ -1298,21 +1471,26 @@ fn swap_when_existential_deposit_would_cause_reaping_but_keep_alive_set() { new_test_ext().execute_with(|| { let user = 1; let user2 = 2; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user2, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user2), token_1, token_2)); + create_tokens(user2, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user2), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 101)); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, 10000 + ed)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 2, user2, 1000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 2, user, 2)); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user2), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 10000, 200, 1, @@ -1323,7 +1501,7 @@ fn swap_when_existential_deposit_would_cause_reaping_but_keep_alive_set() { assert_noop!( AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_2], + bvec![token_1.clone(), token_2.clone()], 1, // amount_out 101, // amount_in_max user, @@ -1335,7 +1513,7 @@ fn swap_when_existential_deposit_would_cause_reaping_but_keep_alive_set() { assert_noop!( AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_2], + bvec![token_1.clone(), token_2.clone()], 51, // amount_in 1, // amount_out_min user, @@ -1343,6 +1521,197 @@ fn swap_when_existential_deposit_would_cause_reaping_but_keep_alive_set() { ), DispatchError::Token(TokenError::NotExpendable) ); + + assert_noop!( + AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_2.clone(), token_1.clone()], + 51, // amount_out + 2, // amount_in_max + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_2.clone(), token_1.clone()], + 2, // amount_in + 1, // amount_out_min + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + }); +} + +#[test] +fn swap_when_existential_deposit_would_cause_reaping_pool_account() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrWithId::Native; + let token_2 = 
NativeOrWithId::WithId(2); + let token_3 = NativeOrWithId::WithId(3); + + let ed_assets = 100; + create_tokens_with_ed(user2, vec![token_2.clone(), token_3.clone()], ed_assets); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user2), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user2), + Box::new(token_1.clone()), + Box::new(token_3.clone()) + )); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user2), + Box::new(token_2.clone()), + Box::new(token_3.clone()) + )); + + let ed = get_native_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + ed)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 2, user2, 400 + ed_assets)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 3, user2, 20000 + ed_assets)); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 2, user, 400 + ed_assets)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 3, user, 20000 + ed_assets)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user2), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + 10000, + 200, + 1, + 1, + user2, + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user2), + Box::new(token_1.clone()), + Box::new(token_3.clone()), + 200, + 10000, + 1, + 1, + user2, + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user2), + Box::new(token_2.clone()), + Box::new(token_3.clone()), + 200, + 10000, + 1, + 1, + user2, + )); + + // causes an account removal for asset token 2 + assert_noop!( + AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_1.clone(), token_2.clone()], + 110, // amount_out + 20000, // amount_in_max + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + + // causes an account removal for asset token 2 + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_1.clone(), token_2.clone()], + 15000, // amount_in + 110, // amount_out_min + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + + // causes an account removal for native token 1 + assert_noop!( + AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_3.clone(), token_1.clone()], + 110, // amount_out + 20000, // amount_in_max + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + + // causes an account removal for native token 1 + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_3.clone(), token_1.clone()], + 15000, // amount_in + 110, // amount_out_min + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + + // causes an account removal for native token 1 locate in the middle of a swap path + let amount_in = AssetConversion::balance_path_from_amount_out( + 110, + vec![token_3.clone(), token_1.clone()], + ) + .unwrap() + .first() + .map(|(_, a)| *a) + .unwrap(); + + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_3.clone(), token_1.clone(), token_2.clone()], + amount_in, // amount_in + 1, // amount_out_min + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + + // causes an account removal for asset token 2 locate in the middle of a swap path 
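// (`balance_path_from_amount_out` returns the swap path annotated with the amount that
// must flow through each hop to produce the requested output, so taking `.first()`
// below yields the exact input required at the start of the path; it is reused here so
// that the middle hop would pull the intermediate pool's reserve below its minimum
// balance, which is what trips `NotExpendable`.)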
+ let amount_in = AssetConversion::balance_path_from_amount_out( + 110, + vec![token_1.clone(), token_2.clone()], + ) + .unwrap() + .first() + .map(|(_, a)| *a) + .unwrap(); + + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_1.clone(), token_2.clone(), token_3.clone()], + amount_in, // amount_in + 1, // amount_out_min + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); }); } @@ -1350,13 +1719,21 @@ fn swap_when_existential_deposit_would_cause_reaping_but_keep_alive_set() { fn swap_tokens_for_exact_tokens_should_not_work_if_too_much_slippage() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + get_ed())); + assert_ok!(Balances::force_set_balance( + RuntimeOrigin::root(), + user, + 20000 + get_native_ed() + )); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); let liquidity1 = 10000; @@ -1364,8 +1741,8 @@ fn swap_tokens_for_exact_tokens_should_not_work_if_too_much_slippage() { assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), liquidity1, liquidity2, 1, @@ -1378,7 +1755,7 @@ fn swap_tokens_for_exact_tokens_should_not_work_if_too_much_slippage() { assert_noop!( AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_2], + bvec![token_1.clone(), token_2.clone()], exchange_out, // amount_out 50, // amount_in_max just greater than slippage. 
user, @@ -1393,15 +1770,23 @@ fn swap_tokens_for_exact_tokens_should_not_work_if_too_much_slippage() { fn swap_exact_tokens_for_tokens_in_multi_hops() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let token_3 = NativeOrAssetId::Asset(3); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let token_3 = NativeOrWithId::WithId(3); - create_tokens(user, vec![token_2, token_3]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_3)); + create_tokens(user, vec![token_2.clone(), token_3.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_2.clone()), + Box::new(token_3.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); let base1 = 10000; let base2 = 10000; assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, base1 * 2 + ed)); @@ -1414,8 +1799,8 @@ fn swap_exact_tokens_for_tokens_in_multi_hops() { assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), liquidity1, liquidity2, 1, @@ -1424,8 +1809,8 @@ fn swap_exact_tokens_for_tokens_in_multi_hops() { )); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_2, - token_3, + Box::new(token_2.clone()), + Box::new(token_3.clone()), liquidity2, liquidity3, 1, @@ -1444,7 +1829,7 @@ fn swap_exact_tokens_for_tokens_in_multi_hops() { assert_noop!( AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![token_1], + bvec![token_1.clone()], input_amount, 80, user, @@ -1456,7 +1841,7 @@ fn swap_exact_tokens_for_tokens_in_multi_hops() { assert_noop!( AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_2, token_3, token_2], + bvec![token_1.clone(), token_2.clone(), token_3.clone(), token_2.clone()], input_amount, 80, user, @@ -1467,24 +1852,24 @@ fn swap_exact_tokens_for_tokens_in_multi_hops() { assert_ok!(AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_2, token_3], + bvec![token_1.clone(), token_2.clone(), token_3.clone()], input_amount, // amount_in 80, // amount_out_min user, true, )); - let pool_id1 = (token_1, token_2); - let pool_id2 = (token_2, token_3); - let pallet_account1 = AssetConversion::get_pool_account(&pool_id1); - let pallet_account2 = AssetConversion::get_pool_account(&pool_id2); - - assert_eq!(balance(user, token_1), base1 + ed - input_amount); - assert_eq!(balance(pallet_account1, token_1), liquidity1 + input_amount); - assert_eq!(balance(pallet_account1, token_2), liquidity2 - expect_out2); - assert_eq!(balance(pallet_account2, token_2), liquidity2 + expect_out2); - assert_eq!(balance(pallet_account2, token_3), liquidity3 - expect_out3); - assert_eq!(balance(user, token_3), 10000 - liquidity3 + expect_out3); + let pool_id1 = (token_1.clone(), token_2.clone()); + let pool_id2 = (token_2.clone(), token_3.clone()); + let pallet_account1 = ::PoolLocator::address(&pool_id1).unwrap(); + let pallet_account2 = ::PoolLocator::address(&pool_id2).unwrap(); + + assert_eq!(balance(user, token_1.clone()), base1 + ed - input_amount); + 
assert_eq!(balance(pallet_account1, token_1.clone()), liquidity1 + input_amount); + assert_eq!(balance(pallet_account1, token_2.clone()), liquidity2 - expect_out2); + assert_eq!(balance(pallet_account2, token_2.clone()), liquidity2 + expect_out2); + assert_eq!(balance(pallet_account2, token_3.clone()), liquidity3 - expect_out3); + assert_eq!(balance(user, token_3.clone()), 10000 - liquidity3 + expect_out3); }); } @@ -1492,15 +1877,23 @@ fn swap_exact_tokens_for_tokens_in_multi_hops() { fn swap_tokens_for_exact_tokens_in_multi_hops() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); - let token_3 = NativeOrAssetId::Asset(3); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let token_3 = NativeOrWithId::WithId(3); - create_tokens(user, vec![token_2, token_3]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_3)); + create_tokens(user, vec![token_2.clone(), token_3.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_2.clone()), + Box::new(token_3.clone()) + )); - let ed = get_ed(); + let ed = get_native_ed(); let base1 = 10000; let base2 = 10000; assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, base1 * 2 + ed)); @@ -1513,8 +1906,8 @@ fn swap_tokens_for_exact_tokens_in_multi_hops() { assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), liquidity1, liquidity2, 1, @@ -1523,8 +1916,8 @@ fn swap_tokens_for_exact_tokens_in_multi_hops() { )); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_2, - token_3, + Box::new(token_2.clone()), + Box::new(token_3.clone()), liquidity2, liquidity3, 1, @@ -1542,24 +1935,24 @@ fn swap_tokens_for_exact_tokens_in_multi_hops() { assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_2, token_3], + bvec![token_1.clone(), token_2.clone(), token_3.clone()], exchange_out3, // amount_out 1000, // amount_in_max user, true, )); - let pool_id1 = (token_1, token_2); - let pool_id2 = (token_2, token_3); - let pallet_account1 = AssetConversion::get_pool_account(&pool_id1); - let pallet_account2 = AssetConversion::get_pool_account(&pool_id2); - - assert_eq!(balance(user, token_1), base1 + ed - expect_in1); - assert_eq!(balance(pallet_account1, token_1), liquidity1 + expect_in1); - assert_eq!(balance(pallet_account1, token_2), liquidity2 - expect_in2); - assert_eq!(balance(pallet_account2, token_2), liquidity2 + expect_in2); - assert_eq!(balance(pallet_account2, token_3), liquidity3 - exchange_out3); - assert_eq!(balance(user, token_3), 10000 - liquidity3 + exchange_out3); + let pool_id1 = (token_1.clone(), token_2.clone()); + let pool_id2 = (token_2.clone(), token_3.clone()); + let pallet_account1 = ::PoolLocator::address(&pool_id1).unwrap(); + let pallet_account2 = ::PoolLocator::address(&pool_id2).unwrap(); + + assert_eq!(balance(user, token_1.clone()), base1 + ed - expect_in1); + assert_eq!(balance(pallet_account1, token_1.clone()), liquidity1 + expect_in1); + assert_eq!(balance(pallet_account1, token_2.clone()), liquidity2 - expect_in2); + 
assert_eq!(balance(pallet_account2, token_2.clone()), liquidity2 + expect_in2); + assert_eq!(balance(pallet_account2, token_3.clone()), liquidity3 - exchange_out3); + assert_eq!(balance(user, token_3.clone()), 10000 - liquidity3 + exchange_out3); }); } @@ -1567,9 +1960,10 @@ fn swap_tokens_for_exact_tokens_in_multi_hops() { fn can_not_swap_same_asset() { new_test_ext().execute_with(|| { let user = 1; - let token_1 = NativeOrAssetId::Asset(1); + let token_1 = NativeOrWithId::WithId(1); + let token_2 = NativeOrWithId::Native; - create_tokens(user, vec![token_1]); + create_tokens(user, vec![token_1.clone()]); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 1, user, 1000)); let liquidity1 = 1000; @@ -1577,60 +1971,44 @@ fn can_not_swap_same_asset() { assert_noop!( AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_1, + Box::new(token_1.clone()), + Box::new(token_1.clone()), liquidity1, liquidity2, 1, 1, user, ), - Error::::PoolNotFound + Error::::InvalidAssetPair ); let exchange_amount = 10; assert_noop!( AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![token_1, token_1], + bvec![token_1.clone(), token_1.clone()], exchange_amount, 1, user, true, ), - Error::::PoolNotFound + Error::::InvalidAssetPair ); assert_noop!( AssetConversion::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), - bvec![NativeOrAssetId::Native, NativeOrAssetId::Native], + bvec![token_2.clone(), token_2.clone()], exchange_amount, 1, user, true, ), - Error::::PoolNotFound + Error::::InvalidAssetPair ); }); } -#[test] -fn validate_pool_id_sorting() { - new_test_ext().execute_with(|| { - use crate::NativeOrAssetId::{Asset, Native}; - assert_eq!(AssetConversion::get_pool_id(Native, Asset(2)), (Native, Asset(2))); - assert_eq!(AssetConversion::get_pool_id(Asset(2), Native), (Native, Asset(2))); - assert_eq!(AssetConversion::get_pool_id(Native, Native), (Native, Native)); - assert_eq!(AssetConversion::get_pool_id(Asset(2), Asset(1)), (Asset(1), Asset(2))); - assert!(Asset(2) > Asset(1)); - assert!(Asset(1) <= Asset(1)); - assert_eq!(Asset(1), Asset(1)); - assert_eq!(Native::, Native::); - assert!(Native < Asset(1)); - }); -} - #[test] fn cannot_block_pool_creation() { new_test_ext().execute_with(|| { @@ -1639,16 +2017,16 @@ fn cannot_block_pool_creation() { // User 2 is the attacker let attacker = 2; - let ed = get_ed(); + let ed = get_native_ed(); assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), attacker, 10000 + ed)); - // The target pool the user wants to create is Native <=> Asset(2) - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(2); + // The target pool the user wants to create is Native <=> WithId(2) + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); // Attacker computes the still non-existing pool account for the target pair let pool_account = - AssetConversion::get_pool_account(&AssetConversion::get_pool_id(token_2, token_1)); + ::PoolLocator::address(&(token_1.clone(), token_2.clone())).unwrap(); // And transfers the ED to that pool account assert_ok!(Balances::transfer_allow_death( RuntimeOrigin::signed(attacker), @@ -1657,17 +2035,21 @@ fn cannot_block_pool_creation() { )); // Then, the attacker creates 14 tokens and sends one of each to the pool account for i in 10..25 { - create_tokens(attacker, vec![NativeOrAssetId::Asset(i)]); + create_tokens(attacker, vec![NativeOrWithId::WithId(i)]); assert_ok!(Assets::mint(RuntimeOrigin::signed(attacker), i, attacker, 1000)); 
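		// Each of these dust transfers opens an `Assets` account for the not-yet-created
		// pool account and adds to its consumer references; the assertions below check that
		// this cannot be abused to block `create_pool` or `add_liquidity` for the legitimate
		// user.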
assert_ok!(Assets::transfer(RuntimeOrigin::signed(attacker), i, pool_account, 1)); } // User can still create the pool - create_tokens(user, vec![token_2]); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); - // User has to transfer one Asset(2) token to the pool account (otherwise add_liquidity will - // fail with `AssetTwoDepositDidNotMeetMinimum`) + // User has to transfer one WithId(2) token to the pool account (otherwise add_liquidity + // will fail with `AssetTwoDepositDidNotMeetMinimum`) assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + ed)); assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 10000)); assert_ok!(Assets::transfer(RuntimeOrigin::signed(user), 2, pool_account, 1)); @@ -1675,8 +2057,8 @@ fn cannot_block_pool_creation() { // add_liquidity shouldn't fail because of the number of consumers assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(user), - token_1, - token_2, + Box::new(token_1.clone()), + Box::new(token_2.clone()), 10000, 100, 10000, @@ -1685,3 +2067,429 @@ fn cannot_block_pool_creation() { )); }); } + +#[test] +fn swap_transactional() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let token_3 = NativeOrWithId::WithId(3); + + let asset_ed = 150; + create_tokens_with_ed(user, vec![token_2.clone(), token_3.clone()], asset_ed); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_3.clone()) + )); + + let ed = get_native_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 3, user, 1000)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user2, 1000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 3, user2, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_3.clone()), + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + let pool_1 = + ::PoolLocator::address(&(token_1.clone(), token_2.clone())).unwrap(); + let pool_2 = + ::PoolLocator::address(&(token_1.clone(), token_3.clone())).unwrap(); + + assert_eq!(Balances::balance(&pool_1), liquidity1); + assert_eq!(Assets::balance(2, pool_1), liquidity2); + assert_eq!(Balances::balance(&pool_2), liquidity1); + assert_eq!(Assets::balance(3, pool_2), liquidity2); + + // the amount that would cause a transfer from the last pool in the path to fail + let expected_out = liquidity2 - asset_ed + 1; + let amount_in = AssetConversion::balance_path_from_amount_out( + expected_out, + vec![token_2.clone(), token_1.clone(), token_3.clone()], + ) + .unwrap() + .first() + .map(|(_, 
a)| *a) + .unwrap(); + + // swap credit with `swap_tokens_for_exact_tokens` transactional + let credit_in = NativeAndAssets::issue(token_2.clone(), amount_in); + let credit_in_err_expected = NativeAndAssets::issue(token_2.clone(), amount_in); + // avoiding drop of any credit, to assert any storage mutation from an actual call. + let error; + assert_storage_noop!( + error = >::swap_tokens_for_exact_tokens( + vec![token_2.clone(), token_1.clone(), token_3.clone()], + credit_in, + expected_out, + ) + .unwrap_err() + ); + assert_eq!(error, (credit_in_err_expected, TokenError::NotExpendable.into())); + + // swap credit with `swap_exact_tokens_for_tokens` transactional + let credit_in = NativeAndAssets::issue(token_2.clone(), amount_in); + let credit_in_err_expected = NativeAndAssets::issue(token_2.clone(), amount_in); + // avoiding drop of any credit, to assert any storage mutation from an actual call. + let error; + assert_storage_noop!( + error = >::swap_exact_tokens_for_tokens( + vec![token_2.clone(), token_1.clone(), token_3.clone()], + credit_in, + Some(expected_out), + ) + .unwrap_err() + ); + assert_eq!(error, (credit_in_err_expected, TokenError::NotExpendable.into())); + + // swap with `swap_exact_tokens_for_tokens` transactional + assert_noop!( + >::swap_exact_tokens_for_tokens( + user2, + vec![token_2.clone(), token_1.clone(), token_3.clone()], + amount_in, + Some(expected_out), + user2, + true, + ), + TokenError::NotExpendable + ); + + // swap with `swap_exact_tokens_for_tokens` transactional + assert_noop!( + >::swap_tokens_for_exact_tokens( + user2, + vec![token_2.clone(), token_1.clone(), token_3.clone()], + expected_out, + Some(amount_in), + user2, + true, + ), + TokenError::NotExpendable + ); + + assert_eq!(Balances::balance(&pool_1), liquidity1); + assert_eq!(Assets::balance(2, pool_1), liquidity2); + assert_eq!(Balances::balance(&pool_2), liquidity1); + assert_eq!(Assets::balance(3, pool_2), liquidity2); + }) +} + +#[test] +fn swap_credit_returns_change() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + let ed = get_native_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + let expected_change = NativeAndAssets::issue(token_1.clone(), 100); + let expected_credit_out = NativeAndAssets::issue(token_2.clone(), 20); + + let amount_in_max = + AssetConversion::get_amount_in(&expected_credit_out.peek(), &liquidity1, &liquidity2) + .unwrap(); + + let credit_in = + NativeAndAssets::issue(token_1.clone(), amount_in_max + expected_change.peek()); + assert_ok!( + >::swap_tokens_for_exact_tokens( + vec![token_1.clone(), token_2.clone()], + credit_in, + expected_credit_out.peek(), + ), + (expected_credit_out, expected_change) + ); + }) +} + +#[test] +fn swap_credit_insufficient_amount_bounds() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + + create_tokens(user, 
vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + let ed = get_native_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user2, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + // provided `credit_in` is not sufficient to swap for desired `amount_out_min` + let amount_out_min = 20; + let amount_in = + AssetConversion::get_amount_in(&(amount_out_min - 1), &liquidity2, &liquidity1) + .unwrap(); + let credit_in = NativeAndAssets::issue(token_1.clone(), amount_in); + let expected_credit_in = NativeAndAssets::issue(token_1.clone(), amount_in); + let error = >::swap_exact_tokens_for_tokens( + vec![token_1.clone(), token_2.clone()], + credit_in, + Some(amount_out_min), + ) + .unwrap_err(); + assert_eq!( + error, + (expected_credit_in, Error::::ProvidedMinimumNotSufficientForSwap.into()) + ); + + // provided `credit_in` is not sufficient to swap for desired `amount_out` + let amount_out = 20; + let amount_in_max = + AssetConversion::get_amount_in(&(amount_out - 1), &liquidity2, &liquidity1).unwrap(); + let credit_in = NativeAndAssets::issue(token_1.clone(), amount_in_max); + let expected_credit_in = NativeAndAssets::issue(token_1.clone(), amount_in_max); + let error = >::swap_tokens_for_exact_tokens( + vec![token_1.clone(), token_2.clone()], + credit_in, + amount_out, + ) + .unwrap_err(); + assert_eq!( + error, + (expected_credit_in, Error::::ProvidedMaximumNotSufficientForSwap.into()) + ); + }) +} + +#[test] +fn swap_credit_zero_amount() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + let ed = get_native_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user2, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + // swap with zero credit fails for `swap_exact_tokens_for_tokens` + let credit_in = CreditOf::::zero(token_1.clone()); + let expected_credit_in = CreditOf::::zero(token_1.clone()); + let error = >::swap_exact_tokens_for_tokens( + vec![token_1.clone(), token_2.clone()], + credit_in, + None, + ) + .unwrap_err(); + assert_eq!(error, (expected_credit_in, Error::::ZeroAmount.into())); + + // swap with zero credit fails for `swap_tokens_for_exact_tokens` + let credit_in = CreditOf::::zero(token_1.clone()); + let expected_credit_in = CreditOf::::zero(token_1.clone()); + let error = 
>::swap_tokens_for_exact_tokens( + vec![token_1.clone(), token_2.clone()], + credit_in, + 10, + ) + .unwrap_err(); + assert_eq!(error, (expected_credit_in, Error::::ZeroAmount.into())); + + // swap with zero amount_out_min fails for `swap_exact_tokens_for_tokens` + let credit_in = NativeAndAssets::issue(token_1.clone(), 10); + let expected_credit_in = NativeAndAssets::issue(token_1.clone(), 10); + let error = >::swap_exact_tokens_for_tokens( + vec![token_1.clone(), token_2.clone()], + credit_in, + Some(0), + ) + .unwrap_err(); + assert_eq!(error, (expected_credit_in, Error::::ZeroAmount.into())); + + // swap with zero amount_out fails with `swap_tokens_for_exact_tokens` fails + let credit_in = NativeAndAssets::issue(token_1.clone(), 10); + let expected_credit_in = NativeAndAssets::issue(token_1.clone(), 10); + let error = >::swap_tokens_for_exact_tokens( + vec![token_1.clone(), token_2.clone()], + credit_in, + 0, + ) + .unwrap_err(); + assert_eq!(error, (expected_credit_in, Error::::ZeroAmount.into())); + }); +} + +#[test] +fn swap_credit_invalid_path() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + + create_tokens(user, vec![token_2.clone()]); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + let ed = get_native_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user2, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + // swap with credit_in.asset different from path[0] asset fails + let credit_in = NativeAndAssets::issue(token_1.clone(), 10); + let expected_credit_in = NativeAndAssets::issue(token_1.clone(), 10); + let error = >::swap_exact_tokens_for_tokens( + vec![token_2.clone(), token_1.clone()], + credit_in, + None, + ) + .unwrap_err(); + assert_eq!(error, (expected_credit_in, Error::::InvalidPath.into())); + + // swap with credit_in.asset different from path[0] asset fails + let credit_in = NativeAndAssets::issue(token_2.clone(), 10); + let expected_credit_in = NativeAndAssets::issue(token_2.clone(), 10); + let error = >::swap_tokens_for_exact_tokens( + vec![token_1.clone(), token_2.clone()], + credit_in, + 10, + ) + .unwrap_err(); + assert_eq!(error, (expected_credit_in, Error::::InvalidPath.into())); + + // swap with path.len < 2 fails + let credit_in = NativeAndAssets::issue(token_1.clone(), 10); + let expected_credit_in = NativeAndAssets::issue(token_1.clone(), 10); + let error = >::swap_exact_tokens_for_tokens( + vec![token_2.clone()], + credit_in, + None, + ) + .unwrap_err(); + assert_eq!(error, (expected_credit_in, Error::::InvalidPath.into())); + + // swap with path.len < 2 fails + let credit_in = NativeAndAssets::issue(token_2.clone(), 10); + let expected_credit_in = NativeAndAssets::issue(token_2.clone(), 10); + let error = + >::swap_tokens_for_exact_tokens(vec![], credit_in, 10) + .unwrap_err(); + assert_eq!(error, (expected_credit_in, Error::::InvalidPath.into())); + }); +} diff --git 
a/substrate/frame/asset-conversion/src/types.rs b/substrate/frame/asset-conversion/src/types.rs index ffdc63ce0ce7b75c0eb111de6fb1547161fdaccf..fd6d41a55b6139b74453861fd553f0d30d307146 100644 --- a/substrate/frame/asset-conversion/src/types.rs +++ b/substrate/frame/asset-conversion/src/types.rs @@ -16,16 +16,22 @@ // limitations under the License. use super::*; - use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_std::{cmp::Ordering, marker::PhantomData}; +use sp_std::marker::PhantomData; -/// Pool ID. +/// Represents a swap path with associated asset amounts indicating how much of the asset needs to +/// be deposited to get the following asset's amount withdrawn (this is inclusive of fees). /// -/// The pool's `AccountId` is derived from this type. Any changes to the type may necessitate a -/// migration. -pub(super) type PoolIdOf = (::MultiAssetId, ::MultiAssetId); +/// Example: +/// Given path [(asset1, amount_in), (asset2, amount_out2), (asset3, amount_out3)], can be resolved: +/// 1. `asset(asset1, amount_in)` take from `user` and move to the pool(asset1, asset2); +/// 2. `asset(asset2, amount_out2)` transfer from pool(asset1, asset2) to pool(asset2, asset3); +/// 3. `asset(asset3, amount_out3)` move from pool(asset2, asset3) to `user`. +pub(super) type BalancePath = Vec<(::AssetKind, ::Balance)>; + +/// Credit of [Config::Assets]. +pub type CreditOf = Credit<::AccountId, ::Assets>; /// Stores the lp_token asset id a particular pool has been assigned. #[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] @@ -34,155 +40,94 @@ pub struct PoolInfo { pub lp_token: PoolAssetId, } -/// A trait that converts between a MultiAssetId and either the native currency or an AssetId. -pub trait MultiAssetIdConverter { - /// Returns the MultiAssetId representing the native currency of the chain. - fn get_native() -> MultiAssetId; - - /// Returns true if the given MultiAssetId is the native currency. - fn is_native(asset: &MultiAssetId) -> bool; - - /// If it's not native, returns the AssetId for the given MultiAssetId. - fn try_convert(asset: &MultiAssetId) -> MultiAssetIdConversionResult; -} - -/// Result of `MultiAssetIdConverter::try_convert`. -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] -pub enum MultiAssetIdConversionResult { - /// Input asset is successfully converted. Means that converted asset is supported. - Converted(AssetId), - /// Means that input asset is the chain's native asset, if it has one, so no conversion (see - /// `MultiAssetIdConverter::get_native`). - Native, - /// Means input asset is not supported for pool. - Unsupported(MultiAssetId), -} - -/// Benchmark Helper -#[cfg(feature = "runtime-benchmarks")] -pub trait BenchmarkHelper { - /// Returns an `AssetId` from a given integer. - fn asset_id(asset_id: u32) -> AssetId; - - /// Returns a `MultiAssetId` from a given integer. - fn multiasset_id(asset_id: u32) -> MultiAssetId; +/// Provides means to resolve the `PoolId` and `AccountId` from a pair of assets. +/// +/// Resulting `PoolId` remains consistent whether the asset pair is presented as (asset1, asset2) +/// or (asset2, asset1). The derived `AccountId` may serve as an address for liquidity provider +/// tokens. +pub trait PoolLocator { + /// Retrieves the account address associated with a valid `PoolId`. + fn address(id: &PoolId) -> Result; + /// Identifies the `PoolId` for a given pair of assets. + /// + /// Returns an error if the asset pair isn't supported. 
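	// A minimal usage sketch (assuming a runtime `T: Config` and the `Native` / `WithId(2)`
	// asset kinds used by the tests above; `pool_id` is order-insensitive and `address`
	// derives the account holding the pool's reserves, which may also serve as the address
	// for its liquidity provider tokens):
	//
	//     let id = <T as Config>::PoolLocator::pool_id(&Native, &WithId(2)).unwrap();
	//     let pool_account = <T as Config>::PoolLocator::address(&id).unwrap();
	//     // or in one step, via the provided default method:
	//     let pool_account = <T as Config>::PoolLocator::pool_address(&Native, &WithId(2)).unwrap();
	//
	// The implementations added below could be combined as, for example,
	// `Chain<WithFirstAsset<Native, AccountId, AssetKind>, Ascending<AccountId, AssetKind>>`,
	// pinning native pools to `(Native, asset)` and falling back to ascending order for
	// pure asset pairs.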
+ fn pool_id(asset1: &AssetKind, asset2: &AssetKind) -> Result; + /// Retrieves the account address associated with a given asset pair. + /// + /// Returns an error if the asset pair isn't supported. + fn pool_address(asset1: &AssetKind, asset2: &AssetKind) -> Result { + if let Ok(id) = Self::pool_id(asset1, asset2) { + Self::address(&id) + } else { + Err(()) + } + } } -#[cfg(feature = "runtime-benchmarks")] -impl BenchmarkHelper for () +/// Pool locator that mandates the inclusion of the specified `FirstAsset` in every asset pair. +/// +/// The `PoolId` is represented as a tuple of `AssetKind`s with `FirstAsset` always positioned as +/// the first element. +pub struct WithFirstAsset( + PhantomData<(FirstAsset, AccountId, AssetKind)>, +); +impl PoolLocator + for WithFirstAsset where - AssetId: From, - MultiAssetId: From, + AssetKind: Eq + Clone + Encode, + AccountId: Decode, + FirstAsset: Get, { - fn asset_id(asset_id: u32) -> AssetId { - asset_id.into() + fn pool_id(asset1: &AssetKind, asset2: &AssetKind) -> Result<(AssetKind, AssetKind), ()> { + let first = FirstAsset::get(); + match true { + _ if asset1 == asset2 => Err(()), + _ if first == *asset1 => Ok((first, asset2.clone())), + _ if first == *asset2 => Ok((first, asset1.clone())), + _ => Err(()), + } } - - fn multiasset_id(asset_id: u32) -> MultiAssetId { - asset_id.into() + fn address(id: &(AssetKind, AssetKind)) -> Result { + let encoded = sp_io::hashing::blake2_256(&Encode::encode(id)[..]); + Decode::decode(&mut TrailingZeroInput::new(encoded.as_ref())).map_err(|_| ()) } } -/// Trait for providing methods to swap between the various asset classes. -pub trait Swap { - /// Swap exactly `amount_in` of asset `path[0]` for asset `path[1]`. - /// If an `amount_out_min` is specified, it will return an error if it is unable to acquire - /// the amount desired. - /// - /// Withdraws the `path[0]` asset from `sender`, deposits the `path[1]` asset to `send_to`, - /// respecting `keep_alive`. - /// - /// If successful, returns the amount of `path[1]` acquired for the `amount_in`. - fn swap_exact_tokens_for_tokens( - sender: AccountId, - path: Vec, - amount_in: Balance, - amount_out_min: Option, - send_to: AccountId, - keep_alive: bool, - ) -> Result; - - /// Take the `path[0]` asset and swap some amount for `amount_out` of the `path[1]`. If an - /// `amount_in_max` is specified, it will return an error if acquiring `amount_out` would be - /// too costly. - /// - /// Withdraws `path[0]` asset from `sender`, deposits `path[1]` asset to `send_to`, - /// respecting `keep_alive`. - /// - /// If successful returns the amount of the `path[0]` taken to provide `path[1]`. - fn swap_tokens_for_exact_tokens( - sender: AccountId, - path: Vec, - amount_out: Balance, - amount_in_max: Option, - send_to: AccountId, - keep_alive: bool, - ) -> Result; -} - -/// An implementation of MultiAssetId that can be either Native or an asset. -#[derive(Decode, Encode, Default, MaxEncodedLen, TypeInfo, Clone, Copy, Debug)] -pub enum NativeOrAssetId +/// Pool locator where the `PoolId` is a tuple of `AssetKind`s arranged in ascending order. +pub struct Ascending(PhantomData<(AccountId, AssetKind)>); +impl PoolLocator + for Ascending where - AssetId: Ord, + AssetKind: Ord + Clone + Encode, + AccountId: Decode, { - /// Native asset. For example, on the Polkadot Asset Hub this would be DOT. - #[default] - Native, - /// A non-native asset id. 
- Asset(AssetId), -} - -impl From for NativeOrAssetId { - fn from(asset: AssetId) -> Self { - Self::Asset(asset) - } -} - -impl Ord for NativeOrAssetId { - fn cmp(&self, other: &Self) -> Ordering { - match (self, other) { - (Self::Native, Self::Native) => Ordering::Equal, - (Self::Native, Self::Asset(_)) => Ordering::Less, - (Self::Asset(_), Self::Native) => Ordering::Greater, - (Self::Asset(id1), Self::Asset(id2)) => ::cmp(id1, id2), + fn pool_id(asset1: &AssetKind, asset2: &AssetKind) -> Result<(AssetKind, AssetKind), ()> { + match true { + _ if asset1 > asset2 => Ok((asset2.clone(), asset1.clone())), + _ if asset1 < asset2 => Ok((asset1.clone(), asset2.clone())), + _ => Err(()), } } -} -impl PartialOrd for NativeOrAssetId { - fn partial_cmp(&self, other: &Self) -> Option { - Some(::cmp(self, other)) - } -} -impl PartialEq for NativeOrAssetId { - fn eq(&self, other: &Self) -> bool { - self.cmp(other) == Ordering::Equal + fn address(id: &(AssetKind, AssetKind)) -> Result { + let encoded = sp_io::hashing::blake2_256(&Encode::encode(id)[..]); + Decode::decode(&mut TrailingZeroInput::new(encoded.as_ref())).map_err(|_| ()) } } -impl Eq for NativeOrAssetId {} -/// Converts between a MultiAssetId and an AssetId (or the native currency). -pub struct NativeOrAssetIdConverter { - _phantom: PhantomData, -} - -impl MultiAssetIdConverter, AssetId> - for NativeOrAssetIdConverter +/// Pool locator that chains the `First` and `Second` implementations of [`PoolLocator`]. +/// +/// If the `First` implementation fails, it falls back to the `Second`. +pub struct Chain(PhantomData<(First, Second)>); +impl PoolLocator + for Chain +where + First: PoolLocator, + Second: PoolLocator, { - fn get_native() -> NativeOrAssetId { - NativeOrAssetId::Native - } - - fn is_native(asset: &NativeOrAssetId) -> bool { - *asset == Self::get_native() + fn pool_id(asset1: &AssetKind, asset2: &AssetKind) -> Result<(AssetKind, AssetKind), ()> { + First::pool_id(asset1, asset2).or(Second::pool_id(asset1, asset2)) } - - fn try_convert( - asset: &NativeOrAssetId, - ) -> MultiAssetIdConversionResult, AssetId> { - match asset { - NativeOrAssetId::Asset(asset) => MultiAssetIdConversionResult::Converted(asset.clone()), - NativeOrAssetId::Native => MultiAssetIdConversionResult::Native, - } + fn address(id: &(AssetKind, AssetKind)) -> Result { + First::address(id).or(Second::address(id)) } } diff --git a/substrate/frame/asset-conversion/src/weights.rs b/substrate/frame/asset-conversion/src/weights.rs index 550878ba0be96ba13e0dc2aef6d685cd3ec257b7..a0e687f7a4168032c59daf8616249776a123c363 100644 --- a/substrate/frame/asset-conversion/src/weights.rs +++ b/substrate/frame/asset-conversion/src/weights.rs @@ -15,29 +15,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_asset_conversion +//! Autogenerated weights for `pallet_asset_conversion` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-30, STEPS: `5`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-gghbxkbs-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate +// ./target/debug/substrate-node // benchmark // pallet -// --steps=50 -// --repeat=20 +// --chain=dev +// --steps=5 +// --repeat=2 +// --pallet=pallet-asset-conversion // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_asset_conversion -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/asset-conversion/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/asset-conversion/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,25 +45,25 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_asset_conversion. +/// Weight functions needed for `pallet_asset_conversion`. pub trait WeightInfo { fn create_pool() -> Weight; fn add_liquidity() -> Weight; fn remove_liquidity() -> Weight; - fn swap_exact_tokens_for_tokens() -> Weight; - fn swap_tokens_for_exact_tokens() -> Weight; + fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight; + fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight; } -/// Weights for pallet_asset_conversion using the Substrate node and recommended hardware. +/// Weights for `pallet_asset_conversion` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) + /// Storage: `Assets::Account` (r:2 w:2) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) + /// Storage: `Assets::Asset` (r:2 w:2) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -75,20 +73,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `729` - // Estimated: `6196` - // Minimum execution time: 131_688_000 picoseconds. - Weight::from_parts(134_092_000, 6196) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) + // Measured: `1081` + // Estimated: `6360` + // Minimum execution time: 1_576_000_000 picoseconds. 
+ Weight::from_parts(1_668_000_000, 6360) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(10_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) + /// Storage: `Assets::Asset` (r:2 w:2) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) + /// Storage: `Assets::Account` (r:4 w:4) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) @@ -96,20 +92,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1382` - // Estimated: `6208` - // Minimum execution time: 157_310_000 picoseconds. - Weight::from_parts(161_547_000, 6208) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(7_u64)) + // Measured: `1761` + // Estimated: `11426` + // Minimum execution time: 1_636_000_000 picoseconds. + Weight::from_parts(1_894_000_000, 11426) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) + /// Storage: `Assets::Asset` (r:2 w:2) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) + /// Storage: `Assets::Account` (r:4 w:4) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) @@ -117,42 +111,46 @@ impl WeightInfo for SubstrateWeight { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn remove_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1371` - // Estimated: `6208` - // Minimum execution time: 142_769_000 picoseconds. - Weight::from_parts(145_139_000, 6208) - .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Measured: `1750` + // Estimated: `11426` + // Minimum execution time: 1_507_000_000 picoseconds. 
+ Weight::from_parts(1_524_000_000, 11426) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:3 w:3) + /// Storage: `Assets::Asset` (r:4 w:4) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:6 w:6) + /// Storage: `Assets::Account` (r:8 w:8) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn swap_exact_tokens_for_tokens() -> Weight { + /// The range of component `n` is `[2, 4]`. + fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1738` - // Estimated: `16644` - // Minimum execution time: 213_186_000 picoseconds. - Weight::from_parts(217_471_000, 16644) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(10_u64)) + // Measured: `0 + n * (522 ±0)` + // Estimated: `990 + n * (5218 ±0)` + // Minimum execution time: 937_000_000 picoseconds. + Weight::from_parts(941_000_000, 990) + // Standard Error: 40_863_477 + .saturating_add(Weight::from_parts(205_862_068, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) } - /// Storage: `Assets::Asset` (r:3 w:3) + /// Storage: `Assets::Asset` (r:4 w:4) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:6 w:6) + /// Storage: `Assets::Account` (r:8 w:8) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn swap_tokens_for_exact_tokens() -> Weight { + /// The range of component `n` is `[2, 4]`. + fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1738` - // Estimated: `16644` - // Minimum execution time: 213_793_000 picoseconds. - Weight::from_parts(218_584_000, 16644) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(10_u64)) + // Measured: `0 + n * (522 ±0)` + // Estimated: `990 + n * (5218 ±0)` + // Minimum execution time: 935_000_000 picoseconds. 
+ Weight::from_parts(947_000_000, 990) + // Standard Error: 46_904_620 + .saturating_add(Weight::from_parts(218_275_862, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) } } @@ -162,9 +160,9 @@ impl WeightInfo for () { /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) + /// Storage: `Assets::Account` (r:2 w:2) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) + /// Storage: `Assets::Asset` (r:2 w:2) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -174,20 +172,18 @@ impl WeightInfo for () { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `729` - // Estimated: `6196` - // Minimum execution time: 131_688_000 picoseconds. - Weight::from_parts(134_092_000, 6196) - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) + // Measured: `1081` + // Estimated: `6360` + // Minimum execution time: 1_576_000_000 picoseconds. + Weight::from_parts(1_668_000_000, 6360) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(10_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) + /// Storage: `Assets::Asset` (r:2 w:2) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) + /// Storage: `Assets::Account` (r:4 w:4) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) @@ -195,20 +191,18 @@ impl WeightInfo for () { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1382` - // Estimated: `6208` - // Minimum execution time: 157_310_000 picoseconds. - Weight::from_parts(161_547_000, 6208) - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().writes(7_u64)) + // Measured: `1761` + // Estimated: `11426` + // Minimum execution time: 1_636_000_000 picoseconds. 
+ Weight::from_parts(1_894_000_000, 11426) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) + /// Storage: `Assets::Asset` (r:2 w:2) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) + /// Storage: `Assets::Account` (r:4 w:4) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) @@ -216,41 +210,45 @@ impl WeightInfo for () { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn remove_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1371` - // Estimated: `6208` - // Minimum execution time: 142_769_000 picoseconds. - Weight::from_parts(145_139_000, 6208) - .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Measured: `1750` + // Estimated: `11426` + // Minimum execution time: 1_507_000_000 picoseconds. + Weight::from_parts(1_524_000_000, 11426) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(8_u64)) } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:3 w:3) + /// Storage: `Assets::Asset` (r:4 w:4) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:6 w:6) + /// Storage: `Assets::Account` (r:8 w:8) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn swap_exact_tokens_for_tokens() -> Weight { + /// The range of component `n` is `[2, 4]`. + fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1738` - // Estimated: `16644` - // Minimum execution time: 213_186_000 picoseconds. - Weight::from_parts(217_471_000, 16644) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(10_u64)) + // Measured: `0 + n * (522 ±0)` + // Estimated: `990 + n * (5218 ±0)` + // Minimum execution time: 937_000_000 picoseconds. 
+ Weight::from_parts(941_000_000, 990) + // Standard Error: 40_863_477 + .saturating_add(Weight::from_parts(205_862_068, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) } - /// Storage: `Assets::Asset` (r:3 w:3) + /// Storage: `Assets::Asset` (r:4 w:4) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:6 w:6) + /// Storage: `Assets::Account` (r:8 w:8) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn swap_tokens_for_exact_tokens() -> Weight { + /// The range of component `n` is `[2, 4]`. + fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1738` - // Estimated: `16644` - // Minimum execution time: 213_793_000 picoseconds. - Weight::from_parts(218_584_000, 16644) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(10_u64)) + // Measured: `0 + n * (522 ±0)` + // Estimated: `990 + n * (5218 ±0)` + // Minimum execution time: 935_000_000 picoseconds. + Weight::from_parts(947_000_000, 990) + // Standard Error: 46_904_620 + .saturating_add(Weight::from_parts(218_275_862, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) } } diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 734bc5ef43f5780ad023748f6e7720d9b6a1322b..835a15e8c553ad73df116d7660ef293e6bb3f2da 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -8,6 +8,9 @@ edition.workspace = true license = "Apache-2.0" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,20 +19,20 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false, optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false, optional = true } [dev-dependencies] pallet-balances = { path = "../balances" } sp-io = { path = "../../primitives/io" } 
-sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/asset-rate/src/benchmarking.rs b/substrate/frame/asset-rate/src/benchmarking.rs index 21d53a89e3976987a54c3ce03e8fdc91260f2330..6fcc7c7fadb24d0a367ba15520d2822abe08d89f 100644 --- a/substrate/frame/asset-rate/src/benchmarking.rs +++ b/substrate/frame/asset-rate/src/benchmarking.rs @@ -25,7 +25,6 @@ use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_core::crypto::FromEntropy; -use sp_std::vec; /// Trait describing the factory function for the `AssetKind` parameter. pub trait AssetKindFactory { diff --git a/substrate/frame/asset-rate/src/mock.rs b/substrate/frame/asset-rate/src/mock.rs index 9ca0f0f3cc388641eb27a148d405268e4b294552..041f37409528059a2e6b47549b2f263818eaaa87 100644 --- a/substrate/frame/asset-rate/src/mock.rs +++ b/substrate/frame/asset-rate/src/mock.rs @@ -18,7 +18,10 @@ //! The crate's mock. use crate as pallet_asset_rate; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU16, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -36,6 +39,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index a48964f13668005aa6db14f82026adc603645de5..7b0af2421eaad34c0c48020bb2d24022d231bc33 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME asset management pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,15 +19,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { path = "../../primitives/runtime", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } # Needed for type-safe access to storage DB. -frame-support = { path = "../support", default-features = false} +frame-support = { path = "../support", default-features = false } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
-frame-system = { path = "../system", default-features = false} +frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } [dev-dependencies] sp-std = { path = "../../primitives/std" } @@ -32,7 +35,7 @@ sp-io = { path = "../../primitives/io" } pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/assets/src/benchmarking.rs b/substrate/frame/assets/src/benchmarking.rs index c9b0825542de718ddf6393a38d6819f16f6f8178..8fe5a7e2493ae112b8f93d4cf961a8031a352846 100644 --- a/substrate/frame/assets/src/benchmarking.rs +++ b/substrate/frame/assets/src/benchmarking.rs @@ -54,7 +54,7 @@ fn create_default_asset, I: 'static>( (asset_id, caller, caller_lookup) } -fn create_default_minted_asset, I: 'static>( +pub fn create_default_minted_asset, I: 'static>( is_sufficient: bool, amount: T::Balance, ) -> (T::AssetIdParameter, T::AccountId, AccountIdLookupOf) { @@ -102,7 +102,7 @@ fn add_sufficients, I: 'static>(minter: T::AccountId, n: u32) { fn add_approvals, I: 'static>(minter: T::AccountId, n: u32) { let asset_id = default_asset_id::(); - T::Currency::deposit_creating( + let _ = T::Currency::deposit_creating( &minter, T::ApprovalDeposit::get() * n.into() + T::Currency::minimum_balance(), ); diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 79e4fe3001872fe5c5471277c0888376d2f42e89..f3ae03d667b4ea22a963d31a2b1a1bb5a006da2d 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -141,7 +141,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +pub mod benchmarking; pub mod migration; #[cfg(test)] pub mod mock; @@ -1648,8 +1648,20 @@ pub mod pallet { T::AssetAccountDeposit::get() } - fn touch(asset: T::AssetId, who: T::AccountId, depositor: T::AccountId) -> DispatchResult { - Self::do_touch(asset, who, depositor, false) + fn should_touch(asset: T::AssetId, who: &T::AccountId) -> bool { + match Asset::::get(&asset) { + Some(info) if info.is_sufficient => false, + Some(_) => !Account::::contains_key(asset, who), + _ => true, + } + } + + fn touch( + asset: T::AssetId, + who: &T::AccountId, + depositor: &T::AccountId, + ) -> DispatchResult { + Self::do_touch(asset, who.clone(), depositor.clone(), false) } } diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index 2c2203bcdada253106a2df6cbc361a513148f521..a4d85b64922f004d2d764319f97508813e76a6ae 100644 --- a/substrate/frame/assets/src/mock.rs +++ b/substrate/frame/assets/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_assets; use codec::Encode; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; @@ -46,6 +46,7 @@ construct_runtime!( type AccountId = u64; type AssetId = u32; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index 
f1b116a0f4a0d4ec612aed393290f5f18adb8ddd..e09648a51eccd09408b4549cc28315c8052d9ae5 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -28,6 +28,8 @@ use pallet_balances::Error as BalancesError; use sp_io::storage; use sp_runtime::{traits::ConvertInto, TokenError}; +mod sets; + fn asset_ids() -> Vec { let mut s: Vec<_> = Assets::asset_ids().collect(); s.sort(); diff --git a/substrate/frame/assets/src/tests/sets.rs b/substrate/frame/assets/src/tests/sets.rs new file mode 100644 index 0000000000000000000000000000000000000000..bdff5175185f940eaa9bd29b6195f31088925e18 --- /dev/null +++ b/substrate/frame/assets/src/tests/sets.rs @@ -0,0 +1,346 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for [`ItemOf`], [`fungible::UnionOf`] and [`fungibles::UnionOf`] set types. + +use super::*; +use frame_support::{ + parameter_types, + traits::{ + fungible, + fungible::ItemOf, + fungibles, + tokens::{ + fungibles::{ + Balanced as FungiblesBalanced, Create as FungiblesCreate, + Inspect as FungiblesInspect, Mutate as FungiblesMutate, + }, + Fortitude, Precision, Preservation, + }, + }, +}; +use sp_runtime::{traits::ConvertToValue, Either}; + +const FIRST_ASSET: u32 = 0; +const UNKNOWN_ASSET: u32 = 10; + +parameter_types! { + pub const LeftAsset: Either<(), u32> = Either::Left(()); + pub const RightAsset: Either = Either::Right(()); + pub const RightUnitAsset: Either<(), ()> = Either::Right(()); +} + +/// Implementation of the `fungible` traits through the [`ItemOf`] type, specifically for a +/// single asset class from [`T`] identified by [`FIRST_ASSET`]. +type FirstFungible = ItemOf, u64>; + +/// Implementation of the `fungible` traits through the [`ItemOf`] type, specifically for a +/// single asset class from [`T`] identified by [`UNKNOWN_ASSET`]. +type UnknownFungible = ItemOf, u64>; + +/// Implementation of `fungibles` traits using [`fungibles::UnionOf`] that exclusively utilizes +/// the [`FirstFungible`] from the left. +type LeftFungible = fungible::UnionOf, T, ConvertToValue, (), u64>; + +/// Implementation of `fungibles` traits using [`fungibles::UnionOf`] that exclusively utilizes +/// the [`LeftFungible`] from the right. +type RightFungible = + fungible::UnionOf, LeftFungible, ConvertToValue, (), u64>; + +/// Implementation of `fungibles` traits using [`fungibles::UnionOf`] that exclusively utilizes +/// the [`RightFungible`] from the left. +type LeftFungibles = fungibles::UnionOf, T, ConvertToValue, (), u64>; + +/// Implementation of `fungibles` traits using [`fungibles::UnionOf`] that exclusively utilizes +/// the [`LeftFungibles`] from the right. +/// +/// By using this type, we can navigate through each branch of [`fungible::UnionOf`], +/// [`fungibles::UnionOf`], and [`ItemOf`] to access the underlying `fungibles::*` +/// implementation provided by the pallet. 
+type First = fungibles::UnionOf, ConvertToValue, (), u64>; + +#[test] +fn deposit_from_set_types_works() { + new_test_ext().execute_with(|| { + let asset1 = 0; + let account1 = 1; + let account2 = 2; + + assert_ok!(>::create(asset1, account1, true, 1)); + assert_ok!(Assets::mint_into(asset1, &account1, 100)); + + assert_eq!(First::::total_issuance(()), 100); + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + + let imb = First::::deposit((), &account2, 50, Precision::Exact).unwrap(); + assert_eq!(First::::balance((), &account2), 50); + assert_eq!(First::::total_issuance(()), 100); + + assert_eq!(imb.peek(), 50); + + let (imb1, imb2) = imb.split(30); + assert_eq!(imb1.peek(), 30); + assert_eq!(imb2.peek(), 20); + + drop(imb2); + assert_eq!(First::::total_issuance(()), 120); + + assert!(First::::settle(&account1, imb1, Preservation::Preserve).is_ok()); + assert_eq!(First::::balance((), &account1), 70); + assert_eq!(First::::balance((), &account2), 50); + assert_eq!(First::::total_issuance(()), 120); + + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + }); +} + +#[test] +fn issue_from_set_types_works() { + new_test_ext().execute_with(|| { + let asset1: u32 = 0; + let account1: u64 = 1; + + assert_ok!(>::create(asset1, account1, true, 1)); + assert_ok!(Assets::mint_into(asset1, &account1, 100)); + + assert_eq!(First::::balance((), &account1), 100); + assert_eq!(First::::total_issuance(()), 100); + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + + let imb = First::::issue((), 100); + assert_eq!(First::::total_issuance(()), 200); + assert_eq!(imb.peek(), 100); + + let (imb1, imb2) = imb.split(30); + assert_eq!(imb1.peek(), 30); + assert_eq!(imb2.peek(), 70); + + drop(imb2); + assert_eq!(First::::total_issuance(()), 130); + + assert!(First::::resolve(&account1, imb1).is_ok()); + assert_eq!(First::::balance((), &account1), 130); + assert_eq!(First::::total_issuance(()), 130); + + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + }); +} + +#[test] +fn pair_from_set_types_works() { + new_test_ext().execute_with(|| { + let asset1: u32 = 0; + let account1: u64 = 1; + + assert_ok!(>::create(asset1, account1, true, 1)); + assert_ok!(Assets::mint_into(asset1, &account1, 100)); + + assert_eq!(First::::balance((), &account1), 100); + assert_eq!(First::::total_issuance(()), 100); + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + + let (debt, credit) = First::::pair((), 100); + assert_eq!(First::::total_issuance(()), 100); + assert_eq!(debt.peek(), 100); + assert_eq!(credit.peek(), 100); + + let (debt1, debt2) = debt.split(30); + assert_eq!(debt1.peek(), 30); + assert_eq!(debt2.peek(), 70); + + drop(debt2); + assert_eq!(First::::total_issuance(()), 170); + + assert!(First::::settle(&account1, debt1, Preservation::Preserve).is_ok()); + assert_eq!(First::::balance((), &account1), 70); + assert_eq!(First::::total_issuance(()), 170); + + let (credit1, credit2) = credit.split(40); + assert_eq!(credit1.peek(), 40); + assert_eq!(credit2.peek(), 60); + + drop(credit2); + assert_eq!(First::::total_issuance(()), 110); + + assert!(First::::resolve(&account1, credit1).is_ok()); + assert_eq!(First::::balance((), &account1), 110); + assert_eq!(First::::total_issuance(()), 110); + + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + }); +} + +#[test] +fn rescind_from_set_types_works() { + new_test_ext().execute_with(|| { + let asset1: u32 = 0; + let account1: u64 = 1; + 
+ assert_ok!(>::create(asset1, account1, true, 1)); + assert_ok!(Assets::mint_into(asset1, &account1, 100)); + + assert_eq!(First::::total_issuance(()), 100); + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + + let imb = First::::rescind((), 20); + assert_eq!(First::::total_issuance(()), 80); + + assert_eq!(imb.peek(), 20); + + let (imb1, imb2) = imb.split(15); + assert_eq!(imb1.peek(), 15); + assert_eq!(imb2.peek(), 5); + + drop(imb2); + assert_eq!(First::::total_issuance(()), 85); + + assert!(First::::settle(&account1, imb1, Preservation::Preserve).is_ok()); + assert_eq!(First::::balance((), &account1), 85); + assert_eq!(First::::total_issuance(()), 85); + + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + }); +} + +#[test] +fn resolve_from_set_types_works() { + new_test_ext().execute_with(|| { + let asset1: u32 = 0; + let account1: u64 = 1; + let account2: u64 = 2; + let ed = 11; + + assert_ok!(>::create(asset1, account1, true, ed)); + assert_ok!(Assets::mint_into(asset1, &account1, 100)); + + assert_eq!(First::::balance((), &account1), 100); + assert_eq!(First::::total_issuance(()), 100); + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + + let imb = First::::issue((), 100); + assert_eq!(First::::total_issuance(()), 200); + assert_eq!(imb.peek(), 100); + + let (imb1, imb2) = imb.split(10); + assert_eq!(imb1.peek(), 10); + assert_eq!(imb2.peek(), 90); + assert_eq!(First::::total_issuance(()), 200); + + // ed requirements not met. + let imb1 = First::::resolve(&account2, imb1).unwrap_err(); + assert_eq!(imb1.peek(), 10); + drop(imb1); + assert_eq!(First::::total_issuance(()), 190); + assert_eq!(First::::balance((), &account2), 0); + + // resolve to new account `2`. + assert_ok!(First::::resolve(&account2, imb2)); + assert_eq!(First::::total_issuance(()), 190); + assert_eq!(First::::balance((), &account2), 90); + + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + }); +} + +#[test] +fn settle_from_set_types_works() { + new_test_ext().execute_with(|| { + let asset1: u32 = 0; + let account1: u64 = 1; + let account2: u64 = 2; + let ed = 11; + + assert_ok!(>::create(asset1, account1, true, ed)); + assert_ok!(Assets::mint_into(asset1, &account1, 100)); + assert_ok!(Assets::mint_into(asset1, &account2, 100)); + + assert_eq!(First::::balance((), &account2), 100); + assert_eq!(First::::total_issuance(()), 200); + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + + let imb = First::::rescind((), 100); + assert_eq!(First::::total_issuance(()), 100); + assert_eq!(imb.peek(), 100); + + let (imb1, imb2) = imb.split(10); + assert_eq!(imb1.peek(), 10); + assert_eq!(imb2.peek(), 90); + assert_eq!(First::::total_issuance(()), 100); + + // ed requirements not met. + let imb2 = First::::settle(&account2, imb2, Preservation::Preserve).unwrap_err(); + assert_eq!(imb2.peek(), 90); + drop(imb2); + assert_eq!(First::::total_issuance(()), 190); + assert_eq!(First::::balance((), &account2), 100); + + // settle to account `1`. + assert_ok!(First::::settle(&account2, imb1, Preservation::Preserve)); + assert_eq!(First::::total_issuance(()), 190); + assert_eq!(First::::balance((), &account2), 90); + + let imb = First::::rescind((), 85); + assert_eq!(First::::total_issuance(()), 105); + assert_eq!(imb.peek(), 85); + + // settle to account `1` and expect some dust. 
+ let imb = First::::settle(&account2, imb, Preservation::Expendable).unwrap(); + assert_eq!(imb.peek(), 5); + assert_eq!(First::::total_issuance(()), 105); + assert_eq!(First::::balance((), &account2), 0); + + drop(imb); + assert_eq!(First::::total_issuance(()), 100); + + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + }); +} + +#[test] +fn withdraw_from_set_types_works() { + new_test_ext().execute_with(|| { + let asset1 = 0; + let account1 = 1; + let account2 = 2; + + assert_ok!(>::create(asset1, account1, true, 1)); + assert_ok!(Assets::mint_into(asset1, &account1, 100)); + assert_ok!(Assets::mint_into(asset1, &account2, 100)); + + assert_eq!(First::::total_issuance(()), 200); + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + + let imb = First::::withdraw( + (), + &account2, + 50, + Precision::Exact, + Preservation::Preserve, + Fortitude::Polite, + ) + .unwrap(); + assert_eq!(First::::balance((), &account2), 50); + assert_eq!(First::::total_issuance(()), 200); + + assert_eq!(imb.peek(), 50); + drop(imb); + assert_eq!(First::::total_issuance(()), 150); + assert_eq!(First::::balance((), &account2), 50); + + assert_eq!(First::::total_issuance(()), Assets::total_issuance(asset1)); + }); +} diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index 8315330d7fee7333512e8233b26a4f526e073d69..d34779d8bc09b23990ae1fff1f0606ca4ee2da3f 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -9,24 +9,27 @@ repository.workspace = true description = "FRAME atomic swap pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs index 92eb9a04458575521ae25d2b76a1383f89c3a50a..7e2f22b18360ee83d776fc8b1bfd0cf473c5b1e2 100644 --- a/substrate/frame/atomic-swap/src/tests.rs +++ b/substrate/frame/atomic-swap/src/tests.rs @@ -20,7 +20,10 @@ use super::*; use crate as pallet_atomic_swap; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -38,6 +41,7 @@ frame_support::construct_runtime!( } ); 
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index bfe9193e9b528fa17da86cf85ab4e3c43a093621..e2419933a20ea3d2b41134764884128c04b33e14 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME AURA consensus pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,20 +19,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-timestamp = { path = "../timestamp", default-features = false} -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false} -sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-timestamp = { path = "../timestamp", default-features = false } +sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } +sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/aura/src/mock.rs b/substrate/frame/aura/src/mock.rs index 39b798c2f6841a98e34cc8dce3704be945ae54f7..14b87089ce391f35c530b2883d6006be64893365 100644 --- a/substrate/frame/aura/src/mock.rs +++ b/substrate/frame/aura/src/mock.rs @@ -21,7 +21,7 @@ use crate as pallet_aura; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, DisabledValidators}, }; use sp_consensus_aura::{ed25519::AuthorityId, AuthorityIndex}; @@ -41,6 +41,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index eb30ed3007c79e04be5e447e4458bdf867c1e671..a18199657443c2895a21f714fa0da856aaa3eb51 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet for authority discovery" readme = 
"README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,22 +20,22 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } pallet-session = { path = "../session", default-features = false, features = [ "historical", -]} -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false} -sp-authority-discovery = { path = "../../primitives/authority-discovery", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +] } +sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } +sp-authority-discovery = { path = "../../primitives/authority-discovery", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs index 87b743ae1967705f894dfbeacf840d1ac40c9122..3044b41e31de26baaa19c6596b7603755864dfdc 100644 --- a/substrate/frame/authority-discovery/src/lib.rs +++ b/substrate/frame/authority-discovery/src/lib.rs @@ -169,7 +169,7 @@ mod tests { use super::*; use crate as pallet_authority_discovery; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_application_crypto::Pair; @@ -225,6 +225,7 @@ mod tests { pub const Offset: BlockNumber = 0; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml index bc1e6221a4589615e52be6fa63892b4841373d1f..41d4cf139721d7cd157cb88c9d2b5a34135898ea 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository.workspace = true readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,17 +21,17 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = 
false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index 2dc414a784d308d03cdd28d86111754ef33748ab..639b9544b0c5fac66f53a6b0067f37535790bd5f 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Consensus extension module for BABE consensus. Collects on-chain randomness from VRF outputs and manages epoch transitions." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,20 +19,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-authorship = { path = "../authorship", default-features = false} -pallet-session = { path = "../session", default-features = false} -pallet-timestamp = { path = "../timestamp", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-authorship = { path = "../authorship", default-features = false } +pallet-session = { path = "../session", default-features = false } +pallet-timestamp = { path = "../timestamp", default-features = false } sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false} +sp-session = { path = "../../primitives/session", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] frame-election-provider-support = { path = "../election-provider-support" } @@ -40,7 +43,7 @@ pallet-staking-reward-curve = { path = "../staking/reward-curve" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index 57e1dbb6b53c4a543e93f955d1d71bd04f144ae3..a6e44390dbc534e15e3a1658513c6ce9fa25088e 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ 
-384,7 +384,11 @@ pub mod pallet { }); public - .make_bytes(RANDOMNESS_VRF_CONTEXT, &transcript, &signature.output) + .make_bytes( + RANDOMNESS_VRF_CONTEXT, + &transcript, + &signature.pre_output, + ) .ok() }); diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 0003c6f9f11a1e3e1636cff00769cf114bad3d38..72abbc805db1a742b08aa3feb9b8f9b0b513600e 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -183,6 +183,7 @@ impl pallet_staking::Config for Test { type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type HistoryDepth = ConstU32<84>; type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index cb07ef94ff59699e6755811c4fd12c1d769a104f..198af21be81a50e83fe89bc22afbaa0722d83783 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet bags list" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -50,7 +53,7 @@ frame-election-provider-support = { path = "../election-provider-support" } frame-benchmarking = { path = "../benchmarking" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/bags-list/fuzzer/Cargo.toml b/substrate/frame/bags-list/fuzzer/Cargo.toml index 9944c8865548fff5b3578823a4bfc174559a7b69..20760141b23612bee46b8a79ce40874dfdef8a7b 100644 --- a/substrate/frame/bags-list/fuzzer/Cargo.toml +++ b/substrate/frame/bags-list/fuzzer/Cargo.toml @@ -9,11 +9,14 @@ repository.workspace = true description = "Fuzzer for FRAME pallet bags list" publish = false +[lints] +workspace = true + [dependencies] honggfuzz = "0.5" -rand = { version = "0.8", features = ["std", "small_rng"] } -frame-election-provider-support = { path = "../../election-provider-support", features = ["fuzz"]} -pallet-bags-list = { path = "..", features = ["fuzz"]} +rand = { version = "0.8", features = ["small_rng", "std"] } +frame-election-provider-support = { path = "../../election-provider-support", features = ["fuzz"] } +pallet-bags-list = { path = "..", features = ["fuzz"] } [[bin]] name = "bags-list" diff --git a/substrate/frame/bags-list/remote-tests/Cargo.toml b/substrate/frame/bags-list/remote-tests/Cargo.toml index b7408e08d55ff08926bde2a37b286e92cee68204..fb61a9867783a32084dc363f6ac18c76ed4f3e10 100644 --- a/substrate/frame/bags-list/remote-tests/Cargo.toml +++ b/substrate/frame/bags-list/remote-tests/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet bags list remote test" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -28,7 +31,7 @@ sp-runtime = { path = "../../../primitives/runtime" } sp-std = { path = "../../../primitives/std" } # utils -remote-externalities = { package = "frame-remote-externalities" , path = "../../../utils/frame/remote-externalities" } +remote-externalities = { package = "frame-remote-externalities", path = "../../../utils/frame/remote-externalities" } # others log = "0.4.17" diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 
b91257df7b20d319bd0aa7d29fd6f2190ec144fe..23fe6e5832222b23b9c8b97fcab81688327e2221 100644
--- a/substrate/frame/balances/Cargo.toml
+++ b/substrate/frame/balances/Cargo.toml
@@ -9,6 +9,9 @@ repository.workspace = true
 description = "FRAME pallet to manage balances"
 readme = "README.md"
 
+[lints]
+workspace = true
+
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
 
@@ -16,11 +19,11 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 log = { version = "0.4.17", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true}
-frame-support = { path = "../support", default-features = false}
-frame-system = { path = "../system", default-features = false}
-sp-runtime = { path = "../../primitives/runtime", default-features = false}
-sp-std = { path = "../../primitives/std", default-features = false}
+frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
+frame-support = { path = "../support", default-features = false }
+frame-system = { path = "../system", default-features = false }
+sp-runtime = { path = "../../primitives/runtime", default-features = false }
+sp-std = { path = "../../primitives/std", default-features = false }
 
 [dev-dependencies]
 pallet-transaction-payment = { path = "../transaction-payment" }
@@ -29,7 +32,7 @@ sp-io = { path = "../../primitives/io" }
 paste = "1.0.12"
 
 [features]
-default = [ "std" ]
+default = ["std"]
 std = [
 	"codec/std",
 	"frame-benchmarking?/std",
diff --git a/substrate/frame/balances/src/impl_fungible.rs b/substrate/frame/balances/src/impl_fungible.rs
index fc8c2d71f256eebd0d77b1a675f7677fc6326b43..6737727e0a2977c7a186f4cfd845d994e4e82bc2 100644
--- a/substrate/frame/balances/src/impl_fungible.rs
+++ b/substrate/frame/balances/src/impl_fungible.rs
@@ -17,10 +17,13 @@
 //! Implementation of `fungible` traits for Balances pallet.
 
 use super::*;
-use frame_support::traits::tokens::{
-	Fortitude,
-	Preservation::{self, Preserve, Protect},
-	Provenance::{self, Minted},
+use frame_support::traits::{
+	tokens::{
+		Fortitude,
+		Preservation::{self, Preserve, Protect},
+		Provenance::{self, Minted},
+	},
+	AccountTouch,
 };
 
 impl<T: Config<I>, I: 'static> fungible::Inspect<T::AccountId> for Pallet<T, I> {
@@ -356,3 +359,16 @@ impl<T: Config<I>, I: 'static> fungible::Balanced<T::AccountId> for Pallet<T, I>
 }
 
 impl<T: Config<I>, I: 'static> fungible::BalancedHold<T::AccountId> for Pallet<T, I> {}
+
+impl<T: Config<I>, I: 'static> AccountTouch<(), T::AccountId> for Pallet<T, I> {
+	type Balance = T::Balance;
+	fn deposit_required(_: ()) -> Self::Balance {
+		Self::Balance::zero()
+	}
+	fn should_touch(_: (), _: &T::AccountId) -> bool {
+		false
+	}
+	fn touch(_: (), _: &T::AccountId, _: &T::AccountId) -> DispatchResult {
+		Ok(())
+	}
+}
diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs
index d518f933df8dba7c0d8855be69052e6b98e8c46b..843bc351494e3fcad309e600c4571b6e307b8f53 100644
--- a/substrate/frame/balances/src/lib.rs
+++ b/substrate/frame/balances/src/lib.rs
@@ -49,8 +49,7 @@
 //! - **Total Issuance:** The total number of units in existence in a system.
 //!
 //! - **Reaping an account:** The act of removing an account by resetting its nonce. Happens after
-//! its
-//! total balance has become zero (or, strictly speaking, less than the Existential Deposit).
+//!
its total balance has become zero (or, strictly speaking, less than the Existential Deposit). //! //! - **Free Balance:** The portion of a balance that is not reserved. The free balance is the only //! balance that matters for most operations. @@ -59,24 +58,23 @@ //! Reserved balance can still be slashed, but only after all the free balance has been slashed. //! //! - **Imbalance:** A condition when some funds were credited or debited without equal and opposite -//! accounting -//! (i.e. a difference between total issuance and account balances). Functions that result in an -//! imbalance will return an object of the `Imbalance` trait that can be managed within your runtime -//! logic. (If an imbalance is simply dropped, it should automatically maintain any book-keeping -//! such as total issuance.) +//! accounting (i.e. a difference between total issuance and account balances). Functions that +//! result in an imbalance will return an object of the `Imbalance` trait that can be managed within +//! your runtime logic. (If an imbalance is simply dropped, it should automatically maintain any +//! book-keeping such as total issuance.) //! //! - **Lock:** A freeze on a specified amount of an account's free balance until a specified block -//! number. Multiple -//! locks always operate over the same funds, so they "overlay" rather than "stack". +//! number. Multiple locks always operate over the same funds, so they "overlay" rather than +//! "stack". //! //! ### Implementations //! //! The Balances pallet provides implementations for the following traits. If these traits provide //! the functionality that you need, then you can avoid coupling with the Balances pallet. //! -//! - [`Currency`](frame_support::traits::Currency): Functions for dealing with a +//! - [`Currency`]: Functions for dealing with a //! fungible assets system. -//! - [`ReservableCurrency`](frame_support::traits::ReservableCurrency): +//! - [`ReservableCurrency`] //! - [`NamedReservableCurrency`](frame_support::traits::NamedReservableCurrency): //! Functions for dealing with assets that can be reserved from an account. //! - [`LockableCurrency`](frame_support::traits::LockableCurrency): Functions for @@ -105,7 +103,7 @@ //! ``` //! use frame_support::traits::Currency; //! # pub trait Config: frame_system::Config { -//! # type Currency: Currency; +//! # type Currency: Currency; //! # } //! //! pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -120,26 +118,26 @@ //! use frame_support::traits::{WithdrawReasons, LockableCurrency}; //! use sp_runtime::traits::Bounded; //! pub trait Config: frame_system::Config { -//! type Currency: LockableCurrency>; +//! type Currency: LockableCurrency>; //! } //! # struct StakingLedger { -//! # stash: ::AccountId, -//! # total: <::Currency as frame_support::traits::Currency<::AccountId>>::Balance, -//! # phantom: std::marker::PhantomData, +//! # stash: ::AccountId, +//! # total: <::Currency as frame_support::traits::Currency<::AccountId>>::Balance, +//! # phantom: std::marker::PhantomData, //! # } //! # const STAKING_ID: [u8; 8] = *b"staking "; //! //! fn update_ledger( -//! controller: &T::AccountId, -//! ledger: &StakingLedger +//! controller: &T::AccountId, +//! ledger: &StakingLedger //! ) { -//! T::Currency::set_lock( -//! STAKING_ID, -//! &ledger.stash, -//! ledger.total, -//! WithdrawReasons::all() -//! ); -//! // >::insert(controller, ledger); // Commented out as we don't have access to Staking's storage here. +//! T::Currency::set_lock( +//! STAKING_ID, +//! 
&ledger.stash, +//! ledger.total, +//! WithdrawReasons::all() +//! ); +//! // >::insert(controller, ledger); // Commented out as we don't have access to Staking's storage here. //! } //! # fn main() {} //! ``` diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index dd3e5b7a85a2eeeb470822612dabdcb548888489..8e834483cbe61b55218f82307b8569f576deb2cf 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -22,7 +22,7 @@ use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - assert_err, assert_noop, assert_ok, assert_storage_noop, + assert_err, assert_noop, assert_ok, assert_storage_noop, derive_impl, dispatch::{DispatchInfo, GetDispatchInfo}, parameter_types, traits::{ @@ -90,6 +90,8 @@ parameter_types! { ); pub static ExistentialDeposit: u64 = 1; } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index fe0321bea5167184eefc9c7e2c0265c3342d9d4d..7f647305456e29785b3aa1430e2a08d314f5bd03 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -8,32 +8,35 @@ description = "BEEFY + MMR runtime utilities" repository.workspace = true homepage = "https://substrate.io" +[lints] +workspace = true + [dependencies] array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } -binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-beefy = { path = "../beefy", default-features = false} -pallet-mmr = { path = "../merkle-mountain-range", default-features = false} -pallet-session = { path = "../session", default-features = false} -sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-state-machine = { path = "../../primitives/state-machine", default-features = false} +serde = { version = "1.0.193", optional = true } +binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-beefy = { path = "../beefy", default-features = false } +pallet-mmr = { path = "../merkle-mountain-range", default-features = false } +pallet-session = { path = "../session", default-features = false } +sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false } +sp-core = { path = 
"../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-state-machine = { path = "../../primitives/state-machine", default-features = false } [dev-dependencies] array-bytes = "6.1" sp-staking = { path = "../../primitives/staking" } [features] -default = [ "std" ] +default = ["std"] std = [ "array-bytes", "binary-merkle-tree/std", diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs index a0bf7cdcf86a28a139693e596938169e95df029b..fa3caba7977d31767253763aac1f1bbba5c7d67e 100644 --- a/substrate/frame/beefy-mmr/src/lib.rs +++ b/substrate/frame/beefy-mmr/src/lib.rs @@ -36,6 +36,7 @@ use sp_runtime::traits::{Convert, Member}; use sp_std::prelude::*; +use codec::Decode; use pallet_mmr::{LeafDataProvider, ParentNumberAndHash}; use sp_consensus_beefy::{ mmr::{BeefyAuthoritySet, BeefyDataProvider, BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}, @@ -226,7 +227,7 @@ sp_api::decl_runtime_apis! { /// API useful for BEEFY light clients. pub trait BeefyMmrApi where - BeefyAuthoritySet: sp_api::Decode, + BeefyAuthoritySet: Decode, { /// Return the currently active BEEFY authority set proof. fn authority_set_proof() -> BeefyAuthoritySet; diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 1da09321342e261046b358aab323996f5bd9893f..185e548741421cdfacad7b55f9b70dbd71bb6c4f 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -8,20 +8,23 @@ repository.workspace = true description = "BEEFY FRAME pallet" homepage = "https://substrate.io" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.188", optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-authorship = { path = "../authorship", default-features = false} -pallet-session = { path = "../session", default-features = false} +serde = { version = "1.0.193", optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-authorship = { path = "../authorship", default-features = false } +pallet-session = { path = "../session", default-features = false } sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false, features = ["serde"] } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false} +sp-session = { path = "../../primitives/session", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] frame-election-provider-support = { path = "../election-provider-support" } @@ -33,10 +36,10 @@ pallet-timestamp = { path = "../timestamp" } sp-core 
= { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } sp-staking = { path = "../../primitives/staking" } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false} +sp-state-machine = { path = "../../primitives/state-machine", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-election-provider-support/std", diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 53d523cf724d92e2cb4b307fa485f1b02a8ebc31..8828fa3621853a8f3b5a2b0828fc386859ac45fc 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -37,10 +37,7 @@ use sp_state_machine::BasicExternalities; use crate as pallet_beefy; -pub use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId as BeefyId, AuthoritySignature as BeefySignature}, - ConsensusLog, EquivocationProof, BEEFY_ENGINE_ID, -}; +pub use sp_consensus_beefy::{ecdsa_crypto::AuthorityId as BeefyId, ConsensusLog, BEEFY_ENGINE_ID}; impl_opaque_keys! { pub struct MockSessionKeys { @@ -201,6 +198,7 @@ impl pallet_staking::Config for Test { type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type HistoryDepth = ConstU32<84>; type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index 79f35f6262591513ec8c0bf46b36280f03caa255..980f70a57746f0eb2bdd3b6d2b430d5a0fff7d81 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Macro for benchmarking a FRAME runtime." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,18 +21,18 @@ linregress = { version = "0.5.1", optional = true } log = { version = "0.4.17", default-features = false } paste = "1.0" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } -frame-support = { path = "../support", default-features = false} -frame-support-procedural = { path = "../support/procedural", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-storage = { path = "../../primitives/storage", default-features = false} +serde = { version = "1.0.193", optional = true } +frame-support = { path = "../support", default-features = false } +frame-support-procedural = { path = "../support/procedural", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-storage = { path = "../../primitives/storage", default-features = false } static_assertions = "1.1.0" [dev-dependencies] @@ -38,7 +41,7 @@ rusty-fork = { version = "0.3.0", default-features = false } sp-keystore = { path = "../../primitives/keystore" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support-procedural/std", diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index 0d935063e9e466aa6299cb1c9122b6801805ca7f..7c36b2f8eec3e0bf793afea0a94e8b0e1b515b3d 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -8,21 +8,24 @@ homepage = "https://substrate.io" repository.workspace = true description = "Pallet for testing FRAME PoV benchmarking" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "..", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", 
default-features = false} +frame-benchmarking = { path = "..", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/benchmarking/pov/src/benchmarking.rs b/substrate/frame/benchmarking/pov/src/benchmarking.rs index 473947b171ac56b9501d2edba24c0108d91d7cc3..a24b772ade017cb9d8185efc46173d94e7ae7ac0 100644 --- a/substrate/frame/benchmarking/pov/src/benchmarking.rs +++ b/substrate/frame/benchmarking/pov/src/benchmarking.rs @@ -339,6 +339,7 @@ frame_benchmarking::benchmarks! { #[cfg(test)] mod mock { + use frame_support::derive_impl; use sp_runtime::{testing::H256, BuildStorage}; type AccountId = u64; @@ -354,6 +355,7 @@ mod mock { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/benchmarking/pov/src/tests.rs b/substrate/frame/benchmarking/pov/src/tests.rs index f09e37a5288a99da74076ee3183fe049d29690c4..dda29c071dec7d42bd0f6c74f18ab1eca9e8822e 100644 --- a/substrate/frame/benchmarking/pov/src/tests.rs +++ b/substrate/frame/benchmarking/pov/src/tests.rs @@ -162,6 +162,7 @@ fn noop_is_free() { } mod mock { + use frame_support::derive_impl; use sp_runtime::testing::H256; type Block = frame_system::mocking::MockBlock; @@ -174,6 +175,7 @@ mod mock { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/benchmarking/src/baseline.rs b/substrate/frame/benchmarking/src/baseline.rs index 6cd23ebe028a348fc7733c57e8490c5e448797e6..6451284e012b47c686113fc4931ac07b5c1d5055 100644 --- a/substrate/frame/benchmarking/src/baseline.rs +++ b/substrate/frame/benchmarking/src/baseline.rs @@ -110,6 +110,7 @@ benchmarks! 
{ #[cfg(test)] pub mod mock { + use frame_support::derive_impl; use sp_runtime::{testing::H256, BuildStorage}; type AccountId = u64; @@ -124,6 +125,7 @@ pub mod mock { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/benchmarking/src/tests.rs b/substrate/frame/benchmarking/src/tests.rs index e5bacbdb2361ae223cb0545639026394be1c8d88..7d6cfaad609c7dac2e68d37df38f45bae1870e06 100644 --- a/substrate/frame/benchmarking/src/tests.rs +++ b/substrate/frame/benchmarking/src/tests.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use frame_support::{parameter_types, traits::ConstU32}; +use frame_support::{derive_impl, parameter_types, traits::ConstU32}; use sp_runtime::{ testing::H256, traits::{BlakeTwo256, IdentityLookup}, @@ -75,6 +75,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/benchmarking/src/tests_instance.rs b/substrate/frame/benchmarking/src/tests_instance.rs index f2c721c8114c469d8fd8df0e9afac527f62e6984..550108408968c9ccdd758279fe9bef82f468bdac 100644 --- a/substrate/frame/benchmarking/src/tests_instance.rs +++ b/substrate/frame/benchmarking/src/tests_instance.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use frame_support::traits::ConstU32; +use frame_support::{derive_impl, traits::ConstU32}; use sp_runtime::{ testing::H256, traits::{BlakeTwo256, IdentityLookup}, @@ -85,6 +85,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index 7da21140542a8aa760f0d533609c97df07d01764..16da862d48847ef6b04c7c669c49cacb972f5060 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet to manage bounties" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,20 +21,20 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-treasury = { path = "../treasury", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-treasury = { path = "../treasury", 
default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index 6fff337cba45040b01d586c09e4c96d63ab93500..3558847c8fedd263e6c57db24195a898a3d0dd6d 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -222,7 +222,7 @@ benchmarks_instance_pallet! { ); } verify { - ensure!(missed_any == false, "Missed some"); + ensure!(!missed_any, "Missed some"); if b > 0 { ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); assert_last_event::(Event::BountyBecameActive { index: b - 1 }.into()) diff --git a/substrate/frame/bounties/src/migrations/v4.rs b/substrate/frame/bounties/src/migrations/v4.rs index 936bac117008968cba3aef3e70e303056c8646df..4e6ba934481628f8aeca3647711cec8a7ab0fd45 100644 --- a/substrate/frame/bounties/src/migrations/v4.rs +++ b/substrate/frame/bounties/src/migrations/v4.rs @@ -110,7 +110,7 @@ pub fn migrate< } /// Some checks prior to migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn pre_migration>( @@ -164,7 +164,7 @@ pub fn pre_migration>( diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index 233e41b474c7497e17ee78e9f5d64b1bbb254a0e..22e608cce6373ade108bcf113b81b2118880d235 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -23,7 +23,7 @@ use super::*; use crate as pallet_bounties; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ tokens::{PayFromAccount, UnityAssetBalanceConversion}, ConstU32, ConstU64, OnInitialize, @@ -59,6 +59,7 @@ parameter_types! 
{ type Balance = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index 142d0a0e35e0403e1470b320c9ccc1bff22d282c..77757c30463619750fca0f28d0c6229367dd891d 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -8,26 +8,29 @@ edition.workspace = true license = "Apache-2.0" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } bitvec = { version = "1.0.0", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } [dev-dependencies] sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "bitvec/std", diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index d22f3936c3e2435665d869cd32edc0e042cc5f55..c57c4ccb8ce4ae4d37f9e41a81cfe09f6365a349 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -31,7 +31,7 @@ use frame_support::{ use frame_system::{Pallet as System, RawOrigin}; use sp_arithmetic::{traits::Zero, Perbill}; use sp_core::Get; -use sp_runtime::Saturating; +use sp_runtime::{traits::BlockNumberProvider, Saturating}; use sp_std::{vec, vec::Vec}; const SEED: u32 = 0; @@ -82,6 +82,10 @@ fn setup_leases(n: u32, task: u32, until: u32) { fn advance_to(b: u32) { while System::::block_number() < b.into() { System::::set_block_number(System::::block_number().saturating_add(1u32.into())); + + let block_number: u32 = System::::block_number().try_into().ok().unwrap(); + + RCBlockNumberProviderOf::::set_block_number(block_number.into()); Broker::::on_initialize(System::::block_number()); } } @@ -182,7 +186,8 @@ mod benches { #[benchmark] fn start_sales(n: Linear<0, { MAX_CORE_COUNT.into() }>) -> Result<(), BenchmarkError> { - Configuration::::put(new_config_record::()); + let config = new_config_record::(); + Configuration::::put(config.clone()); // Assume Reservations to be filled 
for worst case setup_reservations::(T::MaxReservedCores::get()); @@ -190,6 +195,8 @@ mod benches { // Assume Leases to be filled for worst case setup_leases::(T::MaxLeasedCores::get(), 1, 10); + let latest_region_begin = Broker::::latest_timeslice_ready_to_commit(&config); + let initial_price = 10u32.into(); let origin = @@ -205,8 +212,8 @@ mod benches { leadin_length: 1u32.into(), start_price: 20u32.into(), regular_price: 10u32.into(), - region_begin: 4, - region_end: 7, + region_begin: latest_region_begin + config.region_length, + region_end: latest_region_begin + config.region_length * 2, ideal_cores_sold: 0, cores_offered: n .saturating_sub(T::MaxReservedCores::get()) @@ -239,7 +246,11 @@ mod benches { assert_last_event::( Event::Purchased { who: caller, - region_id: RegionId { begin: 4, core, mask: CoreMask::complete() }, + region_id: RegionId { + begin: SaleInfo::::get().unwrap().region_begin, + core, + mask: CoreMask::complete(), + }, price: 10u32.into(), duration: 3u32.into(), } @@ -252,6 +263,7 @@ mod benches { #[benchmark] fn renew() -> Result<(), BenchmarkError> { setup_and_start_sale::()?; + let region_len = Configuration::::get().unwrap().region_length; advance_to::(2); @@ -267,12 +279,12 @@ mod benches { Broker::::do_assign(region, None, 1001, Final) .map_err(|_| BenchmarkError::Weightless)?; - advance_to::(6); + advance_to::((T::TimeslicePeriod::get() * region_len.into()).try_into().ok().unwrap()); #[extrinsic_call] _(RawOrigin::Signed(caller), region.core); - let id = AllowedRenewalId { core: region.core, when: 10 }; + let id = AllowedRenewalId { core: region.core, when: region.begin + region_len * 2 }; assert!(AllowedRenewals::::get(id).is_some()); Ok(()) @@ -331,10 +343,10 @@ mod benches { assert_last_event::( Event::Partitioned { - old_region_id: RegionId { begin: 4, core, mask: CoreMask::complete() }, + old_region_id: RegionId { begin: region.begin, core, mask: CoreMask::complete() }, new_region_ids: ( - RegionId { begin: 4, core, mask: CoreMask::complete() }, - RegionId { begin: 6, core, mask: CoreMask::complete() }, + RegionId { begin: region.begin, core, mask: CoreMask::complete() }, + RegionId { begin: region.begin + 2, core, mask: CoreMask::complete() }, ), } .into(), @@ -363,11 +375,11 @@ mod benches { assert_last_event::( Event::Interlaced { - old_region_id: RegionId { begin: 4, core, mask: CoreMask::complete() }, + old_region_id: RegionId { begin: region.begin, core, mask: CoreMask::complete() }, new_region_ids: ( - RegionId { begin: 4, core, mask: 0x00000_fffff_fffff_00000.into() }, + RegionId { begin: region.begin, core, mask: 0x00000_fffff_fffff_00000.into() }, RegionId { - begin: 4, + begin: region.begin, core, mask: CoreMask::complete() ^ 0x00000_fffff_fffff_00000.into(), }, @@ -404,7 +416,7 @@ mod benches { assert_last_event::( Event::Assigned { - region_id: RegionId { begin: 4, core, mask: CoreMask::complete() }, + region_id: RegionId { begin: region.begin, core, mask: CoreMask::complete() }, task: 1000, duration: 3u32.into(), } @@ -439,7 +451,7 @@ mod benches { assert_last_event::( Event::Pooled { - region_id: RegionId { begin: 4, core, mask: CoreMask::complete() }, + region_id: RegionId { begin: region.begin, core, mask: CoreMask::complete() }, duration: 3u32.into(), } .into(), @@ -494,7 +506,11 @@ mod benches { who: recipient, amount: 200u32.into(), next: if m < new_config_record::().region_length { - Some(RegionId { begin: 4.saturating_add(m), core, mask: CoreMask::complete() }) + Some(RegionId { + begin: region.begin.saturating_add(m), + core, + 
mask: CoreMask::complete(), + }) } else { None }, @@ -541,6 +557,7 @@ mod benches { #[benchmark] fn drop_region() -> Result<(), BenchmarkError> { let core = setup_and_start_sale::()?; + let region_len = Configuration::::get().unwrap().region_length; advance_to::(2); @@ -553,14 +570,16 @@ mod benches { let region = Broker::::do_purchase(caller.clone(), 10u32.into()) .map_err(|_| BenchmarkError::Weightless)?; - advance_to::(12); + advance_to::( + (T::TimeslicePeriod::get() * (region_len * 4).into()).try_into().ok().unwrap(), + ); #[extrinsic_call] _(RawOrigin::Signed(caller), region); assert_last_event::( Event::RegionDropped { - region_id: RegionId { begin: 4, core, mask: CoreMask::complete() }, + region_id: RegionId { begin: region.begin, core, mask: CoreMask::complete() }, duration: 3u32.into(), } .into(), @@ -572,6 +591,7 @@ mod benches { #[benchmark] fn drop_contribution() -> Result<(), BenchmarkError> { let core = setup_and_start_sale::()?; + let region_len = Configuration::::get().unwrap().region_length; advance_to::(2); @@ -589,14 +609,16 @@ mod benches { Broker::::do_pool(region, None, recipient, Final) .map_err(|_| BenchmarkError::Weightless)?; - advance_to::(26); + advance_to::( + (T::TimeslicePeriod::get() * (region_len * 8).into()).try_into().ok().unwrap(), + ); #[extrinsic_call] _(RawOrigin::Signed(caller), region); assert_last_event::( Event::ContributionDropped { - region_id: RegionId { begin: 4, core, mask: CoreMask::complete() }, + region_id: RegionId { begin: region.begin, core, mask: CoreMask::complete() }, } .into(), ); @@ -609,8 +631,11 @@ mod benches { setup_and_start_sale::()?; let when = 5u32.into(); let revenue = 10u32.into(); + let region_len = Configuration::::get().unwrap().region_length; - advance_to::(25); + advance_to::( + (T::TimeslicePeriod::get() * (region_len * 8).into()).try_into().ok().unwrap(), + ); let caller: T::AccountId = whitelisted_caller(); InstaPoolHistory::::insert( @@ -635,8 +660,11 @@ mod benches { fn drop_renewal() -> Result<(), BenchmarkError> { let core = setup_and_start_sale::()?; let when = 5u32.into(); + let region_len = Configuration::::get().unwrap().region_length; - advance_to::(10); + advance_to::( + (T::TimeslicePeriod::get() * (region_len * 3).into()).try_into().ok().unwrap(), + ); let id = AllowedRenewalId { core, when }; let record = AllowedRenewalRecord { @@ -677,7 +705,7 @@ mod benches { let core_count = n.try_into().unwrap(); - ::ensure_notify_core_count(core_count); + CoreCountInbox::::put(core_count); let mut status = Status::::get().ok_or(BenchmarkError::Weightless)?; @@ -704,10 +732,16 @@ mod benches { ); T::Currency::set_balance(&Broker::::account_id(), T::Currency::minimum_balance()); - ::ensure_notify_revenue_info(10u32.into(), 10u32.into()); + let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); + let multiplicator = 5; + ::ensure_notify_revenue_info( + (timeslice_period * multiplicator).into(), + 10u32.into(), + ); + let timeslice = multiplicator - 1; InstaPoolHistory::::insert( - 4u32, + timeslice, InstaPoolHistoryRecord { private_contributions: 1u32.into(), system_contributions: 9u32.into(), @@ -722,7 +756,7 @@ mod benches { assert_last_event::( Event::ClaimsReady { - when: 4u32.into(), + when: timeslice.into(), system_payout: 9u32.into(), private_payout: 1u32.into(), } @@ -769,7 +803,7 @@ mod benches { #[block] { - Broker::::rotate_sale(sale, &config, &status); + Broker::::rotate_sale(sale.clone(), &config, &status); } assert!(SaleInfo::::get().is_some()); @@ -779,8 +813,8 @@ mod benches 
{ leadin_length: 1u32.into(), start_price: 20u32.into(), regular_price: 10u32.into(), - region_begin: 4, - region_end: 7, + region_begin: sale.region_begin + config.region_length, + region_end: sale.region_end + config.region_length, ideal_cores_sold: 0, cores_offered: n .saturating_sub(T::MaxReservedCores::get()) diff --git a/substrate/frame/broker/src/coretime_interface.rs b/substrate/frame/broker/src/coretime_interface.rs index fec40b9fdd7b3d0e18216a2f634040b7bffc9279..9e853e8f3fe0b1e2f76e3fac57b9ce0b8d03ab07 100644 --- a/substrate/frame/broker/src/coretime_interface.rs +++ b/substrate/frame/broker/src/coretime_interface.rs @@ -22,7 +22,8 @@ use frame_support::Parameter; use scale_info::TypeInfo; use sp_arithmetic::traits::AtLeast32BitUnsigned; use sp_core::RuntimeDebug; -use sp_std::{fmt::Debug, vec::Vec}; +use sp_runtime::traits::BlockNumberProvider; +use sp_std::vec::Vec; /// Index of a Polkadot Core. pub type CoreIndex = u16; @@ -46,6 +47,12 @@ pub enum CoreAssignment { Task(TaskId), } +/// Relay chain block number of `T` that implements [`CoretimeInterface`]. +pub type RCBlockNumberOf = as BlockNumberProvider>::BlockNumber; + +/// Relay chain block number provider of `T` that implements [`CoretimeInterface`]. +pub type RCBlockNumberProviderOf = ::RealyChainBlockNumberProvider; + /// Type able to accept Coretime scheduling instructions and provide certain usage information. /// Generally implemented by the Relay-chain or some means of communicating with it. /// @@ -57,17 +64,8 @@ pub trait CoretimeInterface { /// A (Relay-chain-side) balance. type Balance: AtLeast32BitUnsigned; - /// A (Relay-chain-side) block number. - type BlockNumber: AtLeast32BitUnsigned - + Copy - + TypeInfo - + Encode - + Decode - + MaxEncodedLen - + Debug; - - /// Return the latest block number on the Relay-chain. - fn latest() -> Self::BlockNumber; + /// A provider for the relay chain block number. + type RealyChainBlockNumberProvider: BlockNumberProvider; /// Requests the Relay-chain to alter the number of schedulable cores to `count`. Under normal /// operation, the Relay-chain SHOULD send a `notify_core_count(count)` message back. @@ -81,7 +79,7 @@ pub trait CoretimeInterface { /// should be understood on a channel outside of this proposal. In the case that the request /// cannot be serviced because `when` is too old a block then a `notify_revenue` message must /// still be returned, but its `revenue` field may be `None`. - fn request_revenue_info_at(when: Self::BlockNumber); + fn request_revenue_info_at(when: RCBlockNumberOf); /// Instructs the Relay-chain to add the `amount` of DOT to the Instantaneous Coretime Market /// Credit account of `who`. @@ -104,16 +102,11 @@ pub trait CoretimeInterface { /// remain unchanged regardless of the `end_hint` value. fn assign_core( core: CoreIndex, - begin: Self::BlockNumber, + begin: RCBlockNumberOf, assignment: Vec<(CoreAssignment, PartsOf57600)>, - end_hint: Option, + end_hint: Option>, ); - /// Indicate that from this block onwards, the range of acceptable values of the `core` - /// parameter of `assign_core` message is `[0, count)`. `assign_core` will be a no-op if - /// provided with a value for `core` outside of this range. - fn check_notify_core_count() -> Option; - /// Provide the amount of revenue accumulated from Instantaneous Coretime Sales from Relay-chain /// block number `last_until` to `until`, not including `until` itself. 
`last_until` is defined /// as being the `until` argument of the last `notify_revenue` message sent, or zero for the @@ -123,46 +116,33 @@ pub trait CoretimeInterface { /// This explicitly disregards the possibility of multiple parachains requesting and being /// notified of revenue information. The Relay-chain must be configured to ensure that only a /// single revenue information destination exists. - fn check_notify_revenue_info() -> Option<(Self::BlockNumber, Self::Balance)>; - - /// Ensure that core count is updated to the provided value. - /// - /// This is only used for benchmarking. - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_core_count(count: u16); + fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)>; /// Ensure that revenue information is updated to the provided value. /// /// This is only used for benchmarking. #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: Self::BlockNumber, revenue: Self::Balance); + fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance); } impl CoretimeInterface for () { type AccountId = (); type Balance = u64; - type BlockNumber = u32; - fn latest() -> Self::BlockNumber { - 0 - } + type RealyChainBlockNumberProvider = (); + fn request_core_count(_count: CoreIndex) {} - fn request_revenue_info_at(_when: Self::BlockNumber) {} + fn request_revenue_info_at(_when: RCBlockNumberOf) {} fn credit_account(_who: Self::AccountId, _amount: Self::Balance) {} fn assign_core( _core: CoreIndex, - _begin: Self::BlockNumber, + _begin: RCBlockNumberOf, _assignment: Vec<(CoreAssignment, PartsOf57600)>, - _end_hint: Option, + _end_hint: Option>, ) { } - fn check_notify_core_count() -> Option { + fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { None } - fn check_notify_revenue_info() -> Option<(Self::BlockNumber, Self::Balance)> { - None - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_core_count(_count: u16) {} #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(_when: Self::BlockNumber, _revenue: Self::Balance) {} + fn ensure_notify_revenue_info(_when: RCBlockNumberOf, _revenue: Self::Balance) {} } diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 0b08a7b665b75ac1c860774762878012bf82a749..b04e15b169bc59b84fdf416d5bffdc6357b7afb9 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -37,6 +37,11 @@ impl Pallet { Ok(()) } + pub(crate) fn do_notify_core_count(core_count: CoreIndex) -> DispatchResult { + CoreCountInbox::::put(core_count); + Ok(()) + } + pub(crate) fn do_reserve(workload: Schedule) -> DispatchResult { let mut r = Reservations::::get(); let index = r.len() as u32; diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 4abd041f5f397c977ca06b8dbd6c294155b09b4f..38a5049005468cbcb6230158fad273241359bdbc 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -42,9 +42,7 @@ pub use weights::WeightInfo; pub use adapt_price::*; pub use core_mask::*; pub use coretime_interface::*; -pub use nonfungible_impl::*; pub use types::*; -pub use utility_impls::*; #[frame_support::pallet] pub mod pallet { @@ -161,6 +159,10 @@ pub mod pallet { pub type InstaPoolHistory = StorageMap<_, Blake2_128Concat, Timeslice, InstaPoolHistoryRecordOf>; + /// Received core count change from the relay chain. 
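+	/// Drained via `CoreCountInbox::take` by `process_core_count` as part of the pallet's tick logic.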
+ #[pallet::storage] + pub type CoreCountInbox = StorageValue<_, CoreIndex, OptionQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -716,55 +718,51 @@ pub mod pallet { /// Drop an expired Region from the chain. /// - /// - `origin`: Must be a Signed origin. + /// - `origin`: Can be any kind of origin. /// - `region_id`: The Region which has expired. #[pallet::call_index(14)] pub fn drop_region( - origin: OriginFor, + _origin: OriginFor, region_id: RegionId, ) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; Self::do_drop_region(region_id)?; Ok(Pays::No.into()) } /// Drop an expired Instantaneous Pool Contribution record from the chain. /// - /// - `origin`: Must be a Signed origin. + /// - `origin`: Can be any kind of origin. /// - `region_id`: The Region identifying the Pool Contribution which has expired. #[pallet::call_index(15)] pub fn drop_contribution( - origin: OriginFor, + _origin: OriginFor, region_id: RegionId, ) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; Self::do_drop_contribution(region_id)?; Ok(Pays::No.into()) } /// Drop an expired Instantaneous Pool History record from the chain. /// - /// - `origin`: Must be a Signed origin. + /// - `origin`: Can be any kind of origin. /// - `region_id`: The time of the Pool History record which has expired. #[pallet::call_index(16)] - pub fn drop_history(origin: OriginFor, when: Timeslice) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; + pub fn drop_history(_origin: OriginFor, when: Timeslice) -> DispatchResultWithPostInfo { Self::do_drop_history(when)?; Ok(Pays::No.into()) } /// Drop an expired Allowed Renewal record from the chain. /// - /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`. + /// - `origin`: Can be any kind of origin. /// - `core`: The core to which the expired renewal refers. /// - `when`: The timeslice to which the expired renewal refers. This must have passed. 
#[pallet::call_index(17)] pub fn drop_renewal( - origin: OriginFor, + _origin: OriginFor, core: CoreIndex, when: Timeslice, ) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; Self::do_drop_renewal(core, when)?; Ok(Pays::No.into()) } @@ -780,5 +778,13 @@ pub mod pallet { Self::do_request_core_count(core_count)?; Ok(()) } + + #[pallet::call_index(19)] + #[pallet::weight(T::WeightInfo::notify_core_count())] + pub fn notify_core_count(origin: OriginFor, core_count: CoreIndex) -> DispatchResult { + T::AdminOrigin::ensure_origin_or_root(origin)?; + Self::do_notify_core_count(core_count)?; + Ok(()) + } } } diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs index cab6b7389c06494a0bcddb4d8d02f8f734f67b8c..19c72340353c66d9bedf5d016e02dad7102116cb 100644 --- a/substrate/frame/broker/src/mock.rs +++ b/substrate/frame/broker/src/mock.rs @@ -19,7 +19,7 @@ use crate::{test_fungibles::TestFungibles, *}; use frame_support::{ - assert_ok, ensure, ord_parameter_types, parameter_types, + assert_ok, derive_impl, ensure, ord_parameter_types, parameter_types, traits::{ fungible::{Balanced, Credit, Inspect, ItemOf, Mutate}, nonfungible::Inspect as NftInspect, @@ -29,9 +29,9 @@ use frame_support::{ }; use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_arithmetic::Perbill; -use sp_core::{ConstU16, ConstU32, ConstU64, H256}; +use sp_core::{ConstU32, ConstU64}; use sp_runtime::{ - traits::{BlakeTwo256, Identity, IdentityLookup}, + traits::{BlockNumberProvider, Identity}, BuildStorage, Saturating, }; use sp_std::collections::btree_map::BTreeMap; @@ -47,30 +47,9 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } #[derive(Debug, Clone, Eq, PartialEq)] @@ -91,7 +70,6 @@ parameter_types! 
{ pub static CoretimeWorkplan: BTreeMap<(u32, CoreIndex), Vec<(CoreAssignment, PartsOf57600)>> = Default::default(); pub static CoretimeUsage: BTreeMap> = Default::default(); pub static CoretimeInPool: CoreMaskBitCount = 0; - pub static NotifyCoreCount: Vec = Default::default(); pub static NotifyRevenueInfo: Vec<(u32, u64)> = Default::default(); } @@ -99,18 +77,20 @@ pub struct TestCoretimeProvider; impl CoretimeInterface for TestCoretimeProvider { type AccountId = u64; type Balance = u64; - type BlockNumber = u32; - fn latest() -> Self::BlockNumber { - System::block_number() as u32 - } + type RealyChainBlockNumberProvider = System; fn request_core_count(count: CoreIndex) { - NotifyCoreCount::mutate(|s| s.insert(0, count)); + CoreCountInbox::::put(count); } - fn request_revenue_info_at(when: Self::BlockNumber) { - if when > Self::latest() { - panic!("Asking for revenue info in the future {:?} {:?}", when, Self::latest()); + fn request_revenue_info_at(when: RCBlockNumberOf) { + if when > RCBlockNumberProviderOf::::current_block_number() { + panic!( + "Asking for revenue info in the future {:?} {:?}", + when, + RCBlockNumberProviderOf::::current_block_number() + ); } + let when = when as u32; let mut total = 0; CoretimeSpending::mutate(|s| { s.retain(|(n, a)| { @@ -129,27 +109,28 @@ impl CoretimeInterface for TestCoretimeProvider { } fn assign_core( core: CoreIndex, - begin: Self::BlockNumber, + begin: RCBlockNumberOf, assignment: Vec<(CoreAssignment, PartsOf57600)>, - end_hint: Option, + end_hint: Option>, ) { - CoretimeWorkplan::mutate(|p| p.insert((begin, core), assignment.clone())); - let item = (Self::latest(), AssignCore { core, begin, assignment, end_hint }); + CoretimeWorkplan::mutate(|p| p.insert((begin as u32, core), assignment.clone())); + let item = ( + RCBlockNumberProviderOf::::current_block_number() as u32, + AssignCore { + core, + begin: begin as u32, + assignment, + end_hint: end_hint.map(|v| v as u32), + }, + ); CoretimeTrace::mutate(|v| v.push(item)); } - fn check_notify_core_count() -> Option { - NotifyCoreCount::mutate(|s| s.pop()) - } - fn check_notify_revenue_info() -> Option<(Self::BlockNumber, Self::Balance)> { - NotifyRevenueInfo::mutate(|s| s.pop()) + fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { + NotifyRevenueInfo::mutate(|s| s.pop()).map(|v| (v.0 as _, v.1)) } #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_core_count(count: u16) { - NotifyCoreCount::mutate(|s| s.insert(0, count)); - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: Self::BlockNumber, revenue: Self::Balance) { - NotifyRevenueInfo::mutate(|s| s.push((when, revenue))); + fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance) { + NotifyRevenueInfo::mutate(|s| s.push((when as u32, revenue))); } } impl TestCoretimeProvider { @@ -158,14 +139,16 @@ impl TestCoretimeProvider { ensure!(CoretimeInPool::get() > 0, ()); c.insert(who, c.get(&who).ok_or(())?.checked_sub(price).ok_or(())?); CoretimeCredit::set(c); - CoretimeSpending::mutate(|v| v.push((Self::latest(), price))); + CoretimeSpending::mutate(|v| { + v.push((RCBlockNumberProviderOf::::current_block_number() as u32, price)) + }); Ok(()) } pub fn bump() { let mut pool_size = CoretimeInPool::get(); let mut workplan = CoretimeWorkplan::get(); let mut usage = CoretimeUsage::get(); - let now = Self::latest(); + let now = RCBlockNumberProviderOf::::current_block_number() as u32; workplan.retain(|(when, core), assignment| { if *when <= now { if let 
Some(old_assignment) = usage.get(core) { @@ -208,7 +191,7 @@ impl crate::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = ItemOf, ()>, (), u64>; type OnRevenue = IntoZero; - type TimeslicePeriod = ConstU32<2>; + type TimeslicePeriod = ConstU64<2>; type MaxLeasedCores = ConstU32<5>; type MaxReservedCores = ConstU32<5>; type Coretime = TestCoretimeProvider; @@ -264,7 +247,7 @@ impl TestExt { } pub fn advance_notice(mut self, advance_notice: Timeslice) -> Self { - self.0.advance_notice = advance_notice; + self.0.advance_notice = advance_notice as u64; self } diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index 7df8bd39d42fe13e6cf7375442d7ddab98c60d99..8b7860c8e3af6daaaa433d9b7d093c94f292a5a6 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -87,7 +87,7 @@ impl Pallet { } pub(crate) fn process_core_count(status: &mut StatusRecord) -> bool { - if let Some(core_count) = T::Coretime::check_notify_core_count() { + if let Some(core_count) = CoreCountInbox::::take() { status.core_count = core_count; Self::deposit_event(Event::::CoreCountChanged { core_count }); return true @@ -112,10 +112,16 @@ impl Pallet { } // Payout system InstaPool Cores. let total_contrib = r.system_contributions.saturating_add(r.private_contributions); - let system_payout = - revenue.saturating_mul(r.system_contributions.into()) / total_contrib.into(); - let _ = Self::charge(&Self::account_id(), system_payout); - revenue.saturating_reduce(system_payout); + let system_payout = if !total_contrib.is_zero() { + let system_payout = + revenue.saturating_mul(r.system_contributions.into()) / total_contrib.into(); + let _ = Self::charge(&Self::account_id(), system_payout); + revenue.saturating_reduce(system_payout); + + system_payout + } else { + Zero::zero() + }; if !revenue.is_zero() && r.private_contributions > 0 { r.maybe_payout = Some(revenue); diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index 89222ca8e95271098fe2d335142015024439a71a..7e9f351723a5d312fcd52dcf57d845ccea34a05a 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -16,7 +16,8 @@ // limitations under the License. use crate::{ - Config, CoreAssignment, CoreIndex, CoreMask, CoretimeInterface, TaskId, CORE_MASK_BITS, + Config, CoreAssignment, CoreIndex, CoreMask, CoretimeInterface, RCBlockNumberOf, TaskId, + CORE_MASK_BITS, }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::traits::fungible::Inspect; @@ -28,7 +29,7 @@ use sp_runtime::BoundedVec; pub type BalanceOf = <::Currency as Inspect<::AccountId>>::Balance; pub type RelayBalanceOf = <::Coretime as CoretimeInterface>::Balance; -pub type RelayBlockNumberOf = <::Coretime as CoretimeInterface>::BlockNumber; +pub type RelayBlockNumberOf = RCBlockNumberOf<::Coretime>; pub type RelayAccountIdOf = <::Coretime as CoretimeInterface>::AccountId; /// Relay-chain block number with a fixed divisor of Config::TimeslicePeriod. 
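The net effect of the `coretime_interface.rs` and `types.rs` changes above is that the broker now reads the relay chain block number through a `BlockNumberProvider` associated type (`RealyChainBlockNumberProvider`) and the `RCBlockNumberOf`/`RCBlockNumberProviderOf` aliases, instead of the removed `BlockNumber`/`latest()` pair. A minimal sketch of what an implementation might look like under the new trait shape follows; `ExampleCoretime` and `RelayNumber` are hypothetical names, while the trait items and aliases are the ones introduced by this patch (the mock plugs `System` in as the provider).

use pallet_broker::{
    CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600, RCBlockNumberOf,
    RCBlockNumberProviderOf,
};
use sp_runtime::traits::BlockNumberProvider;
use sp_std::vec::Vec;

/// Hypothetical source of the relay chain block number; a real runtime would point
/// this at e.g. `frame_system::Pallet<Runtime>` or a relay chain data provider.
pub struct RelayNumber;
impl BlockNumberProvider for RelayNumber {
    type BlockNumber = u32;
    fn current_block_number() -> u32 {
        0
    }
}

/// Hypothetical `CoretimeInterface` implementation showing the new associated type.
pub struct ExampleCoretime;
impl CoretimeInterface for ExampleCoretime {
    type AccountId = u64;
    type Balance = u64;
    // Replaces the old `type BlockNumber` + `fn latest()` pair.
    type RealyChainBlockNumberProvider = RelayNumber;

    fn request_core_count(_count: CoreIndex) {}
    fn request_revenue_info_at(_when: RCBlockNumberOf<Self>) {}
    fn credit_account(_who: Self::AccountId, _amount: Self::Balance) {}
    fn assign_core(
        _core: CoreIndex,
        _begin: RCBlockNumberOf<Self>,
        _assignment: Vec<(CoreAssignment, PartsOf57600)>,
        _end_hint: Option<RCBlockNumberOf<Self>>,
    ) {
    }
    fn check_notify_revenue_info() -> Option<(RCBlockNumberOf<Self>, Self::Balance)> {
        None
    }
    #[cfg(feature = "runtime-benchmarks")]
    fn ensure_notify_revenue_info(_when: RCBlockNumberOf<Self>, _revenue: Self::Balance) {}
}

// Reading the relay chain block number the way the pallet's utility impls (below) now do:
fn _current_relay_block() -> RCBlockNumberOf<ExampleCoretime> {
    RCBlockNumberProviderOf::<ExampleCoretime>::current_block_number()
}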
diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs index 2450198050b67fccc852bd06e116bcdc16628750..3dba5be5b398b8f30156179e7dd1b203fff5e311 100644 --- a/substrate/frame/broker/src/utility_impls.rs +++ b/substrate/frame/broker/src/utility_impls.rs @@ -29,17 +29,17 @@ use sp_arithmetic::{ traits::{SaturatedConversion, Saturating}, FixedPointNumber, FixedU64, }; -use sp_runtime::traits::AccountIdConversion; +use sp_runtime::traits::{AccountIdConversion, BlockNumberProvider}; impl Pallet { pub fn current_timeslice() -> Timeslice { - let latest = T::Coretime::latest(); + let latest = RCBlockNumberProviderOf::::current_block_number(); let timeslice_period = T::TimeslicePeriod::get(); (latest / timeslice_period).saturated_into() } pub fn latest_timeslice_ready_to_commit(config: &ConfigRecordOf) -> Timeslice { - let latest = T::Coretime::latest(); + let latest = RCBlockNumberProviderOf::::current_block_number(); let advanced = latest.saturating_add(config.advance_notice); let timeslice_period = T::TimeslicePeriod::get(); (advanced / timeslice_period).saturated_into() diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index b3a151c6062c45f43f9ae1ccafa513a07f70f08f..a8f50eeee6e6ceaf284c1764a68411956753cfff 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -74,6 +74,7 @@ pub trait WeightInfo { fn process_pool() -> Weight; fn process_core_schedule() -> Weight; fn request_revenue_info_at() -> Weight; + fn notify_core_count() -> Weight; fn do_tick_base() -> Weight; } @@ -447,6 +448,9 @@ impl WeightInfo for SubstrateWeight { // Minimum execution time: 147_000 picoseconds. Weight::from_parts(184_000, 0) } + fn notify_core_count() -> Weight { + T::DbWeight::get().reads_writes(1, 1) + } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) @@ -835,6 +839,10 @@ impl WeightInfo for () { // Minimum execution time: 147_000 picoseconds. 
Weight::from_parts(184_000, 0) } + fn notify_core_count() -> Weight { + RocksDbWeight::get().reads(1) + .saturating_add(RocksDbWeight::get().writes(1)) + } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml index ac29bc4997ba7a8e61cdccf9839140c2b844a7a3..6c1c362dc56fe769a67d61a9b6a559c9cb934206 100644 --- a/substrate/frame/child-bounties/Cargo.toml +++ b/substrate/frame/child-bounties/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet to manage child bounties" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,21 +21,21 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-bounties = { path = "../bounties", default-features = false} -pallet-treasury = { path = "../treasury", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-bounties = { path = "../bounties", default-features = false } +pallet-treasury = { path = "../treasury", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index 46f8fa65dd37749d1c50130f6f24b3c65f0f95c0..7de45c73127457c0bb4b7f731773b9c6bd13e8a0 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -23,7 +23,7 @@ use super::*; use crate as pallet_child_bounties; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ tokens::{PayFromAccount, UnityAssetBalanceConversion}, ConstU32, ConstU64, OnInitialize, @@ -62,6 +62,7 @@ parameter_types! 
{ type Balance = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index 7f5e305e4f59b42e67b6aadb7db1402824d032fa..fb0bace740c5d9018835b431056ac8645f096e2a 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Collective system: Members of a set of account IDs can make their collective feelings known through dispatched calls from one of two specialized origins." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,16 +19,16 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/collective/src/migrations/v4.rs b/substrate/frame/collective/src/migrations/v4.rs index b3326b4251c9bd3d2f42dba0c61ec67e22fd91c6..300dff23d8eb5dc7241761af56126a97892e4a87 100644 --- a/substrate/frame/collective/src/migrations/v4.rs +++ b/substrate/frame/collective/src/migrations/v4.rs @@ -76,7 +76,7 @@ pub fn migrate>(old_pallet_name: N) { @@ -104,7 +104,7 @@ pub fn pre_migrate>(old_p } /// Some checks for after migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing. /// /// Panics if anything goes wrong. 
pub fn post_migrate>(old_pallet_name: N) { diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index 86b85e07a8bd9336472b0ab3de918e6839862b8f..06a91cf6fe9c8564c70d1aac74c5fae0087affa3 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -18,7 +18,7 @@ use super::{Event as CollectiveEvent, *}; use crate as pallet_collective; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, dispatch::Pays, parameter_types, traits::{ConstU32, ConstU64, StorageVersion}, @@ -90,6 +90,8 @@ parameter_types! { frame_system::limits::BlockWeights::simple_max(Weight::MAX); pub static MaxProposalWeight: Weight = default_max_proposal_weight(); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 0eb50c2b0bae11b2a92b7df3ebe545174dc24696..4c6ca41ed56bd9ae76d129a3d03fc90facf6f708 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -9,7 +9,10 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for WASM contracts" readme = "README.md" -include = ["src/**/*", "benchmarks/**", "build.rs", "README.md", "CHANGELOG.md"] +include = ["CHANGELOG.md", "README.md", "benchmarks/**", "build.rs", "src/**/*"] + +[lints] +workspace = true [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -37,16 +40,19 @@ rand_pcg = { version = "0.3", optional = true } # Substrate Dependencies environmental = { version = "1.1.4", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-balances = { path = "../balances", default-features = false , optional = true} -pallet-contracts-primitives = { path = "primitives", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-balances = { path = "../balances", default-features = false, optional = true } +pallet-contracts-uapi = { path = "uapi" } pallet-contracts-proc-macro = { path = "proc-macro" } -sp-api = { path = "../../primitives/api", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-api = { path = "../../primitives/api", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } + +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } [dev-dependencies] array-bytes = "6.1" @@ -56,16 +62,22 @@ pretty_assertions = "1" wat = "1" 
pallet-contracts-fixtures = { path = "./fixtures" } +# Polkadot Dependencies +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder" } + # Substrate Dependencies pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } +pallet-message-queue = { path = "../message-queue" } pallet-insecure-randomness-collective-flip = { path = "../insecure-randomness-collective-flip" } pallet-utility = { path = "../utility" } +pallet-assets = { path = "../assets" } pallet-proxy = { path = "../proxy" } sp-keystore = { path = "../../primitives/keystore" } +sp-tracing = { path = "../../primitives/tracing" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "environmental/std", @@ -74,14 +86,12 @@ std = [ "frame-system/std", "log/std", "pallet-balances?/std", - "pallet-contracts-fixtures/std", - "pallet-contracts-primitives/std", "pallet-contracts-proc-macro/full", "pallet-insecure-randomness-collective-flip/std", "pallet-proxy/std", "pallet-timestamp/std", "pallet-utility/std", - "rand/std", + "rand?/std", "scale-info/std", "serde", "sp-api/std", @@ -90,14 +100,18 @@ std = [ "sp-keystore/std", "sp-runtime/std", "sp-std/std", - "wasm-instrument/std", + "wasm-instrument?/std", "wasmi/std", + "xcm-builder/std", + "xcm/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -105,12 +119,15 @@ runtime-benchmarks = [ "rand_pcg", "sp-runtime/runtime-benchmarks", "wasm-instrument", + "xcm-builder/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", "pallet-insecure-randomness-collective-flip/try-runtime", + "pallet-message-queue/try-runtime", "pallet-proxy/try-runtime", "pallet-timestamp/try-runtime", "pallet-utility/try-runtime", diff --git a/substrate/frame/contracts/build.rs b/substrate/frame/contracts/build.rs index 42bc45d563d325a605c79749f096b5048a412af5..83d5d368d4bc35746eda0ca88acc92e63e2c79c4 100644 --- a/substrate/frame/contracts/build.rs +++ b/substrate/frame/contracts/build.rs @@ -68,6 +68,5 @@ fn main() -> Result<(), Box> { version - 1, )?; - println!("cargo:rerun-if-changed=src/migration"); Ok(()) } diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index b44f36f2a5fe7d141ebb7b262e7d60b6489cabbe..97606479f2593c847cd44d81b01fa39927f0f81c 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -7,12 +7,19 @@ edition.workspace = true license.workspace = true description = "Fixtures for testing contracts pallet." 
+[lints] +workspace = true + [dependencies] wat = "1" -frame-system = { path = "../../system", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} - -[features] -default = [ "std" ] -std = [ "frame-system/std", "sp-runtime/std" ] +frame-system = { path = "../../system" } +sp-runtime = { path = "../../../primitives/runtime" } +anyhow = "1.0.0" +[build-dependencies] +parity-wasm = "0.45.0" +tempfile = "3.8.1" +toml = "0.8.2" +twox-hash = "1.6.3" +anyhow = "1.0.0" +cfg-if = { version = "1.0", default-features = false } diff --git a/substrate/frame/contracts/fixtures/build.rs b/substrate/frame/contracts/fixtures/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..49deb94a7faaa1431bf26f85a1e2dfec2fcb016a --- /dev/null +++ b/substrate/frame/contracts/fixtures/build.rs @@ -0,0 +1,277 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Compile contracts to wasm and RISC-V binaries. +use anyhow::Result; +use parity_wasm::elements::{deserialize_file, serialize_to_file, Internal}; +use std::{ + env, fs, + hash::Hasher, + path::{Path, PathBuf}, + process::Command, +}; +use twox_hash::XxHash32; + +/// Read the file at `path` and return its hash as a hex string. +fn file_hash(path: &Path) -> String { + let data = fs::read(path).expect("file exists; qed"); + let mut hasher = XxHash32::default(); + hasher.write(&data); + hasher.write(include_bytes!("build.rs")); + let hash = hasher.finish(); + format!("{:x}", hash) +} + +/// A contract entry. +struct Entry { + /// The path to the contract source file. + path: PathBuf, + /// The hash of the contract source file. + hash: String, +} + +impl Entry { + /// Create a new contract entry from the given path. + fn new(path: PathBuf) -> Self { + let hash = file_hash(&path); + Self { path, hash } + } + + /// Return the path to the contract source file. + fn path(&self) -> &str { + self.path.to_str().expect("path is valid unicode; qed") + } + + /// Return the name of the contract. + fn name(&self) -> &str { + self.path + .file_stem() + .expect("file exits; qed") + .to_str() + .expect("name is valid unicode; qed") + } + + /// Return the name of the output wasm file. + fn out_wasm_filename(&self) -> String { + format!("{}.wasm", self.name()) + } +} + +/// Collect all contract entries from the given source directory. +/// Contracts that have already been compiled are filtered out. +fn collect_entries(contracts_dir: &Path, out_dir: &Path) -> Vec { + fs::read_dir(&contracts_dir) + .expect("src dir exists; qed") + .filter_map(|file| { + let path = file.expect("file exists; qed").path(); + if path.extension().map_or(true, |ext| ext != "rs") { + return None; + } + + let entry = Entry::new(path); + if out_dir.join(&entry.hash).exists() { + None + } else { + Some(entry) + } + }) + .collect::>() +} + +/// Create a `Cargo.toml` to compile the given contract entries. 
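+/// Each fixture entry is added as its own `[[bin]]` target in the generated manifest.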
+fn create_cargo_toml<'a>( + fixtures_dir: &Path, + entries: impl Iterator, + output_dir: &Path, +) -> Result<()> { + let uapi_path = fixtures_dir.join("../uapi").canonicalize()?; + let common_path = fixtures_dir.join("./contracts/common").canonicalize()?; + let mut cargo_toml: toml::Value = toml::from_str(&format!( + " +[package] +name = 'contracts' +version = '0.1.0' +edition = '2021' + +# Binary targets are injected below. +[[bin]] + +[dependencies] +uapi = {{ package = 'pallet-contracts-uapi', default-features = false, path = {uapi_path:?}}} +common = {{ package = 'pallet-contracts-fixtures-common', path = {common_path:?}}} + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 +" + ))?; + + let binaries = entries + .map(|entry| { + let name = entry.name(); + let path = entry.path(); + toml::Value::Table(toml::toml! { + name = name + path = path + }) + }) + .collect::>(); + + cargo_toml["bin"] = toml::Value::Array(binaries); + let cargo_toml = toml::to_string_pretty(&cargo_toml)?; + fs::write(output_dir.join("Cargo.toml"), cargo_toml).map_err(Into::into) +} + +/// Invoke `cargo fmt` to check that fixtures files are formatted. +fn invoke_cargo_fmt<'a>( + config_path: &Path, + files: impl Iterator, + contract_dir: &Path, +) -> Result<()> { + // If rustfmt is not installed, skip the check. + if !Command::new("rustup") + .args(&["run", "nightly", "rustfmt", "--version"]) + .output() + .map_or(false, |o| o.status.success()) + { + return Ok(()) + } + + let fmt_res = Command::new("rustup") + .args(&["run", "nightly", "rustfmt", "--check", "--config-path"]) + .arg(config_path) + .args(files) + .output() + .expect("failed to execute process"); + + if fmt_res.status.success() { + return Ok(()) + } + + let stdout = String::from_utf8_lossy(&fmt_res.stdout); + let stderr = String::from_utf8_lossy(&fmt_res.stderr); + eprintln!("{}\n{}", stdout, stderr); + eprintln!( + "Fixtures files are not formatted.\n + Please run `rustup run nightly rustfmt --config-path {} {}/*.rs`", + config_path.display(), + contract_dir.display() + ); + + anyhow::bail!("Fixtures files are not formatted") +} + +/// Invoke `cargo build` to compile the contracts. +fn invoke_build(current_dir: &Path) -> Result<()> { + let encoded_rustflags = [ + "-Clink-arg=-zstack-size=65536", + "-Clink-arg=--import-memory", + "-Clinker-plugin-lto", + "-Ctarget-cpu=mvp", + "-Dwarnings", + ] + .join("\x1f"); + + let build_res = Command::new(env::var("CARGO")?) + .current_dir(current_dir) + .env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) + .args(&["build", "--release", "--target=wasm32-unknown-unknown"]) + .output() + .expect("failed to execute process"); + + if build_res.status.success() { + return Ok(()) + } + + let stderr = String::from_utf8_lossy(&build_res.stderr); + eprintln!("{}", stderr); + anyhow::bail!("Failed to build contracts"); +} + +/// Post-process the compiled wasm contracts. +fn post_process_wasm(input_path: &Path, output_path: &Path) -> Result<()> { + let mut module = deserialize_file(input_path)?; + if let Some(section) = module.export_section_mut() { + section.entries_mut().retain(|entry| { + matches!(entry.internal(), Internal::Function(_)) && + (entry.field() == "call" || entry.field() == "deploy") + }); + } + + serialize_to_file(output_path, module).map_err(Into::into) +} + +/// Write the compiled contracts to the given output directory. 
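+/// Alongside each wasm file an empty marker named after the entry's hash is written,
+/// which lets `collect_entries` skip unchanged fixtures on subsequent builds.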
+fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result<()> { + for entry in entries { + let wasm_output = entry.out_wasm_filename(); + post_process_wasm( + &build_dir.join("target/wasm32-unknown-unknown/release").join(&wasm_output), + &out_dir.join(&wasm_output), + )?; + fs::write(out_dir.join(&entry.hash), "")?; + } + + Ok(()) +} + +/// Returns the root path of the wasm workspace. +fn find_workspace_root(current_dir: &Path) -> Option { + let mut current_dir = current_dir.to_path_buf(); + + while current_dir.parent().is_some() { + if current_dir.join("Cargo.toml").exists() { + let cargo_toml_contents = + std::fs::read_to_string(current_dir.join("Cargo.toml")).ok()?; + if cargo_toml_contents.contains("[workspace]") { + return Some(current_dir); + } + } + + current_dir.pop(); + } + + None +} + +fn main() -> Result<()> { + let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); + let contracts_dir = fixtures_dir.join("contracts"); + let out_dir: PathBuf = env::var("OUT_DIR")?.into(); + let workspace_root = find_workspace_root(&fixtures_dir).expect("workspace root exists; qed"); + + let entries = collect_entries(&contracts_dir, &out_dir); + if entries.is_empty() { + return Ok(()); + } + + let tmp_dir = tempfile::tempdir()?; + let tmp_dir_path = tmp_dir.path(); + + create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?; + invoke_cargo_fmt( + &workspace_root.join(".rustfmt.toml"), + entries.iter().map(|entry| &entry.path as _), + &contracts_dir, + )?; + + invoke_build(tmp_dir_path)?; + write_output(tmp_dir_path, &out_dir, entries)?; + + Ok(()) +} diff --git a/substrate/frame/contracts/fixtures/contracts/call.rs b/substrate/frame/contracts/fixtures/contracts/call.rs new file mode 100644 index 0000000000000000000000000000000000000000..396b71d5e96951f7f1dfdea2a0d9190c6861bfe6 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/call.rs @@ -0,0 +1,48 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This calls another contract as passed as its account id. +#![no_std] +#![no_main] + +extern crate common; +use uapi::{CallFlags, HostFn, HostFnImpl as api}; + +#[no_mangle] +pub extern "C" fn deploy() {} + +#[no_mangle] +pub extern "C" fn call() { + let mut buffer = [0u8; 40]; + let callee_input = 0..4; + let callee_addr = 4..36; + let value = 36..40; + + // Read the input data. + api::input(&mut &mut buffer[..]); + + // Call the callee + api::call_v1( + CallFlags::empty(), + &buffer[callee_addr], + 0u64, // How much gas to devote for the execution. 0 = all. 
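+		// Value to transfer, the callee's input data, and no output buffer.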
+ &buffer[value], + &buffer[callee_input], + None, + ) + .unwrap(); +} diff --git a/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml b/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..377e8bc9dd58e74c24e672e63e601f8ec9ebea4c --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "pallet-contracts-fixtures-common" +publish = false +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +description = "Common utilities for pallet-contracts-fixtures." + +[lints] +workspace = true diff --git a/substrate/frame/contracts/fixtures/contracts/common/src/lib.rs b/substrate/frame/contracts/fixtures/contracts/common/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..29bdbfbb04200907156d29d98f80efbfbe0f31d5 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/common/src/lib.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#![no_std] +#![cfg(any(target_arch = "wasm32", target_arch = "riscv32"))] + +#[panic_handler] +fn panic(_info: &core::panic::PanicInfo) -> ! { + #[cfg(target_arch = "wasm32")] + core::arch::wasm32::unreachable(); + + #[cfg(target_arch = "riscv32")] + // Safety: The unimp instruction is guaranteed to trap + unsafe { + core::arch::asm!("unimp"); + core::hint::unreachable_unchecked(); + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/build.rs b/substrate/frame/contracts/fixtures/contracts/dummy.rs similarity index 76% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/build.rs rename to substrate/frame/contracts/fixtures/contracts/dummy.rs index 60f8a125129ff1344a1799246e931acdb1d139d5..98b9d494bbc67a277a6813bcb4ad06b5b5e5aee3 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/build.rs +++ b/substrate/frame/contracts/fixtures/contracts/dummy.rs @@ -1,3 +1,5 @@ +// This file is part of Substrate. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -12,15 +14,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
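+//! Shared support for the fixture contracts: a panic handler that simply traps.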
+#![no_std] +#![no_main] + +extern crate common; -#[cfg(feature = "std")] -fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() -} +#[no_mangle] +pub extern "C" fn deploy() {} -#[cfg(not(feature = "std"))] -fn main() {} +#[no_mangle] +pub extern "C" fn call() {} diff --git a/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat index ab6789066487084faca88cad8d6ca045cffe6750..e6d6ba8bb8140b221d0c727b892f5a1da30da43c 100644 --- a/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat +++ b/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat @@ -14,7 +14,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat b/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat index ef456b6d620a3a52110b73417977fe3139527a56..dac7736244da21e84d04249bf5fff1742764d01f 100644 --- a/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat +++ b/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat @@ -16,7 +16,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -44,8 +44,8 @@ ;; [0..4) - size of the call ;; [4..8) - action to perform ;; [8..42) - code hash of the callee - (set_local $action (i32.load (i32.const 4))) - (set_local $code_hash_ptr (i32.const 8)) + (local.set $action (i32.load (i32.const 4))) + (local.set $code_hash_ptr (i32.const 8)) ;; Assert input size == 36 (4 for action + 32 for code_hash). (call $assert @@ -56,25 +56,25 @@ ) ;; Call add_delegate_dependency when action == 1. - (if (i32.eq (get_local $action) (i32.const 1)) + (if (i32.eq (local.get $action) (i32.const 1)) (then - (call $add_delegate_dependency (get_local $code_hash_ptr)) + (call $add_delegate_dependency (local.get $code_hash_ptr)) ) (else) ) ;; Call remove_delegate_dependency when action == 2. - (if (i32.eq (get_local $action) (i32.const 2)) + (if (i32.eq (local.get $action) (i32.const 2)) (then (call $remove_delegate_dependency - (get_local $code_hash_ptr) + (local.get $code_hash_ptr) ) ) (else) ) ;; Call terminate when action == 3. - (if (i32.eq (get_local $action) (i32.const 3)) + (if (i32.eq (local.get $action) (i32.const 3)) (then (call $terminate (i32.const 100) ;; Pointer to beneficiary address diff --git a/substrate/frame/contracts/fixtures/data/balance.wat b/substrate/frame/contracts/fixtures/data/balance.wat index d86d5c4b1c60ced43282b03100bc370c60a6ebcf..d7970c92e414a9dd9f2b214534daa9962232eb87 100644 --- a/substrate/frame/contracts/fixtures/data/balance.wat +++ b/substrate/frame/contracts/fixtures/data/balance.wat @@ -12,7 +12,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/call.wat b/substrate/frame/contracts/fixtures/data/call.wat deleted file mode 100644 index 4558b2c6409b999801ce68a1afd68a60c91d53b8..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/fixtures/data/call.wat +++ /dev/null @@ -1,39 +0,0 @@ -;; This calls another contract as passed as its account id. 
-(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "deploy")) - - (func (export "call") - ;; Store length of input buffer. - (i32.store (i32.const 0) (i32.const 512)) - - ;; Copy input at address 4. - (call $seal_input (i32.const 4) (i32.const 0)) - - ;; Call passed contract. - (call $assert (i32.eqz - (call $seal_call - (i32.const 0) ;; No flags - (i32.const 8) ;; Pointer to "callee" address. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 512) ;; Pointer to the buffer with value to transfer - (i32.const 4) ;; Pointer to input data buffer address - (i32.const 4) ;; Length of input data buffer - (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case - ) - )) - ) -) diff --git a/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat b/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat index 3320922d9e2cb558706cbed4363577d6a5b7e1d0..5d76e19a74c70c61df225938dea6a5a42d283471 100644 --- a/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat +++ b/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat @@ -7,7 +7,7 @@ (func $assert (param i32) (block $ok - (br_if $ok (get_local 0)) + (br_if $ok (local.get 0)) (unreachable) ) ) diff --git a/substrate/frame/contracts/fixtures/data/caller_contract.wat b/substrate/frame/contracts/fixtures/data/caller_contract.wat index 929171b9a26f6bcbd2b9523ab13dfedf1ea2a1b8..43eb8ccfd54f50ca6fc7e26ed01dfe0bda2e8335 100644 --- a/substrate/frame/contracts/fixtures/data/caller_contract.wat +++ b/substrate/frame/contracts/fixtures/data/caller_contract.wat @@ -10,7 +10,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -37,10 +37,10 @@ ) ;; Read current balance into local variable. - (set_local $sp (i32.const 1024)) + (local.set $sp (i32.const 1024)) ;; Fail to deploy the contract since it returns a non-zero exit status. - (set_local $exit_code + (local.set $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. @@ -60,11 +60,11 @@ ;; Check non-zero exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted + (i32.eq (local.get $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted ) ;; Fail to deploy the contract due to insufficient ref_time weight. - (set_local $exit_code + (local.set $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i64.const 1) ;; Supply too little ref_time weight @@ -85,11 +85,11 @@ ;; Check for special trap exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + (i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Fail to deploy the contract due to insufficient ref_time weight. - (set_local $exit_code + (local.set $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. @@ -110,17 +110,17 @@ ;; Check for special trap exit status. 
(call $assert - (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + (i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Length of the output buffer (i32.store - (i32.sub (get_local $sp) (i32.const 4)) + (i32.sub (local.get $sp) (i32.const 4)) (i32.const 256) ) ;; Deploy the contract successfully. - (set_local $exit_code + (local.set $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. @@ -130,7 +130,7 @@ (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer (i32.const 16) ;; Pointer to the address output buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Pointer to the address buffer length + (i32.sub (local.get $sp) (i32.const 4)) ;; Pointer to the address buffer length (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case (i32.const 0) ;; salt_ptr @@ -141,28 +141,28 @@ ;; Check for success exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) ;; Check that address has the expected length (call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 32)) + (i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 4))) (i32.const 32)) ) ;; Zero out destination buffer of output (i32.store - (i32.sub (get_local $sp) (i32.const 4)) + (i32.sub (local.get $sp) (i32.const 4)) (i32.const 0) ) ;; Length of the output buffer (i32.store - (i32.sub (get_local $sp) (i32.const 8)) + (i32.sub (local.get $sp) (i32.const 8)) (i32.const 4) ) ;; Call the new contract and expect it to return failing exit code. - (set_local $exit_code + (local.set $exit_code (call $seal_call (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. @@ -172,29 +172,29 @@ (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 9) ;; Pointer to input data buffer address (i32.const 7) ;; Length of input data buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer - (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len + (i32.sub (local.get $sp) (i32.const 4)) ;; Ptr to output buffer + (i32.sub (local.get $sp) (i32.const 8)) ;; Ptr to output buffer len ) ) ;; Check non-zero exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted + (i32.eq (local.get $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted ) ;; Check that output buffer contains the expected return data. (call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 8))) (i32.const 3)) + (i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 8))) (i32.const 3)) ) (call $assert (i32.eq - (i32.load (i32.sub (get_local $sp) (i32.const 4))) + (i32.load (i32.sub (local.get $sp) (i32.const 4))) (i32.const 0x00776655) ) ) ;; Fail to call the contract due to insufficient ref_time weight. - (set_local $exit_code + (local.set $exit_code (call $seal_call (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. @@ -211,11 +211,11 @@ ;; Check for special trap exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + (i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Fail to call the contract due to insufficient proof_size weight. 
- (set_local $exit_code + (local.set $exit_code (call $seal_call (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. @@ -232,23 +232,23 @@ ;; Check for special trap exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + (i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Zero out destination buffer of output (i32.store - (i32.sub (get_local $sp) (i32.const 4)) + (i32.sub (local.get $sp) (i32.const 4)) (i32.const 0) ) ;; Length of the output buffer (i32.store - (i32.sub (get_local $sp) (i32.const 8)) + (i32.sub (local.get $sp) (i32.const 8)) (i32.const 4) ) ;; Call the contract successfully. - (set_local $exit_code + (local.set $exit_code (call $seal_call (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. @@ -258,23 +258,23 @@ (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer - (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len + (i32.sub (local.get $sp) (i32.const 4)) ;; Ptr to output buffer + (i32.sub (local.get $sp) (i32.const 8)) ;; Ptr to output buffer len ) ) ;; Check for success exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) ;; Check that the output buffer contains the expected return data. (call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 8))) (i32.const 4)) + (i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 8))) (i32.const 4)) ) (call $assert (i32.eq - (i32.load (i32.sub (get_local $sp) (i32.const 4))) + (i32.load (i32.sub (local.get $sp) (i32.const 4))) (i32.const 0x77665544) ) ) diff --git a/substrate/frame/contracts/fixtures/data/chain_extension.wat b/substrate/frame/contracts/fixtures/data/chain_extension.wat index 670f8e70172e75063b307e4ec679bf68c05be394..c24ca286ff8c0ab9cf9ea2a411b95188b297df67 100644 --- a/substrate/frame/contracts/fixtures/data/chain_extension.wat +++ b/substrate/frame/contracts/fixtures/data/chain_extension.wat @@ -9,7 +9,7 @@ (func $assert (param i32) (block $ok - (br_if $ok (get_local 0)) + (br_if $ok (local.get 0)) (unreachable) ) ) diff --git a/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat b/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat index b481abb5bc7c9617d716ced0a054a94c7b591b86..504646df1b0eb1eb05e580d5f6825e2dd0c0fb4d 100644 --- a/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat +++ b/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat @@ -11,7 +11,7 @@ (func $assert (param i32) (block $ok - (br_if $ok (get_local 0)) + (br_if $ok (local.get 0)) (unreachable) ) ) diff --git a/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat index 5592e7e96a9804589046a6ecc6e1c3d3d058c0b9..2bff53b638fd0f500a29268dd40afe5256d1641e 100644 --- a/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat +++ b/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat @@ -8,7 +8,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat 
b/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat index cd7202478437b8e8052c620c26f973b93f70006c..00c9a657f39f9017f334b4c0122dfe42e0147c51 100644 --- a/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat +++ b/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat @@ -14,7 +14,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/crypto_hashes.wat b/substrate/frame/contracts/fixtures/data/crypto_hashes.wat index c2b4d6b81edbf95cd6a08d1fd7a92f0f2d9173f5..9d86b02f419218609017ef69662c9925dc95afda 100644 --- a/substrate/frame/contracts/fixtures/data/crypto_hashes.wat +++ b/substrate/frame/contracts/fixtures/data/crypto_hashes.wat @@ -59,8 +59,10 @@ (call $seal_input (local.get $input_ptr) (local.get $input_len_ptr)) (local.set $chosen_hash_fn (i32.load8_u (local.get $input_ptr))) (if (i32.gt_u (local.get $chosen_hash_fn) (i32.const 7)) - ;; We check that the chosen hash fn identifier is within bounds: [0,7] - (unreachable) + (then + ;; We check that the chosen hash fn identifier is within bounds: [0,7] + (unreachable) + ) ) (local.set $input_ptr (i32.add (local.get $input_ptr) (i32.const 1))) (local.set $input_len (i32.sub (i32.load (local.get $input_len_ptr)) (i32.const 1))) diff --git a/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat b/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat index e8c447b42fca522c32bd471ea02c29911967d166..dae0de8841891ea3ff3dc8f297e8f162b6dcefdd 100644 --- a/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat +++ b/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat @@ -8,7 +8,7 @@ (func $assert_eq (param i32 i32) (block $ok (br_if $ok - (i32.eq (get_local 0) (get_local 1)) + (i32.eq (local.get 0) (local.get 1)) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat b/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat index fc6ee72df8b08c3a25c649ff16d8216f84dba8de..e9ce20ba42b222e3ea54a65a8d49fe7b6bf4848e 100644 --- a/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat +++ b/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat @@ -8,7 +8,7 @@ (func $assert_eq (param i32 i32) (block $ok (br_if $ok - (i32.eq (get_local 0) (get_local 1)) + (i32.eq (local.get 0) (local.get 1)) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/debug_message_works.wat b/substrate/frame/contracts/fixtures/data/debug_message_works.wat index 61933c23296116114f668f2aab747bb46edf5c9b..44a7b6db1befe2d0a64b79453e28a0e6161280fb 100644 --- a/substrate/frame/contracts/fixtures/data/debug_message_works.wat +++ b/substrate/frame/contracts/fixtures/data/debug_message_works.wat @@ -8,7 +8,7 @@ (func $assert_eq (param i32 i32) (block $ok (br_if $ok - (i32.eq (get_local 0) (get_local 1)) + (i32.eq (local.get 0) (local.get 1)) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/delegate_call.wat b/substrate/frame/contracts/fixtures/data/delegate_call.wat index 7fe422af4551131d29164a2bacb69c05ee009fdf..b8d4f0d47f0ffae8737a77e2eb7e89cd50f419dc 100644 --- a/substrate/frame/contracts/fixtures/data/delegate_call.wat +++ b/substrate/frame/contracts/fixtures/data/delegate_call.wat @@ -24,7 +24,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -70,7 
+70,7 @@ ) ;; Call deployed library contract code. - (set_local $exit_code + (local.set $exit_code (call $seal_delegate_call (i32.const 0) ;; Set no call flags (i32.const 64) ;; Pointer to "callee" code_hash. @@ -83,7 +83,7 @@ ;; Check for success exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) (call $assert diff --git a/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat b/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat index 340b9699f87551d9fcbfd968b3fb25f9337ab544..62eea32800a4618d786a88382bf88f3673681222 100644 --- a/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat +++ b/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat @@ -20,7 +20,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat b/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat index 24ae5a13e33e5c49fb2e638e189e799137cb24f9..ba0a8fcc8ae3b53d5850a86962230b4cbfb071c7 100644 --- a/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat +++ b/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat @@ -12,7 +12,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat b/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat index 255547955527262162c955d1430026ea9d1ed341..2afd3b2fbacf5b6d555be55af3678eb7f607aff9 100644 --- a/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat +++ b/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat @@ -33,7 +33,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/drain.wat b/substrate/frame/contracts/fixtures/data/drain.wat index cb8ff0aed61fe76ea6dced1ec1fe1bd68fb708b5..18a21cca803d812fdb2c989d241f702984e39aa7 100644 --- a/substrate/frame/contracts/fixtures/data/drain.wat +++ b/substrate/frame/contracts/fixtures/data/drain.wat @@ -19,7 +19,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/dummy.wat b/substrate/frame/contracts/fixtures/data/dummy.wat deleted file mode 100644 index a6435e49df222fcad04e38b16e63cca7c9282796..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/fixtures/data/dummy.wat +++ /dev/null @@ -1,6 +0,0 @@ -;; A valid contract which does nothing at all -(module - (import "env" "memory" (memory 1 1)) - (func (export "deploy")) - (func (export "call")) -) diff --git a/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat b/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat index d694b3215e86b3025c99daa3f6704b62f90647fe..4910e706069e4d6a6a556110f876e63e3dce3a19 100644 --- a/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat +++ b/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat @@ -12,7 +12,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/event_size.wat b/substrate/frame/contracts/fixtures/data/event_size.wat index 4bd6158d72fb95471f202b01a92855a0312a76f7..1c1f34b24d728da55c3d5a1fad752a9c54f5a9e9 100644 --- 
a/substrate/frame/contracts/fixtures/data/event_size.wat +++ b/substrate/frame/contracts/fixtures/data/event_size.wat @@ -9,7 +9,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/multi_store.wat b/substrate/frame/contracts/fixtures/data/multi_store.wat index 2592baf618355ea75d277ee72ff8ce4874af7642..c334ed54c4eb78f45bed0257d92bd30d20737041 100644 --- a/substrate/frame/contracts/fixtures/data/multi_store.wat +++ b/substrate/frame/contracts/fixtures/data/multi_store.wat @@ -19,7 +19,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat index c6b529e2aff8b147ad34dc12a4466e8a0e652eed..44db8d041b1dad82ee5a98da4226f04111a6428e 100644 --- a/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat +++ b/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat @@ -20,7 +20,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -36,11 +36,11 @@ (call $seal_input (i32.const 32) (i32.const 36)) ;; reading manually passed reentrant count - (set_local $expected_reentrance_count (i32.load (i32.const 32))) + (local.set $expected_reentrance_count (i32.load (i32.const 32))) ;; reentrance count is calculated correctly (call $assert - (i32.eq (call $reentrance_count) (get_local $expected_reentrance_count)) + (i32.eq (call $reentrance_count) (local.get $expected_reentrance_count)) ) ;; re-enter 5 times in a row and assert that the reentrant counter works as expected @@ -52,7 +52,7 @@ (i32.store (i32.const 32) (i32.add (i32.load (i32.const 32)) (i32.const 1))) ;; Call to itself - (set_local $seal_call_exit_code + (local.set $seal_call_exit_code (call $seal_call (i32.const 8) ;; Allow reentrancy flag set (i32.const 0) ;; Pointer to "callee" address @@ -66,7 +66,7 @@ ) (call $assert - (i32.eq (get_local $seal_call_exit_code) (i32.const 0)) + (i32.eq (local.get $seal_call_exit_code) (i32.const 0)) ) ) ) diff --git a/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat index b8219a8462ee2ef23001f336e5160d1dda35fbc7..49e0193bcdb1016a2d75a26c9bcce85273e01f9a 100644 --- a/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat +++ b/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat @@ -17,7 +17,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -30,7 +30,7 @@ (call $seal_input (i32.const 0) (i32.const 36)) ;; reading passed callstack height - (set_local $callstack_height (i32.load (i32.const 32))) + (local.set $callstack_height (i32.load (i32.const 32))) ;; incrementing callstack height (i32.store (i32.const 32) (i32.add (i32.load (i32.const 32)) (i32.const 1))) @@ -40,12 +40,12 @@ (i32.eq (call $reentrance_count) (i32.const 0)) ) - (i32.eq (get_local $callstack_height) (i32.const 5)) + (i32.eq (local.get $callstack_height) (i32.const 5)) (if (then) ;; exit recursion case (else ;; Call to itself - (set_local $delegate_call_exit_code + (local.set $delegate_call_exit_code (call $seal_delegate_call (i32.const 0) ;; Set no call flags (i32.const 0) ;; Pointer to "callee" code_hash. 
@@ -57,13 +57,13 @@ ) (call $assert - (i32.eq (get_local $delegate_call_exit_code) (i32.const 0)) + (i32.eq (local.get $delegate_call_exit_code) (i32.const 0)) ) ) ) (call $assert - (i32.le_s (get_local $callstack_height) (i32.const 5)) + (i32.le_s (local.get $callstack_height) (i32.const 5)) ) ) diff --git a/substrate/frame/contracts/fixtures/data/self_destruct.wat b/substrate/frame/contracts/fixtures/data/self_destruct.wat index b8a37306e20110bf43087a511cdf241683da6f43..00c3895fddedd84f4cdfc5082b555acf1fba0742 100644 --- a/substrate/frame/contracts/fixtures/data/self_destruct.wat +++ b/substrate/frame/contracts/fixtures/data/self_destruct.wat @@ -26,7 +26,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat b/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat index 85fce511e21b96fcb3b7c15fce5ad765c6e405d7..628f283a19fd3d06dd5f73b3d37a95e2a0858666 100644 --- a/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat +++ b/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat @@ -5,7 +5,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/set_code_hash.wat b/substrate/frame/contracts/fixtures/data/set_code_hash.wat index b4df1b133186b67100fc6cee9d8804590cc8e66b..c0a9557b4d00194f2aaa9f9d8020c2f6ea6cd5ca 100644 --- a/substrate/frame/contracts/fixtures/data/set_code_hash.wat +++ b/substrate/frame/contracts/fixtures/data/set_code_hash.wat @@ -16,7 +16,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -27,11 +27,11 @@ (call $seal_input (i32.const 0) (i32.const 32)) - (set_local $exit_code + (local.set $exit_code (call $seal_set_code_hash (i32.const 0)) ;; Pointer to the input data. 
) (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) ;; we return 1 after setting new code_hash diff --git a/substrate/frame/contracts/fixtures/data/storage_size.wat b/substrate/frame/contracts/fixtures/data/storage_size.wat index 293a656d4f6ea4e029e7fc7cbd98241da263f220..728bb4fcf3c095dbc9b50d0f622cf042f1da34a8 100644 --- a/substrate/frame/contracts/fixtures/data/storage_size.wat +++ b/substrate/frame/contracts/fixtures/data/storage_size.wat @@ -20,7 +20,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/store_call.wat b/substrate/frame/contracts/fixtures/data/store_call.wat index 9e090d31801f8d00572b3330c606c875054217c8..746b7a48b551bd379143bdbed9dd716b4dde8bb7 100644 --- a/substrate/frame/contracts/fixtures/data/store_call.wat +++ b/substrate/frame/contracts/fixtures/data/store_call.wat @@ -15,7 +15,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/store_deploy.wat b/substrate/frame/contracts/fixtures/data/store_deploy.wat index cc428e9623bfb49c3b3b52ed349b0a6f856096bb..7f115cba977cc942f0643087f4be13d43f28ef43 100644 --- a/substrate/frame/contracts/fixtures/data/store_deploy.wat +++ b/substrate/frame/contracts/fixtures/data/store_deploy.wat @@ -15,7 +15,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/xcm_execute.wat b/substrate/frame/contracts/fixtures/data/xcm_execute.wat new file mode 100644 index 0000000000000000000000000000000000000000..72ef14ed82c74b5ab1a21599f86aa054b58b7fbf --- /dev/null +++ b/substrate/frame/contracts/fixtures/data/xcm_execute.wat @@ -0,0 +1,52 @@ +;; This passes its input to `seal_xcm_execute` and returns the return value to its caller. +(module + (import "seal0" "xcm_execute" (func $xcm_execute (param i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; Size of input buffer + (data (i32.const 0) "\00\10") + + (func $assert (param i32) + (block $ok + (br_if $ok + (local.get 0) + ) + (unreachable) + ) + ) + + (func (export "call") + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Pointer to the buffer length (before call) and to the copied data length (after call) + ) + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..) - message + + ;; Call xcm_execute with provided input. 
+ (call $assert + (i32.eq + (call $xcm_execute + (i32.const 4) ;; Pointer where the message is stored + (i32.load (i32.const 0)) ;; Size of the message + (i32.const 100) ;; Pointer to the where the outcome is stored + ) + (i32.const 0) + ) + ) + + (call $seal_return + (i32.const 0) ;; flags + (i32.const 100) ;; Pointer to returned value + (i32.const 10) ;; length of returned value + ) + ) + + (func (export "deploy")) +) + diff --git a/substrate/frame/contracts/fixtures/data/xcm_send.wat b/substrate/frame/contracts/fixtures/data/xcm_send.wat new file mode 100644 index 0000000000000000000000000000000000000000..fe29ddf0f141aab64f3309a0c7696f2a46a20d9e --- /dev/null +++ b/substrate/frame/contracts/fixtures/data/xcm_send.wat @@ -0,0 +1,59 @@ +;; This passes its input to `seal_xcm_send` and returns the return value to its caller. +(module + (import "seal0" "xcm_send" (func $xcm_send (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; size of input buffer + (data (i32.const 0) "\00\10") + + (func $assert (param i32) + (block $ok + (br_if $ok + (local.get 0) + ) + (unreachable) + ) + ) + + (func (export "call") + + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..7) - dest + ;; [7..) - message + + ;; Call xcm_send with provided input. + (call $assert + (i32.eq + (call $xcm_send + (i32.const 4) ;; Pointer where the dest is stored + (i32.const 7) ;; Pointer where the message is stored + (i32.sub + (i32.load (i32.const 0)) ;; length of the input buffer + (i32.const 3) ;; Size of the XCM dest + ) + (i32.const 100) ;; Pointer to the where the message_id is stored + ) + (i32.const 0) + ) + ) + + ;; Return the the message_id + (call $seal_return + (i32.const 0) ;; flags + (i32.const 100) ;; Pointer to returned value + (i32.const 32) ;; length of returned value + ) + ) + + (func (export "deploy")) +) diff --git a/substrate/frame/contracts/fixtures/src/lib.rs b/substrate/frame/contracts/fixtures/src/lib.rs index 32f4023e64461ee6fbd8e0911847b8426642fb5f..fbc2647709dce398abfd8395d7579ebfce1e4671 100644 --- a/substrate/frame/contracts/fixtures/src/lib.rs +++ b/substrate/frame/contracts/fixtures/src/lib.rs @@ -16,13 +16,15 @@ // limitations under the License. use sp_runtime::traits::Hash; -use std::{env::var, path::PathBuf}; +use std::{env::var, fs, path::PathBuf}; -fn fixtures_root_dir() -> PathBuf { +fn wat_root_dir() -> PathBuf { match (var("CARGO_MANIFEST_DIR"), var("CARGO_PKG_NAME")) { // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder (Err(_), _) => "substrate/frame/contracts/fixtures/data".into(), (Ok(path), Ok(s)) if s == "pallet-contracts" => PathBuf::from(path).join("fixtures/data"), + (Ok(path), Ok(s)) if s == "pallet-contracts-mock-network" => + PathBuf::from(path).parent().unwrap().join("fixtures/data"), (Ok(_), pkg_name) => panic!("Failed to resolve fixture dir for tests from {pkg_name:?}."), } } @@ -31,12 +33,44 @@ fn fixtures_root_dir() -> PathBuf { /// with it's hash. /// /// The fixture files are located under the `fixtures/` directory. 
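The hunk that follows replaces the public `compile_module` with a version that first looks for a precompiled `<fixture_name>.wasm` under the crate's `OUT_DIR` and only falls back to parsing the legacy `.wat` file when no Rust fixture exists yet. A minimal usage sketch under that assumption (the caller, its generic bound, and the `anyhow` return type are illustrative, not taken from this diff):

```rust
// Hypothetical caller of the fallback-aware fixture loader.
use pallet_contracts_fixtures::compile_module;

fn load_fixture<T: frame_system::Config>() -> anyhow::Result<()> {
    // Prefers `$OUT_DIR/dummy.wasm`; falls back to `data/dummy.wat` if absent.
    let (wasm, _code_hash) = compile_module::<T>("dummy")?;
    assert!(!wasm.is_empty());
    Ok(())
}
```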
-pub fn compile_module(fixture_name: &str) -> wat::Result<(Vec, ::Output)> +fn legacy_compile_module( + fixture_name: &str, +) -> anyhow::Result<(Vec, ::Output)> where T: frame_system::Config, { - let fixture_path = fixtures_root_dir().join(format!("{fixture_name}.wat")); + let fixture_path = wat_root_dir().join(format!("{fixture_name}.wat")); let wasm_binary = wat::parse_file(fixture_path)?; let code_hash = T::Hashing::hash(&wasm_binary); Ok((wasm_binary, code_hash)) } + +/// Load a given wasm module and returns a wasm binary contents along with it's hash. +/// Use the legacy compile_module as fallback, if the rust fixture does not exist yet. +pub fn compile_module( + fixture_name: &str, +) -> anyhow::Result<(Vec, ::Output)> +where + T: frame_system::Config, +{ + let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let fixture_path = out_dir.join(format!("{fixture_name}.wasm")); + match fs::read(fixture_path) { + Ok(wasm_binary) => { + let code_hash = T::Hashing::hash(&wasm_binary); + Ok((wasm_binary, code_hash)) + }, + Err(_) => legacy_compile_module::(fixture_name), + } +} + +#[cfg(test)] +mod test { + #[test] + fn out_dir_should_have_compiled_mocks() { + let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let dummy_wasm = out_dir.join("dummy.wasm"); + println!("dummy_wasm: {:?}", dummy_wasm); + assert!(dummy_wasm.exists()); + } +} diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..7b570eed155c1b01b911ce82ca73291b9eab7dff --- /dev/null +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -0,0 +1,93 @@ +[package] +name = "pallet-contracts-mock-network" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +homepage = "https://substrate.io" +repository.workspace = true +description = "A mock network for testing pallet-contracts" + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } + +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-assets = { path = "../../assets" } +pallet-balances = { path = "../../balances" } +pallet-contracts = { path = ".." 
} +pallet-contracts-uapi = { path = "../uapi", default-features = false } +pallet-contracts-proc-macro = { path = "../proc-macro" } +pallet-insecure-randomness-collective-flip = { path = "../../insecure-randomness-collective-flip" } +pallet-message-queue = { path = "../../message-queue" } +pallet-proxy = { path = "../../proxy" } +pallet-timestamp = { path = "../../timestamp" } +pallet-utility = { path = "../../utility" } +pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } +polkadot-primitives = { path = "../../../../polkadot/primitives" } +polkadot-runtime-parachains = { path = "../../../../polkadot/runtime/parachains" } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +sp-api = { path = "../../../primitives/api", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-keystore = { path = "../../../primitives/keystore" } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-tracing = { path = "../../../primitives/tracing" } +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder" } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-simulator = { path = "../../../../polkadot/xcm/xcm-simulator" } + +[dev-dependencies] +assert_matches = "1" +pretty_assertions = "1" +pallet-contracts-fixtures = { path = "../fixtures" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "pallet-contracts-proc-macro/full", + "pallet-contracts/std", + "pallet-insecure-randomness-collective-flip/std", + "pallet-proxy/std", + "pallet-timestamp/std", + "pallet-utility/std", + "pallet-xcm/std", + "scale-info/std", + "sp-api/std", + "sp-core/std", + "sp-io/std", + "sp-keystore/std", + "sp-runtime/std", + "sp-std/std", + "xcm-executor/std", + "xcm/std", +] + +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-contracts/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..345c69541b6f6f4c5b5c65e13c40a9af2d0ebcc5 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/lib.rs @@ -0,0 +1,151 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub mod mocks; +pub mod parachain; +pub mod primitives; +pub mod relay_chain; + +#[cfg(test)] +mod tests; + +use crate::primitives::{AccountId, UNITS}; +use sp_runtime::BuildStorage; +use xcm::latest::{prelude::*, MultiLocation}; +use xcm_executor::traits::ConvertLocation; +use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt}; + +// Accounts +pub const ADMIN: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]); +pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([1u8; 32]); +pub const BOB: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([2u8; 32]); + +// Balances +pub const INITIAL_BALANCE: u128 = 1_000_000_000 * UNITS; + +decl_test_parachain! { + pub struct ParaA { + Runtime = parachain::Runtime, + XcmpMessageHandler = parachain::MsgQueue, + DmpMessageHandler = parachain::MsgQueue, + new_ext = para_ext(1), + } +} + +decl_test_relay_chain! { + pub struct Relay { + Runtime = relay_chain::Runtime, + RuntimeCall = relay_chain::RuntimeCall, + RuntimeEvent = relay_chain::RuntimeEvent, + XcmConfig = relay_chain::XcmConfig, + MessageQueue = relay_chain::MessageQueue, + System = relay_chain::System, + new_ext = relay_ext(), + } +} + +decl_test_network! 
{ + pub struct MockNet { + relay_chain = Relay, + parachains = vec![ + (1, ParaA), + ], + } +} + +pub fn relay_sovereign_account_id() -> AccountId { + let location: MultiLocation = (Parent,).into(); + parachain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn parachain_sovereign_account_id(para: u32) -> AccountId { + let location: MultiLocation = (Parachain(para),).into(); + relay_chain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn parachain_account_sovereign_account_id( + para: u32, + who: sp_runtime::AccountId32, +) -> AccountId { + let location: MultiLocation = ( + Parachain(para), + AccountId32 { network: Some(relay_chain::RelayNetwork::get()), id: who.into() }, + ) + .into(); + relay_chain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { + use parachain::{MsgQueue, Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (ALICE, INITIAL_BALANCE), + (relay_sovereign_account_id(), INITIAL_BALANCE), + (BOB, INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_assets::GenesisConfig:: { + assets: vec![ + (0u128, ADMIN, false, 1u128), // Create derivative asset for relay's native token + ], + metadata: Default::default(), + accounts: vec![ + (0u128, ALICE, INITIAL_BALANCE), + (0u128, relay_sovereign_account_id(), INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + sp_tracing::try_init_simple(); + System::set_block_number(1); + MsgQueue::set_para_id(para_id.into()); + }); + ext +} + +pub fn relay_ext() -> sp_io::TestExternalities { + use relay_chain::{Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (ALICE, INITIAL_BALANCE), + (parachain_sovereign_account_id(1), INITIAL_BALANCE), + (parachain_account_sovereign_account_id(1, ALICE), INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + System::set_block_number(1); + }); + ext +} + +pub type ParachainPalletXcm = pallet_xcm::Pallet; +pub type ParachainBalances = pallet_balances::Pallet; diff --git a/substrate/frame/contracts/mock-network/src/mocks.rs b/substrate/frame/contracts/mock-network/src/mocks.rs new file mode 100644 index 0000000000000000000000000000000000000000..bf3baec7a524abc0a9c5eee8e637b4b9a9cf3e04 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks.rs @@ -0,0 +1,18 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
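With `ParaA`, `Relay`, and `MockNet` declared above, the `tests` module referenced in this file can drive both chains through the `TestExt` trait imported from `xcm-simulator`. A minimal sketch of that usual pattern, using only items defined above (the test body is illustrative and not taken from the actual tests module):

```rust
// Illustrative test skeleton for the mock network declared above.
use crate::{MockNet, ParaA, ParachainBalances, ALICE, INITIAL_BALANCE};
use xcm_simulator::TestExt;

#[test]
fn alice_is_endowed_on_the_parachain() {
    // Rebuild the genesis state produced by `para_ext` / `relay_ext`.
    MockNet::reset();

    ParaA::execute_with(|| {
        assert_eq!(ParachainBalances::free_balance(&ALICE), INITIAL_BALANCE);
    });
}
```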
+ +pub mod msg_queue; +pub mod relay_message_queue; diff --git a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..82fb8590e269083547fd124a9058816107dde811 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs @@ -0,0 +1,168 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +use codec::{Decode, Encode}; + +use frame_support::weights::Weight; +use polkadot_parachain_primitives::primitives::{ + DmpMessageHandler, Id as ParaId, XcmpMessageFormat, XcmpMessageHandler, +}; +use polkadot_primitives::BlockNumber as RelayBlockNumber; +use sp_runtime::traits::{Get, Hash}; + +use sp_std::prelude::*; +use xcm::{latest::prelude::*, VersionedXcm}; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type XcmExecutor: ExecuteXcm; + } + + #[pallet::call] + impl Pallet {} + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::storage] + #[pallet::getter(fn parachain_id)] + pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn received_dmp)] + /// A queue of received DMP messages + pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + + impl Get for Pallet { + fn get() -> ParaId { + Self::parachain_id() + } + } + + pub type MessageId = [u8; 32]; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Some XCM was executed OK. + Success(Option), + /// Some XCM failed. + Fail(Option, XcmError), + /// Bad XCM version used. + BadVersion(Option), + /// Bad XCM format used. + BadFormat(Option), + + // DMP + /// Downward message is invalid XCM. + InvalidFormat(MessageId), + /// Downward message is unsupported version of XCM. + UnsupportedVersion(MessageId), + /// Downward message executed with the given outcome. 
+ ExecutedDownward(MessageId, Outcome), + } + + impl Pallet { + pub fn set_para_id(para_id: ParaId) { + ParachainId::::put(para_id); + } + + fn handle_xcmp_message( + sender: ParaId, + _sent_at: RelayBlockNumber, + xcm: VersionedXcm, + max_weight: Weight, + ) -> Result { + let hash = Encode::using_encoded(&xcm, T::Hashing::hash); + let message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); + let (result, event) = match Xcm::::try_from(xcm) { + Ok(xcm) => { + let location = (Parent, Parachain(sender.into())); + match T::XcmExecutor::execute_xcm(location, xcm, message_hash, max_weight) { + Outcome::Error(e) => (Err(e), Event::Fail(Some(hash), e)), + Outcome::Complete(w) => (Ok(w), Event::Success(Some(hash))), + // As far as the caller is concerned, this was dispatched without error, so + // we just report the weight used. + Outcome::Incomplete(w, e) => (Ok(w), Event::Fail(Some(hash), e)), + } + }, + Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + }; + Self::deposit_event(event); + result + } + } + + impl XcmpMessageHandler for Pallet { + fn handle_xcmp_messages<'a, I: Iterator>( + iter: I, + max_weight: Weight, + ) -> Weight { + for (sender, sent_at, data) in iter { + let mut data_ref = data; + let _ = XcmpMessageFormat::decode(&mut data_ref) + .expect("Simulator encodes with versioned xcm format; qed"); + + let mut remaining_fragments = data_ref; + while !remaining_fragments.is_empty() { + if let Ok(xcm) = + VersionedXcm::::decode(&mut remaining_fragments) + { + let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); + } else { + debug_assert!(false, "Invalid incoming XCMP message data"); + } + } + } + max_weight + } + } + + impl DmpMessageHandler for Pallet { + fn handle_dmp_messages( + iter: impl Iterator)>, + limit: Weight, + ) -> Weight { + for (_i, (_sent_at, data)) in iter.enumerate() { + let id = sp_io::hashing::blake2_256(&data[..]); + let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); + match maybe_versioned { + Err(_) => { + Self::deposit_event(Event::InvalidFormat(id)); + }, + Ok(versioned) => match Xcm::try_from(versioned) { + Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), + Ok(x) => { + let outcome = T::XcmExecutor::execute_xcm(Parent, x.clone(), id, limit); + >::append(x); + Self::deposit_event(Event::ExecutedDownward(id, outcome)); + }, + }, + } + } + limit + } + } +} diff --git a/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..14099965e3f173d7f06b86c08b722ceeded26418 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs @@ -0,0 +1,52 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use frame_support::{parameter_types, weights::Weight}; +use xcm::latest::prelude::*; +use xcm_simulator::{ + AggregateMessageOrigin, ProcessMessage, ProcessMessageError, UmpQueueId, WeightMeter, +}; + +use crate::relay_chain::{RuntimeCall, XcmConfig}; + +parameter_types! { + /// Amount of weight that can be spent per block to service messages. + pub MessageQueueServiceWeight: Weight = Weight::from_parts(1_000_000_000, 1_000_000); + pub const MessageQueueHeapSize: u32 = 65_536; + pub const MessageQueueMaxStale: u32 = 16; +} + +/// Message processor to handle any messages that were enqueued into the `MessageQueue` pallet. +pub struct MessageProcessor; +impl ProcessMessage for MessageProcessor { + type Origin = AggregateMessageOrigin; + + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + id: &mut [u8; 32], + ) -> Result { + let para = match origin { + AggregateMessageOrigin::Ump(UmpQueueId::Para(para)) => para, + }; + xcm_builder::ProcessXcmMessage::< + Junction, + xcm_executor::XcmExecutor, + RuntimeCall, + >::process_message(message, Junction::Parachain(para.into()), meter, id) + } +} diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs new file mode 100644 index 0000000000000000000000000000000000000000..a79b7e4e2d6db7bce5d61e2a6b9d765bec16b2a7 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -0,0 +1,356 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +mod contracts_config; +use crate::{ + mocks::msg_queue::pallet as mock_msg_queue, + primitives::{AccountId, AssetIdForAssets, Balance}, +}; +use core::marker::PhantomData; +use frame_support::{ + construct_runtime, derive_impl, parameter_types, + traits::{AsEnsureOriginWithArg, Contains, ContainsPair, Everything, EverythingBut, Nothing}, + weights::{ + constants::{WEIGHT_PROOF_SIZE_PER_MB, WEIGHT_REF_TIME_PER_SECOND}, + Weight, + }, +}; +use frame_system::{EnsureRoot, EnsureSigned}; +use pallet_xcm::XcmPassthrough; +use sp_core::{ConstU32, ConstU64, H256}; +use sp_runtime::traits::{Get, IdentityLookup, MaybeEquivalence}; + +use sp_std::prelude::*; +use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter as XcmCurrencyAdapter; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, + ConvertedConcreteId, EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, + IsConcrete, NativeAsset, NoChecking, ParentAsSuperuser, ParentIsPreset, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, WithComputedOrigin, +}; +use xcm_executor::{traits::JustTry, Config, XcmExecutor}; + +pub type SovereignAccountOf = + (AccountId32Aliases, ParentIsPreset); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Block = Block; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type AccountStore = System; + type Balance = Balance; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<0>; + type MaxHolds = ConstU32<1>; + type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type WeightInfo = (); +} + +parameter_types! { + pub const AssetDeposit: u128 = 1_000_000; + pub const MetadataDepositBase: u128 = 1_000_000; + pub const MetadataDepositPerByte: u128 = 100_000; + pub const AssetAccountDeposit: u128 = 1_000_000; + pub const ApprovalDeposit: u128 = 1_000_000; + pub const AssetsStringLimit: u32 = 50; + pub const RemoveItemsLimit: u32 = 50; +} + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetIdForAssets; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = AssetDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type AssetAccountDeposit = AssetAccountDeposit; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = AssetsStringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = (); + type RemoveItemsLimit = RemoveItemsLimit; + type AssetIdParameter = AssetIdForAssets; + type CallbackHandle = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); + pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); +} + +parameter_types! { + pub const KsmLocation: MultiLocation = MultiLocation::parent(); + pub const TokenLocation: MultiLocation = Here.into_location(); + pub const RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorMultiLocation = Parachain(MsgQueue::parachain_id().into()).into(); +} + +pub type XcmOriginToCallOrigin = ( + SovereignSignedViaLocation, + ParentAsSuperuser, + SignedAccountId32AsNative, + XcmPassthrough, +); + +parameter_types! 
{ + pub const XcmInstructionWeight: Weight = Weight::from_parts(1_000, 1_000); + pub TokensPerSecondPerMegabyte: (AssetId, u128, u128) = (Concrete(Parent.into()), 1_000_000_000_000, 1024 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; + pub ForeignPrefix: MultiLocation = (Parent,).into(); + pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub TrustedLockPairs: (MultiLocation, MultiAssetFilter) = + (Parent.into(), Wild(AllOf { id: Concrete(Parent.into()), fun: WildFungible })); +} + +pub fn estimate_message_fee(number_of_instructions: u64) -> u128 { + let weight = estimate_weight(number_of_instructions); + + estimate_fee_for_weight(weight) +} + +pub fn estimate_weight(number_of_instructions: u64) -> Weight { + XcmInstructionWeight::get().saturating_mul(number_of_instructions) +} + +pub fn estimate_fee_for_weight(weight: Weight) -> u128 { + let (_, units_per_second, units_per_mb) = TokensPerSecondPerMegabyte::get(); + + units_per_second * (weight.ref_time() as u128) / (WEIGHT_REF_TIME_PER_SECOND as u128) + + units_per_mb * (weight.proof_size() as u128) / (WEIGHT_PROOF_SIZE_PER_MB as u128) +} + +#[allow(deprecated)] +pub type LocalBalancesTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +pub struct FromMultiLocationToAsset(PhantomData<(MultiLocation, AssetId)>); +impl MaybeEquivalence + for FromMultiLocationToAsset +{ + fn convert(value: &MultiLocation) -> Option { + match *value { + MultiLocation { parents: 1, interior: Here } => Some(0 as AssetIdForAssets), + MultiLocation { parents: 1, interior: X1(Parachain(para_id)) } => + Some(para_id as AssetIdForAssets), + _ => None, + } + } + + fn convert_back(_id: &AssetIdForAssets) -> Option { + None + } +} + +pub type ForeignAssetsTransactor = FungiblesAdapter< + Assets, + ConvertedConcreteId< + AssetIdForAssets, + Balance, + FromMultiLocationToAsset, + JustTry, + >, + SovereignAccountOf, + AccountId, + NoChecking, + CheckingAccount, +>; + +/// Means for transacting assets on this chain +pub type AssetTransactors = (LocalBalancesTransactor, ForeignAssetsTransactor); + +pub struct ParentRelay; +impl Contains for ParentRelay { + fn contains(location: &MultiLocation) -> bool { + location.contains_parents_only(1) + } +} +pub struct ThisParachain; +impl Contains for ThisParachain { + fn contains(location: &MultiLocation) -> bool { + matches!( + location, + MultiLocation { parents: 0, interior: Junctions::X1(Junction::AccountId32 { .. }) } + ) + } +} + +pub type XcmRouter = crate::ParachainXcmRouter; + +pub type Barrier = ( + xcm_builder::AllowUnpaidExecutionFrom, + WithComputedOrigin< + (AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom), + UniversalLocation, + ConstU32<1>, + >, +); + +parameter_types! 
{ + pub NftCollectionOne: MultiAssetFilter + = Wild(AllOf { fun: WildNonFungible, id: Concrete((Parent, GeneralIndex(1)).into()) }); + pub NftCollectionOneForRelay: (MultiAssetFilter, MultiLocation) + = (NftCollectionOne::get(), Parent.into()); + pub RelayNativeAsset: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete((Parent, Here).into()) }); + pub RelayNativeAssetForRelay: (MultiAssetFilter, MultiLocation) = (RelayNativeAsset::get(), Parent.into()); +} +pub type TrustedTeleporters = + (xcm_builder::Case, xcm_builder::Case); +pub type TrustedReserves = EverythingBut>; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = XcmOriginToCallOrigin; + type IsReserve = (NativeAsset, TrustedReserves); + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetLocker = PolkadotXcm; + type AssetExchanger = (); + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; + type PalletInstancesInfo = AllPalletsWithSystem; + type FeeManager = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +impl mock_msg_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +pub struct TrustedLockerCase(PhantomData); +impl> ContainsPair + for TrustedLockerCase +{ + fn contains(origin: &MultiLocation, asset: &MultiAsset) -> bool { + let (o, a) = T::get(); + a.matches(asset) && &o == origin + } +} + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Nothing; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = TrustedLockerCase; + type SovereignAccountOf = SovereignAccountOf; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +type Block = frame_system::mocking::MockBlock; + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<1>; + type WeightInfo = (); +} + +construct_runtime!( + pub enum Runtime + { + System: frame_system, + Balances: pallet_balances, + Timestamp: pallet_timestamp, + MsgQueue: mock_msg_queue, + PolkadotXcm: pallet_xcm, + Contracts: pallet_contracts, + Assets: pallet_assets, + } +); diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs new file mode 100644 
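The parachain runtime above exposes `estimate_message_fee`, `estimate_weight`, and `estimate_fee_for_weight` so tests can pre-fund XCM execution. A worked check of the arithmetic, using only the constants defined above (`XcmInstructionWeight = (1_000, 1_000)` and `TokensPerSecondPerMegabyte = (_, 1_000_000_000_000, 1024 * 1024)`); the test wrapper is illustrative and assumes it lives inside this crate:

```rust
// Worked example for the fee helpers defined in the parachain runtime above.
#[test]
fn four_instruction_message_costs_eight_thousand() {
    // weight = XcmInstructionWeight * 4 = (4_000 ref_time, 4_000 proof_size)
    // ref_time part:   1_000_000_000_000 * 4_000 / WEIGHT_REF_TIME_PER_SECOND = 4_000
    // proof_size part: (1024 * 1024)     * 4_000 / WEIGHT_PROOF_SIZE_PER_MB   = 4_000
    assert_eq!(crate::parachain::estimate_message_fee(4), 8_000);
}
```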
index 0000000000000000000000000000000000000000..dadba394e26453366d0b90ce8ff18931ab00743f --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -0,0 +1,98 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::{Balances, Runtime, RuntimeCall, RuntimeEvent}; +use crate::{ + parachain, + parachain::RuntimeHoldReason, + primitives::{Balance, CENTS}, +}; +use frame_support::{ + parameter_types, + traits::{ConstBool, ConstU32, Contains, Randomness}, + weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use pallet_xcm::BalanceOf; +use sp_runtime::{traits::Convert, Perbill}; + +pub const fn deposit(items: u32, bytes: u32) -> Balance { + items as Balance * 1 * CENTS + (bytes as Balance) * 1 * CENTS +} + +parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); + pub Schedule: pallet_contracts::Schedule = Default::default(); + pub const CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); + pub const MaxDelegateDependencies: u32 = 32; +} + +pub struct DummyRandomness(sp_std::marker::PhantomData); + +impl Randomness> for DummyRandomness { + fn random(_subject: &[u8]) -> (T::Hash, BlockNumberFor) { + (Default::default(), Default::default()) + } +} + +impl Convert> for Runtime { + fn convert(w: Weight) -> BalanceOf { + w.ref_time().into() + } +} + +#[derive(Clone, Default)] +pub struct Filters; + +impl Contains for Filters { + fn contains(call: &RuntimeCall) -> bool { + match call { + parachain::RuntimeCall::Contracts(_) => true, + _ => false, + } + } +} + +impl pallet_contracts::Config for Runtime { + type AddressGenerator = pallet_contracts::DefaultAddressGenerator; + type CallFilter = Filters; + type CallStack = [pallet_contracts::Frame; 5]; + type ChainExtension = (); + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type Currency = Balances; + type DefaultDepositLimit = DefaultDepositLimit; + type DepositPerByte = DepositPerByte; + type DepositPerItem = DepositPerItem; + type MaxCodeLen = ConstU32<{ 123 * 1024 }>; + type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type MaxDelegateDependencies = MaxDelegateDependencies; + type MaxStorageKeyLen = ConstU32<128>; + type Migrations = (); + type Randomness = DummyRandomness; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; + type Schedule = Schedule; + type Time = super::Timestamp; + type UnsafeUnstableInterface = ConstBool; + type WeightInfo = (); + type WeightPrice = Self; + type Debug = (); + type Environment = (); + type Xcm = pallet_xcm::Pallet; +} diff --git a/substrate/frame/contracts/mock-network/src/primitives.rs b/substrate/frame/contracts/mock-network/src/primitives.rs new file mode 100644 index 
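The deposit scheme in `contracts_config.rs` above charges one `CENTS` per storage item and one `CENTS` per byte, with `CENTS` defined in the `primitives` module below as `UNITS / 100 = 100_000_000`. A quick hand-check of the resulting constants (illustrative test wrapper, assuming it sits next to `deposit` in that module):

```rust
// Hand-check of the deposit constants derived from `deposit(items, bytes)`.
#[test]
fn deposit_constants_match_hand_computation() {
    assert_eq!(deposit(1, 0), 100_000_000);                       // DepositPerItem
    assert_eq!(deposit(0, 1), 100_000_000);                       // DepositPerByte
    assert_eq!(deposit(1024, 1024 * 1024), 104_960_000_000_000);  // DefaultDepositLimit
}
```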
0000000000000000000000000000000000000000..efc42772f88ade5868492964b1b383c26ee689ac --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/primitives.rs @@ -0,0 +1,23 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub type Balance = u128; + +pub const UNITS: Balance = 10_000_000_000; +pub const CENTS: Balance = UNITS / 100; // 100_000_000 + +pub type AccountId = sp_runtime::AccountId32; +pub type AssetIdForAssets = u128; diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs new file mode 100644 index 0000000000000000000000000000000000000000..136cc2e3ed671b9d72830da955bee8701739c555 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -0,0 +1,240 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Relay chain runtime mock. + +use frame_support::{ + construct_runtime, derive_impl, parameter_types, + traits::{Contains, Everything, Nothing}, + weights::Weight, +}; + +use frame_system::EnsureRoot; +use sp_core::{ConstU32, H256}; +use sp_runtime::traits::IdentityLookup; + +use polkadot_parachain_primitives::primitives::Id as ParaId; +use polkadot_runtime_parachains::{configuration, origin, shared}; +use xcm::latest::prelude::*; +#[allow(deprecated)] +use xcm_builder::CurrencyAdapter as XcmCurrencyAdapter; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, + ChildSystemParachainAsSuperuser, DescribeAllTerminal, DescribeFamily, FixedRateOfFungible, + FixedWeightBounds, HashedDescription, IsConcrete, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, WithComputedOrigin, +}; +use xcm_executor::{Config, XcmExecutor}; + +use super::{ + mocks::relay_message_queue::*, + primitives::{AccountId, Balance}, +}; + +parameter_types! 
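With `UNITS = 10_000_000_000` and `CENTS = UNITS / 100` from `primitives.rs`, the `deposit` helper in `contracts_config.rs` prices every stored item and every stored byte at one cent. A standalone check of the three deposit constants derived from it (plain arithmetic, no runtime needed):

```rust
type Balance = u128;

const UNITS: Balance = 10_000_000_000;
const CENTS: Balance = UNITS / 100; // 100_000_000

// Mirrors the helper in contracts_config.rs (the `* 1` factors dropped).
const fn deposit(items: u32, bytes: u32) -> Balance {
    items as Balance * CENTS + (bytes as Balance) * CENTS
}

fn main() {
    assert_eq!(deposit(1, 0), 100_000_000);                       // DepositPerItem
    assert_eq!(deposit(0, 1), 100_000_000);                       // DepositPerByte
    assert_eq!(deposit(1024, 1024 * 1024), 104_960_000_000_000);  // DefaultDepositLimit
}
```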
{ + pub const BlockHashCount: u64 = 250; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Block = Block; + type Nonce = u64; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = MaxLocks; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxHolds = ConstU32<0>; + type MaxFreezes = ConstU32<0>; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; +} + +impl shared::Config for Runtime {} + +impl configuration::Config for Runtime { + type WeightInfo = configuration::TestWeightInfo; +} + +parameter_types! { + pub RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub const TokenLocation: MultiLocation = Here.into_location(); + pub UniversalLocation: InteriorMultiLocation = Here; + pub UnitWeightCost: u64 = 1_000; +} + +pub type SovereignAccountOf = ( + HashedDescription>, + AccountId32Aliases, + ChildParachainConvertsVia, +); + +#[allow(deprecated)] +pub type LocalBalancesTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +pub type AssetTransactors = LocalBalancesTransactor; + +type LocalOriginConverter = ( + SovereignSignedViaLocation, + ChildParachainAsNative, + SignedAccountId32AsNative, + ChildSystemParachainAsSuperuser, +); + +parameter_types! 
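`SovereignAccountOf` decides how a remote `MultiLocation` is converted into a local `AccountId` on the relay mock. Its generic arguments are lost in the rendering above; a plausible reconstruction (an assumption, based on the converters imported at the top of the file) is:

```rust
use polkadot_parachain_primitives::primitives::Id as ParaId;
use xcm_builder::{
    AccountId32Aliases, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily,
    HashedDescription,
};

// `AccountId` and `RelayNetwork` come from the surrounding module.
// The converters are tried in order: hash a description of the origin family,
// alias a local 32-byte account, or derive the well-known child-parachain account.
pub type SovereignAccountOf = (
    HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>,
    AccountId32Aliases<RelayNetwork, AccountId>,
    ChildParachainConvertsVia<ParaId, AccountId>,
);
```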
{ + pub const XcmInstructionWeight: Weight = Weight::from_parts(1_000, 1_000); + pub TokensPerSecondPerMegabyte: (AssetId, u128, u128) = + (Concrete(TokenLocation::get()), 1_000_000_000_000, 1024 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +pub struct ChildrenParachains; +impl Contains for ChildrenParachains { + fn contains(location: &MultiLocation) -> bool { + matches!(location, MultiLocation { parents: 0, interior: X1(Parachain(_)) }) + } +} + +pub type XcmRouter = crate::RelayChainXcmRouter; +pub type Barrier = WithComputedOrigin< + ( + AllowExplicitUnpaidExecutionFrom, + AllowTopLevelPaidExecutionFrom, + AllowSubscriptionsFrom, + ), + UniversalLocation, + ConstU32<1>, +>; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = LocalOriginConverter; + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = XcmPallet; + type AssetTrap = XcmPallet; + type AssetLocker = XcmPallet; + type AssetExchanger = (); + type AssetClaims = XcmPallet; + type SubscriptionService = XcmPallet; + type PalletInstancesInfo = AllPalletsWithSystem; + type FeeManager = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = (); + type SovereignAccountOf = SovereignAccountOf; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +impl origin::Config for Runtime {} + +type Block = frame_system::mocking::MockBlock; + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Size = u32; + type HeapSize = MessageQueueHeapSize; + type MaxStale = MessageQueueMaxStale; + type ServiceWeight = MessageQueueServiceWeight; + type MessageProcessor = MessageProcessor; + type QueueChangeHandler = (); + type WeightInfo = (); + type QueuePausedQuery = (); +} + +construct_runtime!( + pub enum Runtime { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + ParasOrigin: origin::{Pallet, Origin}, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin}, + MessageQueue: pallet_message_queue::{Pallet, Event}, + } +); diff --git a/substrate/frame/contracts/mock-network/src/tests.rs 
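The relay `Barrier` composes three standard filters behind `WithComputedOrigin`. The inner type parameters are stripped above; one plausible reconstruction (an assumption, the concrete inner filters may differ) is:

```rust
use frame_support::traits::Everything;
use sp_core::ConstU32;
use xcm_builder::{
    AllowExplicitUnpaidExecutionFrom, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom,
    WithComputedOrigin,
};

// `ChildrenParachains` and `UniversalLocation` come from the surrounding module.
pub type Barrier = WithComputedOrigin<
    (
        // Child parachains may send explicitly-unpaid messages.
        AllowExplicitUnpaidExecutionFrom<ChildrenParachains>,
        // Everyone else must pay for execution up front.
        AllowTopLevelPaidExecutionFrom<Everything>,
        // Version-subscription messages are always allowed.
        AllowSubscriptionsFrom<Everything>,
    ),
    UniversalLocation,
    ConstU32<1>,
>;
```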
b/substrate/frame/contracts/mock-network/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..a66b2b0801961d06bf8c4a413135a8759a6b34d6 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -0,0 +1,238 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + parachain::{self, Runtime}, + parachain_account_sovereign_account_id, + primitives::{AccountId, CENTS}, + relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, +}; +use assert_matches::assert_matches; +use codec::{Decode, Encode}; +use frame_support::{ + assert_err, + pallet_prelude::Weight, + traits::{fungibles::Mutate, Currency}, +}; +use pallet_balances::{BalanceLock, Reasons}; +use pallet_contracts::{Code, CollectEvents, DebugInfo, Determinism}; +use pallet_contracts_fixtures::compile_module; +use xcm::{v3::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_simulator::TestExt; + +type ParachainContracts = pallet_contracts::Pallet; + +/// Instantiate the tests contract, and fund it with some balance and assets. +fn instantiate_test_contract(name: &str) -> AccountId { + let (wasm, _) = compile_module::(name).unwrap(); + + // Instantiate contract. + let contract_addr = ParaA::execute_with(|| { + ParachainContracts::bare_instantiate( + ALICE, + 0, + Weight::MAX, + None, + Code::Upload(wasm), + vec![], + vec![], + DebugInfo::UnsafeDebug, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id + }); + + // Funds contract account with some balance and assets. + ParaA::execute_with(|| { + parachain::Balances::make_free_balance_be(&contract_addr, INITIAL_BALANCE); + parachain::Assets::mint_into(0u32.into(), &contract_addr, INITIAL_BALANCE).unwrap(); + }); + Relay::execute_with(|| { + let sovereign_account = parachain_account_sovereign_account_id(1u32, contract_addr.clone()); + relay_chain::Balances::make_free_balance_be(&sovereign_account, INITIAL_BALANCE); + }); + + contract_addr +} + +#[test] +fn test_xcm_execute() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + // Execute XCM instructions through the contract. + ParaA::execute_with(|| { + let amount: u128 = 10 * CENTS; + + // The XCM used to transfer funds to Bob. 
+ let message: xcm_simulator::Xcm<()> = Xcm(vec![ + WithdrawAsset(vec![(Here, amount).into()].into()), + DepositAsset { + assets: All.into(), + beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), + }, + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ) + .result + .unwrap(); + + let mut data = &result.data[..]; + let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); + assert_matches!(outcome, Outcome::Complete(_)); + + // Check if the funds are subtracted from the account of Alice and added to the account of + // Bob. + let initial = INITIAL_BALANCE; + assert_eq!(parachain::Assets::balance(0, contract_addr), initial); + assert_eq!(ParachainBalances::free_balance(BOB), initial + amount); + }); +} + +#[test] +fn test_xcm_execute_filtered_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + // `remark` should be rejected, as it is not allowed by our CallFilter. + let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); + let message: Xcm = Xcm(vec![Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: Weight::MAX, + call: call.encode().into(), + }]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + assert_err!(result.result, frame_system::Error::::CallFiltered); + }); +} + +#[test] +fn test_xcm_execute_reentrant_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + let transact_call = parachain::RuntimeCall::Contracts(pallet_contracts::Call::call { + dest: contract_addr.clone(), + gas_limit: 1_000_000.into(), + storage_deposit_limit: None, + data: vec![], + value: 0u128, + }); + + // The XCM used to transfer funds to Bob. + let message: Xcm = Xcm(vec![ + Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: 1_000_000_000.into(), + call: transact_call.encode().into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ) + .result + .unwrap(); + + let mut data = &result.data[..]; + let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); + assert_matches!(outcome, Outcome::Incomplete(_, XcmError::ExpectationFalse)); + + // Funds should not change hands as the XCM transact failed. + assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); + }); +} + +#[test] +fn test_xcm_send() { + MockNet::reset(); + let contract_addr = instantiate_test_contract("xcm_send"); + let fee = parachain::estimate_message_fee(4); // Accounts for the `DescendOrigin` instruction added by `send_xcm` + + // Send XCM instructions through the contract, to lock some funds on the relay chain. 
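The `xcm_execute` tests above decode the SCALE-encoded `Outcome` of the executed program out of `result.data`. A small sketch of handling the three XCM v3 `Outcome` variants after decoding (the byte slice stands in for whatever `bare_call` returned):

```rust
use codec::Decode;
use xcm::v3::prelude::*;

fn check_outcome(mut data: &[u8]) {
    match Outcome::decode(&mut data).expect("return data is an encoded Outcome") {
        // Every instruction ran; `weight` is what execution actually cost.
        Outcome::Complete(weight) => println!("complete, used {weight:?}"),
        // Execution stopped part way through, e.g. an `ExpectTransactStatus` check failed.
        Outcome::Incomplete(weight, error) => println!("stopped with {error:?} after {weight:?}"),
        // The program could not start executing at all.
        Outcome::Error(error) => println!("rejected: {error:?}"),
    }
}
```

This is why `test_xcm_execute` asserts `Outcome::Complete(_)` while the reentrant-call test asserts `Outcome::Incomplete(_, XcmError::ExpectationFalse)`.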
+ ParaA::execute_with(|| { + let dest = MultiLocation::from(Parent); + let dest = VersionedMultiLocation::V3(dest); + + let message: xcm_simulator::Xcm<()> = Xcm(vec![ + WithdrawAsset((Here, fee).into()), + BuyExecution { fees: (Here, fee).into(), weight_limit: WeightLimit::Unlimited }, + LockAsset { asset: (Here, 5 * CENTS).into(), unlocker: (Parachain(1)).into() }, + ]); + let message = VersionedXcm::V3(message); + let exec = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + (dest, message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + let mut data = &exec.result.unwrap().data[..]; + XcmHash::decode(&mut data).expect("Failed to decode xcm_send message_id"); + }); + + Relay::execute_with(|| { + // Check if the funds are locked on the relay chain. + assert_eq!( + relay_chain::Balances::locks(¶chain_account_sovereign_account_id(1, contract_addr)), + vec![BalanceLock { id: *b"py/xcmlk", amount: 5 * CENTS, reasons: Reasons::All }] + ); + }); +} diff --git a/substrate/frame/contracts/primitives/Cargo.toml b/substrate/frame/contracts/primitives/Cargo.toml index 0394841aa1f417f81f566eed052d831f2bec5fcf..d1db766ce8139e0640b9fe4606b6a8d973b99474 100644 --- a/substrate/frame/contracts/primitives/Cargo.toml +++ b/substrate/frame/contracts/primitives/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "A crate that hosts a common definitions that are relevant for the pallet-contracts." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,12 +21,12 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } # Substrate Dependencies (This crate should not rely on frame) -sp-std = { path = "../../../primitives/std", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-weights = { path = "../../../primitives/weights", default-features = false} +sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-weights = { path = "../../../primitives/weights", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/frame/contracts/primitives/README.md b/substrate/frame/contracts/primitives/README.md deleted file mode 100644 index c84cfbfe1a87b4fe2ca6da57d9e9fef626704b57..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/primitives/README.md +++ /dev/null @@ -1,3 +0,0 @@ -A crate that hosts a common definitions that are relevant for the pallet-contracts. 
- -License: Apache-2.0 diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index 3ada9e0c23dd9093aa1bb0f53b3211d46ac2eb46..972b23c373eb67e25bcaa22c387fe43bbf090803 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Procedural macros used in pallet_contracts" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,7 +20,7 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full"] } +syn = { version = "2.0.41", features = ["full"] } [dev-dependencies] diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index ad9cd2dadecf93b1514fdb948509ba5318e973d9..9dc34d5223b27307587bbcbf797846c7e6844fd0 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -74,7 +74,7 @@ fn derive_debug(input: TokenStream, fmt: impl Fn(&Ident) -> TokenStream2) -> Tok #[cfg(not(feature = "full"))] let fields = { drop(fmt); - drop(data); + let _ = data; TokenStream2::new() }; @@ -271,7 +271,7 @@ impl HostFn { // process return type let msg = r#"Should return one of the following: - Result<(), TrapReason>, - - Result, + - Result, - Result, - Result"#; let ret_ty = match item.clone().sig.output { @@ -336,7 +336,7 @@ impl HostFn { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), "u64" => Ok(HostFnReturn::U64), - "ReturnCode" => Ok(HostFnReturn::ReturnCode), + "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; @@ -550,7 +550,7 @@ fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { /// consumed by humans through rustdoc. #[cfg(doc)] pub mod api_doc { - use super::{TrapReason, ReturnCode}; + use super::{TrapReason, ReturnErrorCode}; #docs } } @@ -767,7 +767,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) /// #[define_env] /// pub mod some_env { /// #[version(2)] -/// fn foo(ctx: _, memory: _, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// fn foo(ctx: _, memory: _, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { /// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) /// } /// @@ -793,7 +793,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) /// pub mod some_env { /// #[version(1)] /// #[prefixed_alias] -/// fn foo(ctx: _, memory: _, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// fn foo(ctx: _, memory: _, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { /// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) /// } /// @@ -811,7 +811,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) /// /// Only following return types are allowed for the host functions defined with the macro: /// - `Result<(), TrapReason>`, -/// - `Result`, +/// - `Result`, /// - `Result`. 
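The proc-macro hunk above renames the accepted host-function return type from `ReturnCode` to `ReturnErrorCode`, and the generic arguments in its doc examples are stripped in this rendering. Restoring them (assumed from the error message in the same hunk), a `#[define_env]` host function must return one of `Result<(), TrapReason>`, `Result<ReturnErrorCode, TrapReason>`, `Result<u32, TrapReason>` or `Result<u64, TrapReason>`:

```rust
// Doc example from this hunk with the stripped generics restored.
// `ctx.some_host_fn` and `KeyType::Fix` are the macro documentation's own
// placeholders, not a real API.
#[define_env]
pub mod some_env {
    #[version(2)]
    fn foo(
        ctx: _,
        memory: _,
        key_ptr: u32,
        value_ptr: u32,
        value_len: u32,
    ) -> Result<(), TrapReason> {
        ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ())
    }
}
```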
/// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index ac5787e23404192be6699fea9c19117da764c749..3ab76d6112a27e4fbe5a377549ee7e3830ad5d3c 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -33,7 +33,6 @@ use crate::{ migration::{ codegen::LATEST_MIGRATION_VERSION, v09, v10, v11, v12, v13, v14, v15, MigrationStep, }, - wasm::CallFlags, Pallet as Contracts, *, }; use codec::{Encode, MaxEncodedLen}; @@ -46,6 +45,7 @@ use frame_support::{ }; use frame_system::RawOrigin; use pallet_balances; +use pallet_contracts_uapi::CallFlags; use sp_runtime::traits::{Bounded, Hash}; use sp_std::prelude::*; use wasm_instrument::parity_wasm::elements::{BlockType, Instruction, ValueType}; @@ -1749,7 +1749,7 @@ benchmarks! { .collect::>>(); let deposits_bytes: Vec = deposits.iter().flat_map(|i| i.encode()).collect(); let deposits_len = deposits_bytes.len() as u32; - let deposit_len = value_len.clone(); + let deposit_len = value_len; let callee_offset = value_len + deposits_len; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -2246,13 +2246,12 @@ benchmarks! { let message_len = message.len() as i32; let key_type = sp_core::crypto::KeyTypeId(*b"code"); let sig_params = (0..r) - .map(|i| { + .flat_map(|i| { let pub_key = sp_io::crypto::sr25519_generate(key_type, None); let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message).expect("Generates signature"); let data: [u8; 96] = [AsRef::<[u8]>::as_ref(&sig), AsRef::<[u8]>::as_ref(&pub_key)].concat().try_into().unwrap(); data }) - .flatten() .collect::>(); let sig_params_len = sig_params.len() as i32; diff --git a/substrate/frame/contracts/src/chain_extension.rs b/substrate/frame/contracts/src/chain_extension.rs index 664504d207f3af75641abcb3684410b496521db8..8a7243d6bb371192a92cd7e7b51b868728f7ee67 100644 --- a/substrate/frame/contracts/src/chain_extension.rs +++ b/substrate/frame/contracts/src/chain_extension.rs @@ -81,7 +81,7 @@ use sp_std::{marker::PhantomData, vec::Vec}; pub use crate::{exec::Ext, gas::ChargedAmount, storage::meter::Diff, Config}; pub use frame_system::Config as SysConfig; -pub use pallet_contracts_primitives::ReturnFlags; +pub use pallet_contracts_uapi::ReturnFlags; /// Result that returns a [`DispatchError`] on error. pub type Result = sp_std::result::Result; diff --git a/substrate/frame/contracts/src/debug.rs b/substrate/frame/contracts/src/debug.rs index e22a841e6fb7f73a87c0afbd55fc24bc25129c57..6cdca7aa4c76d3e13882cd3c952dcc45750204ec 100644 --- a/substrate/frame/contracts/src/debug.rs +++ b/substrate/frame/contracts/src/debug.rs @@ -15,9 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub use crate::exec::{ExecResult, ExportedFunction}; +pub use crate::{ + exec::{ExecResult, ExportedFunction}, + primitives::ExecReturnValue, +}; use crate::{Config, LOG_TARGET}; -pub use pallet_contracts_primitives::ExecReturnValue; /// Umbrella trait for all interfaces that serves for debugging. 
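The benchmarking change above replaces `.map(..).flatten()` with the single, clippy-preferred `.flat_map(..)` adapter. A standalone illustration of the equivalence:

```rust
fn main() {
    let nested: Vec<u8> = (0..3u8).map(|i| vec![i, i]).flatten().collect();
    let flat: Vec<u8> = (0..3u8).flat_map(|i| vec![i, i]).collect();

    assert_eq!(nested, flat);
    assert_eq!(flat, vec![0, 0, 1, 1, 2, 2]);
}
```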
pub trait Debugger: Tracing + CallInterceptor {} diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index c26d82f7f110c08e7853acd494774822b9d2ae60..2183d6b96cc5d4f84cbfa6777e0c1b0bbeec4f86 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -18,6 +18,7 @@ use crate::{ debug::{CallInterceptor, CallSpan, Tracing}, gas::GasMeter, + primitives::{ExecReturnValue, StorageDeposit}, storage::{self, meter::Diff, WriteOutcome}, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, DebugBufferVec, Determinism, Error, Event, Nonce, Origin, Pallet as Contracts, Schedule, @@ -37,7 +38,6 @@ use frame_support::{ Blake2_128Concat, BoundedVec, StorageHasher, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use pallet_contracts_primitives::{ExecReturnValue, StorageDeposit}; use smallvec::{Array, SmallVec}; use sp_core::{ ecdsa::Public as ECDSAPublic, @@ -1618,7 +1618,7 @@ mod tests { use codec::{Decode, Encode}; use frame_support::{assert_err, assert_ok, parameter_types}; use frame_system::{EventRecord, Phase}; - use pallet_contracts_primitives::ReturnFlags; + use pallet_contracts_uapi::ReturnFlags; use pretty_assertions::assert_eq; use sp_runtime::{traits::Hash, DispatchError}; use std::{cell::RefCell, collections::hash_map::HashMap, rc::Rc}; diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 7d516fbe2496cbe5e0ad67cad4e46cae7d9ba447..a15006e6388c0e35818c5635dbfa1c9b785fa81f 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -91,6 +91,9 @@ mod address; mod benchmarking; mod exec; mod gas; +mod primitives; +pub use primitives::*; + mod schedule; mod storage; mod wasm; @@ -128,11 +131,6 @@ use frame_system::{ pallet_prelude::{BlockNumberFor, OriginFor}, EventRecord, Pallet as System, }; -use pallet_contracts_primitives::{ - Code, CodeUploadResult, CodeUploadReturnValue, ContractAccessError, ContractExecResult, - ContractInstantiateResult, ContractResult, ExecReturnValue, GetStorageResult, - InstantiateReturnValue, StorageDeposit, -}; use scale_info::TypeInfo; use smallvec::Array; use sp_runtime::{ @@ -403,6 +401,14 @@ pub mod pallet { /// its type appears in the metadata. Only valid value is `()`. #[pallet::constant] type Environment: Get>; + + /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and + /// execute XCM programs. + type Xcm: xcm_builder::Controller< + OriginFor, + ::RuntimeCall, + BlockNumberFor, + >; } #[pallet::hooks] @@ -1004,6 +1010,8 @@ pub mod pallet { /// in this error. Note that this usually shouldn't happen as deploying such contracts /// is rejected. NoChainExtension, + /// Failed to decode the XCM program. + XCMDecodeFailed, /// A contract with the same AccountId already exists. DuplicateContract, /// A contract self destructed in its constructor. 
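The new `Config::Xcm` associated type added to `pallet_contracts` in the `lib.rs` hunk above is what exposes the `xcm_execute`/`xcm_send` host functions through `xcm_builder::Controller`. The generic arguments are stripped in the diff rendering; a reconstruction (assumed) of the trait item is:

```rust
/// A type that exposes XCM APIs, allowing contracts to interact with other
/// parachains, and execute XCM programs.
type Xcm: xcm_builder::Controller<
    OriginFor<Self>,
    <Self as frame_system::Config>::RuntimeCall,
    BlockNumberFor<Self>,
>;
```

The PR wires it in two ways: the mock parachain binds it to `pallet_xcm` (presumably `type Xcm = pallet_xcm::Pallet<Self>;`, the generic is stripped above), while the unit-test runtime in `src/tests.rs` stubs it out with `type Xcm = ();`.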
diff --git a/substrate/frame/contracts/src/migration/v10.rs b/substrate/frame/contracts/src/migration/v10.rs index f02e28f6fde325b26a041c683e03671a8f876897..22fad38739e751c9b68dfb3151751e08331f5218 100644 --- a/substrate/frame/contracts/src/migration/v10.rs +++ b/substrate/frame/contracts/src/migration/v10.rs @@ -219,7 +219,7 @@ where "Failed to transfer the base deposit, reason: {:?}", err ); - OldCurrency::deposit_creating(&deposit_account, min_balance); + let _ = OldCurrency::deposit_creating(&deposit_account, min_balance); min_balance }); diff --git a/substrate/frame/contracts/src/migration/v12.rs b/substrate/frame/contracts/src/migration/v12.rs index 4ddc57584b30eb74c0ed18f7da2989d3db2ff53c..7dee31503101ba753b0e807d11621be711d4b58b 100644 --- a/substrate/frame/contracts/src/migration/v12.rs +++ b/substrate/frame/contracts/src/migration/v12.rs @@ -317,7 +317,7 @@ where let (_, old_deposit, storage_module) = state; // CodeInfoOf::max_encoded_len == OwnerInfoOf::max_encoded_len + 1 // I.e. code info adds up 1 byte per record. - let info_bytes_added = items.clone(); + let info_bytes_added = items; // We removed 1 PrefabWasmModule, and added 1 byte of determinism flag, per contract code. let storage_removed = storage_module.saturating_sub(info_bytes_added); // module+code+info - bytes diff --git a/substrate/frame/contracts/primitives/src/lib.rs b/substrate/frame/contracts/src/primitives.rs similarity index 96% rename from substrate/frame/contracts/primitives/src/lib.rs rename to substrate/frame/contracts/src/primitives.rs index c33149285004b38ad4beee7aac330c9d7421324b..ab73b28e8c49ffc0076d0b0792b5750e41be74f3 100644 --- a/substrate/frame/contracts/primitives/src/lib.rs +++ b/substrate/frame/contracts/src/primitives.rs @@ -17,17 +17,15 @@ //! A crate that hosts a common definitions that are relevant for the pallet-contracts. -#![cfg_attr(not(feature = "std"), no_std)] - -use bitflags::bitflags; use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::weights::Weight; +use pallet_contracts_uapi::ReturnFlags; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, RuntimeDebug, }; use sp_std::prelude::*; -use sp_weights::Weight; /// Result type of a `bare_call` or `bare_instantiate` call as well as `ContractsApi::call` and /// `ContractsApi::instantiate`. @@ -109,15 +107,6 @@ pub enum ContractAccessError { MigrationInProgress, } -bitflags! { - /// Flags used by a contract to customize exit behaviour. - #[derive(Encode, Decode, TypeInfo)] - pub struct ReturnFlags: u32 { - /// If this bit is set all changes made by the contract execution are rolled back. - const REVERT = 0x0000_0001; - } -} - /// Output of a contract call or instantiation which ran to completion. 
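`ReturnFlags` is removed from the old primitives crate above because it now lives in `pallet_contracts_uapi`, unchanged: the only defined bit is `REVERT` (`0x0000_0001`). A minimal sketch of how callers inspect it, mirroring the check used later in `runtime.rs`:

```rust
use pallet_contracts_uapi::ReturnFlags;

/// `true` when the contract asked for all of its state changes to be rolled back.
fn did_revert(flags: &ReturnFlags) -> bool {
    flags.contains(ReturnFlags::REVERT)
}

fn main() {
    assert!(did_revert(&ReturnFlags::REVERT));
    assert!(!did_revert(&ReturnFlags::empty()));
}
```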
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ExecReturnValue { diff --git a/substrate/frame/contracts/src/storage/meter.rs b/substrate/frame/contracts/src/storage/meter.rs index 9f098090bc8bb1d4f42339fb619d38b909501d8a..495cbd90db5a2424e3a5303d7fce9ded7750314e 100644 --- a/substrate/frame/contracts/src/storage/meter.rs +++ b/substrate/frame/contracts/src/storage/meter.rs @@ -33,9 +33,8 @@ use frame_support::{ }, DefaultNoBound, RuntimeDebugNoBound, }; -use sp_api::HashT; use sp_runtime::{ - traits::{Saturating, Zero}, + traits::{Hash as HashT, Saturating, Zero}, DispatchError, FixedPointNumber, FixedU128, }; use sp_std::{fmt::Debug, marker::PhantomData, vec, vec::Vec}; diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index e7784b02b74c7d5661c7854f8205a1270af8725d..4f63104ef268b4dd7f7a882907edab3e0d2634f4 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -30,9 +30,10 @@ use crate::{ }, exec::{Frame, Key}, migration::codegen::LATEST_MIGRATION_VERSION, + primitives::CodeUploadReturnValue, storage::DeletionQueueManager, tests::test_utils::{get_contract, get_contract_checked}, - wasm::{Determinism, ReturnCode as RuntimeReturnCode}, + wasm::{Determinism, ReturnErrorCode as RuntimeReturnCode}, weights::WeightInfo, BalanceOf, Code, CodeHash, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo, DefaultAddressGenerator, DeletionQueueCounter, Error, HoldReason, @@ -42,6 +43,7 @@ use assert_matches::assert_matches; use codec::Encode; use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_err_with_weight, assert_noop, assert_ok, + derive_impl, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, parameter_types, storage::child, @@ -54,7 +56,6 @@ use frame_support::{ }; use frame_system::{EventRecord, Phase}; use pallet_contracts_fixtures::compile_module; -use pallet_contracts_primitives::CodeUploadReturnValue; use pretty_assertions::{assert_eq, assert_ne}; use sp_core::ByteArray; use sp_io::hashing::blake2_256; @@ -332,6 +333,8 @@ parameter_types! 
{ ); pub static ExistentialDeposit: u64 = 1; } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; @@ -486,6 +489,7 @@ impl Config for Test { type MaxDelegateDependencies = MaxDelegateDependencies; type Debug = TestDebug; type Environment = (); + type Xcm = (); } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); diff --git a/substrate/frame/contracts/src/tests/test_debug.rs b/substrate/frame/contracts/src/tests/test_debug.rs index 2d7ed4743657573469deff09630ff2b2bf3a51a2..c9b6557bbb97818fe740b06f566da2db735da823 100644 --- a/substrate/frame/contracts/src/tests/test_debug.rs +++ b/substrate/frame/contracts/src/tests/test_debug.rs @@ -18,10 +18,10 @@ use super::*; use crate::{ debug::{CallInterceptor, CallSpan, ExecResult, ExportedFunction, Tracing}, + primitives::ExecReturnValue, AccountIdOf, }; use frame_support::traits::Currency; -use pallet_contracts_primitives::ExecReturnValue; use pretty_assertions::assert_eq; use std::cell::RefCell; diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 77e94b16777b041d746746f2c0653bae3c3d5949..4650a9bc79a366e48f6064c90d0c19752ed6d9dd 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -24,13 +24,15 @@ mod runtime; #[cfg(doc)] pub use crate::wasm::runtime::api_doc; +pub use crate::wasm::runtime::{ + AllowDeprecatedInterface, AllowUnstableInterface, Environment, Runtime, RuntimeCosts, +}; + #[cfg(test)] pub use tests::MockExt; -pub use crate::wasm::runtime::{ - AllowDeprecatedInterface, AllowUnstableInterface, CallFlags, Environment, ReturnCode, Runtime, - RuntimeCosts, -}; +#[cfg(test)] +pub use crate::wasm::runtime::ReturnErrorCode; use crate::{ exec::{ExecResult, Executable, ExportedFunction, Ext}, @@ -436,6 +438,7 @@ mod tests { use crate::{ exec::{AccountIdOf, ErrorOrigin, ExecError, Executable, Ext, Key, SeedOf}, gas::GasMeter, + primitives::ExecReturnValue, storage::WriteOutcome, tests::{RuntimeCall, Test, ALICE, BOB}, BalanceOf, CodeHash, Error, Origin, Pallet as Contracts, @@ -445,7 +448,7 @@ mod tests { assert_err, assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; - use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; + use pallet_contracts_uapi::ReturnFlags; use pretty_assertions::assert_eq; use sp_core::H256; use sp_runtime::DispatchError; @@ -1506,7 +1509,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1531,7 +1534,7 @@ mod tests { ) ;; Find out the size of the buffer - (set_local $buf_size + (local.set $buf_size (i32.load (i32.const 32)) ) @@ -1539,7 +1542,7 @@ mod tests { (call $seal_return (i32.const 0) (i32.const 36) - (get_local $buf_size) + (local.get $buf_size) ) ;; env:seal_return doesn't return, so this is effectively unreachable. 
@@ -1575,7 +1578,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1633,7 +1636,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1680,7 +1683,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1726,7 +1729,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1773,7 +1776,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1836,7 +1839,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1925,7 +1928,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1966,7 +1969,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2013,7 +2016,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2067,7 +2070,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2137,7 +2140,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2327,7 +2330,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2739,7 +2742,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::KeyNotFound as u32 + ReturnErrorCode::KeyNotFound as u32 ); // value exists @@ -2747,7 +2750,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::Success as u32 + ReturnErrorCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); assert_eq!(&result.data[4..], &[42u8]); @@ -2757,7 +2760,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::Success as u32 + ReturnErrorCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), Some(&vec![])); assert_eq!(&result.data[4..], &([] as [u8; 0])); @@ -2920,7 +2923,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::KeyNotFound as u32 + ReturnErrorCode::KeyNotFound as u32 ); // value did exist -> value returned @@ -2928,7 +2931,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::Success as u32 + ReturnErrorCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); assert_eq!(&result.data[4..], &[42u8]); @@ -2938,7 +2941,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::Success as u32 + ReturnErrorCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); assert_eq!(&result.data[4..], &[0u8; 0]); @@ -2995,7 +2998,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ 
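The updated assertions above read the host function's `ReturnErrorCode` back out of the first four bytes of the output buffer as a little-endian `u32`. A standalone illustration using the discriminants from the removed enum (`Success = 0`, `KeyNotFound = 3`):

```rust
fn main() {
    // Pretend this is `result.data` coming back from the contract: four bytes
    // of return code (little endian) followed by the optional value bytes.
    let mut data = 0u32.to_le_bytes().to_vec(); // 0 == ReturnErrorCode::Success
    data.push(42); // the stored value returned alongside the code

    let code = u32::from_le_bytes(data[0..4].try_into().unwrap());
    assert_eq!(code, 0);
    assert_eq!(&data[4..], &[42u8]);

    // A missing key comes back as KeyNotFound (discriminant 3) and no value.
    let not_found = 3u32.to_le_bytes().to_vec();
    assert_eq!(u32::from_le_bytes(not_found[0..4].try_into().unwrap()), 3);
}
```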
-3047,7 +3050,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -3162,18 +3165,18 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) ) (func (export "call") (local $exit_code i32) - (set_local $exit_code + (local.set $exit_code (call $seal_set_code_hash (i32.const 0)) ) (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) ) @@ -3202,18 +3205,18 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) ) (func (export "call") (local $return_val i32) - (set_local $return_val + (local.set $return_val (call $reentrance_count) ) (call $assert - (i32.eq (get_local $return_val) (i32.const 12)) + (i32.eq (local.get $return_val) (i32.const 12)) ) ) @@ -3234,18 +3237,18 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) ) (func (export "call") (local $return_val i32) - (set_local $return_val + (local.set $return_val (call $account_reentrance_count (i32.const 0)) ) (call $assert - (i32.eq (get_local $return_val) (i32.const 12)) + (i32.eq (local.get $return_val) (i32.const 12)) ) ) @@ -3267,7 +3270,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 4fd52b471a0c6b41cabd364d530d2989891591d7..871ef05c37e65f9ef10f9189297081dd30a16d77 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -20,15 +20,21 @@ use crate::{ exec::{ExecError, ExecResult, Ext, Key, TopicOf}, gas::{ChargedAmount, Token}, + primitives::ExecReturnValue, schedule::HostFnWeights, BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL, }; - -use bitflags::bitflags; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; -use frame_support::{ensure, traits::Get, weights::Weight}; -use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; +use frame_support::{ + dispatch::DispatchInfo, + ensure, + pallet_prelude::{DispatchResult, DispatchResultWithPostInfo}, + parameter_types, + traits::Get, + weights::Weight, +}; use pallet_contracts_proc_macro::define_env; +use pallet_contracts_uapi::{CallFlags, ReturnFlags}; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::{ traits::{Bounded, Zero}, @@ -36,6 +42,9 @@ use sp_runtime::{ }; use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; +use xcm::VersionedXcm; + +type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; @@ -78,44 +87,16 @@ enum KeyType { Var(u32), } -/// Every error that can be returned to a contract when it calls any of the host functions. -/// -/// # Note -/// -/// This enum can be extended in the future: New codes can be added but existing codes -/// will not be changed or removed. This means that any contract **must not** exhaustively -/// match return codes. Instead, contracts should prepare for unknown variants and deal with -/// those errors gracefully in order to be forward compatible. -#[derive(Debug)] -#[repr(u32)] -pub enum ReturnCode { - /// API call successful. 
- Success = 0, - /// The called function trapped and has its state changes reverted. - /// In this case no output buffer is returned. - CalleeTrapped = 1, - /// The called function ran to completion but decided to revert its state. - /// An output buffer is returned when one was supplied. - CalleeReverted = 2, - /// The passed key does not exist in storage. - KeyNotFound = 3, - /// See [`Error::TransferFailed`]. - TransferFailed = 5, - /// No code could be found at the supplied code hash. - CodeNotFound = 7, - /// The contract that was called is no contract (a plain account). - NotCallable = 8, - /// The call dispatched by `seal_call_runtime` was executed but returned an error. - CallRuntimeFailed = 10, - /// ECDSA pubkey recovery failed (most probably wrong recovery id or signature), or - /// ECDSA compressed pubkey conversion into Ethereum address failed (most probably - /// wrong pubkey provided). - EcdsaRecoverFailed = 11, - /// sr25519 signature verification failed. - Sr25519VerifyFailed = 12, +pub use pallet_contracts_uapi::ReturnErrorCode; + +parameter_types! { + /// Getter types used by [`crate::api_doc::Current::call_runtime`] + const CallRuntimeFailed: ReturnErrorCode = ReturnErrorCode::CallRuntimeFailed; + /// Getter types used by [`crate::api_doc::Current::xcm_execute`] + const XcmExecutionFailed: ReturnErrorCode = ReturnErrorCode::XcmExecutionFailed; } -impl From for ReturnCode { +impl From for ReturnErrorCode { fn from(from: ExecReturnValue) -> Self { if from.flags.contains(ReturnFlags::REVERT) { Self::CalleeReverted @@ -125,12 +106,6 @@ impl From for ReturnCode { } } -impl From for u32 { - fn from(code: ReturnCode) -> u32 { - code as u32 - } -} - /// The data passed through when a contract uses `seal_return`. #[derive(RuntimeDebug)] pub struct ReturnData { @@ -391,52 +366,6 @@ impl Token for RuntimeToken { } } -bitflags! { - /// Flags used to change the behaviour of `seal_call` and `seal_delegate_call`. - pub struct CallFlags: u32 { - /// Forward the input of current function to the callee. - /// - /// Supplied input pointers are ignored when set. - /// - /// # Note - /// - /// A forwarding call will consume the current contracts input. Any attempt to - /// access the input after this call returns will lead to [`Error::InputForwarded`]. - /// It does not matter if this is due to calling `seal_input` or trying another - /// forwarding call. Consider using [`Self::CLONE_INPUT`] in order to preserve - /// the input. - const FORWARD_INPUT = 0b0000_0001; - /// Identical to [`Self::FORWARD_INPUT`] but without consuming the input. - /// - /// This adds some additional weight costs to the call. - /// - /// # Note - /// - /// This implies [`Self::FORWARD_INPUT`] and takes precedence when both are set. - const CLONE_INPUT = 0b0000_0010; - /// Do not return from the call but rather return the result of the callee to the - /// callers caller. - /// - /// # Note - /// - /// This makes the current contract completely transparent to its caller by replacing - /// this contracts potential output by the callee ones. Any code after `seal_call` - /// can be safely considered unreachable. - const TAIL_CALL = 0b0000_0100; - /// Allow the callee to reenter into the current contract. - /// - /// Without this flag any reentrancy into the current contract that originates from - /// the callee (or any of its callees) is denied. This includes the first callee: - /// You cannot call into yourself with this flag set. 
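The `From<ExecReturnValue>` conversion above (its closing lines fall outside the hunk shown) maps a completed call onto the new `ReturnErrorCode`: revert flag set means `CalleeReverted`, otherwise `Success`. A reconstruction with the generics restored (the `else` branch is an assumption based on the removed `ReturnCode` version):

```rust
use pallet_contracts_uapi::{ReturnErrorCode, ReturnFlags};

// `ExecReturnValue` is the pallet's own type from `crate::primitives`.
impl From<ExecReturnValue> for ReturnErrorCode {
    fn from(from: ExecReturnValue) -> Self {
        if from.flags.contains(ReturnFlags::REVERT) {
            Self::CalleeReverted
        } else {
            Self::Success
        }
    }
}
```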
- /// - /// # Note - /// - /// For `seal_delegate_call` should be always unset, otherwise - /// [`Error::InvalidCallFlags`] is returned. - const ALLOW_REENTRY = 0b0000_1000; - } -} - /// The kind of call that should be performed. enum CallType { /// Execute another instantiated contract @@ -461,6 +390,29 @@ fn already_charged(_: u32) -> Option { None } +/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`] +/// instruction with a call that is not allowed by the CallFilter. +fn ensure_executable(message: &VersionedXcm>) -> DispatchResult { + use frame_support::traits::Contains; + use xcm::prelude::{Transact, Xcm}; + + let mut message: Xcm> = + message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?; + + message.iter_mut().try_for_each(|inst| -> DispatchResult { + let Transact { ref mut call, .. } = inst else { return Ok(()) }; + let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?; + + if !::CallFilter::contains(call) { + return Err(frame_system::Error::::CallFiltered.into()) + } + + Ok(()) + })?; + + Ok(()) +} + /// Can only be used for one call. pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, @@ -558,6 +510,32 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { self.ext.gas_meter_mut().adjust_gas(charged, token); } + /// Charge, Run and adjust gas, for executing the given dispatchable. + fn call_dispatchable< + ErrorReturnCode: Get, + F: FnOnce(&mut Self) -> DispatchResultWithPostInfo, + >( + &mut self, + dispatch_info: DispatchInfo, + run: F, + ) -> Result { + use frame_support::dispatch::extract_actual_weight; + let charged = self.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; + let result = run(self); + let actual_weight = extract_actual_weight(&result, &dispatch_info); + self.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); + match result { + Ok(_) => Ok(ReturnErrorCode::Success), + Err(e) => { + if self.ext.append_debug_buffer("") { + self.ext.append_debug_buffer("call failed with: "); + self.ext.append_debug_buffer(e.into()); + }; + Ok(ErrorReturnCode::get()) + }, + } + } + /// Read designated chunk from the sandbox memory. /// /// Returns `Err` if one of the following conditions occurs: @@ -633,8 +611,10 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { let ptr = ptr as usize; let mut bound_checked = memory.get(ptr..ptr + len as usize).ok_or_else(|| Error::::OutOfBounds)?; + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut bound_checked) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + Ok(decoded) } @@ -736,9 +716,9 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { Ok(()) } - /// Fallible conversion of `DispatchError` to `ReturnCode`. - fn err_into_return_code(from: DispatchError) -> Result { - use ReturnCode::*; + /// Fallible conversion of `DispatchError` to `ReturnErrorCode`. + fn err_into_return_code(from: DispatchError) -> Result { + use ReturnErrorCode::*; let transfer_failed = Error::::TransferFailed.into(); let no_code = Error::::CodeNotFound.into(); @@ -752,8 +732,8 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { } } - /// Fallible conversion of a `ExecResult` to `ReturnCode`. - fn exec_into_return_code(from: ExecResult) -> Result { + /// Fallible conversion of a `ExecResult` to `ReturnErrorCode`. 
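`CallFlags` is likewise removed here because it moved to `pallet_contracts_uapi` with the same bit layout: `FORWARD_INPUT = 1`, `CLONE_INPUT = 2`, `TAIL_CALL = 4`, `ALLOW_REENTRY = 8`. The host side rebuilds the set from the raw `u32` a contract passes in, and the `call`/`delegate_call` host functions later in this file map unknown bits to `Error::InvalidCallFlags`. A small sketch of that round trip:

```rust
use pallet_contracts_uapi::CallFlags;

fn main() {
    // A contract asking to clone its input and allow reentrancy passes 2 | 8.
    let raw: u32 = (CallFlags::CLONE_INPUT | CallFlags::ALLOW_REENTRY).bits();
    assert_eq!(raw, 0b0000_1010);

    // `from_bits` is what the host functions use; unknown bits yield `None`.
    assert!(CallFlags::from_bits(raw).is_some());
    assert!(CallFlags::from_bits(0xFFFF_0000).is_none());
}
```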
+ fn exec_into_return_code(from: ExecResult) -> Result { use crate::exec::ErrorOrigin::Callee; let ExecError { error, origin } = match from { @@ -762,7 +742,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { }; match (error, origin) { - (_, Callee) => Ok(ReturnCode::CalleeTrapped), + (_, Callee) => Ok(ReturnErrorCode::CalleeTrapped), (err, _) => Self::err_into_return_code(err), } } @@ -836,7 +816,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { key_ptr: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { let charged = self.charge_gas(RuntimeCosts::GetStorage(self.ext.max_value_size()))?; let key = self.decode_key(memory, key_type, key_ptr)?; let outcome = self.ext.get_storage(&key); @@ -851,10 +831,10 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { false, already_charged, )?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } else { self.adjust_gas(charged, RuntimeCosts::GetStorage(0)); - Ok(ReturnCode::KeyNotFound) + Ok(ReturnErrorCode::KeyNotFound) } } @@ -881,7 +861,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { self.charge_gas(call_type.cost())?; let input_data = if flags.contains(CallFlags::CLONE_INPUT) { let input = self.input_data.as_ref().ok_or(Error::::InputForwarded)?; @@ -965,7 +945,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { output_len_ptr: u32, salt_ptr: u32, salt_len: u32, - ) -> Result { + ) -> Result { self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { BalanceOf::<::T>::zero() @@ -1023,11 +1003,9 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { // for every function. #[define_env(doc)] pub mod env { + /// Set the value at the given key in the contract storage. - /// - /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::set_storage`] version with the - /// exception of the return type. Still a valid thing to call when not interested in the return - /// value. + /// See [`pallet_contracts_uapi::HostFn::set_storage`] #[prefixed_alias] fn set_storage( ctx: _, @@ -1040,23 +1018,7 @@ pub mod env { } /// Set the value at the given key in the contract storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// The value length must not exceed the maximum defined by the contracts module parameters. - /// Specifying a `value_len` of zero will store an empty value. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - /// - `value_ptr`: pointer into the linear memory where the value to set is placed. - /// - `value_len`: the length of the value in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::set_storage_v1`] #[version(1)] #[prefixed_alias] fn set_storage( @@ -1070,21 +1032,7 @@ pub mod env { } /// Set the value at the given key in the contract storage. - /// - /// The key and value lengths must not exceed the maximums defined by the contracts module - /// parameters. Specifying a `value_len` of zero will store an empty value. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - /// - `key_len`: the length of the key in bytes. 
- /// - `value_ptr`: pointer into the linear memory where the value to set is placed. - /// - `value_len`: the length of the value in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::set_storage_v2`] #[version(2)] #[prefixed_alias] fn set_storage( @@ -1099,26 +1047,14 @@ pub mod env { } /// Clear the value at the given key in the contract storage. - /// - /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::clear_storage`] version with - /// the exception of the return type. Still a valid thing to call when not interested in the - /// return value. + /// See [`pallet_contracts_uapi::HostFn::clear_storage`] #[prefixed_alias] fn clear_storage(ctx: _, memory: _, key_ptr: u32) -> Result<(), TrapReason> { ctx.clear_storage(memory, KeyType::Fix, key_ptr).map(|_| ()) } /// Clear the value at the given key in the contract storage. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key is placed. - /// - `key_len`: the length of the key in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::clear_storage_v1`] #[version(1)] #[prefixed_alias] fn clear_storage(ctx: _, memory: _, key_ptr: u32, key_len: u32) -> Result { @@ -1126,20 +1062,7 @@ pub mod env { } /// Retrieve the value under the given key from storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `out_ptr`: pointer to the linear memory where the value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// `ReturnCode::KeyNotFound` + /// See [`pallet_contracts_uapi::HostFn::get_storage`] #[prefixed_alias] fn get_storage( ctx: _, @@ -1147,28 +1070,12 @@ pub mod env { key_ptr: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.get_storage(memory, KeyType::Fix, key_ptr, out_ptr, out_len_ptr) } /// Retrieve the value under the given key from storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// The key length must not exceed the maximum defined by the contracts module parameter. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - `out_ptr`: pointer to the linear memory where the value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. 
- /// - /// # Errors - /// - /// - `ReturnCode::KeyNotFound` + /// See [`pallet_contracts_uapi::HostFn::get_storage_v1`] #[version(1)] #[prefixed_alias] fn get_storage( @@ -1178,41 +1085,19 @@ pub mod env { key_len: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.get_storage(memory, KeyType::Var(key_len), key_ptr, out_ptr, out_len_ptr) } /// Checks whether there is a value stored under the given key. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::contains_storage`] #[prefixed_alias] fn contains_storage(ctx: _, memory: _, key_ptr: u32) -> Result { ctx.contains_storage(memory, KeyType::Fix, key_ptr) } /// Checks whether there is a value stored under the given key. - /// - /// The key length must not exceed the maximum defined by the contracts module parameter. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::contains_storage_v1`] #[version(1)] #[prefixed_alias] fn contains_storage(ctx: _, memory: _, key_ptr: u32, key_len: u32) -> Result { @@ -1220,18 +1105,7 @@ pub mod env { } /// Retrieve and remove the value under the given key from storage. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - `out_ptr`: pointer to the linear memory where the value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// - `ReturnCode::KeyNotFound` + /// See [`pallet_contracts_uapi::HostFn::take_storage`] #[prefixed_alias] fn take_storage( ctx: _, @@ -1240,7 +1114,7 @@ pub mod env { key_len: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { let charged = ctx.charge_gas(RuntimeCosts::TakeStorage(ctx.ext.max_value_size()))?; ensure!( key_len <= <::T as Config>::MaxStorageKeyLen::get(), @@ -1254,27 +1128,15 @@ pub mod env { )? { ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(value.len() as u32)); ctx.write_sandbox_output(memory, out_ptr, out_len_ptr, &value, false, already_charged)?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } else { ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(0)); - Ok(ReturnCode::KeyNotFound) + Ok(ReturnErrorCode::KeyNotFound) } } /// Transfer some value to another account. - /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the address of the beneficiary account Should be decodable as - /// an `T::AccountId`. Traps otherwise. - /// - `account_len`: length of the address buffer. - /// - `value_ptr`: a pointer to the buffer with value, how much value to send. Should be - /// decodable as a `T::Balance`. Traps otherwise. 
- /// - `value_len`: length of the value buffer. - /// - /// # Errors - /// - /// - `ReturnCode::TransferFailed` + /// See [`pallet_contracts_uapi::HostFn::transfer`]. #[prefixed_alias] fn transfer( ctx: _, @@ -1283,14 +1145,14 @@ pub mod env { _account_len: u32, value_ptr: u32, _value_len: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::Transfer)?; let callee: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(memory, account_ptr)?; let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(memory, value_ptr)?; let result = ctx.ext.transfer(&callee, value); match result { - Ok(()) => Ok(ReturnCode::Success), + Ok(()) => Ok(ReturnErrorCode::Success), Err(err) => { let code = Runtime::::err_into_return_code(err)?; Ok(code) @@ -1300,17 +1162,11 @@ pub mod env { /// Make a call to another contract. /// - /// # New version available - /// - /// This is equivalent to calling the newer version of this function with - /// `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. - /// /// # Note /// - /// The values `_callee_len` and `_value_len` are ignored because the encoded sizes - /// of those types are fixed through - /// [`codec::MaxEncodedLen`]. The fields exist - /// for backwards compatibility. Consider switching to the newest version of this function. + /// The values `_callee_len` and `_value_len` are ignored because the encoded sizes of those + /// types are fixed through [`codec::MaxEncodedLen`]. The fields exist for backwards + /// compatibility. Consider switching to the newest version of this function. #[prefixed_alias] fn call( ctx: _, @@ -1324,7 +1180,7 @@ pub mod env { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.call( memory, CallFlags::ALLOW_REENTRY, @@ -1342,10 +1198,7 @@ pub mod env { } /// Make a call to another contract. - /// - /// Equivalent to the newer [`seal2`][`super::api_doc::Version2::call`] version but works with - /// *ref_time* Weight only. It is recommended to switch to the latest version, once it's - /// stabilized. + /// See [`pallet_contracts_uapi::HostFn::call_v1`]. #[version(1)] #[prefixed_alias] fn call( @@ -1359,7 +1212,7 @@ pub mod env { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, @@ -1377,39 +1230,7 @@ pub mod env { } /// Make a call to another contract. - /// - /// The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`. - /// The copy of the output buffer can be skipped by supplying the sentinel value - /// of `SENTINEL` to `output_ptr`. - /// - /// # Parameters - /// - /// - `flags`: See `crate::wasm::runtime::CallFlags` for a documentation of the supported flags. - /// - `callee_ptr`: a pointer to the address of the callee contract. Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. - /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit_ptr`: a pointer to the buffer with value of the storage deposit limit for the - /// call. Should be decodable as a `T::Balance`. Traps otherwise. Passing `SENTINEL` means - /// setting no specific limit for the call, which implies storage usage up to the limit of the - /// parent call. - /// - `value_ptr`: a pointer to the buffer with value, how much value to send. Should be - /// decodable as a `T::Balance`. 
Traps otherwise. - /// - `input_data_ptr`: a pointer to a buffer to be used as input data to the callee. - /// - `input_data_len`: length of the input data buffer. - /// - `output_ptr`: a pointer where the output buffer is copied to. - /// - `output_len_ptr`: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - /// # Errors - /// - /// An error means that the call wasn't successful output buffer is returned unless - /// stated otherwise. - /// - /// - `ReturnCode::CalleeReverted`: Output buffer is returned. - /// - `ReturnCode::CalleeTrapped` - /// - `ReturnCode::TransferFailed` - /// - `ReturnCode::NotCallable` + /// See [`pallet_contracts_uapi::HostFn::call_v2`]. #[version(2)] #[unstable] fn call( @@ -1425,7 +1246,7 @@ pub mod env { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, @@ -1443,29 +1264,7 @@ pub mod env { } /// Execute code in the context (storage, caller, value) of the current contract. - /// - /// Reentrancy protection is always disabled since the callee is allowed - /// to modify the callers storage. This makes going through a reentrancy attack - /// unnecessary for the callee when it wants to exploit the caller. - /// - /// # Parameters - /// - /// - `flags`: see `crate::wasm::runtime::CallFlags` for a documentation of the supported flags. - /// - `code_hash`: a pointer to the hash of the code to be called. - /// - `input_data_ptr`: a pointer to a buffer to be used as input data to the callee. - /// - `input_data_len`: length of the input data buffer. - /// - `output_ptr`: a pointer where the output buffer is copied to. - /// - `output_len_ptr`: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - /// # Errors - /// - /// An error means that the call wasn't successful and no output buffer is returned unless - /// stated otherwise. - /// - /// - `ReturnCode::CalleeReverted`: Output buffer is returned. - /// - `ReturnCode::CalleeTrapped` - /// - `ReturnCode::CodeNotFound` + /// See [`pallet_contracts_uapi::HostFn::delegate_call`]. #[prefixed_alias] fn delegate_call( ctx: _, @@ -1476,7 +1275,7 @@ pub mod env { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, @@ -1489,11 +1288,7 @@ pub mod env { } /// Instantiate a contract with the specified code hash. - /// - /// # New version available - /// - /// This is equivalent to calling the newer version of this function. The newer version - /// drops the now unnecessary length fields. + /// See [`pallet_contracts_uapi::HostFn::instantiate`]. /// /// # Note /// @@ -1517,7 +1312,7 @@ pub mod env { output_len_ptr: u32, salt_ptr: u32, salt_len: u32, - ) -> Result { + ) -> Result { ctx.instantiate( memory, code_hash_ptr, @@ -1536,10 +1331,7 @@ pub mod env { } /// Instantiate a contract with the specified code hash. - /// - /// Equivalent to the newer [`seal2`][`super::api_doc::Version2::instantiate`] version but works - /// with *ref_time* Weight only. It is recommended to switch to the latest version, once it's - /// stabilized. + /// See [`pallet_contracts_uapi::HostFn::instantiate_v1`]. 
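+ ///
+ /// # Example
+ ///
+ /// A minimal, illustrative sketch of the equivalent uapi call; `code_hash`,
+ /// `gas_limit`, `value`, `input` and `salt` are hypothetical values and the
+ /// 32 byte address buffer assumes an `AccountId32` based runtime.
+ ///
+ /// ```nocompile
+ /// let mut address = [0u8; 32];
+ /// HostFnImpl::instantiate_v1(
+ ///     &code_hash,             // hash of the previously uploaded code
+ ///     gas_limit,              // *ref_time* weight limit for the constructor
+ ///     &value,                 // SCALE-encoded `T::Balance` endowment
+ ///     &input,                 // constructor input data
+ ///     Some(&mut address[..]), // receive the new contract's address
+ ///     None,                   // skip copying the constructor output
+ ///     &salt,                  // salt used for address derivation
+ /// )?;
+ /// ```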
#[version(1)] #[prefixed_alias] fn instantiate( @@ -1556,7 +1348,7 @@ pub mod env { output_len_ptr: u32, salt_ptr: u32, salt_len: u32, - ) -> Result { + ) -> Result { ctx.instantiate( memory, code_hash_ptr, @@ -1575,48 +1367,7 @@ pub mod env { } /// Instantiate a contract with the specified code hash. - /// - /// This function creates an account and executes the constructor defined in the code specified - /// by the code hash. The address of this new account is copied to `address_ptr` and its length - /// to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its - /// length to `output_len_ptr`. The copy of the output buffer and address can be skipped by - /// supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: a pointer to the buffer that contains the initializer code. - /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. - /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit_ptr`: a pointer to the buffer with value of the storage deposit limit for - /// instantiation. Should be decodable as a `T::Balance`. Traps otherwise. Passing `SENTINEL` - /// means setting no specific limit for the call, which implies storage usage up to the limit - /// of the parent call. - /// - `value_ptr`: a pointer to the buffer with value, how much value to send. Should be - /// decodable as a `T::Balance`. Traps otherwise. - /// - `input_data_ptr`: a pointer to a buffer to be used as input data to the initializer code. - /// - `input_data_len`: length of the input data buffer. - /// - `address_ptr`: a pointer where the new account's address is copied to. `SENTINEL` means - /// not to copy. - /// - `address_len_ptr`: pointer to where put the length of the address. - /// - `output_ptr`: a pointer where the output buffer is copied to. `SENTINEL` means not to - /// copy. - /// - `output_len_ptr`: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - `salt_ptr`: Pointer to raw bytes used for address derivation. See `fn contract_address`. - /// - `salt_len`: length in bytes of the supplied salt. - /// - /// # Errors - /// - /// Please consult the `ReturnCode` enum declaration for more information on those - /// errors. Here we only note things specific to this function. - /// - /// An error means that the account wasn't created and no address or output buffer - /// is returned unless stated otherwise. - /// - /// - `ReturnCode::CalleeReverted`: Output buffer is returned. - /// - `ReturnCode::CalleeTrapped` - /// - `ReturnCode::TransferFailed` - /// - `ReturnCode::CodeNotFound` + /// See [`pallet_contracts_uapi::HostFn::instantiate_v2`]. #[version(2)] #[unstable] fn instantiate( @@ -1635,7 +1386,7 @@ pub mod env { output_len_ptr: u32, salt_ptr: u32, salt_len: u32, - ) -> Result { + ) -> Result { ctx.instantiate( memory, code_hash_ptr, @@ -1654,11 +1405,7 @@ pub mod env { } /// Remove the calling account and transfer remaining balance. - /// - /// # New version available - /// - /// This is equivalent to calling the newer version of this function. The newer version - /// drops the now unnecessary length fields. + /// See [`pallet_contracts_uapi::HostFn::terminate`]. /// /// # Note /// @@ -1676,20 +1423,7 @@ pub mod env { } /// Remove the calling account and transfer remaining **free** balance. - /// - /// This function never returns. 
Either the termination was successful and the - /// execution of the destroyed contract is halted. Or it failed during the termination - /// which is considered fatal and results in a trap + rollback. - /// - /// - `beneficiary_ptr`: a pointer to the address of the beneficiary account where all where all - /// remaining funds of the caller are transferred. Should be decodable as an `T::AccountId`. - /// Traps otherwise. - /// - /// # Traps - /// - /// - The contract is live i.e is already on the call stack. - /// - Failed to send the balance to the beneficiary. - /// - The deletion queue is full. + /// See [`pallet_contracts_uapi::HostFn::terminate_v1`]. #[version(1)] #[prefixed_alias] fn terminate(ctx: _, memory: _, beneficiary_ptr: u32) -> Result<(), TrapReason> { @@ -1697,15 +1431,7 @@ pub mod env { } /// Stores the input passed by the caller into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// # Note - /// - /// This function traps if the input was previously forwarded by a [`call()`][`Self::call()`]. + /// See [`pallet_contracts_uapi::HostFn::input`]. #[prefixed_alias] fn input(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::InputBase)?; @@ -1721,22 +1447,7 @@ pub mod env { } /// Cease contract execution and save a data buffer as a result of the execution. - /// - /// This function never returns as it stops execution of the caller. - /// This is the only way to return a data buffer to the caller. Returning from - /// execution without calling this function is equivalent to calling: - /// ```nocompile - /// seal_return(0, 0, 0); - /// ``` - /// - /// The flags argument is a bitfield that can be used to signal special return - /// conditions to the supervisor: - /// --- lsb --- - /// bit 0 : REVERT - Revert all storage changes made by the caller. - /// bit [1, 31]: Reserved for future use. - /// --- msb --- - /// - /// Using a reserved bit triggers a trap. + /// See [`pallet_contracts_uapi::HostFn::return_value`]. fn seal_return( ctx: _, memory: _, @@ -1752,18 +1463,7 @@ pub mod env { } /// Stores the address of the caller into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the - /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then - /// the address of the contract will be returned. The value is encoded as T::AccountId. - /// - /// If there is no address associated with the caller (e.g. because the caller is root) then - /// it traps with `BadOrigin`. + /// See [`pallet_contracts_uapi::HostFn::caller`]. #[prefixed_alias] fn caller(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Caller)?; @@ -1779,13 +1479,7 @@ pub mod env { } /// Checks whether a specified address belongs to a contract. 
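+ ///
+ /// # Example
+ ///
+ /// An illustrative sketch from the contract side, assuming a uapi-style wrapper
+ /// of the shape `is_contract(account_id: &[u8]) -> bool` (the wrapper's exact
+ /// signature is not shown in this diff); `target` and the error value are
+ /// hypothetical.
+ ///
+ /// ```nocompile
+ /// // Only forward funds when the target address is itself a contract.
+ /// if !HostFnImpl::is_contract(&target) {
+ ///     return Err(Error::NotAContract);
+ /// }
+ /// ```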
- /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the address of the beneficiary account Should be decodable as - /// an `T::AccountId`. Traps otherwise. - /// - /// Returned value is a `u32`-encoded boolean: (0 = false, 1 = true). + /// See [`pallet_contracts_uapi::HostFn::is_contract`]. #[prefixed_alias] fn is_contract(ctx: _, memory: _, account_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::IsContract)?; @@ -1796,18 +1490,7 @@ pub mod env { } /// Retrieve the code hash for a specified contract address. - /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the address in question. Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - `out_ptr`: pointer to the linear memory where the returning value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// - `ReturnCode::KeyNotFound` + /// See [`pallet_contracts_uapi::HostFn::code_hash`]. #[prefixed_alias] fn code_hash( ctx: _, @@ -1815,7 +1498,7 @@ pub mod env { account_ptr: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::CodeHash)?; let address: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(memory, account_ptr)?; @@ -1828,19 +1511,14 @@ pub mod env { false, already_charged, )?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } else { - Ok(ReturnCode::KeyNotFound) + Ok(ReturnErrorCode::KeyNotFound) } } /// Retrieve the code hash of the currently executing contract. - /// - /// # Parameters - /// - /// - `out_ptr`: pointer to the linear memory where the returning value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. + /// See [`pallet_contracts_uapi::HostFn::own_code_hash`]. #[prefixed_alias] fn own_code_hash(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::OwnCodeHash)?; @@ -1856,15 +1534,7 @@ pub mod env { } /// Checks whether the caller of the current contract is the origin of the whole call stack. - /// - /// Prefer this over [`is_contract()`][`Self::is_contract`] when checking whether your contract - /// is being called by a contract or a plain account. The reason is that it performs better - /// since it does not need to do any storage lookups. - /// - /// A return value of `true` indicates that this contract is being called by a plain account - /// and `false` indicates that the caller is another contract. - /// - /// Returned value is a `u32`-encoded boolean: (`0 = false`, `1 = true`). + /// See [`pallet_contracts_uapi::HostFn::caller_is_origin`]. #[prefixed_alias] fn caller_is_origin(ctx: _, _memory: _) -> Result { ctx.charge_gas(RuntimeCosts::CallerIsOrigin)?; @@ -1872,14 +1542,7 @@ pub mod env { } /// Checks whether the caller of the current contract is root. - /// - /// Note that only the origin of the call stack can be root. Hence this function returning - /// `true` implies that the contract is being called by the origin. - /// - /// A return value of `true` indicates that this contract is being called by a root origin, - /// and `false` indicates that the caller is a signed origin. - /// - /// Returned value is a `u32`-encoded boolean: (`0 = false`, `1 = true`). + /// See [`pallet_contracts_uapi::HostFn::caller_is_root`]. 
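+ ///
+ /// # Example
+ ///
+ /// An illustrative guard on the contract side using the `u32`-encoded boolean
+ /// returned by the uapi wrapper (`1` = `true`); the error value is hypothetical.
+ ///
+ /// ```nocompile
+ /// if HostFnImpl::caller_is_root() != 1 {
+ ///     return Err(Error::RootOnly);
+ /// }
+ /// ```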
#[unstable] fn caller_is_root(ctx: _, _memory: _) -> Result { ctx.charge_gas(RuntimeCosts::CallerIsRoot)?; @@ -1887,11 +1550,7 @@ pub mod env { } /// Stores the address of the current contract into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// See [`pallet_contracts_uapi::HostFn::address`]. #[prefixed_alias] fn address(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Address)?; @@ -1906,10 +1565,7 @@ pub mod env { } /// Stores the price for the specified amount of gas into the supplied buffer. - /// - /// Equivalent to the newer [`seal1`][`super::api_doc::Version2::weight_to_fee`] version but - /// works with *ref_time* Weight only. It is recommended to switch to the latest version, once - /// it's stabilized. + /// See [`pallet_contracts_uapi::HostFn::weight_to_fee`]. #[prefixed_alias] fn weight_to_fee( ctx: _, @@ -1931,21 +1587,7 @@ pub mod env { } /// Stores the price for the specified amount of weight into the supplied buffer. - /// - /// # Parameters - /// - /// - `out_ptr`: pointer to the linear memory where the returning value is written to. If the - /// available space at `out_ptr` is less than the size of the value a trap is triggered. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// The data is encoded as `T::Balance`. - /// - /// # Note - /// - /// It is recommended to avoid specifying very small values for `ref_time_limit` and - /// `proof_size_limit` as the prices for a single gas can be smaller than the basic balance - /// unit. + /// See [`pallet_contracts_uapi::HostFn::weight_to_fee_v1`]. #[version(1)] #[unstable] fn weight_to_fee( @@ -1969,10 +1611,7 @@ pub mod env { } /// Stores the weight left into the supplied buffer. - /// - /// Equivalent to the newer [`seal1`][`super::api_doc::Version2::gas_left`] version but - /// works with *ref_time* Weight only. It is recommended to switch to the latest version, once - /// it's stabilized. + /// See [`pallet_contracts_uapi::HostFn::gas_left`]. #[prefixed_alias] fn gas_left(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::GasLeft)?; @@ -1988,13 +1627,7 @@ pub mod env { } /// Stores the amount of weight left into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as Weight. + /// See [`pallet_contracts_uapi::HostFn::gas_left_v1`]. #[version(1)] #[unstable] fn gas_left(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { @@ -2011,13 +1644,7 @@ pub mod env { } /// Stores the *free* balance of the current account into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. 
This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as `T::Balance`. + /// See [`pallet_contracts_uapi::HostFn::balance`]. #[prefixed_alias] fn balance(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Balance)?; @@ -2032,13 +1659,7 @@ pub mod env { } /// Stores the value transferred along with this call/instantiate into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a `u32` value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as `T::Balance`. + /// See [`pallet_contracts_uapi::HostFn::value_transferred`]. #[prefixed_alias] fn value_transferred( ctx: _, @@ -2138,11 +1759,7 @@ pub mod env { } /// Load the latest block timestamp into the supplied buffer - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// See [`pallet_contracts_uapi::HostFn::now`]. #[prefixed_alias] fn now(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Now)?; @@ -2157,8 +1774,7 @@ pub mod env { } /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - /// - /// The data is encoded as `T::Balance`. + /// See [`pallet_contracts_uapi::HostFn::minimum_balance`]. #[prefixed_alias] fn minimum_balance( ctx: _, @@ -2306,15 +1922,8 @@ pub mod env { )?) } - /// Deposit a contract event with the data buffer and optional list of topics. There is a limit - /// on the maximum number of topics specified by `event_topics`. - /// - /// - `topics_ptr`: a pointer to the buffer of topics encoded as `Vec`. The value of - /// this is ignored if `topics_len` is set to `0`. The topics list can't contain duplicates. - /// - `topics_len`: the length of the topics buffer. Pass 0 if you want to pass an empty - /// vector. - /// - `data_ptr`: a pointer to a raw data buffer which will saved along the event. - /// - `data_len`: the length of the data buffer. + /// Deposit a contract event with the data buffer and optional list of topics. + /// See [pallet_contracts_uapi::HostFn::deposit_event] #[prefixed_alias] fn deposit_event( ctx: _, @@ -2350,11 +1959,7 @@ pub mod env { } /// Stores the current block number of the current contract into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// See [`pallet_contracts_uapi::HostFn::block_number`]. #[prefixed_alias] fn block_number(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::BlockNumber)?; @@ -2369,22 +1974,7 @@ pub mod env { } /// Computes the SHA2 256-bit hash on the given input buffer. 
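+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of the corresponding uapi wrapper; the input bytes are a
+ /// placeholder.
+ ///
+ /// ```nocompile
+ /// let mut digest = [0u8; 32];
+ /// HostFnImpl::hash_sha2_256(b"some preimage", &mut digest);
+ /// // `digest` now holds the 32 byte SHA2-256 hash of the input.
+ /// ```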
- /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. + /// See [`pallet_contracts_uapi::HostFn::hash_sha2_256`]. #[prefixed_alias] fn hash_sha2_256( ctx: _, @@ -2400,22 +1990,7 @@ pub mod env { } /// Computes the KECCAK 256-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. + /// See [`pallet_contracts_uapi::HostFn::hash_keccak_256`]. #[prefixed_alias] fn hash_keccak_256( ctx: _, @@ -2431,22 +2006,7 @@ pub mod env { } /// Computes the BLAKE2 256-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. + /// See [`pallet_contracts_uapi::HostFn::hash_blake2_256`]. #[prefixed_alias] fn hash_blake2_256( ctx: _, @@ -2462,22 +2022,7 @@ pub mod env { } /// Computes the BLAKE2 128-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 16 bytes (128 bits). - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. 
The - /// function will write the result directly into this buffer. + /// See [`pallet_contracts_uapi::HostFn::hash_blake2_128`]. #[prefixed_alias] fn hash_blake2_128( ctx: _, @@ -2493,16 +2038,7 @@ pub mod env { } /// Call into the chain extension provided by the chain if any. - /// - /// Handling of the input values is up to the specific chain extension and so is the - /// return value. The extension can decide to use the inputs as primitive inputs or as - /// in/out arguments by interpreting them as pointers. Any caller of this function - /// must therefore coordinate with the chain that it targets. - /// - /// # Note - /// - /// If no chain extension exists the contract will trap with the `NoChainExtension` - /// module error. + /// See [`pallet_contracts_uapi::HostFn::call_chain_extension`]. #[prefixed_alias] fn call_chain_extension( ctx: _, @@ -2555,7 +2091,7 @@ pub mod env { memory: _, str_ptr: u32, str_len: u32, - ) -> Result { + ) -> Result { let str_len = str_len.min(DebugBufferVec::::bound() as u32); ctx.charge_gas(RuntimeCosts::DebugMessage(str_len))?; if ctx.ext.append_debug_buffer("") { @@ -2564,85 +2100,105 @@ pub mod env { ctx.ext.append_debug_buffer(msg); } } - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } /// Call some dispatchable of the runtime. - /// - /// This function decodes the passed in data as the overarching `Call` type of the - /// runtime and dispatches it. The weight as specified in the runtime is charged - /// from the gas meter. Any weight refunds made by the dispatchable are considered. - /// - /// The filter specified by `Config::CallFilter` is attached to the origin of - /// the dispatched call. - /// - /// # Parameters - /// - /// - `call_ptr`: the pointer into the linear memory where the input data is placed. - /// - `call_len`: the length of the input data in bytes. - /// - /// # Return Value - /// - /// Returns `ReturnCode::Success` when the dispatchable was successfully executed and - /// returned `Ok`. When the dispatchable was exeuted but returned an error - /// `ReturnCode::CallRuntimeFailed` is returned. The full error is not - /// provided because it is not guaranteed to be stable. - /// - /// # Comparison with `ChainExtension` - /// - /// Just as a chain extension this API allows the runtime to extend the functionality - /// of contracts. While making use of this function is generally easier it cannot be - /// used in all cases. Consider writing a chain extension if you need to do perform - /// one of the following tasks: - /// - /// - Return data. - /// - Provide functionality **exclusively** to contracts. - /// - Provide custom weights. - /// - Avoid the need to keep the `Call` data structure stable. + /// See [`frame_support::traits::call_runtime`]. 
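+ ///
+ /// # Example
+ ///
+ /// A sketch of dispatching a runtime call from a contract through the uapi
+ /// wrapper; `RuntimeCall` and the `remark` call are assumptions about the
+ /// target chain.
+ ///
+ /// ```nocompile
+ /// // Pass the SCALE-encoded overarching `RuntimeCall` to the host function.
+ /// let call = RuntimeCall::System(frame_system::Call::remark { remark: b"ping".to_vec() });
+ /// HostFnImpl::call_runtime(&call.encode())?;
+ /// ```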
fn call_runtime( ctx: _, memory: _, call_ptr: u32, call_len: u32, - ) -> Result { - use frame_support::dispatch::{extract_actual_weight, GetDispatchInfo}; + ) -> Result { + use frame_support::dispatch::GetDispatchInfo; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; let call: ::RuntimeCall = ctx.read_sandbox_memory_as_unbounded(memory, call_ptr, call_len)?; - let dispatch_info = call.get_dispatch_info(); - let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; - let result = ctx.ext.call_runtime(call); - let actual_weight = extract_actual_weight(&result, &dispatch_info); - ctx.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); - match result { - Ok(_) => Ok(ReturnCode::Success), + ctx.call_dispatchable::(call.get_dispatch_info(), |ctx| { + ctx.ext.call_runtime(call) + }) + } + + /// Execute an XCM program locally, using the contract's address as the origin. + /// See [`pallet_contracts_uapi::HostFn::execute_xcm`]. + #[unstable] + fn xcm_execute( + ctx: _, + memory: _, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use frame_support::dispatch::DispatchInfo; + use xcm::VersionedXcm; + use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; + + ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let message: VersionedXcm> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + + let execute_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); + let dispatch_info = DispatchInfo { weight, ..Default::default() }; + + ensure_executable::(&message)?; + ctx.call_dispatchable::(dispatch_info, |ctx| { + let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); + let outcome = <::Xcm>::execute( + origin, + Box::new(message), + weight.saturating_sub(execute_weight), + )?; + + ctx.write_sandbox_memory(memory, output_ptr, &outcome.encode())?; + let pre_dispatch_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + Ok(Some(outcome.weight_used().saturating_add(pre_dispatch_weight)).into()) + }) + } + + /// Send an XCM program from the contract to the specified destination. + /// See [`pallet_contracts_uapi::HostFn::send_xcm`]. + #[unstable] + fn xcm_send( + ctx: _, + memory: _, + dest_ptr: u32, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use xcm::{VersionedMultiLocation, VersionedXcm}; + use xcm_builder::{SendController, SendControllerWeightInfo}; + + ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let dest: VersionedMultiLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?; + + let message: VersionedXcm<()> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); + ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?; + let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); + + match <::Xcm>::send(origin, dest.into(), message.into()) { + Ok(message_id) => { + ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; + Ok(ReturnErrorCode::Success) + }, Err(e) => { if ctx.ext.append_debug_buffer("") { - ctx.ext.append_debug_buffer("seal0::call_runtime failed with: "); + ctx.ext.append_debug_buffer("seal0::xcm_send failed with: "); ctx.ext.append_debug_buffer(e.into()); }; - Ok(ReturnCode::CallRuntimeFailed) + Ok(ReturnErrorCode::XcmSendFailed) }, } } /// Recovers the ECDSA public key from the given message hash and signature. 
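+ ///
+ /// # Example
+ ///
+ /// An illustrative recovery through the uapi wrapper; `signed_payload()` and
+ /// `hashed_message()` are hypothetical helpers producing the 65 byte signature
+ /// and the 32 byte message hash.
+ ///
+ /// ```nocompile
+ /// let signature: [u8; 65] = signed_payload();
+ /// let message_hash: [u8; 32] = hashed_message();
+ /// let mut pub_key = [0u8; 33];
+ /// HostFnImpl::ecdsa_recover(&signature, &message_hash, &mut pub_key)?;
+ /// // `pub_key` now holds the compressed secp256k1 public key.
+ /// ```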
- /// - /// Writes the public key into the given output buffer. - /// Assumes the secp256k1 curve. - /// - /// # Parameters - /// - /// - `signature_ptr`: the pointer into the linear memory where the signature is placed. Should - /// be decodable as a 65 bytes. Traps otherwise. - /// - `message_hash_ptr`: the pointer into the linear memory where the message hash is placed. - /// Should be decodable as a 32 bytes. Traps otherwise. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// buffer should be 33 bytes. The function will write the result directly into this buffer. - /// - /// # Errors - /// - /// - `ReturnCode::EcdsaRecoverFailed` + /// See [`pallet_contracts_uapi::HostFn::ecdsa_recover`]. #[prefixed_alias] fn ecdsa_recover( ctx: _, @@ -2650,7 +2206,7 @@ pub mod env { signature_ptr: u32, message_hash_ptr: u32, output_ptr: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::EcdsaRecovery)?; let mut signature: [u8; 65] = [0; 65]; @@ -2666,26 +2222,14 @@ pub mod env { // buffer. ctx.write_sandbox_memory(memory, output_ptr, pub_key.as_ref())?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) }, - Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), + Err(_) => Ok(ReturnErrorCode::EcdsaRecoveryFailed), } } /// Verify a sr25519 signature - /// - /// # Parameters - /// - /// - `signature_ptr`: the pointer into the linear memory where the signature is placed. Should - /// be a value of 64 bytes. - /// - `pub_key_ptr`: the pointer into the linear memory where the public key is placed. Should - /// be a value of 32 bytes. - /// - `message_len`: the length of the message payload. - /// - `message_ptr`: the pointer into the linear memory where the message is placed. - /// - /// # Errors - /// - /// - `ReturnCode::Sr25519VerifyFailed + /// See [`pallet_contracts_uapi::HostFn::sr25519_verify`]. #[unstable] fn sr25519_verify( ctx: _, @@ -2694,7 +2238,7 @@ pub mod env { pub_key_ptr: u32, message_len: u32, message_ptr: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::Sr25519Verify(message_len))?; let mut signature: [u8; 64] = [0; 64]; @@ -2706,41 +2250,16 @@ pub mod env { let message: Vec = ctx.read_sandbox_memory(memory, message_ptr, message_len)?; if ctx.ext.sr25519_verify(&signature, &message, &pub_key) { - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } else { - Ok(ReturnCode::Sr25519VerifyFailed) + Ok(ReturnErrorCode::Sr25519VerifyFailed) } } /// Replace the contract code at the specified address with new code. - /// - /// # Note - /// - /// There are a couple of important considerations which must be taken into account when - /// using this API: - /// - /// 1. The storage at the code address will remain untouched. This means that contract - /// developers must ensure that the storage layout of the new code is compatible with that of - /// the old code. - /// - /// 2. Contracts using this API can't be assumed as having deterministic addresses. Said another - /// way, when using this API you lose the guarantee that an address always identifies a specific - /// code hash. - /// - /// 3. If a contract calls into itself after changing its code the new call would use - /// the new code. However, if the original caller panics after returning from the sub call it - /// would revert the changes made by [`set_code_hash()`][`Self::set_code_hash`] and the next - /// caller would use the old code. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: A pointer to the buffer that contains the new code hash. 
- /// - /// # Errors - /// - /// - `ReturnCode::CodeNotFound` + /// See [`pallet_contracts_uapi::HostFn::set_code_hash`]. #[prefixed_alias] - fn set_code_hash(ctx: _, memory: _, code_hash_ptr: u32) -> Result { + fn set_code_hash(ctx: _, memory: _, code_hash_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::SetCodeHash)?; let code_hash: CodeHash<::T> = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; @@ -2749,33 +2268,19 @@ pub mod env { let code = Runtime::::err_into_return_code(err)?; Ok(code) }, - Ok(()) => Ok(ReturnCode::Success), + Ok(()) => Ok(ReturnErrorCode::Success), } } /// Calculates Ethereum address from the ECDSA compressed public key and stores - /// it into the supplied buffer. - /// - /// # Parameters - /// - /// - `key_ptr`: a pointer to the ECDSA compressed public key. Should be decodable as a 33 bytes - /// value. Traps otherwise. - /// - `out_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// If the available space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// # Errors - /// - /// - `ReturnCode::EcdsaRecoverFailed` + /// See [`pallet_contracts_uapi::HostFn::ecdsa_to_eth_address`]. #[prefixed_alias] fn ecdsa_to_eth_address( ctx: _, memory: _, key_ptr: u32, out_ptr: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::EcdsaToEthAddress)?; let mut compressed_key: [u8; 33] = [0; 33]; ctx.read_sandbox_memory_into_buf(memory, key_ptr, &mut compressed_key)?; @@ -2783,18 +2288,15 @@ pub mod env { match result { Ok(eth_address) => { ctx.write_sandbox_memory(memory, out_ptr, eth_address.as_ref())?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) }, - Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), + Err(_) => Ok(ReturnErrorCode::EcdsaRecoveryFailed), } } /// Returns the number of times the currently executing contract exists on the call stack in /// addition to the calling instance. - /// - /// # Return Value - /// - /// Returns `0` when there is no reentrancy. + /// See [`pallet_contracts_uapi::HostFn::reentrance_count`]. #[unstable] fn reentrance_count(ctx: _, memory: _) -> Result { ctx.charge_gas(RuntimeCosts::ReentrantCount)?; @@ -2803,14 +2305,7 @@ pub mod env { /// Returns the number of times specified contract exists on the call stack. Delegated calls are /// not counted as separate calls. - /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the contract address. - /// - /// # Return Value - /// - /// Returns `0` when the contract does not exist on the call stack. + /// See [`pallet_contracts_uapi::HostFn::account_reentrance_count`]. #[unstable] fn account_reentrance_count(ctx: _, memory: _, account_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::AccountEntranceCount)?; @@ -2820,19 +2315,14 @@ pub mod env { } /// Returns a nonce that is unique per contract instantiation. - /// - /// The nonce is incremented for each successful contract instantiation. This is a - /// sensible default salt for contract instantiations. + /// See [`pallet_contracts_uapi::HostFn::instantiation_nonce`]. fn instantiation_nonce(ctx: _, _memory: _) -> Result { ctx.charge_gas(RuntimeCosts::InstantationNonce)?; Ok(ctx.ext.nonce()) } /// Adds a new delegate dependency to the contract. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: A pointer to the code hash of the dependency. 
+ /// See [`pallet_contracts_uapi::HostFn::add_delegate_dependency`]. #[unstable] fn add_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::AddDelegateDependency)?; @@ -2842,10 +2332,7 @@ pub mod env { } /// Removes the delegate dependency from the contract. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: A pointer to the code hash of the dependency. + /// see [`pallet_contracts_uapi::HostFn::remove_delegate_dependency`]. #[unstable] fn remove_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::RemoveDelegateDependency)?; diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f29014272829b1dd4b5df72b81a2b236016b6ddb --- /dev/null +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "pallet-contracts-uapi" +version = "4.0.0-dev" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "Exposes all the host functions that a contract can import." + +[lints] +workspace = true + +[dependencies] +paste = { version = "1.0", default-features = false } +bitflags = "1.0" +scale-info = { version = "2.10.0", default-features = false, features = ["derive"], optional = true } +scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", + "max-encoded-len", +], optional = true } + +[features] +default = ["scale"] +scale = ["dep:scale", "scale-info"] diff --git a/substrate/frame/contracts/uapi/src/flags.rs b/substrate/frame/contracts/uapi/src/flags.rs new file mode 100644 index 0000000000000000000000000000000000000000..32553817fb7ae08a7c778853f458a3bfd52d3a2b --- /dev/null +++ b/substrate/frame/contracts/uapi/src/flags.rs @@ -0,0 +1,73 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use bitflags::bitflags; + +bitflags! { + /// Flags used by a contract to customize exit behaviour. + #[cfg_attr(feature = "scale", derive(scale::Encode, scale::Decode, scale_info::TypeInfo))] + pub struct ReturnFlags: u32 { + /// If this bit is set all changes made by the contract execution are rolled back. + const REVERT = 0x0000_0001; + } +} + +bitflags! { + /// Flags used to change the behaviour of `seal_call` and `seal_delegate_call`. + pub struct CallFlags: u32 { + /// Forward the input of current function to the callee. + /// + /// Supplied input pointers are ignored when set. + /// + /// # Note + /// + /// A forwarding call will consume the current contracts input. Any attempt to + /// access the input after this call returns will lead to [`Error::InputForwarded`]. 
+ /// It does not matter if this is due to calling `seal_input` or trying another + /// forwarding call. Consider using [`Self::CLONE_INPUT`] in order to preserve + /// the input. + const FORWARD_INPUT = 0b0000_0001; + /// Identical to [`Self::FORWARD_INPUT`] but without consuming the input. + /// + /// This adds some additional weight costs to the call. + /// + /// # Note + /// + /// This implies [`Self::FORWARD_INPUT`] and takes precedence when both are set. + const CLONE_INPUT = 0b0000_0010; + /// Do not return from the call but rather return the result of the callee to the + /// callers caller. + /// + /// # Note + /// + /// This makes the current contract completely transparent to its caller by replacing + /// this contracts potential output by the callee ones. Any code after `seal_call` + /// can be safely considered unreachable. + const TAIL_CALL = 0b0000_0100; + /// Allow the callee to reenter into the current contract. + /// + /// Without this flag any reentrancy into the current contract that originates from + /// the callee (or any of its callees) is denied. This includes the first callee: + /// You cannot call into yourself with this flag set. + /// + /// # Note + /// + /// For `seal_delegate_call` should be always unset, otherwise + /// [`Error::InvalidCallFlags`] is returned. + const ALLOW_REENTRY = 0b0000_1000; + } +} diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs new file mode 100644 index 0000000000000000000000000000000000000000..f8b55d3822e9fb0fd52e6fd7abaa313ddefa7b6c --- /dev/null +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -0,0 +1,793 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use crate::{CallFlags, Result, ReturnFlags, SENTINEL}; +use paste::paste; + +#[cfg(target_arch = "wasm32")] +mod wasm32; + +#[cfg(target_arch = "riscv32")] +mod riscv32; + +macro_rules! hash_fn { + ( $name:ident, $bytes:literal ) => { + paste! 
{ + #[doc = "Computes the " $name " " $bytes "-bit hash on the given input buffer."] + #[doc = "\n# Notes\n"] + #[doc = "- The `input` and `output` buffer may overlap."] + #[doc = "- The output buffer is expected to hold at least " $bytes " bits."] + #[doc = "- It is the callers responsibility to provide an output buffer that is large enough to hold the expected amount of bytes returned by the hash function."] + #[doc = "\n# Parameters\n"] + #[doc = "- `input`: The input data buffer."] + #[doc = "- `output`: The output buffer to write the hash result to."] + fn [](input: &[u8], output: &mut [u8; $bytes]); + } + }; +} + +fn extract_from_slice(output: &mut &mut [u8], new_len: usize) { + debug_assert!(new_len <= output.len()); + let tmp = core::mem::take(output); + *output = &mut tmp[..new_len]; +} + +fn ptr_len_or_sentinel(data: &mut Option<&mut [u8]>) -> (*mut u8, u32) { + match data { + Some(ref mut data) => (data.as_mut_ptr(), data.len() as _), + None => (SENTINEL as _, 0), + } +} + +fn ptr_or_sentinel(data: &Option<&[u8]>) -> *const u8 { + match data { + Some(ref data) => data.as_ptr(), + None => SENTINEL as _, + } +} + +/// Implements [`HostFn`] for each supported target architecture. +pub enum HostFnImpl {} + +/// Defines all the host apis implemented by both wasm and RISC-V vms. +pub trait HostFn { + /// Returns the number of times specified contract exists on the call stack. Delegated calls are + /// not counted as separate calls. + /// + /// # Parameters + /// + /// - `account`: The contract address. Should be decodable as an `T::AccountId`. Traps + /// otherwise. + /// + /// # Return + /// + /// Returns the number of times specified contract exists on the call stack. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn account_reentrance_count(account: &[u8]) -> u32; + + /// Stores the address of the current contract into the supplied buffer. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the address. + fn address(output: &mut &mut [u8]); + + /// Adds a new delegate dependency to the contract. + /// + /// Traps if the maximum number of delegate_dependencies is reached or if + /// the delegate dependency already exists. + /// + /// # Parameters + /// + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn add_delegate_dependency(code_hash: &[u8]); + + /// Stores the *free* balance of the current account into the supplied buffer. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the balance. + fn balance(output: &mut &mut [u8]); + + /// Stores the current block number of the current contract into the supplied buffer. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the block number. + fn block_number(output: &mut &mut [u8]); + + /// Make a call to another contract. + /// + /// This is equivalent to calling the newer version of this function with + /// `flags` set to [`CallFlags::ALLOW_REENTRY`]. 
See the newer version for documentation. + #[deprecated(note = "Deprecated, use newer version instead")] + fn call( + callee: &[u8], + gas: u64, + value: &[u8], + input_data: &[u8], + output: Option<&mut [u8]>, + ) -> Result; + + /// Make a call to another contract. + /// + /// Equivalent to the newer [`Self::call_v2`] version but works with + /// *ref_time* Weight only + fn call_v1( + flags: CallFlags, + callee: &[u8], + gas: u64, + value: &[u8], + input_data: &[u8], + output: Option<&mut [u8]>, + ) -> Result; + + /// Call (possibly transferring some amount of funds) into the specified account. + /// + /// # Parameters + /// + /// - `flags`: See [`CallFlags`] for a documentation of the supported flags. + /// - `callee`: The address of the callee. Should be decodable as an `T::AccountId`. Traps + /// otherwise. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit`: The storage deposit limit for instantiation. Should be decodable as a + /// `Option`. Traps otherwise. Passing `None` means setting no specific limit for + /// the call, which implies storage usage up to the limit of the parent call. + /// - `value`: The value to transfer into the contract. Should be decodable as a `T::Balance`. + /// Traps otherwise. + /// - `input`: The input data buffer used to call the contract. + /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` + /// is provided then the output buffer is not copied. + /// + /// # Errors + /// + /// An error means that the call wasn't successful output buffer is returned unless + /// stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + /// - [NotCallable][`crate::ReturnErrorCode::NotCallable] + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn call_v2( + flags: CallFlags, + callee: &[u8], + ref_time_limit: u64, + proof_time_limit: u64, + deposit: Option<&[u8]>, + value: &[u8], + input_data: &[u8], + output: Option<&mut [u8]>, + ) -> Result; + + /// Call into the chain extension provided by the chain if any. + /// + /// Handling of the input values is up to the specific chain extension and so is the + /// return value. The extension can decide to use the inputs as primitive inputs or as + /// in/out arguments by interpreting them as pointers. Any caller of this function + /// must therefore coordinate with the chain that it targets. + /// + /// # Note + /// + /// If no chain extension exists the contract will trap with the `NoChainExtension` + /// module error. + /// + /// # Parameters + /// + /// - `func_id`: The function id of the chain extension. + /// - `input`: The input data buffer. + /// - `output`: A reference to the output data buffer to write the output data. + /// + /// # Return + /// + /// The chain extension returned value, if executed successfully. + fn call_chain_extension(func_id: u32, input: &[u8], output: &mut &mut [u8]) -> u32; + + /// Call some dispatchable of the runtime. + /// + /// # Parameters + /// + /// - `call`: The call data. + /// + /// # Return + /// + /// Returns `Error::Success` when the dispatchable was successfully executed and + /// returned `Ok`. 
When the dispatchable was executed but returned an error + /// `Error::CallRuntimeFailed` is returned. The full error is not + /// provided because it is not guaranteed to be stable. + /// + /// # Comparison with `ChainExtension` + /// + /// Just as a chain extension this API allows the runtime to extend the functionality + /// of contracts. While making use of this function is generally easier it cannot be + /// used in all cases. Consider writing a chain extension if you need to do perform + /// one of the following tasks: + /// + /// - Return data. + /// - Provide functionality **exclusively** to contracts. + /// - Provide custom weights. + /// - Avoid the need to keep the `Call` data structure stable. + fn call_runtime(call: &[u8]) -> Result; + + /// Stores the address of the caller into the supplied buffer. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the + /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then + /// the address of the contract will be returned. + /// + /// If there is no address associated with the caller (e.g. because the caller is root) then + /// it traps with `BadOrigin`. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the caller address. + fn caller(output: &mut &mut [u8]); + + /// Checks whether the caller of the current contract is the origin of the whole call stack. + /// + /// Prefer this over [`is_contract()`][`Self::is_contract`] when checking whether your contract + /// is being called by a contract or a plain account. The reason is that it performs better + /// since it does not need to do any storage lookups. + /// + /// # Return + /// + /// A return value of `true` indicates that this contract is being called by a plain account + /// and `false` indicates that the caller is another contract. + fn caller_is_origin() -> bool; + + /// Checks whether the caller of the current contract is root. + /// + /// Note that only the origin of the call stack can be root. Hence this function returning + /// `true` implies that the contract is being called by the origin. + /// + /// A return value of `true` indicates that this contract is being called by a root origin, + /// and `false` indicates that the caller is a signed origin. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn caller_is_root() -> u32; + + /// Clear the value at the given key in the contract storage. + /// + /// Equivalent to the newer [`Self::clear_storage_v1`] version with + /// the exception of the return type. Still a valid thing to call when not interested in the + /// return value. + fn clear_storage(key: &[u8]); + + /// Clear the value at the given key in the contract storage. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + fn clear_storage_v1(key: &[u8]) -> Option; + + /// Retrieve the code hash for a specified contract address. + /// + /// # Parameters + /// + /// - `account_id`: The address of the contract.Should be decodable as an `T::AccountId`. Traps + /// otherwise. + /// - `output`: A reference to the output data buffer to write the code hash. 
+ /// + /// + /// # Errors + /// + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn code_hash(account_id: &[u8], output: &mut [u8]) -> Result; + + /// Checks whether there is a value stored under the given key. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + fn contains_storage(key: &[u8]) -> Option; + + /// Checks whether there is a value stored under the given key. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// - `key`: The storage key. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + fn contains_storage_v1(key: &[u8]) -> Option; + + /// Execute code in the context (storage, caller, value) of the current contract. + /// + /// Reentrancy protection is always disabled since the callee is allowed + /// to modify the callers storage. This makes going through a reentrancy attack + /// unnecessary for the callee when it wants to exploit the caller. + /// + /// # Parameters + /// + /// - `flags`: See [`CallFlags`] for a documentation of the supported flags. + /// - `code_hash`: The hash of the code to be executed. + /// - `input`: The input data buffer used to call the contract. + /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` + /// is provided then the output buffer is not copied. + /// + /// # Errors + /// + /// An error means that the call wasn't successful and no output buffer is returned unless + /// stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn delegate_call( + flags: CallFlags, + code_hash: &[u8], + input_data: &[u8], + output: Option<&mut [u8]>, + ) -> Result; + + /// Deposit a contract event with the data buffer and optional list of topics. There is a limit + /// on the maximum number of topics specified by `event_topics`. + /// + /// There should not be any duplicates in `topics`. + /// + /// # Parameters + /// + /// - `topics`: The topics list encoded as `Vec`. It can't contain duplicates. + fn deposit_event(topics: &[u8], data: &[u8]); + + /// Recovers the ECDSA public key from the given message hash and signature. + /// + /// Writes the public key into the given output buffer. + /// Assumes the secp256k1 curve. + /// + /// # Parameters + /// + /// - `signature`: The signature bytes. + /// - `message_hash`: The message hash bytes. + /// - `output`: A reference to the output data buffer to write the public key. + /// + /// # Errors + /// + /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] + fn ecdsa_recover( + signature: &[u8; 65], + message_hash: &[u8; 32], + output: &mut [u8; 33], + ) -> Result; + + /// Calculates Ethereum address from the ECDSA compressed public key and stores + /// it into the supplied buffer. + /// + /// # Parameters + /// + /// - `pubkey`: The public key bytes. + /// - `output`: A reference to the output data buffer to write the address. + /// + /// # Errors + /// + /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] + fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result; + + /// Stores the weight left into the supplied buffer. 
+ /// + /// Equivalent to the newer [`Self::gas_left_v1`] version but + /// works with *ref_time* Weight only. + fn gas_left(out: &mut &mut [u8]); + + /// Stores the amount of weight left into the supplied buffer. + /// The data is encoded as Weight. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the weight left. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn gas_left_v1(output: &mut &mut [u8]); + + /// Retrieve the value under the given key from storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + fn get_storage(key: &[u8], output: &mut &mut [u8]) -> Result; + + /// Retrieve the value under the given key from storage. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// - `key`: The storage key. + /// - `output`: A reference to the output data buffer to write the storage entry. + /// + /// # Errors + /// + /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + fn get_storage_v1(key: &[u8], output: &mut &mut [u8]) -> Result; + + hash_fn!(sha2_256, 32); + hash_fn!(keccak_256, 32); + hash_fn!(blake2_256, 32); + hash_fn!(blake2_128, 16); + + /// Stores the input passed by the caller into the supplied buffer. + /// + /// # Note + /// + /// This function traps if: + /// - the input is larger than the available space. + /// - the input was previously forwarded by a [`call()`][`Self::call()`]. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the input data. + fn input(output: &mut &mut [u8]); + + /// Instantiate a contract with the specified code hash. + /// + /// Equivalent to the newer [`Self::instantiate_v2`] version but works + /// with *ref_time* Weight only. + fn instantiate_v1( + code_hash: &[u8], + gas: u64, + value: &[u8], + input: &[u8], + address: Option<&mut [u8]>, + output: Option<&mut [u8]>, + salt: &[u8], + ) -> Result; + + /// Instantiate a contract with the specified code hash. + /// + /// This function creates an account and executes the constructor defined in the code specified + /// by the code hash. + /// + /// # Parameters + /// + /// - `code_hash`: The hash of the code to be instantiated. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit`: The storage deposit limit for instantiation. Should be decodable as a + /// `Option`. Traps otherwise. Passing `None` means setting no specific limit for + /// the call, which implies storage usage up to the limit of the parent call. + /// - `value`: The value to transfer into the contract. Should be decodable as a `T::Balance`. + /// Traps otherwise. + /// - `input`: The input data buffer. + /// - `address`: A reference to the address buffer to write the address of the contract. If + /// `None` is provided then the output buffer is not copied. + /// - `output`: A reference to the return value buffer to write the constructor output buffer. + /// If `None` is provided then the output buffer is not copied. + /// - `salt`: The salt bytes to use for this instantiation. 
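As a sketch of the parameter layout described above (not part of the patch): a hedged example of an `instantiate_v2` call that sets no dedicated storage deposit limit and discards both the returned address and the constructor output. The crate import name `uapi`, the helper name, and the treatment of `0` as "no specific weight cap" are assumptions.

```rust
use uapi::{HostFn, HostFnImpl as api, ReturnErrorCode};

/// Hypothetical constructor helper: deploy the code identified by `code_hash`,
/// forwarding the SCALE-encoded `value`.
#[allow(deprecated)] // `instantiate_v2` is marked unstable in this trait.
fn deploy_child(
    code_hash: &[u8; 32],
    value: &[u8],
    salt: &[u8],
) -> Result<(), ReturnErrorCode> {
    api::instantiate_v2(
        code_hash, // assumed 32-byte code hash
        0,         // ref_time_limit: treated as "no specific cap" in this sketch (assumption)
        0,         // proof_size_limit: likewise an assumption
        None,      // deposit: no dedicated storage deposit limit
        value,     // SCALE-encoded `T::Balance`
        &[],       // empty constructor input
        None,      // do not copy the instantiated address back
        None,      // do not copy the constructor output back
        salt,
    )
}
```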
+ /// + /// # Errors + /// + /// Please consult the [ReturnErrorCode][`crate::ReturnErrorCode`] enum declaration for more + /// information on those errors. Here we only note things specific to this function. + /// + /// An error means that the account wasn't created and no address or output buffer + /// is returned unless stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn instantiate_v2( + code_hash: &[u8], + ref_time_limit: u64, + proof_size_limit: u64, + deposit: Option<&[u8]>, + value: &[u8], + input: &[u8], + address: Option<&mut [u8]>, + output: Option<&mut [u8]>, + salt: &[u8], + ) -> Result; + + /// Returns a nonce that is unique per contract instantiation. + /// + /// The nonce is incremented for each successful contract instantiation. This is a + /// sensible default salt for contract instantiations. + fn instantiation_nonce() -> u64; + + /// Checks whether a specified address belongs to a contract. + /// + /// # Parameters + /// + /// - `account_id`: The address to check. Should be decodable as an `T::AccountId`. Traps + /// otherwise. + /// + /// # Return + /// + /// Returns `true` if the address belongs to a contract. + fn is_contract(account_id: &[u8]) -> bool; + + /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. + /// The data is encoded as `T::Balance`. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the minimum balance. + fn minimum_balance(output: &mut &mut [u8]); + + /// Retrieve the code hash of the currently executing contract. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the code hash. + fn own_code_hash(output: &mut [u8]); + + /// Load the latest block timestamp into the supplied buffer + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the timestamp. + fn now(output: &mut &mut [u8]); + + /// Returns the number of times the currently executing contract exists on the call stack in + /// addition to the calling instance. + /// + /// # Return + /// + /// Returns `0` when there is no reentrancy. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn reentrance_count() -> u32; + + /// Removes the delegate dependency from the contract. + /// + /// Traps if the delegate dependency does not exist. + /// + /// # Parameters + /// + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn remove_delegate_dependency(code_hash: &[u8]); + + /// Cease contract execution and save a data buffer as a result of the execution. + /// + /// This function never returns as it stops execution of the caller. + /// This is the only way to return a data buffer to the caller. 
Returning from + /// execution without calling this function is equivalent to calling: + /// ```nocompile + /// return_value(ReturnFlags::empty(), &[]) + /// ``` + /// + /// Using an unnamed non empty `ReturnFlags` triggers a trap. + /// + /// # Parameters + /// + /// - `flags`: Flag used to signal special return conditions to the supervisor. See + /// [`ReturnFlags`] for a documentation of the supported flags. + /// - `return_value`: The return value buffer. + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> !; + + /// Replace the contract code at the specified address with new code. + /// + /// # Note + /// + /// There are a couple of important considerations which must be taken into account when + /// using this API: + /// + /// 1. The storage at the code address will remain untouched. This means that contract + /// developers must ensure that the storage layout of the new code is compatible with that of + /// the old code. + /// + /// 2. Contracts using this API can't be assumed as having deterministic addresses. Said another + /// way, when using this API you lose the guarantee that an address always identifies a specific + /// code hash. + /// + /// 3. If a contract calls into itself after changing its code the new call would use + /// the new code. However, if the original caller panics after returning from the sub call it + /// would revert the changes made by [`set_code_hash()`][`Self::set_code_hash`] and the next + /// caller would use the old code. + /// + /// # Parameters + /// + /// - `code_hash`: The hash of the new code. Should be decodable as an `T::Hash`. Traps + /// otherwise. + /// + /// # Errors + /// + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn set_code_hash(code_hash: &[u8]) -> Result; + + /// Set the value at the given key in the contract storage. + /// + /// Equivalent to [`Self::set_storage_v1`] version with the + /// exception of the return type. Still a valid thing to call for fixed sized storage key, when + /// not interested in the return value. + fn set_storage(key: &[u8], value: &[u8]); + + /// Set the value at the given key in the contract storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + fn set_storage_v1(key: &[u8], value: &[u8]) -> Option; + + /// Set the value at the given key in the contract storage. + /// + /// The key and value lengths must not exceed the maximums defined by the contracts module + /// parameters. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// - `encoded_value`: The storage value. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + fn set_storage_v2(key: &[u8], value: &[u8]) -> Option; + + /// Verify a sr25519 signature + /// + /// # Parameters + /// + /// - `signature`: The signature bytes. + /// - `message`: The message bytes. + /// + /// # Errors + /// + /// - [Sr25519VerifyFailed][`crate::ReturnErrorCode::Sr25519VerifyFailed] + fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result; + + /// Retrieve and remove the value under the given key from storage. + /// + /// # Parameters + /// - `key`: The storage key. + /// - `output`: A reference to the output data buffer to write the storage entry. 
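An illustrative sketch (not part of the patch) of the `&mut &mut [u8]` output convention shared by `get_storage_v1` and `take_storage`: the caller hands in a writable buffer and, on success, the slice is shrunk to the bytes the host actually wrote. The crate import name `uapi`, the helper name, and the 64-byte buffer are assumptions.

```rust
use uapi::{HostFn, HostFnImpl as api, ReturnErrorCode};

/// Hypothetical helper: store `value` under `key`, then remove it again and
/// report how many bytes the host handed back.
fn write_then_take(key: &[u8], value: &[u8]) -> Option<usize> {
    // Returns the size of any pre-existing value; ignored here.
    let _previous = api::set_storage_v2(key, value);

    let mut buffer = [0u8; 64]; // arbitrary capacity for the sketch
    let mut output: &mut [u8] = &mut buffer;
    match api::take_storage(key, &mut output) {
        // On success the wrapper shrinks `output` to the written length.
        Ok(()) => Some(output.len()),
        Err(ReturnErrorCode::KeyNotFound) => None,
        Err(_) => None,
    }
}
```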
+ /// + /// # Errors + /// + /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + fn take_storage(key: &[u8], output: &mut &mut [u8]) -> Result; + + /// Transfer some amount of funds into the specified account. + /// + /// # Parameters + /// + /// - `account_id`: The address of the account to transfer funds to. Should be decodable as an + /// `T::AccountId`. Traps otherwise. + /// - `value`: The value to transfer. Should be decodable as a `T::Balance`. Traps otherwise. + /// + /// # Errors + /// + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + fn transfer(account_id: &[u8], value: &[u8]) -> Result; + + /// Remove the calling account and transfer remaining balance. + /// + /// This is equivalent to calling the newer [`Self::terminate_v1`] version of this function. + #[deprecated(note = "Deprecated, use newer version instead")] + fn terminate(beneficiary: &[u8]) -> !; + + /// Remove the calling account and transfer remaining **free** balance. + /// + /// This function never returns. Either the termination was successful and the + /// execution of the destroyed contract is halted, or it failed during the termination, + /// which is considered fatal and results in a trap + rollback. + /// + /// # Parameters + /// + /// - `beneficiary`: The address of the beneficiary account. Should be decodable as an + /// `T::AccountId`. + /// + /// # Traps + /// + /// - The contract is live, i.e. is already on the call stack. + /// - Failed to send the balance to the beneficiary. + /// - The deletion queue is full. + fn terminate_v1(beneficiary: &[u8]) -> !; + + /// Stores the value transferred along with this call/instantiate into the supplied buffer. + /// The data is encoded as `T::Balance`. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the transferred value. + fn value_transferred(output: &mut &mut [u8]); + + /// Stores the price for the specified amount of gas into the supplied buffer. + /// + /// Equivalent to the newer [`Self::weight_to_fee_v1`] version but + /// works with *ref_time* Weight only. It is recommended to switch to the latest version, once + /// it's stabilized. + fn weight_to_fee(gas: u64, output: &mut &mut [u8]); + + /// Stores the price for the specified amount of gas into the supplied buffer. + /// The data is encoded as `T::Balance`. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. + /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. + /// - `output`: A reference to the output data buffer to write the price. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn weight_to_fee_v1(ref_time_limit: u64, proof_size_limit: u64, output: &mut &mut [u8]); + + /// Execute an XCM program locally, using the contract's address as the origin. + /// This is equivalent to dispatching `pallet_xcm::execute` through `call_runtime`, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), + /// traps otherwise.
+ /// - `output`: A reference to the output data buffer to write the [Outcome](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/v3/enum.Outcome.html) + /// + /// # Return + /// + /// Returns `Error::Success` when the XCM execution attempt is successful. When the XCM + /// execution fails, `ReturnCode::XcmExecutionFailed` is returned + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn xcm_execute(msg: &[u8], output: &mut &mut [u8]) -> Result; + + /// Send an XCM program from the contract to the specified destination. + /// This is equivalent to dispatching `pallet_xcm::send` through `call_runtime`, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `dest`: The XCM destination, should be decodable as [VersionedMultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedMultiLocation.html), + /// traps otherwise. + /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), + /// traps otherwise. + /// - `output`: A reference to the output data buffer to write the [XcmHash](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/v3/type.XcmHash.html) + /// + /// # Return + /// + /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM + /// execution fails, `ReturnErrorCode::XcmSendFailed` is returned. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn xcm_send(dest: &[u8], msg: &[u8], output: &mut &mut [u8]) -> Result; +} diff --git a/substrate/frame/contracts/uapi/src/host/riscv32.rs b/substrate/frame/contracts/uapi/src/host/riscv32.rs new file mode 100644 index 0000000000000000000000000000000000000000..f58b8831f06d6cce1df4eb5ab3c7897a7fa97b40 --- /dev/null +++ b/substrate/frame/contracts/uapi/src/host/riscv32.rs @@ -0,0 +1,14 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// TODO: bring up to date with wasm32.rs diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs new file mode 100644 index 0000000000000000000000000000000000000000..d30058daf3dff1775432d71f242357596d16021a --- /dev/null +++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs @@ -0,0 +1,811 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +use super::{ + extract_from_slice, ptr_len_or_sentinel, ptr_or_sentinel, CallFlags, HostFn, HostFnImpl, Result, +}; +use crate::{ReturnCode, ReturnFlags}; + +mod sys { + use super::ReturnCode; + + #[link(wasm_import_module = "seal0")] + extern "C" { + pub fn account_reentrance_count(account_ptr: *const u8) -> u32; + + pub fn add_delegate_dependency(code_hash_ptr: *const u8); + + pub fn address(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn balance(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn block_number(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn call( + callee_ptr: *const u8, + callee_len: u32, + gas: u64, + value_ptr: *const u8, + value_len: u32, + input_data_ptr: *const u8, + input_data_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn call_chain_extension( + func_id: u32, + input_ptr: *const u8, + input_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn call_runtime(call_ptr: *const u8, call_len: u32) -> ReturnCode; + + pub fn caller(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn caller_is_origin() -> ReturnCode; + + pub fn caller_is_root() -> ReturnCode; + + pub fn clear_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + + pub fn code_hash( + account_id_ptr: *const u8, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn contains_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + + pub fn delegate_call( + flags: u32, + code_hash_ptr: *const u8, + input_data_ptr: *const u8, + input_data_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn deposit_event( + topics_ptr: *const u8, + topics_len: u32, + data_ptr: *const u8, + data_len: u32, + ); + + pub fn ecdsa_recover( + signature_ptr: *const u8, + message_hash_ptr: *const u8, + output_ptr: *mut u8, + ) -> ReturnCode; + + pub fn ecdsa_to_eth_address(public_key_ptr: *const u8, output_ptr: *mut u8) -> ReturnCode; + + pub fn gas_left(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn get_storage( + key_ptr: *const u8, + key_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn hash_blake2_128(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); + + pub fn hash_blake2_256(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); + + pub fn hash_keccak_256(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); + + pub fn hash_sha2_256(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); + + pub fn input(buf_ptr: *mut u8, buf_len_ptr: *mut u32); + + pub fn instantiation_nonce() -> u64; + + pub fn is_contract(account_id_ptr: *const u8) -> ReturnCode; + + pub fn minimum_balance(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn now(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn own_code_hash(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn reentrance_count() -> u32; + + pub fn remove_delegate_dependency(code_hash_ptr: *const u8); + + pub fn seal_return(flags: u32, data_ptr: *const u8, data_len: u32) -> !; + + pub fn set_code_hash(code_hash_ptr: *const u8) -> ReturnCode; + + pub fn set_storage( + key_ptr: *const u8, + key_len: u32, + value_ptr: *const u8, + value_len: u32, + ) -> ReturnCode; + + pub fn sr25519_verify( + signature_ptr: *const u8, + public_key_ptr: *const u8, + message_len: u32, + message_ptr: *const u8, + ) -> ReturnCode; + 
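For orientation (not part of the patch): each `*_len_ptr` import above follows the same protocol, i.e. the host reads the buffer capacity through `output_len_ptr`, writes the data, and stores the written length back. The safe wrappers further down rely on an `extract_from_slice` helper imported from the parent module and not shown in this hunk; one plausible shape of that helper, offered purely as a sketch:

```rust
/// Sketch only: shrink the borrowed output slice to the `new_len` bytes that
/// the host reported back through `output_len_ptr`.
fn extract_from_slice(output: &mut &mut [u8], new_len: usize) {
    debug_assert!(new_len <= output.len());
    // Take the slice out, re-borrow only its written prefix, and put it back.
    let tmp = core::mem::take(output);
    *output = &mut tmp[..new_len];
}
```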
+ pub fn take_storage( + key_ptr: *const u8, + key_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn terminate(beneficiary_ptr: *const u8) -> !; + + pub fn transfer( + account_id_ptr: *const u8, + account_id_len: u32, + transferred_value_ptr: *const u8, + transferred_value_len: u32, + ) -> ReturnCode; + + pub fn value_transferred(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn weight_to_fee(gas: u64, output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn xcm_execute(msg_ptr: *const u8, msg_len: u32, output_ptr: *mut u8) -> ReturnCode; + + pub fn xcm_send( + dest_ptr: *const u8, + msg_ptr: *const u8, + msg_len: u32, + output_ptr: *mut u8, + ) -> ReturnCode; + } + + pub mod v1 { + use crate::ReturnCode; + + #[link(wasm_import_module = "seal1")] + extern "C" { + pub fn call( + flags: u32, + callee_ptr: *const u8, + gas: u64, + transferred_value_ptr: *const u8, + input_data_ptr: *const u8, + input_data_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn clear_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + + pub fn contains_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + + pub fn gas_left(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn get_storage( + key_ptr: *const u8, + key_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn instantiate( + code_hash_ptr: *const u8, + gas: u64, + value_ptr: *const u8, + input_ptr: *const u8, + input_len: u32, + address_ptr: *mut u8, + address_len_ptr: *mut u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + salt_ptr: *const u8, + salt_len: u32, + ) -> ReturnCode; + + pub fn set_storage( + key_ptr: *const u8, + key_len: u32, + value_ptr: *const u8, + value_len: u32, + ) -> ReturnCode; + + pub fn terminate(beneficiary_ptr: *const u8) -> !; + + pub fn weight_to_fee( + ref_time_limit: u64, + proof_time_limit: u64, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ); + } + } + + pub mod v2 { + use crate::ReturnCode; + + #[link(wasm_import_module = "seal2")] + extern "C" { + pub fn call( + flags: u32, + callee_ptr: *const u8, + ref_time_limit: u64, + proof_time_limit: u64, + deposit_ptr: *const u8, + transferred_value_ptr: *const u8, + input_data_ptr: *const u8, + input_data_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn instantiate( + code_hash_ptr: *const u8, + ref_time_limit: u64, + proof_time_limit: u64, + deposit_ptr: *const u8, + value_ptr: *const u8, + input_ptr: *const u8, + input_len: u32, + address_ptr: *mut u8, + address_len_ptr: *mut u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + salt_ptr: *const u8, + salt_len: u32, + ) -> ReturnCode; + + pub fn set_storage( + key_ptr: *const u8, + key_len: u32, + value_ptr: *const u8, + value_len: u32, + ) -> ReturnCode; + } + } +} + +/// A macro to implement all Host functions with a signature of `fn(&mut &mut [u8])`. +macro_rules! impl_wrapper_for { + (@impl_fn $( $mod:ident )::*, $suffix:literal, $name:ident) => { + paste::paste! 
{ + fn [<$name $suffix>](output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { + $( $mod )::*::$name(output.as_mut_ptr(), &mut output_len); + } + } + } + }; + + () => {}; + + (($mod:ident, $suffix:literal) => [$( $name:ident),*], $($tail:tt)*) => { + $(impl_wrapper_for!(@impl_fn sys::$mod, $suffix, $name);)* + impl_wrapper_for!($($tail)*); + }; + + (() => [$( $name:ident),*], $($tail:tt)*) => { + $(impl_wrapper_for!(@impl_fn sys, "", $name);)* + impl_wrapper_for!($($tail)*); + }; +} + +/// A macro to implement all the hash functions Apis. +macro_rules! impl_hash_fn { + ( $name:ident, $bytes_result:literal ) => { + paste::item! { + fn [](input: &[u8], output: &mut [u8; $bytes_result]) { + unsafe { + sys::[]( + input.as_ptr(), + input.len() as u32, + output.as_mut_ptr(), + ) + } + } + } + }; +} + +/// A macro to implement the get_storage functions. +macro_rules! impl_get_storage { + ($fn_name:ident, $sys_get_storage:path) => { + fn $fn_name(key: &[u8], output: &mut &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + $sys_get_storage( + key.as_ptr(), + key.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into() + } + }; +} + +impl HostFn for HostFnImpl { + fn instantiate_v1( + code_hash: &[u8], + gas: u64, + value: &[u8], + input: &[u8], + mut address: Option<&mut [u8]>, + mut output: Option<&mut [u8]>, + salt: &[u8], + ) -> Result { + let (address_ptr, mut address_len) = ptr_len_or_sentinel(&mut address); + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let ret_code = unsafe { + sys::v1::instantiate( + code_hash.as_ptr(), + gas, + value.as_ptr(), + input.as_ptr(), + input.len() as u32, + address_ptr, + &mut address_len, + output_ptr, + &mut output_len, + salt.as_ptr(), + salt.len() as u32, + ) + }; + + if let Some(ref mut address) = address { + extract_from_slice(address, address_len as usize); + } + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + ret_code.into() + } + + fn instantiate_v2( + code_hash: &[u8], + ref_time_limit: u64, + proof_size_limit: u64, + deposit: Option<&[u8]>, + value: &[u8], + input: &[u8], + mut address: Option<&mut [u8]>, + mut output: Option<&mut [u8]>, + salt: &[u8], + ) -> Result { + let (address_ptr, mut address_len) = ptr_len_or_sentinel(&mut address); + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let deposit_ptr = ptr_or_sentinel(&deposit); + + let ret_code = { + unsafe { + sys::v2::instantiate( + code_hash.as_ptr(), + ref_time_limit, + proof_size_limit, + deposit_ptr, + value.as_ptr(), + input.as_ptr(), + input.len() as u32, + address_ptr, + &mut address_len, + output_ptr, + &mut output_len, + salt.as_ptr(), + salt.len() as u32, + ) + } + }; + + if let Some(ref mut address) = address { + extract_from_slice(address, address_len as usize); + } + + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn call( + callee: &[u8], + gas: u64, + value: &[u8], + input_data: &[u8], + mut output: Option<&mut [u8]>, + ) -> Result { + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let ret_code = { + unsafe { + sys::call( + callee.as_ptr(), + callee.len() as u32, + gas, + value.as_ptr(), + value.len() as u32, + input_data.as_ptr(), + input_data.len() as u32, + output_ptr, + &mut output_len, + ) + } + }; + + if let Some(ref mut 
output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn call_v1( + flags: CallFlags, + callee: &[u8], + gas: u64, + value: &[u8], + input_data: &[u8], + mut output: Option<&mut [u8]>, + ) -> Result { + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let ret_code = { + unsafe { + sys::v1::call( + flags.bits(), + callee.as_ptr(), + gas, + value.as_ptr(), + input_data.as_ptr(), + input_data.len() as u32, + output_ptr, + &mut output_len, + ) + } + }; + + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn call_v2( + flags: CallFlags, + callee: &[u8], + ref_time_limit: u64, + proof_time_limit: u64, + deposit: Option<&[u8]>, + value: &[u8], + input_data: &[u8], + mut output: Option<&mut [u8]>, + ) -> Result { + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let deposit_ptr = ptr_or_sentinel(&deposit); + let ret_code = { + unsafe { + sys::v2::call( + flags.bits(), + callee.as_ptr(), + ref_time_limit, + proof_time_limit, + deposit_ptr, + value.as_ptr(), + input_data.as_ptr(), + input_data.len() as u32, + output_ptr, + &mut output_len, + ) + } + }; + + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn caller_is_root() -> u32 { + unsafe { sys::caller_is_root() }.into_u32() + } + + fn delegate_call( + flags: CallFlags, + code_hash: &[u8], + input: &[u8], + mut output: Option<&mut [u8]>, + ) -> Result { + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let ret_code = { + unsafe { + sys::delegate_call( + flags.bits(), + code_hash.as_ptr(), + input.as_ptr(), + input.len() as u32, + output_ptr, + &mut output_len, + ) + } + }; + + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn transfer(account_id: &[u8], value: &[u8]) -> Result { + let ret_code = unsafe { + sys::transfer( + account_id.as_ptr(), + account_id.len() as u32, + value.as_ptr(), + value.len() as u32, + ) + }; + ret_code.into() + } + + fn deposit_event(topics: &[u8], data: &[u8]) { + unsafe { + sys::deposit_event( + topics.as_ptr(), + topics.len() as u32, + data.as_ptr(), + data.len() as u32, + ) + } + } + + fn set_storage(key: &[u8], value: &[u8]) { + unsafe { + sys::set_storage(key.as_ptr(), key.len() as u32, value.as_ptr(), value.len() as u32) + }; + } + + fn set_storage_v1(key: &[u8], encoded_value: &[u8]) -> Option { + let ret_code = unsafe { + sys::v1::set_storage( + key.as_ptr(), + key.len() as u32, + encoded_value.as_ptr(), + encoded_value.len() as u32, + ) + }; + ret_code.into() + } + + fn set_storage_v2(key: &[u8], encoded_value: &[u8]) -> Option { + let ret_code = unsafe { + sys::v2::set_storage( + key.as_ptr(), + key.len() as u32, + encoded_value.as_ptr(), + encoded_value.len() as u32, + ) + }; + ret_code.into() + } + + fn clear_storage(key: &[u8]) { + unsafe { sys::clear_storage(key.as_ptr(), key.len() as u32) }; + } + + fn clear_storage_v1(key: &[u8]) -> Option { + let ret_code = unsafe { sys::v1::clear_storage(key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + + impl_get_storage!(get_storage, sys::get_storage); + impl_get_storage!(get_storage_v1, sys::v1::get_storage); + + fn take_storage(key: &[u8], output: &mut &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + sys::take_storage( + key.as_ptr(), + key.len() as u32, + 
output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into() + } + + fn contains_storage(key: &[u8]) -> Option { + let ret_code = unsafe { sys::contains_storage(key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + + fn contains_storage_v1(key: &[u8]) -> Option { + let ret_code = unsafe { sys::v1::contains_storage(key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + + fn terminate(beneficiary: &[u8]) -> ! { + unsafe { sys::terminate(beneficiary.as_ptr()) } + } + + fn terminate_v1(beneficiary: &[u8]) -> ! { + unsafe { sys::v1::terminate(beneficiary.as_ptr()) } + } + + fn call_chain_extension(func_id: u32, input: &[u8], output: &mut &mut [u8]) -> u32 { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + sys::call_chain_extension( + func_id, + input.as_ptr(), + input.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into_u32() + } + + fn input(output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + { + unsafe { sys::input(output.as_mut_ptr(), &mut output_len) }; + } + extract_from_slice(output, output_len as usize); + } + + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> ! { + unsafe { sys::seal_return(flags.bits(), return_value.as_ptr(), return_value.len() as u32) } + } + + fn call_runtime(call: &[u8]) -> Result { + let ret_code = unsafe { sys::call_runtime(call.as_ptr(), call.len() as u32) }; + ret_code.into() + } + + impl_wrapper_for! { + () => [caller, block_number, address, balance, gas_left, value_transferred, now, minimum_balance], + (v1, "_v1") => [gas_left], + } + + fn weight_to_fee(gas: u64, output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + { + unsafe { sys::weight_to_fee(gas, output.as_mut_ptr(), &mut output_len) }; + } + extract_from_slice(output, output_len as usize); + } + + fn weight_to_fee_v1(ref_time_limit: u64, proof_size_limit: u64, output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + { + unsafe { + sys::v1::weight_to_fee( + ref_time_limit, + proof_size_limit, + output.as_mut_ptr(), + &mut output_len, + ) + }; + } + extract_from_slice(output, output_len as usize); + } + + impl_hash_fn!(sha2_256, 32); + impl_hash_fn!(keccak_256, 32); + impl_hash_fn!(blake2_256, 32); + impl_hash_fn!(blake2_128, 16); + + fn ecdsa_recover( + signature: &[u8; 65], + message_hash: &[u8; 32], + output: &mut [u8; 33], + ) -> Result { + let ret_code = unsafe { + sys::ecdsa_recover(signature.as_ptr(), message_hash.as_ptr(), output.as_mut_ptr()) + }; + ret_code.into() + } + + fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result { + let ret_code = unsafe { sys::ecdsa_to_eth_address(pubkey.as_ptr(), output.as_mut_ptr()) }; + ret_code.into() + } + + fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result { + let ret_code = unsafe { + sys::sr25519_verify( + signature.as_ptr(), + pub_key.as_ptr(), + message.len() as u32, + message.as_ptr(), + ) + }; + ret_code.into() + } + + fn is_contract(account_id: &[u8]) -> bool { + let ret_val = unsafe { sys::is_contract(account_id.as_ptr()) }; + ret_val.into_bool() + } + + fn caller_is_origin() -> bool { + let ret_val = unsafe { sys::caller_is_origin() }; + ret_val.into_bool() + } + + fn set_code_hash(code_hash: &[u8]) -> Result { + let ret_val = unsafe { sys::set_code_hash(code_hash.as_ptr()) }; + ret_val.into() + } + + fn code_hash(account_id: &[u8], output: &mut [u8]) -> 
Result { + let mut output_len = output.len() as u32; + let ret_val = + unsafe { sys::code_hash(account_id.as_ptr(), output.as_mut_ptr(), &mut output_len) }; + ret_val.into() + } + + fn own_code_hash(output: &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { sys::own_code_hash(output.as_mut_ptr(), &mut output_len) } + } + + fn account_reentrance_count(account: &[u8]) -> u32 { + unsafe { sys::account_reentrance_count(account.as_ptr()) } + } + + fn add_delegate_dependency(code_hash: &[u8]) { + unsafe { sys::add_delegate_dependency(code_hash.as_ptr()) } + } + + fn remove_delegate_dependency(code_hash: &[u8]) { + unsafe { sys::remove_delegate_dependency(code_hash.as_ptr()) } + } + + fn instantiation_nonce() -> u64 { + unsafe { sys::instantiation_nonce() } + } + + fn reentrance_count() -> u32 { + unsafe { sys::reentrance_count() } + } + + fn xcm_execute(msg: &[u8], output: &mut &mut [u8]) -> Result { + let ret_code = + unsafe { sys::xcm_execute(msg.as_ptr(), msg.len() as _, output.as_mut_ptr()) }; + ret_code.into() + } + + fn xcm_send(dest: &[u8], msg: &[u8], output: &mut &mut [u8]) -> Result { + let ret_code = unsafe { + sys::xcm_send(dest.as_ptr(), msg.as_ptr(), msg.len() as _, output.as_mut_ptr()) + }; + ret_code.into() + } +} diff --git a/substrate/frame/contracts/uapi/src/lib.rs b/substrate/frame/contracts/uapi/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..3d384bbb85ddf8a48777d51058a8e3593f118232 --- /dev/null +++ b/substrate/frame/contracts/uapi/src/lib.rs @@ -0,0 +1,139 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! External C API to communicate with substrate contracts runtime module. +//! +//! Refer to substrate FRAME contract module for more documentation. + +#![no_std] + +mod flags; +pub use flags::*; + +#[cfg(any(target_arch = "wasm32", target_arch = "riscv32"))] +mod host; + +#[cfg(any(target_arch = "wasm32", target_arch = "riscv32"))] +pub use host::*; + +macro_rules! define_error_codes { + ( + $( + $( #[$attr:meta] )* + $name:ident = $discr:literal, + )* + ) => { + /// Every error that can be returned to a contract when it calls any of the host functions. + #[derive(Debug)] + #[repr(u32)] + pub enum ReturnErrorCode { + /// API call successful. + Success = 0, + $( + $( #[$attr] )* + $name = $discr, + )* + /// Returns if an unknown error was received from the host module. + Unknown, + } + + impl From for Result { + #[inline] + fn from(return_code: ReturnCode) -> Self { + match return_code.0 { + 0 => Ok(()), + $( + $discr => Err(ReturnErrorCode::$name), + )* + _ => Err(ReturnErrorCode::Unknown), + } + } + } + }; +} + +impl From for u32 { + fn from(code: ReturnErrorCode) -> u32 { + code as u32 + } +} + +define_error_codes! { + /// The called function trapped and has its state changes reverted. + /// In this case no output buffer is returned. + /// Can only be returned from `call` and `instantiate`. 
+ CalleeTrapped = 1, + /// The called function ran to completion but decided to revert its state. + /// An output buffer is returned when one was supplied. + /// Can only be returned from `call` and `instantiate`. + CalleeReverted = 2, + /// The passed key does not exist in storage. + KeyNotFound = 3, + /// Deprecated and no longer returned: There is only the minimum balance. + _BelowSubsistenceThreshold = 4, + /// Transfer failed for other not further specified reason. Most probably + /// reserved or locked balance of the sender that was preventing the transfer. + TransferFailed = 5, + /// Deprecated and no longer returned: Endowment is no longer required. + _EndowmentTooLow = 6, + /// No code could be found at the supplied code hash. + CodeNotFound = 7, + /// The account that was called is no contract. + NotCallable = 8, + /// The call to `debug_message` had no effect because debug message + /// recording was disabled. + LoggingDisabled = 9, + /// The call dispatched by `call_runtime` was executed but returned an error. + CallRuntimeFailed = 10, + /// ECDSA public key recovery failed. Most probably wrong recovery id or signature. + EcdsaRecoveryFailed = 11, + /// sr25519 signature verification failed. + Sr25519VerifyFailed = 12, + /// The `xcm_execute` call failed. + XcmExecutionFailed = 13, + /// The `xcm_send` call failed. + XcmSendFailed = 14, +} + +/// The raw return code returned by the host side. +#[repr(transparent)] +pub struct ReturnCode(u32); + +/// Used as a sentinel value when reading and writing contract memory. +/// +/// We use this value to signal `None` to a contract when only a primitive is +/// allowed and we don't want to go through encoding a full Rust type. +/// Using `u32::Max` is a safe sentinel because contracts are never +/// allowed to use such a large amount of resources. So this value doesn't +/// make sense for a memory location or length. +const SENTINEL: u32 = u32::MAX; + +impl From for Option { + fn from(code: ReturnCode) -> Self { + (code.0 < SENTINEL).then_some(code.0) + } +} + +impl ReturnCode { + /// Returns the raw underlying `u32` representation. + pub fn into_u32(self) -> u32 { + self.0 + } + /// Returns the underlying `u32` converted into `bool`. 
+ pub fn into_bool(self) -> bool { + self.0.ne(&0) + } +} + +type Result = core::result::Result<(), ReturnErrorCode>; diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index 1dc723576dc183aff8e7cb84a77559f104c2cd87..6d96dde1aaa64b97189b55fde057f18e57929095 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet for conviction voting in referenda" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,13 +22,13 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "max-encoded-len", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +serde = { version = "1.0.193", features = ["derive"], optional = true } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -33,7 +36,7 @@ pallet-scheduler = { path = "../scheduler" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 850b98b218b0c19f2ec06576a1da7e656c1bb641..371d036438437859a20e58ae7788455930cebbb6 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, ConstU64, Contains, Polling, VoteTally}, }; use sp_core::H256; @@ -51,6 +51,7 @@ impl Contains for BaseFilter { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = (); diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index 523a5bb90a016e649eded2a7a72016ff021d375e..d223ecd4f24c17b6afabe90fc962021ff58c87e0 100644 --- a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Logic as per the description of The Fellowship for core Polkadot technology" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,17 +19,17 @@ targets = 
["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/core-fellowship/src/tests.rs b/substrate/frame/core-fellowship/src/tests.rs index a02c010718c91c37bfdde1f9eef7878ded0013b9..9ac381ab7f5c8f8a7881c7b1f7ebc72c7b3d10cf 100644 --- a/substrate/frame/core-fellowship/src/tests.rs +++ b/substrate/frame/core-fellowship/src/tests.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, pallet_prelude::Weight, parameter_types, traits::{tokens::GetSalary, ConstU32, ConstU64, Everything, IsInVec, TryMapSuccess}, @@ -50,6 +50,8 @@ parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1_000_000, u64::max_value())); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 870bfaa9b89213a6541a5102f5c85ac545e35e8e..7bfc8c6903bb14b49b31b56fa23124a99a4835a1 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet for democracy" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,14 +20,14 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} +serde = { version = "1.0.193", features = ["derive"], optional = true } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -33,7 +36,7 @@ pallet-scheduler = { path = "../scheduler" } pallet-preimage = { path = "../preimage" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs index b4aa17726b8d28869110f44acbd6d21895da5218..aa66137ad880403a3a3934e121e358bc193d0826 100644 --- a/substrate/frame/democracy/src/benchmarking.rs +++ b/substrate/frame/democracy/src/benchmarking.rs @@ -65,7 +65,7 @@ fn add_referendum(n: u32) -> (ReferendumIndex, T::Hash, T::Hash) { 0u32.into(), ); let preimage_hash = note_preimage::(); - MetadataOf::::insert(crate::MetadataOwner::Referendum(index), preimage_hash.clone()); + MetadataOf::::insert(crate::MetadataOwner::Referendum(index), preimage_hash); (index, hash, preimage_hash) } diff --git a/substrate/frame/democracy/src/migrations/v1.rs b/substrate/frame/democracy/src/migrations/v1.rs index c27f437901b7ee1a7ce537d09eb3b3f313320292..64baea8f3af7039eb8a2422b48e897aaaa93a664 100644 --- a/substrate/frame/democracy/src/migrations/v1.rs +++ b/substrate/frame/democracy/src/migrations/v1.rs @@ -172,7 +172,7 @@ mod test { let hash = H256::repeat_byte(1); let status = ReferendumStatus { end: 1u32.into(), - proposal: 
hash.clone(), + proposal: hash, threshold: VoteThreshold::SuperMajorityApprove, delay: 1u32.into(), tally: Tally { ayes: 1u32.into(), nays: 1u32.into(), turnout: 1u32.into() }, @@ -187,13 +187,10 @@ mod test { // Case 3: Public proposals let hash2 = H256::repeat_byte(2); - v0::PublicProps::::put(vec![ - (3u32, hash.clone(), 123u64), - (4u32, hash2.clone(), 123u64), - ]); + v0::PublicProps::::put(vec![(3u32, hash, 123u64), (4u32, hash2, 123u64)]); // Case 4: Next external - v0::NextExternal::::put((hash.clone(), VoteThreshold::SuperMajorityApprove)); + v0::NextExternal::::put((hash, VoteThreshold::SuperMajorityApprove)); // Migrate. let state = v1::Migration::::pre_upgrade().unwrap(); diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 07a0ef5c3d5a0a729b7bf21dce79984274c76e3c..00d8fedca0cfd2fc97319e3b36a280ea6b445cee 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -20,7 +20,7 @@ use super::*; use crate as pallet_democracy; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, SortedMembers, StorePreimage, @@ -77,6 +77,8 @@ parameter_types! { Weight::from_parts(frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), ); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = BlockWeights; diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index 91be97d3e35003737f9fc27ca54732522aaea01b..be3a77065b433ee02af76879988b98b212c06546 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "PALLET two phase election providers" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,35 +23,35 @@ scale-info = { version = "2.10.0", default-features = false, features = [ ] } log = { version = "0.4.17", default-features = false } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -frame-election-provider-support = { path = "../election-provider-support", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-npos-elections = { path = "../../primitives/npos-elections", 
default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +frame-election-provider-support = { path = "../election-provider-support", default-features = false } # Optional imports for benchmarking frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } pallet-election-provider-support-benchmarking = { path = "../election-provider-support/benchmarking", default-features = false, optional = true } rand = { version = "0.8.5", default-features = false, features = ["alloc", "small_rng"], optional = true } -strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } +strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } [dev-dependencies] parking_lot = "0.12.1" rand = "0.8.5" -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io" } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false} +sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } sp-tracing = { path = "../../primitives/tracing" } pallet-balances = { path = "../balances" } frame-benchmarking = { path = "../benchmarking" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 05f9b24f8f9c53f833b5de3504645c40c1df6a6d..04325a40d0ada022b3a875e20df0d4143bbf0b44 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -101,9 +101,8 @@ //! unsigned transaction, thus the name _unsigned_ phase. This unsigned transaction can never be //! valid if propagated, and it acts similar to an inherent. //! -//! Validators will only submit solutions if the one that they have computed is sufficiently better -//! than the best queued one (see [`pallet::Config::BetterUnsignedThreshold`]) and will limit the -//! weight of the solution to [`MinerConfig::MaxWeight`]. +//! Validators will only submit solutions if the one that they have computed is strictly better than +//! the best queued one and will limit the weight of the solution to [`MinerConfig::MaxWeight`]. //! //! The unsigned phase can be made passive depending on how the previous signed phase went, by //! setting the first inner value of [`Phase`] to `false`. For now, the signed phase is always @@ -598,11 +597,6 @@ pub mod pallet { #[pallet::constant] type BetterSignedThreshold: Get; - /// The minimum amount of improvement to the solution score that defines a solution as - /// "better" in the Unsigned phase. - #[pallet::constant] - type BetterUnsignedThreshold: Get; - /// The repeat threshold of the offchain worker. /// /// For example, if it is 5, that means that at least 5 blocks will elapse between attempts @@ -1024,6 +1018,7 @@ pub mod pallet { // ensure solution is timely. ensure!(Self::current_phase().is_signed(), Error::::PreDispatchEarlySubmission); + ensure!(raw_solution.round == Self::round(), Error::::PreDispatchDifferentRound); // NOTE: this is the only case where having separate snapshot would have been better // because could do just decode_len. But we can create abstractions to do this. 
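As an illustration of the doc change above (not part of the patch): with `BetterUnsignedThreshold` removed, an unsigned solution only has to beat the queued score by any margin rather than by a configured percentage. A self-contained stand-in for that comparison, using a plain tuple in place of the pallet's `ElectionScore` and assuming the usual lexicographic order of higher minimal stake, then higher stake sum, then lower sum of squared stakes:

```rust
use core::cmp::Reverse;

/// Stand-in score: (minimal stake, sum of stakes, sum of squared stakes).
type Score = (u128, u128, u128);

/// "Strictly better" with no threshold: any lexicographic improvement counts.
fn is_strictly_better(new: Score, queued: Score) -> bool {
    (new.0, new.1, Reverse(new.2)) > (queued.0, queued.1, Reverse(queued.2))
}

fn main() {
    // Same minimal stake and stake sum, but a lower sum of squares wins.
    assert!(is_strictly_better((10, 100, 90), (10, 100, 95)));
    // An identical score is not an improvement.
    assert!(!is_strictly_better((10, 100, 95), (10, 100, 95)));
}
```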
@@ -1197,6 +1192,8 @@ pub mod pallet { BoundNotMet, /// Submitted solution has too many winners TooManyWinners, + /// Submission was prepared for a different round. + PreDispatchDifferentRound, } #[pallet::validate_unsigned] diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 92144351e8f819896015f4ae0e3f7daf8bb3506b..e7dd8acf36b1d35732de17c0baa376a8e0b47619 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -21,7 +21,7 @@ use frame_election_provider_support::{ bounds::{DataProviderBounds, ElectionBounds}, data_provider, onchain, ElectionDataProvider, NposSolution, SequentialPhragmen, }; -pub use frame_support::{assert_noop, assert_ok, pallet_prelude::GetDefault}; +pub use frame_support::derive_impl; use frame_support::{ parameter_types, traits::{ConstU32, Hooks}, @@ -80,11 +80,7 @@ frame_election_provider_support::generate_solution_type!( /// All events of this pallet. pub(crate) fn multi_phase_events() -> Vec> { - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::MultiPhase(inner) = e { Some(inner) } else { None }) - .collect::>() + System::read_events_for_pallet::>() } /// To from `now` to block `n`. @@ -116,6 +112,15 @@ pub fn roll_to_with_ocw(n: BlockNumber) { } } +pub fn roll_to_round(n: u32) { + assert!(MultiPhase::round() <= n); + + while MultiPhase::round() != n { + roll_to_signed(); + frame_support::assert_ok!(MultiPhase::elect()); + } +} + pub struct TrimHelpers { pub voters: Vec>, pub assignments: Vec>, @@ -204,6 +209,7 @@ pub fn witness() -> SolutionOrSnapshotSize { .unwrap_or_default() } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; @@ -295,7 +301,6 @@ parameter_types!
{ pub static SignedMaxWeight: Weight = BlockWeights::get().max_block; pub static MinerTxPriority: u64 = 100; pub static BetterSignedThreshold: Perbill = Perbill::zero(); - pub static BetterUnsignedThreshold: Perbill = Perbill::zero(); pub static OffchainRepeat: BlockNumber = 5; pub static MinerMaxWeight: Weight = BlockWeights::get().max_block; pub static MinerMaxLength: u32 = 256; @@ -393,7 +398,6 @@ impl crate::Config for Runtime { type EstimateCallFee = frame_support::traits::ConstU32<8>; type SignedPhase = SignedPhase; type UnsignedPhase = UnsignedPhase; - type BetterUnsignedThreshold = BetterUnsignedThreshold; type BetterSignedThreshold = BetterSignedThreshold; type OffchainRepeat = OffchainRepeat; type MinerTxPriority = MinerTxPriority; @@ -532,10 +536,7 @@ impl ExtBuilder { ::set(p); self } - pub fn better_unsigned_threshold(self, p: Perbill) -> Self { - ::set(p); - self - } + pub fn phases(self, signed: BlockNumber, unsigned: BlockNumber) -> Self { ::set(signed); ::set(unsigned); @@ -637,9 +638,9 @@ impl ExtBuilder { #[cfg(feature = "try-runtime")] ext.execute_with(|| { - assert_ok!(>::try_state( - System::block_number() - )); + frame_support::assert_ok!( + >::try_state(System::block_number()) + ); }); } } diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index 7e4b029ff8c80cff6a57c0868e335d11aa4a2cc6..ae830ed0382d8adfaf664e68548dfce6beddf9d0 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -571,6 +571,40 @@ mod tests { use frame_support::{assert_noop, assert_ok, assert_storage_noop}; use sp_runtime::Percent; + #[test] + fn cannot_submit_on_different_round() { + ExtBuilder::default().build_and_execute(|| { + // roll to a few rounds ahead. + roll_to_round(5); + assert_eq!(MultiPhase::round(), 5); + + roll_to_signed(); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + + // create a temp snapshot only for this test. + MultiPhase::create_snapshot().unwrap(); + let mut solution = raw_solution(); + + // try a solution prepared in a previous round. + solution.round = MultiPhase::round() - 1; + + assert_noop!( + MultiPhase::submit(RuntimeOrigin::signed(10), Box::new(solution)), + Error::::PreDispatchDifferentRound, + ); + + // try a solution prepared in a later round (not expected to happen, but in any case). + MultiPhase::create_snapshot().unwrap(); + let mut solution = raw_solution(); + solution.round = MultiPhase::round() + 1; + + assert_noop!( + MultiPhase::submit(RuntimeOrigin::signed(10), Box::new(solution)), + Error::::PreDispatchDifferentRound, + ); + }) + } + #[test] fn cannot_submit_too_early() { ExtBuilder::default().build_and_execute(|| { diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index e3d0ded97515b9cbac011956afebbc5d4f15e5d6..94348181334061a3c2de81b90742f53c4390ddc9 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -384,9 +384,8 @@ impl Pallet { // ensure score is being improved. Panic henceforth. 
ensure!( - Self::queued_solution().map_or(true, |q: ReadySolution<_, _>| raw_solution - .score - .strict_threshold_better(q.score, T::BetterUnsignedThreshold::get())), + Self::queued_solution() + .map_or(true, |q: ReadySolution<_, _>| raw_solution.score > q.score), Error::::PreDispatchWeakSubmission, ); @@ -1025,7 +1024,7 @@ mod tests { bounded_vec, offchain::storage_lock::{BlockAndTime, StorageLock}, traits::{Dispatchable, ValidateUnsigned, Zero}, - ModuleError, PerU16, Perbill, + ModuleError, PerU16, }; type Assignment = crate::unsigned::Assignment; @@ -1360,7 +1359,7 @@ mod tests { .desired_targets(1) .add_voter(7, 2, bounded_vec![10]) .add_voter(8, 5, bounded_vec![10]) - .better_unsigned_threshold(Perbill::from_percent(50)) + .add_voter(9, 1, bounded_vec![10]) .build_and_execute(|| { roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); @@ -1368,12 +1367,15 @@ mod tests { // an initial solution let result = ElectionResult { - // note: This second element of backing stake is not important here. - winners: vec![(10, 10)], - assignments: vec![Assignment { - who: 10, - distribution: vec![(10, PerU16::one())], - }], + winners: vec![(10, 12)], + assignments: vec![ + Assignment { who: 10, distribution: vec![(10, PerU16::one())] }, + Assignment { + who: 7, + // note: this percent doesn't even matter, in solution it is 100%. + distribution: vec![(10, PerU16::one())], + }, + ], }; let RoundSnapshot { voters, targets } = MultiPhase::snapshot().unwrap(); @@ -1394,9 +1396,35 @@ mod tests { Box::new(solution), witness )); - assert_eq!(MultiPhase::queued_solution().unwrap().score.minimal_stake, 10); + assert_eq!(MultiPhase::queued_solution().unwrap().score.minimal_stake, 12); - // trial 1: a solution who's score is only 2, i.e. 20% better in the first element. + // trial 1: a solution whose minimal stake is 10, i.e. worse than the first solution + // of 12. + let result = ElectionResult { + winners: vec![(10, 10)], + assignments: vec![Assignment { + who: 10, + distribution: vec![(10, PerU16::one())], + }], + }; + let (raw, score, _, _) = Miner::::prepare_election_result_with_snapshot( + result, + voters.clone(), + targets.clone(), + desired_targets, + ) + .unwrap(); + let solution = RawSolution { solution: raw, score, round: MultiPhase::round() }; + // 10 is not better than 12 + assert_eq!(solution.score.minimal_stake, 10); + // submitting this will actually panic. + assert_noop!( + MultiPhase::unsigned_pre_dispatch_checks(&solution), + Error::::PreDispatchWeakSubmission, + ); + + // trial 2: try resubmitting another solution with same score (12) as the queued + // solution. let result = ElectionResult { winners: vec![(10, 12)], assignments: vec![ @@ -1408,6 +1436,7 @@ mod tests { }, ], }; + let (raw, score, _, _) = Miner::::prepare_election_result_with_snapshot( result, voters.clone(), @@ -1416,15 +1445,45 @@ mod tests { ) .unwrap(); let solution = RawSolution { solution: raw, score, round: MultiPhase::round() }; - // 12 is not 50% more than 10 + // 12 is not better than 12. We need score of at least 13 to be accepted. assert_eq!(solution.score.minimal_stake, 12); + // submitting this will panic. assert_noop!( MultiPhase::unsigned_pre_dispatch_checks(&solution), Error::::PreDispatchWeakSubmission, ); - // submitting this will actually panic. - // trial 2: a solution who's score is only 7, i.e. 70% better in the first element. + // trial 3: a solution whose minimal stake is 13, i.e. 1 better than the queued + // solution of 12.
+ let result = ElectionResult { + winners: vec![(10, 12)], + assignments: vec![ + Assignment { who: 10, distribution: vec![(10, PerU16::one())] }, + Assignment { who: 7, distribution: vec![(10, PerU16::one())] }, + Assignment { who: 9, distribution: vec![(10, PerU16::one())] }, + ], + }; + let (raw, score, witness, _) = + Miner::::prepare_election_result_with_snapshot( + result, + voters.clone(), + targets.clone(), + desired_targets, + ) + .unwrap(); + let solution = RawSolution { solution: raw, score, round: MultiPhase::round() }; + assert_eq!(solution.score.minimal_stake, 13); + + // this should work + assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution)); + assert_ok!(MultiPhase::submit_unsigned( + RuntimeOrigin::none(), + Box::new(solution), + witness + )); + + // trial 4: a solution whose minimal stake is 17, i.e. 4 better than the last + // solution. let result = ElectionResult { winners: vec![(10, 12)], assignments: vec![ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index f5d1991d1990c901b24529f2f7bf337bbb11f4a9..05c6a6d404629f8bdc5a9cdb1d2ccdb785972693 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME election provider multi phase pallet tests with staking pallet, bags-list and session pallets" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -23,7 +26,7 @@ sp-io = { path = "../../../primitives/io" } sp-std = { path = "../../../primitives/std" } sp-staking = { path = "../../../primitives/staking" } sp-core = { path = "../../../primitives/core" } -sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false} +sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false } sp-tracing = { path = "../../../primitives/tracing" } frame-system = { path = "../../system" } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 751ffc07aa5dd11b3753957ec94a122b8dd38e44..04d218acf8fd14ec82b9e299999273223cae2964 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -190,7 +190,6 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SignedPhase = SignedPhase; type UnsignedPhase = UnsignedPhase; type BetterSignedThreshold = (); - type BetterUnsignedThreshold = (); type OffchainRepeat = OffchainRepeat; type MinerTxPriority = TransactionPriority; type MinerConfig = Self; @@ -276,6 +275,7 @@ impl pallet_staking::Config for Runtime { type NominationsQuota = pallet_staking::FixedNominationsQuota; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type HistoryDepth = HistoryDepth; type EventListeners = (); type WeightInfo = pallet_staking::weights::SubstrateWeight; diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index ed36630d0d04364640765c88906ccd9009d7ff0a..8182863d79665ea9f2c026b3cdc4b2ae10d4e0c7 100644 ---
a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "election provider supporting traits" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,13 +18,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = "solution-type" } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } [dev-dependencies] rand = { version = "0.8.5", features = ["small_rng"] } @@ -29,8 +32,8 @@ sp-io = { path = "../../primitives/io" } sp-npos-elections = { path = "../../primitives/npos-elections" } [features] -default = [ "std" ] -fuzz = [ "default" ] +default = ["std"] +fuzz = ["default"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/election-provider-support/benchmarking/Cargo.toml b/substrate/frame/election-provider-support/benchmarking/Cargo.toml index a8c56b425fd905f005042270a212280d24f50acc..7a2ad5cafb49fff6d181a6b1ce47e562d64fb7df 100644 --- a/substrate/frame/election-provider-support/benchmarking/Cargo.toml +++ b/substrate/frame/election-provider-support/benchmarking/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Benchmarking for election provider support onchain config trait" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,15 +18,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} -frame-election-provider-support = { path = "..", default-features = false} -frame-system = { path = "../../system", default-features = false} -sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-election-provider-support = { path = "..", default-features = false } +frame-system = { path = 
"../../system", default-features = false } +sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 1e3002d5dc49f5f2b272c9fae0a4408e813a95dd..601355fdb7aa34b212a5188e3a67cecd1a5a30a4 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "NPoS Solution Type" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.38", features = ["full", "visit"] } +syn = { version = "2.0.41", features = ["full", "visit"] } quote = "1.0.28" proc-macro2 = "1.0.56" -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.1" [dev-dependencies] parity-scale-codec = "3.6.1" diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index cc90ed119ad7e518a5e22a5816cbc0c1081fa999..a7a84b91dba043c3500076922c87fb9695af486a 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -9,13 +9,16 @@ repository.workspace = true description = "Fuzzer for phragmén solution type implementation." publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } honggfuzz = "0.5" -rand = { version = "0.8", features = ["std", "small_rng"] } +rand = { version = "0.8", features = ["small_rng", "std"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } @@ -24,7 +27,7 @@ frame-election-provider-support = { path = "../.." 
} sp-arithmetic = { path = "../../../../primitives/arithmetic" } sp-runtime = { path = "../../../../primitives/runtime" } # used by generate_solution_type: -sp-npos-elections = { path = "../../../../primitives/npos-elections", default-features = false} +sp-npos-elections = { path = "../../../../primitives/npos-elections", default-features = false } frame-support = { path = "../../../support" } [[bin]] diff --git a/substrate/frame/election-provider-support/src/onchain.rs b/substrate/frame/election-provider-support/src/onchain.rs index 8ac245a360bb5358f630360e5d384936d69c5d8e..412aac4547537855ee0a6c519bc92e67147521fd 100644 --- a/substrate/frame/election-provider-support/src/onchain.rs +++ b/substrate/frame/election-provider-support/src/onchain.rs @@ -182,7 +182,7 @@ impl ElectionProvider for OnChainExecution { mod tests { use super::*; use crate::{ElectionProvider, PhragMMS, SequentialPhragmen}; - use frame_support::{assert_noop, parameter_types}; + use frame_support::{assert_noop, derive_impl, parameter_types}; use sp_npos_elections::Support; use sp_runtime::Perbill; type AccountId = u64; @@ -200,6 +200,7 @@ mod tests { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index cb8bc1035a5f3f8350af30b7ce8f8d00fac812a3..3c0bc56a1d04ba7f90c00f2e7421ebcb5721438b 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet based on seq-Phragmén election method." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,15 +21,15 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.14", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } [dev-dependencies] pallet-balances = { path = 
"../balances" } @@ -35,7 +38,7 @@ sp-tracing = { path = "../../primitives/tracing" } substrate-test-utils = { path = "../../test-utils" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs index 9878f7fd41c068329574810c3fc8edff0e6b7b4a..55bb1b968fa1b11d0a34761b920b97bd7b20bae1 100644 --- a/substrate/frame/elections-phragmen/src/benchmarking.rs +++ b/substrate/frame/elections-phragmen/src/benchmarking.rs @@ -38,7 +38,7 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let _ = T::Currency::make_free_balance_be(&account, amount); // important to increase the total issuance since T::CurrencyToVote will need it to be sane for // phragmen to work. - T::Currency::issue(amount); + let _ = T::Currency::issue(amount); account } diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index e4c56e68f9a5ed5a67ff6376834caf8465dc5971..5e50027e34421332f866a96033c67458d4325f70 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -1307,7 +1307,7 @@ mod tests { use super::*; use crate as elections_phragmen; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, dispatch::DispatchResultWithPostInfo, parameter_types, traits::{ConstU32, ConstU64, OnInitialize}, @@ -1321,6 +1321,7 @@ mod tests { }; use substrate_test_utils::assert_eq_uvec; + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/elections-phragmen/src/migrations/v4.rs b/substrate/frame/elections-phragmen/src/migrations/v4.rs index 7e946371f5ca6cfe8c6399aab61d886a48f7e880..e78465cd618de07fe752cbad2f6912bd6ab10f86 100644 --- a/substrate/frame/elections-phragmen/src/migrations/v4.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v4.rs @@ -69,7 +69,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { } /// Some checks prior to migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn pre_migration>(new: N) { @@ -97,7 +97,7 @@ pub fn pre_migration>(new: N) { } /// Some checks for after migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing. /// /// Panics if anything goes wrong. 
pub fn post_migration() { diff --git a/substrate/frame/elections-phragmen/src/migrations/v5.rs b/substrate/frame/elections-phragmen/src/migrations/v5.rs index 6fac923703fec2071746c01fe9ccc6f6ad300c15..6e360aa8b8c15a21ccbb357d701da074c221fde9 100644 --- a/substrate/frame/elections-phragmen/src/migrations/v5.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v5.rs @@ -71,7 +71,7 @@ pub fn pre_migrate_fn(to_migrate: Vec) -> Box() { diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 1b21502271546c06d04a959c800bd34125ccbdcc..eb6355edd312a1ff5f865c235ffd034f085a3027 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -9,20 +9,24 @@ repository.workspace = true description = "The single package with examples of various types of FRAME pallets" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-default-config-example = { path = "default-config", default-features = false} -pallet-dev-mode = { path = "dev-mode", default-features = false} -pallet-example-basic = { path = "basic", default-features = false} +pallet-default-config-example = { path = "default-config", default-features = false } +pallet-dev-mode = { path = "dev-mode", default-features = false } +pallet-example-basic = { path = "basic", default-features = false } pallet-example-frame-crate = { path = "frame-crate", default-features = false } -pallet-example-kitchensink = { path = "kitchensink", default-features = false} -pallet-example-offchain-worker = { path = "offchain-worker", default-features = false} -pallet-example-split = { path = "split", default-features = false} +pallet-example-kitchensink = { path = "kitchensink", default-features = false } +pallet-example-offchain-worker = { path = "offchain-worker", default-features = false } +pallet-example-split = { path = "split", default-features = false } +pallet-example-tasks = { path = "tasks", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "pallet-default-config-example/std", "pallet-dev-mode/std", @@ -31,6 +35,7 @@ std = [ "pallet-example-kitchensink/std", "pallet-example-offchain-worker/std", "pallet-example-split/std", + "pallet-example-tasks/std", ] try-runtime = [ "pallet-default-config-example/try-runtime", @@ -39,4 +44,5 @@ try-runtime = [ "pallet-example-kitchensink/try-runtime", "pallet-example-offchain-worker/try-runtime", "pallet-example-split/try-runtime", + "pallet-example-tasks/try-runtime", ] diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index d39a93e7abb142a881160da43293e5a3d1f6a4db..3be1a2e558d2c92aeb28ff15bf7bed03f8fe0a59 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME example pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,19 +19,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", 
default-features = false} -pallet-balances = { path = "../../balances", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-balances = { path = "../../balances", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index c7b5b9e9a84511c99d4507d4ab405878df44cf41..e00b1ac01b39fe16645fa3c8f94d4daab2189544 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -19,7 +19,7 @@ use crate::*; use frame_support::{ - assert_ok, + assert_ok, derive_impl, dispatch::{DispatchInfo, GetDispatchInfo}, traits::{ConstU64, OnInitialize}, }; @@ -45,6 +45,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index 13b6ce745437e8b596e1da5c263599fc13d75458..01ddf9d383446237fb6cf483a0142776ba31fa35 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME example pallet demonstrating derive_impl / default_config in action" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,15 +19,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git 
a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index 8a1f6f9d6a82cffbd557ab2d3f1e92ad966abad0..f1611bca2ce3cd8466cfff44512d3e0d8d0888d4 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -47,6 +47,10 @@ pub mod pallet { #[pallet::no_default] // optional. `RuntimeEvent` is automatically excluded as well. type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// The overarching task type. + #[pallet::no_default] + type RuntimeTask: Task; + /// An input parameter to this pallet. This value can have a default, because it is not /// reliant on `frame_system::Config` or the overarching runtime in any way. type WithDefaultValue: Get; @@ -193,6 +197,7 @@ pub mod tests { impl pallet_default_config_example::Config for Runtime { // These two both cannot have defaults. type RuntimeEvent = RuntimeEvent; + type RuntimeTask = RuntimeTask; type HasNoDefault = frame_support::traits::ConstU32<1>; type CannotHaveDefault = SomeCall; diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index 806af334bb01d3e184d8919239a4d2caf5446b18..f634d21cf2c958400a108549545adecacbd70087 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME example pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,18 +19,18 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-balances = { path = "../../balances", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-balances = { path = "../../balances", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/examples/dev-mode/src/tests.rs b/substrate/frame/examples/dev-mode/src/tests.rs index c7722bc0524298565ab3f1c27bf2c56014bb3c12..3acedcd0fd1d31eaca4f1a5dde16140346a4f6fe 100644 --- a/substrate/frame/examples/dev-mode/src/tests.rs +++ b/substrate/frame/examples/dev-mode/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-dev-mode. 
use crate::*; -use frame_support::{assert_ok, traits::ConstU64}; +use frame_support::{assert_ok, derive_impl, traits::ConstU64}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -39,6 +39,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index ceb8c7bfb81931ed8444abcee88b817ec4fcd138..93a46ba7b249cc806d2366e2bd1ee947b162157d 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME example pallet with umbrella crate" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,9 +19,9 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -frame = { path = "../..", default-features = false, features = ["runtime", "experimental"] } +frame = { path = "../..", default-features = false, features = ["experimental", "runtime"] } [features] -default = [ "std" ] -std = [ "codec/std", "frame/std", "scale-info/std" ] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml index 1275ef0b53f664a4420732c6277a05835c125084..4255ebb66b650efb77264b250396c0a05ba0763a 100644 --- a/substrate/frame/examples/kitchensink/Cargo.toml +++ b/substrate/frame/examples/kitchensink/Cargo.toml @@ -7,6 +7,10 @@ license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true description = "FRAME example kitchensink pallet" +publish = false + +[lints] +workspace = true [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,22 +20,22 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } -pallet-balances = { path = "../../balances", default-features = false} +pallet-balances = { path = "../../balances", default-features = false } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = 
{ path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/examples/kitchensink/src/lib.rs b/substrate/frame/examples/kitchensink/src/lib.rs index 56117c59dc6d706393ccf5215e6b194c457307bc..18429bc967d7c1e1f7b181e708ca3a86ef6f251c 100644 --- a/substrate/frame/examples/kitchensink/src/lib.rs +++ b/substrate/frame/examples/kitchensink/src/lib.rs @@ -206,6 +206,10 @@ pub mod pallet { impl Pallet { #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::set_foo_benchmark())] + /// Marks this call as feeless if `new_foo` is zero. + #[pallet::feeless_if(|_origin: &OriginFor, new_foo: &u32, _other_compact: &u128| -> bool { + *new_foo == 0 + })] pub fn set_foo( _: OriginFor, new_foo: u32, @@ -288,9 +292,8 @@ pub mod pallet { } } - /// Allows you to define an enum on the pallet which will then instruct - /// `construct_runtime` to amalgamate all similarly-named enums from other - /// pallets into an aggregate enum. + /// Allows you to define an enum on the pallet which will then instruct `construct_runtime` to + /// amalgamate all similarly-named enums from other pallets into an aggregate enum. #[pallet::composite_enum] pub enum HoldReason { Staking, diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index e6b7715655d16d7da0ac5a022678141cabb6ad4d..464719375c64994d9fcbe42bb53253f769d6350a 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME example pallet for offchain worker" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,16 +20,16 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = lite-json = { version = "0.2.0", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-keystore = { path = "../../../primitives/keystore", optional = true} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-keystore = { path = "../../../primitives/keystore", optional = true } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index 203a59a8af03c61e9bf33e5c21d9a8a574296314..48a8d86588c2d8841b0e454cd17a49eb866c8cf4 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ 
b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -19,7 +19,7 @@ use crate as example_offchain_worker; use crate::*; use codec::Decode; use frame_support::{ - assert_ok, parameter_types, + assert_ok, derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_core::{ @@ -46,6 +46,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index db2a75e388d5e9e7af4717051f66c909640e5a56..97f9062f18189f30542baa0fd08de9b2d98de52d 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME example splitted pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,19 +20,19 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", @@ -46,4 +49,4 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] -try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime" ] +try-runtime = ["frame-support/try-runtime", "frame-system/try-runtime"] diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index 8d65639f835264c1e6220a4e19072cfe913445e0..f38bbe52dc114900e4b497cb17da6deb4406512c 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -43,4 +43,6 @@ //! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be //! built using only the `frame` umbrella crate. //! +//! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work. +//! //! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. 
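// Editor's sketch (not part of the patch): how service code might drain the aggregated
// `RuntimeTask` enum that `construct_runtime!` builds from `#[pallet::tasks_experimental]`
// blocks such as the one in the `pallet-example-tasks` files added below. `iter` and
// `task_index` appear in this diff's tests; `is_valid`/`run` are assumed to follow
// `frame_support::traits::Task` on this branch, so treat the whole function as illustrative.
fn service_ready_tasks<Tsk: frame_support::traits::Task>() {
	for task in Tsk::iter() {
		// Only execute tasks whose `task_condition` still holds; ignore individual failures.
		if task.is_valid() {
			let _ = task.run();
		}
	}
}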
diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..438cb60c756fa0512c9c9e2c7fb4b44d35a95ebe --- /dev/null +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "pallet-example-tasks" +version = "1.0.0-dev" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +description = "Pallet to demonstrate the usage of Tasks to recognize and execute service work" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } + +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } + +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } + +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] +experimental = ["frame-support/experimental", "frame-system/experimental"] diff --git a/substrate/frame/examples/tasks/src/benchmarking.rs b/substrate/frame/examples/tasks/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..81f7d3d3b21c78e5b23685f5e460c1ad4edc60dd --- /dev/null +++ b/substrate/frame/examples/tasks/src/benchmarking.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking for `pallet-example-tasks`.
+ +#![cfg(feature = "runtime-benchmarks")] + +use crate::*; +use frame_benchmarking::v2::*; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn add_number_into_total() { + Numbers::::insert(0, 1); + + #[block] + { + Task::::add_number_into_total(0).unwrap(); + } + + assert_eq!(Numbers::::get(0), None); + } + + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/examples/tasks/src/lib.rs b/substrate/frame/examples/tasks/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..c65d8095bcf6a2c2295bb87b11e4041fbab88173 --- /dev/null +++ b/substrate/frame/examples/tasks/src/lib.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This pallet demonstrates the use of the `pallet::task` api for service work. +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::dispatch::DispatchResult; +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +pub mod mock; +pub mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +pub mod weights; +pub use weights::*; + +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::error] + pub enum Error { + /// The referenced task was not found. + NotFound, + } + + #[pallet::tasks_experimental] + impl Pallet { + /// Add a pair of numbers into the totals and remove them. + #[pallet::task_list(Numbers::::iter_keys())] + #[pallet::task_condition(|i| Numbers::::contains_key(i))] + #[pallet::task_weight(T::WeightInfo::add_number_into_total())] + #[pallet::task_index(0)] + pub fn add_number_into_total(i: u32) -> DispatchResult { + let v = Numbers::::take(i).ok_or(Error::::NotFound)?; + Total::::mutate(|(total_keys, total_values)| { + *total_keys += i; + *total_values += v; + }); + Ok(()) + } + } + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeTask: frame_support::traits::Task; + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(_); + + /// Some running total. + #[pallet::storage] + pub type Total = StorageValue<_, (u32, u32), ValueQuery>; + + /// Numbers to be added into the total. + #[pallet::storage] + pub type Numbers = StorageMap<_, Twox64Concat, u32, u32, OptionQuery>; +} diff --git a/substrate/frame/examples/tasks/src/mock.rs b/substrate/frame/examples/tasks/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..e0fbec3eb7657d68e7987231314545ece18fe4fa --- /dev/null +++ b/substrate/frame/examples/tasks/src/mock.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Mock runtime for `tasks-example` tests. +#![cfg(test)] + +use crate::{self as tasks_example}; +use frame_support::derive_impl; + +pub type AccountId = u32; +pub type Balance = u32; + +type Block = frame_system::mocking::MockBlock; +frame_support::construct_runtime!( + pub struct Runtime { + System: frame_system, + TasksExample: tasks_example, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +impl tasks_example::Config for Runtime { + type RuntimeTask = RuntimeTask; + type WeightInfo = (); +} diff --git a/substrate/frame/examples/tasks/src/tests.rs b/substrate/frame/examples/tasks/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..fc3c69f4aef95601c2a96faf1c939c27ee419fd4 --- /dev/null +++ b/substrate/frame/examples/tasks/src/tests.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for `pallet-example-tasks`. +#![cfg(test)] + +use crate::{mock::*, Numbers}; +use frame_support::traits::Task; +use sp_runtime::BuildStorage; + +#[cfg(feature = "experimental")] +use frame_support::{assert_noop, assert_ok}; + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = RuntimeGenesisConfig { + // We use default for brevity, but you can configure as desired if needed. 
+ system: Default::default(), + } + .build_storage() + .unwrap(); + t.into() +} + +#[test] +fn task_enumerate_works() { + new_test_ext().execute_with(|| { + Numbers::::insert(0, 1); + assert_eq!(crate::pallet::Task::::iter().collect::>().len(), 1); + }); +} + +#[test] +fn runtime_task_enumerate_works_via_frame_system_config() { + new_test_ext().execute_with(|| { + Numbers::::insert(0, 1); + Numbers::::insert(1, 4); + assert_eq!( + ::RuntimeTask::iter().collect::>().len(), + 2 + ); + }); +} + +#[test] +fn runtime_task_enumerate_works_via_pallet_config() { + new_test_ext().execute_with(|| { + Numbers::::insert(1, 4); + assert_eq!( + ::RuntimeTask::iter() + .collect::>() + .len(), + 1 + ); + }); +} + +#[test] +fn task_index_works_at_pallet_level() { + new_test_ext().execute_with(|| { + assert_eq!(crate::pallet::Task::::AddNumberIntoTotal { i: 2u32 }.task_index(), 0); + }); +} + +#[test] +fn task_index_works_at_runtime_level() { + new_test_ext().execute_with(|| { + assert_eq!( + ::RuntimeTask::TasksExample(crate::pallet::Task::< + Runtime, + >::AddNumberIntoTotal { + i: 1u32 + }) + .task_index(), + 0 + ); + }); +} + +#[cfg(feature = "experimental")] +#[test] +fn task_execution_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Numbers::::insert(0, 1); + Numbers::::insert(1, 4); + + let task = + ::RuntimeTask::TasksExample(crate::pallet::Task::< + Runtime, + >::AddNumberIntoTotal { + i: 1u32, + }); + assert_ok!(System::do_task(RuntimeOrigin::signed(1), task.clone(),)); + assert_eq!(Numbers::::get(0), Some(1)); + assert_eq!(Numbers::::get(1), None); + assert_eq!(crate::Total::::get(), (1, 4)); + System::assert_last_event(frame_system::Event::::TaskCompleted { task }.into()); + }); +} + +#[cfg(feature = "experimental")] +#[test] +fn task_execution_fails_for_invalid_task() { + new_test_ext().execute_with(|| { + Numbers::::insert(1, 4); + assert_noop!( + System::do_task( + RuntimeOrigin::signed(1), + ::RuntimeTask::TasksExample( + crate::pallet::Task::::AddNumberIntoTotal { i: 0u32 } + ), + ), + frame_system::Error::::InvalidTask + ); + }); +} diff --git a/substrate/frame/examples/tasks/src/weights.rs b/substrate/frame/examples/tasks/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..793af6e962201fd9f92e0260ea0e24f5bc39753d --- /dev/null +++ b/substrate/frame/examples/tasks/src/weights.rs @@ -0,0 +1,84 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_example_tasks` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-02, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `MacBook.local`, CPU: `` +//! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/node-template +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_example_tasks +// --extrinsic +// * +// --steps +// 20 +// --repeat +// 10 +// --output +// frame/examples/tasks/src/weights.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_template. +pub trait WeightInfo { + fn add_number_into_total() -> Weight; +} + +/// Weight functions for `pallet_example_kitchensink`. +pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: Kitchensink OtherFoo (r:0 w:1) + /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + fn add_number_into_total() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} + +impl WeightInfo for () { + /// Storage: Kitchensink OtherFoo (r:0 w:1) + /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + fn add_number_into_total() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(RocksDbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index 32983a32c4fffabdb561e92659ce3cd878573ad9..b98ceb0ba9a57042f56ccb9d782e84535a9a3c53 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME executives engine" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,14 +21,14 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } frame-try-runtime = { path = "../try-runtime", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-tracing = { path = "../../primitives/tracing", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-tracing = { path = "../../primitives/tracing", default-features = false } [dev-dependencies]
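(Aside, not part of the diff.) The generated `WeightInfo` above is consumed through the pallet's `Config`. A sketch of the usual wiring, assuming the crate is exposed to a runtime as `pallet_example_tasks` and re-exports the `weights` module the way generated weight files normally are:

```rust
// A production runtime plugs in the benchmarked weights...
impl pallet_example_tasks::Config for Runtime {
	type RuntimeTask = RuntimeTask;
	type WeightInfo = pallet_example_tasks::weights::SubstrateWeight<Runtime>;
}
// ...while the mock runtime earlier in this change simply uses the no-op
// implementation: `type WeightInfo = ();`.
```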
array-bytes = "6.1" @@ -37,8 +40,8 @@ sp-io = { path = "../../primitives/io" } sp-version = { path = "../../primitives/version" } [features] -default = [ "std" ] -with-tracing = [ "sp-tracing/with-tracing" ] +default = ["std"] +with-tracing = ["sp-tracing/with-tracing"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/executive/README.md b/substrate/frame/executive/README.md index 96a412a4537a3657563550558f1828d209b4b92b..6151232ecaf1b3f9e743b2d38e06bccc43a24041 100644 --- a/substrate/frame/executive/README.md +++ b/substrate/frame/executive/README.md @@ -34,14 +34,13 @@ The default Substrate node template declares the `Executive` type declaration from the node template. ```rust -# /// Executive: handles dispatch to the various modules. pub type Executive = executive::Executive< Runtime, Block, Context, Runtime, - AllPallets, + AllPalletsWithSystem, >; ``` @@ -51,7 +50,6 @@ You can add custom logic that should be called in your runtime on a runtime upgr generic parameter. The custom logic will be called before the on runtime upgrade logic of all modules is called. ```rust -# struct CustomOnRuntimeUpgrade; impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { fn on_runtime_upgrade() -> frame_support::weights::Weight { @@ -65,7 +63,7 @@ pub type Executive = executive::Executive< Block, Context, Runtime, - AllPallets, + AllPalletsWithSystem, CustomOnRuntimeUpgrade, >; ``` diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index e2c906c1bf6ce49b9686d13d7a5bc5a84ed15482..b351819f612b025cf6ff7072ae4b62598c899be0 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -362,9 +362,13 @@ where Ok(frame_system::Pallet::::block_weight().total()) } - /// Execute all `OnRuntimeUpgrade` of this runtime. + /// Execute all Migrations of this runtime. /// /// The `checks` param determines whether to execute `pre/post_upgrade` and `try_state` hooks. + /// + /// [`frame_system::LastRuntimeUpgrade`] is set to the current runtime version after + /// migrations execute. This is important for idempotency checks, because some migrations use + /// this value to determine whether or not they should execute. pub fn try_runtime_upgrade(checks: UpgradeCheckSelect) -> Result { let before_all_weight = ::before_all_runtime_migrations(); @@ -372,6 +376,13 @@ where <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::try_on_runtime_upgrade( checks.pre_and_post(), )?; + + frame_system::LastRuntimeUpgrade::::put( + frame_system::LastRuntimeUpgradeInfo::from( + >::get(), + ), + ); + // Nothing should modify the state after the migrations ran: let _guard = StorageNoopGuard::default(); @@ -487,6 +498,12 @@ where let mut weight = Weight::zero(); if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); + + frame_system::LastRuntimeUpgrade::::put( + frame_system::LastRuntimeUpgradeInfo::from( + >::get(), + ), + ); } >::initialize(block_number, parent_hash, digest); weight = weight.saturating_add(::note_finished_initialize(); } - /// Returns if the runtime was upgraded since the last time this function was called. + /// Returns if the runtime has been upgraded, based on [`frame_system::LastRuntimeUpgrade`]. 
fn runtime_upgraded() -> bool { let last = frame_system::LastRuntimeUpgrade::::get(); let current = >::get(); - if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { - frame_system::LastRuntimeUpgrade::::put( - frame_system::LastRuntimeUpgradeInfo::from(current), - ); - true - } else { - false - } + last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) } fn initial_checks(block: &Block) { @@ -751,11 +761,11 @@ mod tests { }; use frame_support::{ - assert_err, parameter_types, + assert_err, derive_impl, parameter_types, traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, }; - use frame_system::{ChainContext, LastRuntimeUpgradeInfo}; + use frame_system::{ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; use pallet_balances::Call as BalancesCall; use pallet_transaction_payment::CurrencyAdapter; @@ -907,6 +917,7 @@ mod tests { write: 100, }; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; @@ -994,6 +1005,9 @@ mod tests { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); System::deposit_event(frame_system::Event::CodeUpdated); + + assert_eq!(0, System::last_runtime_upgrade_spec_version()); + Weight::from_parts(100, 0) } } @@ -1356,17 +1370,13 @@ mod tests { new_test_ext(1).execute_with(|| { RuntimeVersionTestValues::mutate(|v| *v = Default::default()); // It should be added at genesis - assert!(frame_system::LastRuntimeUpgrade::::exists()); + assert!(LastRuntimeUpgrade::::exists()); assert!(!Executive::runtime_upgraded()); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { @@ -1376,27 +1386,18 @@ mod tests { } }); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), + spec_version: 0, impl_version: 2, ..Default::default() } }); assert!(!Executive::runtime_upgraded()); - frame_system::LastRuntimeUpgrade::::take(); + LastRuntimeUpgrade::::take(); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); }) } @@ -1444,6 +1445,10 @@ mod tests { assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + assert_eq!( + Some(RuntimeVersionTestValues::get().into()), + LastRuntimeUpgrade::::get(), + ) }); } @@ -1519,6 +1524,9 @@ mod tests { #[test] fn all_weights_are_recorded_correctly() { + // Reset to get the correct new genesis below. 
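(Aside, not part of the diff.) The behavioural change here is that `runtime_upgraded()` no longer writes `LastRuntimeUpgrade` as a side effect; the callers update it only after the migrations have actually run, which is why the tests above now reset the storage item manually. A sketch mirroring the new read-only check, using only `frame_system` items that already appear in this diff (the generic name `T` is illustrative):

```rust
use frame_support::traits::Get;

/// Pure check: `true` when the on-chain `LastRuntimeUpgrade` record differs
/// from the currently compiled runtime version, or is missing entirely.
/// Nothing is written here any more.
fn runtime_was_upgraded<T: frame_system::Config>() -> bool {
	let last = frame_system::LastRuntimeUpgrade::<T>::get();
	let current = T::Version::get();
	last.map(|info| info.was_upgraded(&current)).unwrap_or(true)
}
```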
+ RuntimeVersionTestValues::take(); + new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called for maximum complexity RuntimeVersionTestValues::mutate(|v| { @@ -1535,6 +1543,10 @@ mod tests { Digest::default(), )); + // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` + // succeed. + LastRuntimeUpgrade::::take(); + // All weights that show up in the `initialize_block_impl` let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); let runtime_upgrade_weight = diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 2aa2e918f3e3c307ca3b90c78d8b6ada4812e6fe..673a2f71d6c8cfb839463b7bfc779ecccfb1a86e 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME fast unstake pallet" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,22 +19,22 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} -frame-election-provider-support = { path = "../election-provider-support", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } +frame-election-provider-support = { path = "../election-provider-support", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } docify = "0.2.6" [dev-dependencies] pallet-staking-reward-curve = { path = "../staking/reward-curve" } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } substrate-test-utils = { path = "../../test-utils" } sp-tracing = { path = "../../primitives/tracing" } pallet-staking = { path = "../staking" } @@ -39,7 +42,7 @@ pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/fast-unstake/src/benchmarking.rs b/substrate/frame/fast-unstake/src/benchmarking.rs index 851483e3697bfdffe0e9dcabf8f9a04111a25d0b..4828dcb9b42cb924c8b20363e2447b3c428ecf36 100644 --- a/substrate/frame/fast-unstake/src/benchmarking.rs +++ b/substrate/frame/fast-unstake/src/benchmarking.rs @@ -162,7 +162,7 @@ benchmarks! { fast_unstake_events::().last(), Some(Event::BatchChecked { .. 
}) )); - assert!(stashes.iter().all(|(s, _)| request.stashes.iter().find(|(ss, _)| ss == s).is_some())); + assert!(stashes.iter().all(|(s, _)| request.stashes.iter().any(|(ss, _)| ss == s))); } register_fast_unstake { diff --git a/substrate/frame/fast-unstake/src/lib.rs b/substrate/frame/fast-unstake/src/lib.rs index 153b6c2c353f6cc40ac0689fc1ea26830653ede9..04a50543bcc9f2b74c84f74dbe1615bda9f47b55 100644 --- a/substrate/frame/fast-unstake/src/lib.rs +++ b/substrate/frame/fast-unstake/src/lib.rs @@ -571,7 +571,7 @@ pub mod pallet { .any(|e| T::Staking::is_exposed_in_era(&stash, e)); if is_exposed { - T::Currency::slash_reserved(&stash, deposit); + let _ = T::Currency::slash_reserved(&stash, deposit); log!(info, "slashed {:?} by {:?}", stash, deposit); Self::deposit_event(Event::::Slashed { stash, amount: deposit }); false diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index df133bdfd47f6e56b81eca11048d53b14bb51319..f9326919fd3e4d3bc416589a7f455b7337259b59 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -142,6 +142,7 @@ impl pallet_staking::Config for Runtime { type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); @@ -244,7 +245,7 @@ impl ExtBuilder { (v, Exposure { total: 0, own: 0, others }) }) .for_each(|(validator, exposure)| { - pallet_staking::ErasStakers::::insert(era, validator, exposure); + pallet_staking::EraInfo::::set_exposure(era, &validator, exposure); }); } @@ -342,10 +343,11 @@ pub fn assert_unstaked(stash: &AccountId) { } pub fn create_exposed_nominator(exposed: AccountId, era: u32) { - // create an exposed nominator in era 1 - pallet_staking::ErasStakers::::mutate(era, VALIDATORS_PER_ERA, |expo| { - expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); - }); + // create an exposed nominator in passed era + let mut exposure = pallet_staking::EraInfo::::get_full_exposure(era, &VALIDATORS_PER_ERA); + exposure.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); + pallet_staking::EraInfo::::set_exposure(era, &VALIDATORS_PER_ERA, exposure); + Balances::make_free_balance_be(&exposed, 100); assert_ok!(Staking::bond( RuntimeOrigin::signed(exposed), diff --git a/substrate/frame/fast-unstake/src/tests.rs b/substrate/frame/fast-unstake/src/tests.rs index 94ad6a84b85a1768b60c93ef94727df523b0f480..b19fe3b8c463ba54571d471b0672d8b7f7d88426 100644 --- a/substrate/frame/fast-unstake/src/tests.rs +++ b/substrate/frame/fast-unstake/src/tests.rs @@ -788,10 +788,12 @@ mod on_idle { assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(VALIDATOR_PREFIX))); // but they indeed are exposed! - assert!(pallet_staking::ErasStakers::::contains_key( + assert!(pallet_staking::EraInfo::::get_paged_exposure( BondingDuration::get() - 1, - VALIDATOR_PREFIX - )); + &VALIDATOR_PREFIX, + 0 + ) + .is_some()); // process a block, this validator is exposed and has been slashed. 
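(Aside, not part of the diff.) With direct `ErasStakers` access replaced by the `EraInfo` API, test helpers follow the read-modify-write pattern used by `create_exposed_nominator` above. A compact sketch; the import path for `IndividualExposure` and the concrete `AccountId`/`Balance` aliases are assumptions here:

```rust
use sp_staking::IndividualExposure;

/// Expose `who` under `validator` in `era`, then confirm the exposure through
/// the paged view that the fast-unstake tests now query.
fn expose_in_era(era: u32, validator: AccountId, who: AccountId) {
	let mut exposure = pallet_staking::EraInfo::<Runtime>::get_full_exposure(era, &validator);
	exposure.others.push(IndividualExposure { who, value: 0 as Balance });
	pallet_staking::EraInfo::<Runtime>::set_exposure(era, &validator, exposure);

	assert!(pallet_staking::EraInfo::<Runtime>::get_paged_exposure(era, &validator, 0).is_some());
}
```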
next_block(true); diff --git a/substrate/frame/fast-unstake/src/types.rs b/substrate/frame/fast-unstake/src/types.rs index 15d0a327e917e3123b0df5d22eb2e9d7e655b39f..3fb5720861fa875c87c7b338028d66208e42e220 100644 --- a/substrate/frame/fast-unstake/src/types.rs +++ b/substrate/frame/fast-unstake/src/types.rs @@ -39,6 +39,7 @@ impl frame_support::traits::Get for MaxChecking { } } +#[docify::export] pub(crate) type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// An unstake request. diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 368fcab65cc2a74a4ddc042f5e544f00410a93ea..068fb4e821cbad5efd83cc5a08ffd78cf1a2e9d0 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet for pushing a chain to its weight limits" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,19 +20,19 @@ blake2 = { version = "0.10.4", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.14", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "blake2/std", "codec/std", diff --git a/substrate/frame/glutton/src/mock.rs b/substrate/frame/glutton/src/mock.rs index 4bc40b5478870e7640a507e5292d308d5def10c0..31b78efc574886013827bf6bfbb22dd12f3a39d6 100644 --- a/substrate/frame/glutton/src/mock.rs +++ b/substrate/frame/glutton/src/mock.rs @@ -19,7 +19,7 @@ use super::*; use crate as pallet_glutton; use frame_support::{ - assert_ok, + assert_ok, derive_impl, traits::{ConstU32, ConstU64}, }; use sp_core::H256; @@ -38,6 +38,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 5eacc21721badedecb02ca5953ecf213bba8ad2a..b4f51d88c6d21a7d1367f5a29cf7398660125006 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet for GRANDPA 
finality gadget" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,19 +19,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-authorship = { path = "../authorship", default-features = false} -pallet-session = { path = "../session", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-authorship = { path = "../authorship", default-features = false } +pallet-session = { path = "../session", default-features = false } sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false} +sp-session = { path = "../../primitives/session", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] grandpa = { package = "finality-grandpa", version = "0.16.2", features = ["derive-codec"] } @@ -42,7 +45,7 @@ pallet-timestamp = { path = "../timestamp" } sp-keyring = { path = "../../primitives/keyring" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 95d1c8aa609495f567e23a0650365c924bad040b..0b9f2b3582792e9d93695d34656912a8d70a131f 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -30,14 +30,13 @@ // Re-export since this is necessary for `impl_apis` in runtime. 
pub use sp_consensus_grandpa::{ - self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, VersionedAuthorityList, + self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, }; -use codec::{self as codec, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchResultWithPostInfo, Pays}, pallet_prelude::Get, - storage, traits::OneSessionHandler, weights::Weight, WeakBoundedVec, @@ -45,8 +44,8 @@ use frame_support::{ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_consensus_grandpa::{ - ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_AUTHORITIES_KEY, - GRANDPA_ENGINE_ID, RUNTIME_LOG_TARGET as LOG_TARGET, + ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_ENGINE_ID, + RUNTIME_LOG_TARGET as LOG_TARGET, }; use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult}; use sp_session::{GetSessionNumber, GetValidatorCount}; @@ -75,7 +74,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -145,7 +144,7 @@ pub mod pallet { // enact the change if we've reached the enacting block if block_number == pending_change.scheduled_at + pending_change.delay { - Self::set_grandpa_authorities(&pending_change.next_authorities); + Authorities::::put(&pending_change.next_authorities); Self::deposit_event(Event::NewAuthorities { authority_set: pending_change.next_authorities.into_inner(), }); @@ -342,6 +341,11 @@ pub mod pallet { #[pallet::getter(fn session_for_set)] pub(super) type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; + /// The current list of authorities. + #[pallet::storage] + pub(crate) type Authorities = + StorageValue<_, BoundedAuthorityList, ValueQuery>; + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] pub struct GenesisConfig { @@ -354,7 +358,7 @@ pub mod pallet { impl BuildGenesisConfig for GenesisConfig { fn build(&self) { CurrentSetId::::put(SetId::default()); - Pallet::::initialize(&self.authorities) + Pallet::::initialize(self.authorities.clone()) } } @@ -428,12 +432,7 @@ pub enum StoredState { impl Pallet { /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { - storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() - } - - /// Set the current set of authorities, along with their respective weights. - fn set_grandpa_authorities(authorities: &AuthorityList) { - storage::unhashed::put(GRANDPA_AUTHORITIES_KEY, &VersionedAuthorityList::from(authorities)); + Authorities::::get().into_inner() } /// Schedule GRANDPA to pause starting in the given number of blocks. @@ -522,10 +521,14 @@ impl Pallet { // Perform module initialization, abstracted so that it can be called either through genesis // config builder or through `on_genesis_session`. 
- fn initialize(authorities: &AuthorityList) { + fn initialize(authorities: AuthorityList) { if !authorities.is_empty() { assert!(Self::grandpa_authorities().is_empty(), "Authorities are already initialized!"); - Self::set_grandpa_authorities(authorities); + Authorities::::put( + &BoundedAuthorityList::::try_from(authorities).expect( + "Grandpa: `Config::MaxAuthorities` is smaller than the number of genesis authorities!", + ), + ); } // NOTE: initialize first session of first set. this is necessary for @@ -568,7 +571,7 @@ where I: Iterator, { let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - Self::initialize(&authorities); + Self::initialize(authorities); } fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) diff --git a/substrate/frame/grandpa/src/migrations.rs b/substrate/frame/grandpa/src/migrations.rs index 6307cbdd3b05685906b14a3c519325457bf9545e..3a484eb60d284c3988a1c2e1fad49bdc10d03b6a 100644 --- a/substrate/frame/grandpa/src/migrations.rs +++ b/substrate/frame/grandpa/src/migrations.rs @@ -22,8 +22,11 @@ use frame_support::{ use crate::{Config, CurrentSetId, SetIdSession, LOG_TARGET}; +pub use v5::MigrateV4ToV5; + /// Version 4. pub mod v4; +mod v5; /// This migration will clean up all stale set id -> session entries from the /// `SetIdSession` storage map, only the latest `max_set_id_session_entries` diff --git a/substrate/frame/grandpa/src/migrations/v4.rs b/substrate/frame/grandpa/src/migrations/v4.rs index 8604296b6e57b9829851213f7843fa0b0a3c31fa..9daa818071f70e62557b67e49e7896a9b9ad623d 100644 --- a/substrate/frame/grandpa/src/migrations/v4.rs +++ b/substrate/frame/grandpa/src/migrations/v4.rs @@ -63,7 +63,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { } /// Some checks prior to migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn pre_migration>(new: N) { @@ -99,7 +99,7 @@ pub fn pre_migration>(new: N) { } /// Some checks for after migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn post_migration() { diff --git a/substrate/frame/grandpa/src/migrations/v5.rs b/substrate/frame/grandpa/src/migrations/v5.rs new file mode 100644 index 0000000000000000000000000000000000000000..24cfc34104b5f31cca70f93bc53309ee954a2d42 --- /dev/null +++ b/substrate/frame/grandpa/src/migrations/v5.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
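(Aside, not part of the diff.) The file that follows defines `MigrateV4ToV5`, a `VersionedMigration` from storage version 4 to 5. A sketch of how a runtime might schedule it; `Runtime`, `Block` and the surrounding `Executive` wiring are assumptions, shaped like the executive README shown earlier in this change:

```rust
/// Runtime migrations to run on the next upgrade.
pub type Migrations = (
	// Moves the authority set from the unhashed `:grandpa_authorities` key
	// into the new `Authorities` storage item and bumps pallet-grandpa to v5.
	pallet_grandpa::migrations::MigrateV4ToV5<Runtime>,
);

pub type Executive = frame_executive::Executive<
	Runtime,
	Block,
	frame_system::ChainContext<Runtime>,
	Runtime,
	AllPalletsWithSystem,
	Migrations,
>;
```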
+ +use crate::{BoundedAuthorityList, Pallet}; +use codec::Decode; +use frame_support::{ + migrations::VersionedMigration, + storage, + traits::{Get, OnRuntimeUpgrade}, + weights::Weight, +}; +use sp_consensus_grandpa::AuthorityList; +use sp_std::{marker::PhantomData, vec::Vec}; + +const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; + +fn load_authority_list() -> AuthorityList { + storage::unhashed::get_raw(GRANDPA_AUTHORITIES_KEY).map_or_else( + || Vec::new(), + |l| <(u8, AuthorityList)>::decode(&mut &l[..]).unwrap_or_default().1, + ) +} + +/// Actual implementation of [`MigrateV4ToV5`]. +pub struct MigrateImpl(PhantomData); + +impl OnRuntimeUpgrade for MigrateImpl { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + let authority_list_len = load_authority_list().len() as u32; + + if authority_list_len > T::MaxAuthorities::get() { + return Err( + "Grandpa: `Config::MaxAuthorities` is smaller than the actual number of authorities.".into() + ) + } + + if authority_list_len == 0 { + return Err("Grandpa: Authority list is empty!".into()) + } + + Ok(authority_list_len.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let len = u32::decode(&mut &state[..]).unwrap(); + + frame_support::ensure!( + len == crate::Pallet::::grandpa_authorities().len() as u32, + "Grandpa: pre-migrated and post-migrated list should have the same length" + ); + + frame_support::ensure!( + load_authority_list().is_empty(), + "Old authority list shouldn't exist anymore" + ); + + Ok(()) + } + + fn on_runtime_upgrade() -> Weight { + crate::Authorities::::put( + &BoundedAuthorityList::::force_from( + load_authority_list(), + Some("Grandpa: `Config::MaxAuthorities` is smaller than the actual number of authorities.") + ) + ); + + storage::unhashed::kill(GRANDPA_AUTHORITIES_KEY); + + T::DbWeight::get().reads_writes(1, 2) + } +} + +/// Migrate the storage from V4 to V5. +/// +/// Switches from `GRANDPA_AUTHORITIES_KEY` to a normal FRAME storage item. +pub type MigrateV4ToV5 = + VersionedMigration<4, 5, MigrateImpl, Pallet, ::DbWeight>; diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 9afcec1c797a3440cc8c572a77eb16f34d193aac..f1f51e0b118163ec1eee16fb951260306832e6da 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -27,7 +27,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; @@ -66,6 +66,7 @@ impl_opaque_keys! 
{ } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -205,6 +206,7 @@ impl pallet_staking::Config for Test { type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type HistoryDepth = ConstU32<84>; type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index 309c0aab55003fdaa8ea5d2720481710244ab458..a562d7607b468a1499dae2ed42c519aa9563951c 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME identity management pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,19 +19,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } enumflags2 = { version = "0.7.7" } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "enumflags2/std", diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index 264ea3ddb41f90fbcf977de016f232b0dfc6643d..133f9eeb4befcb6a004dfc4792e1227d027d1602 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -79,8 +79,10 @@ mod tests; mod types; pub mod weights; +use codec::Encode; use frame_support::{ - pallet_prelude::DispatchResult, + ensure, + pallet_prelude::{DispatchError, DispatchResult}, traits::{BalanceStatus, Currency, Get, OnUnbalanced, ReservableCurrency}, }; use sp_runtime::traits::{AppendZerosInput, Hash, Saturating, StaticLookup, Zero}; @@ -395,8 +397,7 @@ pub mod pallet { ); let (old_deposit, old_ids) = >::get(&sender); - let new_deposit = - T::SubAccountDeposit::get().saturating_mul(>::from(subs.len() as u32)); + let new_deposit = Self::subs_deposit(subs.len() as u32); let not_other_sub = subs.iter().filter_map(|i| SuperOf::::get(&i.0)).all(|i| i.0 == sender); @@ -898,6 +899,26 @@ impl Pallet { .collect() } + /// Calculate the deposit required for a number of `sub` accounts. 
+ fn subs_deposit(subs: u32) -> BalanceOf { + T::SubAccountDeposit::get().saturating_mul(>::from(subs)) + } + + /// Take the `current` deposit that `who` is holding, and update it to a `new` one. + fn rejig_deposit( + who: &T::AccountId, + current: BalanceOf, + new: BalanceOf, + ) -> DispatchResult { + if new > current { + T::Currency::reserve(who, new - current)?; + } else if new < current { + let err_amount = T::Currency::unreserve(who, current - new); + debug_assert!(err_amount.is_zero()); + } + Ok(()) + } + /// Check if the account has corresponding identity information by the identity field. pub fn has_identity( who: &T::AccountId, @@ -906,4 +927,110 @@ impl Pallet { IdentityOf::::get(who) .map_or(false, |registration| (registration.info.has_identity(fields))) } + + /// Reap an identity, clearing associated storage items and refunding any deposits. This + /// function is very similar to (a) `clear_identity`, but called on a `target` account instead + /// of self; and (b) `kill_identity`, but without imposing a slash. + /// + /// Parameters: + /// - `target`: The account for which to reap identity state. + /// + /// Return type is a tuple of the number of registrars, `IdentityInfo` bytes, and sub accounts, + /// respectively. + /// + /// NOTE: This function is here temporarily for migration of Identity info from the Polkadot + /// Relay Chain into a system parachain. It will be removed after the migration. + pub fn reap_identity(who: &T::AccountId) -> Result<(u32, u32, u32), DispatchError> { + // `take` any storage items keyed by `target` + // identity + let id = >::take(&who).ok_or(Error::::NotNamed)?; + let registrars = id.judgements.len() as u32; + let encoded_byte_size = id.info.encoded_size() as u32; + + // subs + let (subs_deposit, sub_ids) = >::take(&who); + let actual_subs = sub_ids.len() as u32; + for sub in sub_ids.iter() { + >::remove(sub); + } + + // unreserve any deposits + let deposit = id.total_deposit().saturating_add(subs_deposit); + let err_amount = T::Currency::unreserve(&who, deposit); + debug_assert!(err_amount.is_zero()); + Ok((registrars, encoded_byte_size, actual_subs)) + } + + /// Update the deposits held by `target` for its identity info. + /// + /// Parameters: + /// - `target`: The account for which to update deposits. + /// + /// Return type is a tuple of the new Identity and Subs deposits, respectively. + /// + /// NOTE: This function is here temporarily for migration of Identity info from the Polkadot + /// Relay Chain into a system parachain. It will be removed after the migration. 
+ pub fn poke_deposit( + target: &T::AccountId, + ) -> Result<(BalanceOf, BalanceOf), DispatchError> { + // Identity Deposit + let new_id_deposit = IdentityOf::::try_mutate( + &target, + |registration| -> Result, DispatchError> { + let reg = registration.as_mut().ok_or(Error::::NoIdentity)?; + // Calculate what deposit should be + let encoded_byte_size = reg.info.encoded_size() as u32; + let byte_deposit = + T::ByteDeposit::get().saturating_mul(>::from(encoded_byte_size)); + let new_id_deposit = T::BasicDeposit::get().saturating_add(byte_deposit); + + // Update account + Self::rejig_deposit(&target, reg.deposit, new_id_deposit)?; + + reg.deposit = new_id_deposit; + Ok(new_id_deposit) + }, + )?; + + // Subs Deposit + let new_subs_deposit = SubsOf::::try_mutate( + &target, + |(current_subs_deposit, subs_of)| -> Result, DispatchError> { + let new_subs_deposit = Self::subs_deposit(subs_of.len() as u32); + Self::rejig_deposit(&target, *current_subs_deposit, new_subs_deposit)?; + *current_subs_deposit = new_subs_deposit; + Ok(new_subs_deposit) + }, + )?; + Ok((new_id_deposit, new_subs_deposit)) + } + + /// Set an identity with zero deposit. Only used for benchmarking that involves `rejig_deposit`. + #[cfg(feature = "runtime-benchmarks")] + pub fn set_identity_no_deposit( + who: &T::AccountId, + info: T::IdentityInformation, + ) -> DispatchResult { + IdentityOf::::insert( + &who, + Registration { + judgements: Default::default(), + deposit: Zero::zero(), + info: info.clone(), + }, + ); + Ok(()) + } + + /// Set subs with zero deposit. Only used for benchmarking that involves `rejig_deposit`. + #[cfg(feature = "runtime-benchmarks")] + pub fn set_sub_no_deposit(who: &T::AccountId, sub: T::AccountId) -> DispatchResult { + use frame_support::BoundedVec; + let subs = BoundedVec::<_, T::MaxSubAccounts>::try_from(vec![sub]).unwrap(); + SubsOf::::insert::< + &T::AccountId, + (BalanceOf, BoundedVec), + >(&who, (Zero::zero(), subs)); + Ok(()) + } } diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 71192ea65a8c1afcf5ae7b45c813e53ce1fc2542..8ac7b4d66cb6b9d162d1dfecb14b500c3df0341c 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -25,7 +25,7 @@ use crate::{ use codec::{Decode, Encode}; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64, EitherOfDiverse, Get}, BoundedVec, }; @@ -47,6 +47,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -712,3 +713,70 @@ fn test_has_identity() { )); }); } + +#[test] +fn reap_identity_works() { + new_test_ext().execute_with(|| { + let ten_info = ten(); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten_info.clone()))); + assert_ok!(Identity::set_subs( + RuntimeOrigin::signed(10), + vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] + )); + // deposit is correct + let id_deposit = id_deposit(&ten_info); + let subs_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - subs_deposit); + // reap + assert_ok!(Identity::reap_identity(&10)); + // no identity or subs + assert!(Identity::identity(10).is_none()); + assert!(Identity::super_of(20).is_none()); + // 
balance is unreserved + assert_eq!(Balances::free_balance(10), 1000); + }); +} + +#[test] +fn poke_deposit_works() { + new_test_ext().execute_with(|| { + let ten_info = ten(); + // Set a custom registration with 0 deposit + IdentityOf::::insert( + &10, + Registration { + judgements: BoundedVec::default(), + deposit: Zero::zero(), + info: ten_info.clone(), + }, + ); + assert!(Identity::identity(10).is_some()); + // Set a sub with zero deposit + SubsOf::::insert::<&u64, (u64, BoundedVec>)>( + &10, + (0, vec![20].try_into().unwrap()), + ); + SuperOf::::insert(&20, (&10, Data::Raw(vec![1; 1].try_into().unwrap()))); + // Balance is free + assert_eq!(Balances::free_balance(10), 1000); + + // poke + assert_ok!(Identity::poke_deposit(&10)); + + // free balance reduced correctly + let id_deposit = id_deposit(&ten_info); + let subs_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - subs_deposit); + // new registration deposit is 10 + assert_eq!( + Identity::identity(&10), + Some(Registration { + judgements: BoundedVec::default(), + deposit: id_deposit, + info: ten() + }) + ); + // new subs deposit is 10 vvvvvvvvvvvv + assert_eq!(Identity::subs_of(10), (subs_deposit, vec![20].try_into().unwrap())); + }); +} diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index d83ff540648aaff9205e81893873ce5441bd86d7..b5b01858c898a6cc4596e55e76b85433f79e9ca6 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME's I'm online pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,22 +19,22 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-authorship = { path = "../authorship", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-authorship = { path = "../authorship", default-features = false } sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-session = { path = "../session" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git 
a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 85da061fe904af384a1bbce3bec113d8b8dd6114..2f4e39220260bcce666df048421b430677b79476 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -20,7 +20,7 @@ #![cfg(test)] use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, weights::Weight, }; @@ -113,6 +113,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { result } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index d392522718a5b724fb89a2a7c9ee844758595d5b..4f0c780c6af35d4273bc1824830a663c54220fd3 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -9,26 +9,29 @@ repository.workspace = true description = "FRAME indices management pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-keyring = { path = "../../primitives/keyring", optional = true} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-keyring = { path = "../../primitives/keyring", optional = true } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/indices/src/lib.rs b/substrate/frame/indices/src/lib.rs index 3c0b49304131f58ee64fc1e2824f9e2b9b6a33c9..ff12d092cfb8d67f7d44ca5648a120bbd8507cb3 100644 --- a/substrate/frame/indices/src/lib.rs +++ b/substrate/frame/indices/src/lib.rs @@ -223,7 +223,7 @@ pub mod pallet { let (account, amount, perm) = maybe_value.take().ok_or(Error::::NotAssigned)?; ensure!(!perm, Error::::Permanent); ensure!(account == who, Error::::NotOwner); - T::Currency::slash_reserved(&who, amount); + let _ = T::Currency::slash_reserved(&who, amount); *maybe_value = Some((account, Zero::zero(), true)); Ok(()) })?; diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 7dc6730d34e5ae3f78fbed511adf0bd336787092..913a37fe55ba8408643bc2d9883c3cc9bc02de06 100644 --- 
a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -20,7 +20,10 @@ #![cfg(test)] use crate::{self as pallet_indices, Config}; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::BuildStorage; @@ -35,6 +38,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index 07c5e3997d2fb608e96462ed441a0871968a7798..fb1447d10457a94e255bacc4af2d73c0ca084c73 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Insecure do not use in production: FRAME randomness collective flip pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,17 +19,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } safe-mix = { version = "1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs index 474087777c46e42e61a099de3721859d314e6624..c7ed22d1dd5186e20aa532ca6aec727f62cc1b8d 100644 --- a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs +++ b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs @@ -169,7 +169,7 @@ mod tests { }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, OnInitialize, Randomness}, }; use frame_system::limits; @@ -189,6 +189,7 @@ mod tests { ::max(2 * 1024); } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index a4942abf243ca7d43fb80e1fadfc7772f9ba332b..49f84b04b2578094e72dcb090b32c735a873d769 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME Participation Lottery Pallet" +[lints] +workspace = true + 
[package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,11 +19,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] frame-support-test = { path = "../support/test" } @@ -29,7 +32,7 @@ sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/lottery/src/lib.rs b/substrate/frame/lottery/src/lib.rs index c54f6d76803fff51e820b99db1b86c82b7629ca6..54a8edd38606d76bfa4780d05918d05a1d690ae9 100644 --- a/substrate/frame/lottery/src/lib.rs +++ b/substrate/frame/lottery/src/lib.rs @@ -368,7 +368,8 @@ pub mod pallet { // Make sure pot exists. let lottery_account = Self::account_id(); if T::Currency::total_balance(&lottery_account).is_zero() { - T::Currency::deposit_creating(&lottery_account, T::Currency::minimum_balance()); + let _ = + T::Currency::deposit_creating(&lottery_account, T::Currency::minimum_balance()); } Self::deposit_event(Event::::LotteryStarted); Ok(()) diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs index e50ec3441b2e5e15e84ad33bd6a3cce267a3dfea..6e50529619bfbf59647a59fdcac8d8dcd00615b6 100644 --- a/substrate/frame/lottery/src/mock.rs +++ b/substrate/frame/lottery/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_lottery; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, }; use frame_support_test::TestRandomness; @@ -47,6 +47,7 @@ parameter_types! 
{ pub const AvailableBlockRatio: Perbill = Perbill::one(); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 18c771bf72c7e8fa0a7ba76bea93a2d048c75021..c4c94e202a4db090e8988afb09eafa9a1ed2da14 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME membership management pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,16 +19,16 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index 6fb61f0e491be1b507ef13a0b382741da882aa50..2f4bf4bc4ffa15255c50a25e9ac0a946f019de8b 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -531,7 +531,7 @@ mod tests { }; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64, StorageVersion}, }; use frame_system::EnsureSignedBy; @@ -551,6 +551,7 @@ mod tests { pub static Prime: Option = None; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/membership/src/migrations/v4.rs b/substrate/frame/membership/src/migrations/v4.rs index 38e97af51a09d150482559efcdd2e557cfa1307b..9b80aca8684744333cad19f997bf1df394925123 100644 --- a/substrate/frame/membership/src/migrations/v4.rs +++ b/substrate/frame/membership/src/migrations/v4.rs @@ -77,7 +77,7 @@ pub fn migrate>(old_pallet_name: N, new_pallet_name: N) { @@ -105,7 +105,7 @@ pub fn pre_migrate>(old_pallet_name: N, new_ } /// Some checks for after migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. 
+/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn post_migrate>(old_pallet_name: N, new_pallet_name: N) { diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml index 2c30af43b67de254a6b377fded141fee5a608049..eaa17d88e9959ee8e51f42f55b46adc45187b04b 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME Merkle Mountain Range pallet." +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,14 +18,14 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-mmr-primitives = { path = "../../primitives/merkle-mountain-range", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-mmr-primitives = { path = "../../primitives/merkle-mountain-range", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] array-bytes = "6.1" @@ -30,7 +33,7 @@ env_logger = "0.9" itertools = "0.10.3" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index 48304cd882c19e53eca51f6b2821657672f150f6..b5162d70ccb594950a31c89ee3816991630fcc7f 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -8,22 +8,26 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet to queue and process messages" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +serde = { version = "1.0.193", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } +environmental = { version = "1.1.4", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", 
default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-weights = { path = "../../primitives/weights", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-weights = { path = "../../primitives/weights", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } [dev-dependencies] sp-tracing = { path = "../../primitives/tracing" } @@ -31,9 +35,10 @@ rand = "0.8.5" rand_distr = "0.4.3" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", + "environmental/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", diff --git a/substrate/frame/message-queue/src/benchmarking.rs b/substrate/frame/message-queue/src/benchmarking.rs index eedaaebeca944bd8dfd1778fea82ad7504111d03..7e99bc058584504dfea69ebcd3463657e4379d83 100644 --- a/substrate/frame/message-queue/src/benchmarking.rs +++ b/substrate/frame/message-queue/src/benchmarking.rs @@ -25,6 +25,7 @@ use super::{mock_helpers::*, Pallet as MessageQueue, *}; use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::RawOrigin; +use sp_io::hashing::blake2_256; use sp_std::prelude::*; #[benchmarks( @@ -142,7 +143,7 @@ mod benchmarks { // Check that it was processed. 
assert_last_event::( Event::Processed { - id: sp_io::hashing::blake2_256(&msg), + id: blake2_256(&msg).into(), origin: 0.into(), weight_used: 1.into_weight(), success: true, @@ -227,7 +228,7 @@ mod benchmarks { assert_last_event::( Event::Processed { - id: sp_io::hashing::blake2_256(&((msgs - 1) as u32).encode()), + id: blake2_256(&((msgs - 1) as u32).encode()).into(), origin: 0.into(), weight_used: Weight::from_parts(1, 1), success: true, @@ -264,7 +265,7 @@ mod benchmarks { assert_last_event::( Event::Processed { - id: sp_io::hashing::blake2_256(&((msgs - 1) as u32).encode()), + id: blake2_256(&((msgs - 1) as u32).encode()).into(), origin: 0.into(), weight_used: Weight::from_parts(1, 1), success: true, diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index 965b96a99ca522b11336b9469b6cfde04787ad8f..fee5d24213538eca9b44ed4fae516ca82ec4d445 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -29,8 +29,8 @@ use crate::{ mock::{ - build_and_execute, CountingMessageProcessor, IntoWeight, MockedWeightInfo, - NumMessagesProcessed, YieldingQueues, + build_and_execute, gen_seed, Callback, CountingMessageProcessor, IntoWeight, + MessagesProcessed, MockedWeightInfo, NumMessagesProcessed, YieldingQueues, }, mock_helpers::MessageOrigin, *, @@ -38,7 +38,7 @@ use crate::{ use crate as pallet_message_queue; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use rand::{rngs::StdRng, Rng, SeedableRng}; @@ -57,6 +57,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -119,13 +120,13 @@ impl Config for Test { /// Processing all remaining 28639 messages /// ``` #[test] -#[ignore] // Only run in the CI. +#[ignore] // Only run in the CI, otherwise its too slow. fn stress_test_enqueue_and_service() { let blocks = 20; let max_queues = 10_000; let max_messages_per_queue = 10_000; let max_msg_len = MaxMessageLenOf::::get(); - let mut rng = StdRng::seed_from_u64(43); + let mut rng = StdRng::seed_from_u64(gen_seed()); build_and_execute::(|| { let mut msgs_remaining = 0; @@ -147,6 +148,74 @@ fn stress_test_enqueue_and_service() { }); } +/// Very similar to `stress_test_enqueue_and_service`, but enqueues messages while processing them. +#[test] +#[ignore] // Only run in the CI, otherwise its too slow. +fn stress_test_recursive() { + let blocks = 20; + let mut rng = StdRng::seed_from_u64(gen_seed()); + + // We need to use thread-locals since the callback cannot capture anything. + parameter_types! { + pub static TotalEnqueued: u32 = 0; + pub static Enqueued: u32 = 0; + pub static Called: u32 = 0; + } + + Called::take(); + Enqueued::take(); + TotalEnqueued::take(); + + Callback::set(Box::new(|_, _| { + let mut rng = StdRng::seed_from_u64(Enqueued::get() as u64); + let max_queues = 1_000; + let max_messages_per_queue = 1_000; + let max_msg_len = MaxMessageLenOf::::get(); + + // Instead of directly enqueueing, we enqueue inside a `service` call. 
+ let enqueued = enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng); + TotalEnqueued::set(TotalEnqueued::get() + enqueued); + Enqueued::set(Enqueued::get() + enqueued); + Called::set(Called::get() + 1); + })); + + build_and_execute::(|| { + let mut msgs_remaining = 0; + for b in 0..blocks { + log::info!("Block #{}", b); + MessageQueue::enqueue_message( + BoundedSlice::defensive_truncate_from(format!("callback={b}").as_bytes()), + b.into(), + ); + + msgs_remaining += Enqueued::take() + 1; + // Pick a fraction of all messages currently in queue and process them. + let processed = rng.gen_range(1..=msgs_remaining); + log::info!("Processing {} of all messages {}", processed, msgs_remaining); + process_some_messages(processed); // This also advances the block. + msgs_remaining -= processed; + TotalEnqueued::set(TotalEnqueued::get() - processed + 1); + MessageQueue::do_try_state().unwrap(); + } + while Called::get() < blocks { + msgs_remaining += Enqueued::take(); + // Pick a fraction of all messages currently in queue and process them. + let processed = rng.gen_range(1..=msgs_remaining); + log::info!("Processing {} of all messages {}", processed, msgs_remaining); + process_some_messages(processed); // This also advances the block. + msgs_remaining -= processed; + TotalEnqueued::set(TotalEnqueued::get() - processed); + MessageQueue::do_try_state().unwrap(); + } + + let msgs_remaining = TotalEnqueued::take(); + log::info!("Processing all remaining {} messages", msgs_remaining); + process_all_messages(msgs_remaining); + assert_eq!(Called::get(), blocks); + post_conditions(); + }); +} + /// Simulates heavy usage of the suspension logic via `Yield`. /// /// # Example output @@ -163,14 +232,14 @@ fn stress_test_enqueue_and_service() { /// Processing all remaining 430 messages /// ``` #[test] -#[ignore] // Only run in the CI. +#[ignore] // Only run in the CI, otherwise its too slow. fn stress_test_queue_suspension() { let blocks = 20; let max_queues = 10_000; let max_messages_per_queue = 10_000; let (max_suspend_per_block, max_resume_per_block) = (100, 50); let max_msg_len = MaxMessageLenOf::::get(); - let mut rng = StdRng::seed_from_u64(43); + let mut rng = StdRng::seed_from_u64(gen_seed()); build_and_execute::(|| { let mut suspended = BTreeSet::::new(); @@ -299,6 +368,7 @@ fn process_all_messages(expected: u32) { assert_eq!(consumed, Weight::from_all(expected as u64)); assert_eq!(NumMessagesProcessed::take(), expected as usize); + MessagesProcessed::take(); } /// Returns the weight consumed by `MessageQueue::on_initialize()`. @@ -326,5 +396,6 @@ fn post_conditions() { assert!(ServiceHead::::get().is_none()); // This still works fine. assert_eq!(MessageQueue::service_queues(Weight::MAX), Weight::zero(), "Nothing left"); + MessageQueue::do_try_state().unwrap(); next_block(); } diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 12d289478b37c5a0eb81aeb5f673a6d34d8dcf4f..07eb0041985342522a113339769b98f834ef4a17 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -49,9 +49,21 @@ //! **Message Execution** //! //! Executing a message is offloaded to the [`Config::MessageProcessor`] which contains the actual -//! logic of how to handle the message since they are blobs. A message can be temporarily or -//! permanently overweight. The pallet will perpetually try to execute a temporarily overweight -//! message. 
A permanently overweight message is skipped and must be executed manually. +//! logic of how to handle the message since they are blobs. Storage changes are not rolled back on +//! error. +//! +//! A failed message can be temporarily or permanently overweight. The pallet will perpetually try +//! to execute a temporarily overweight message. A permanently overweight message is skipped and +//! must be executed manually. +//! +//! **Reentrancy** +//! +//! This pallet has two entry points for executing (possibly recursive) logic; +//! [`Pallet::service_queues`] and [`Pallet::execute_overweight`]. Both entry points are guarded by +//! the same mutex to error on reentrancy. The only functions that are explicitly **allowed** to be +//! called by a message processor are: [`Pallet::enqueue_message`] and +//! [`Pallet::enqueue_messages`]. All other functions are forbidden and error with +//! [`Error::RecursiveDisallowed`]. //! //! **Pagination** //! @@ -146,6 +158,7 @@ //! which is the default state for a message after being enqueued. //! - `knitting`/`unknitting`: The means of adding or removing a `Queue` from the `ReadyRing`. //! - `MEL`: The Max Encoded Length of a type, see [`codec::MaxEncodedLen`]. +//! - `Reentrance`: To enter an execution context again before it has completed. //! //! # Properties //! @@ -180,6 +193,7 @@ //! expensive. Currently this is archived by having one queue per para-chain/thread, which keeps the //! number of queues within `O(n)` and should be "good enough". +#![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; @@ -194,8 +208,8 @@ use frame_support::{ defensive, pallet_prelude::*, traits::{ - DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, ProcessMessage, - ProcessMessageError, QueueFootprint, QueuePausedQuery, ServiceQueues, + Defensive, DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, + ProcessMessage, ProcessMessageError, QueueFootprint, QueuePausedQuery, ServiceQueues, }, BoundedSlice, CloneNoBound, DefaultNoBound, }; @@ -203,6 +217,7 @@ use frame_system::pallet_prelude::*; pub use pallet::*; use scale_info::TypeInfo; use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; +use sp_core::{defer, H256}; use sp_runtime::{ traits::{One, Zero}, SaturatedConversion, Saturating, @@ -460,6 +475,10 @@ pub mod pallet { /// Processor for a message. /// + /// Storage changes are not rolled back on error. + /// + /// # Benchmarking + /// /// Must be set to [`mock_helpers::NoopMessageProcessor`] for benchmarking. /// Other message processors that consumes exactly (1, 1) weight for any give message will /// work as well. Otherwise the benchmarking will also measure the weight of the message @@ -516,18 +535,51 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Message discarded due to an error in the `MessageProcessor` (usually a format error). - ProcessingFailed { id: [u8; 32], origin: MessageOriginOf, error: ProcessMessageError }, + ProcessingFailed { + /// The `blake2_256` hash of the message. + id: H256, + /// The queue of the message. + origin: MessageOriginOf, + /// The error that occurred. + /// + /// This error is pretty opaque. More fine-grained errors need to be emitted as events + /// by the `MessageProcessor`. + error: ProcessMessageError, + }, /// Message is processed. - Processed { id: [u8; 32], origin: MessageOriginOf, weight_used: Weight, success: bool }, + Processed { + /// The `blake2_256` hash of the message. 
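To make the reentrancy rules above concrete: a message processor may enqueue further messages while one is being processed, but it may not re-enter servicing. A sketch, assuming the pallet's own test mock is in scope (`MessageQueue` as the pallet instance, `MessageOrigin` from `mock_helpers`); this is not code from the diff:

use frame_support::{
	traits::{DefensiveTruncateFrom, ProcessMessage, ProcessMessageError},
	weights::{Weight, WeightMeter},
	BoundedSlice,
};

pub struct Requeuer;

impl ProcessMessage for Requeuer {
	type Origin = MessageOrigin;

	fn process_message(
		_message: &[u8],
		origin: Self::Origin,
		meter: &mut WeightMeter,
		_id: &mut [u8; 32],
	) -> Result<bool, ProcessMessageError> {
		let required = Weight::from_parts(1, 1);
		meter.try_consume(required).map_err(|_| ProcessMessageError::Overweight(required))?;

		// Explicitly allowed: enqueue more work while a message is executing.
		MessageQueue::enqueue_message(
			BoundedSlice::defensive_truncate_from(&b"follow-up"[..]),
			origin,
		);

		// Not allowed: calling `MessageQueue::service_queues(..)` or
		// `execute_overweight(..)` from here hits the service mutex and
		// fails with `RecursiveDisallowed`.
		Ok(true)
	}
}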
+ id: H256, + /// The queue of the message. + origin: MessageOriginOf, + /// How much weight was used to process the message. + weight_used: Weight, + /// Whether the message was processed. + /// + /// Note that this does not mean that the underlying `MessageProcessor` was internally + /// successful. It *solely* means that the MQ pallet will treat this as a success + /// condition and discard the message. Any internal error needs to be emitted as events + /// by the `MessageProcessor`. + success: bool, + }, /// Message placed in overweight queue. OverweightEnqueued { + /// The `blake2_256` hash of the message. id: [u8; 32], + /// The queue of the message. origin: MessageOriginOf, + /// The page of the message. page_index: PageIndex, + /// The index of the message within the page. message_index: T::Size, }, /// This page was reaped. - PageReaped { origin: MessageOriginOf, index: PageIndex }, + PageReaped { + /// The queue of the page. + origin: MessageOriginOf, + /// The index of the page. + index: PageIndex, + }, } #[pallet::error] @@ -554,6 +606,8 @@ pub mod pallet { /// /// This can change at any time and may resolve in the future by re-trying. QueuePaused, + /// Another call is in progress and needs to finish before this call can happen. + RecursiveDisallowed, } /// The index of the first and last (non-empty) pages. @@ -868,6 +922,21 @@ impl Pallet { page_index: PageIndex, index: T::Size, weight_limit: Weight, + ) -> Result> { + match with_service_mutex(|| { + Self::do_execute_overweight_inner(origin, page_index, index, weight_limit) + }) { + Err(()) => Err(Error::::RecursiveDisallowed), + Ok(x) => x, + } + } + + /// Same as `do_execute_overweight` but must be called while holding the `service_mutex`. + fn do_execute_overweight_inner( + origin: MessageOriginOf, + page_index: PageIndex, + index: T::Size, + weight_limit: Weight, ) -> Result> { let mut book_state = BookStateFor::::get(&origin); ensure!(!T::QueuePausedQuery::is_paused(&origin), Error::::QueuePaused); @@ -924,6 +993,14 @@ impl Pallet { /// Remove a stale page or one which has no more messages remaining to be processed. fn do_reap_page(origin: &MessageOriginOf, page_index: PageIndex) -> DispatchResult { + match with_service_mutex(|| Self::do_reap_page_inner(origin, page_index)) { + Err(()) => Err(Error::::RecursiveDisallowed.into()), + Ok(x) => x, + } + } + + /// Same as `do_reap_page` but must be called while holding the `service_mutex`. + fn do_reap_page_inner(origin: &MessageOriginOf, page_index: PageIndex) -> DispatchResult { let mut book_state = BookStateFor::::get(origin); // definitely not reapable if the page's index is no less than the `begin`ning of ready // pages. @@ -1112,6 +1189,7 @@ impl Pallet { weight: &mut WeightMeter, overweight_limit: Weight, ) -> ItemExecutionStatus { + use MessageExecutionStatus::*; // This ugly pre-checking is needed for the invariant // "we never bail if a page became complete". if page.is_complete() { @@ -1125,16 +1203,31 @@ impl Pallet { Some(m) => m, None => return ItemExecutionStatus::NoItem, }[..]; + let payload_len = payload.len() as u64; - use MessageExecutionStatus::*; - let is_processed = match Self::process_message_payload( + // Store these for the case that `process_message_payload` is recursive. 
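One consequence of the event changes above for downstream code: the `id` in `Processed` and `ProcessingFailed` is the `blake2_256` hash of the raw message bytes and is now an `H256` rather than a `[u8; 32]`, so expected values need an `.into()` (or `H256::from`). A small illustrative helper, not taken from the diff:

use sp_core::H256;
use sp_io::hashing::blake2_256;

/// Compute the id the pallet will report for `message`.
fn expected_event_id(message: &[u8]) -> H256 {
	// `blake2_256` yields a `[u8; 32]`; `.into()` lifts it into the event's
	// new `H256` type.
	blake2_256(message).into()
}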
+ Pages::::insert(origin, page_index, &*page); + BookStateFor::::insert(origin, &*book_state); + + let res = Self::process_message_payload( origin.clone(), page_index, page.first_index, payload, weight, overweight_limit, - ) { + ); + + // And restore them afterwards to see the changes of a recursive call. + *book_state = BookStateFor::::get(origin); + if let Some(new_page) = Pages::::get(origin, page_index) { + *page = new_page; + } else { + defensive!("page must exist since we just inserted it and recursive calls are not allowed to remove anything"); + return ItemExecutionStatus::NoItem + }; + + let is_processed = match res { InsufficientWeight => return ItemExecutionStatus::Bailed, Unprocessable { permanent: false } => return ItemExecutionStatus::NoProgress, Processed | Unprocessable { permanent: true } => true, @@ -1143,7 +1236,7 @@ impl Pallet { if is_processed { book_state.message_count.saturating_dec(); - book_state.size.saturating_reduce(payload.len() as u64); + book_state.size.saturating_reduce(payload_len as u64); } page.skip_first(is_processed); ItemExecutionStatus::Executed(is_processed) @@ -1168,7 +1261,7 @@ impl Pallet { /// * `remaining_size` > 0 /// * `first` <= `last` /// * Every page can be decoded into peek_* functions - #[cfg(any(test, feature = "try-runtime"))] + #[cfg(any(test, feature = "try-runtime", feature = "std"))] pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { // Checking memory corruption for BookStateFor ensure!( @@ -1181,13 +1274,17 @@ impl Pallet { "Memory Corruption in Pages" ); - // No state to check - if ServiceHead::::get().is_none() { - return Ok(()) + // Basic checks for each book + for book in BookStateFor::::iter_values() { + ensure!(book.end >= book.begin, "Invariant"); + ensure!(book.end < 1 << 30, "Likely overflow or corruption"); + ensure!(book.message_count < 1 << 30, "Likely overflow or corruption"); + ensure!(book.size < 1 << 30, "Likely overflow or corruption"); + ensure!(book.count < 1 << 30, "Likely overflow or corruption"); } //loop around this origin - let starting_origin = ServiceHead::::get().unwrap(); + let Some(starting_origin) = ServiceHead::::get() else { return Ok(()) }; while let Some(head) = Self::bump_service_head(&mut WeightMeter::new()) { ensure!( @@ -1220,7 +1317,7 @@ impl Pallet { for page_index in head_book_state.begin..head_book_state.end { let page = Pages::::get(&head, page_index).unwrap(); let remaining_messages = page.remaining; - let mut counted_remaining_messages = 0; + let mut counted_remaining_messages: u32 = 0; ensure!( remaining_messages > 0.into(), "These must be some messages that have not been processed yet!" 
@@ -1237,7 +1334,7 @@ impl Pallet { } ensure!( - remaining_messages == counted_remaining_messages.into(), + remaining_messages.into() == counted_remaining_messages, "Memory Corruption" ); } @@ -1312,10 +1409,9 @@ impl Pallet { meter: &mut WeightMeter, overweight_limit: Weight, ) -> MessageExecutionStatus { - let hash = sp_io::hashing::blake2_256(message); + let mut id = sp_io::hashing::blake2_256(message); use ProcessMessageError::*; let prev_consumed = meter.consumed(); - let mut id = hash; match T::MessageProcessor::process_message(message, origin.clone(), meter, &mut id) { Err(Overweight(w)) if w.any_gt(overweight_limit) => { @@ -1339,19 +1435,44 @@ impl Pallet { }, Err(error @ BadFormat | error @ Corrupt | error @ Unsupported) => { // Permanent error - drop - Self::deposit_event(Event::::ProcessingFailed { id, origin, error }); + Self::deposit_event(Event::::ProcessingFailed { id: id.into(), origin, error }); MessageExecutionStatus::Unprocessable { permanent: true } }, Ok(success) => { // Success let weight_used = meter.consumed().saturating_sub(prev_consumed); - Self::deposit_event(Event::::Processed { id, origin, weight_used, success }); + Self::deposit_event(Event::::Processed { + id: id.into(), + origin, + weight_used, + success, + }); MessageExecutionStatus::Processed }, } } } +/// Run a closure that errors on re-entrance. Meant to be used by anything that services queues. +pub(crate) fn with_service_mutex R, R>(f: F) -> Result { + // Holds the singelton token instance. + environmental::environmental!(token: Option<()>); + + token::using_once(&mut Some(()), || { + // The first `ok_or` should always be `Ok` since we are inside a `using_once`. + let hold = token::with(|t| t.take()).ok_or(()).defensive()?.ok_or(())?; + + // Put the token back when we're done. + defer! { + token::with(|t| { + *t = Some(hold); + }); + } + + Ok(f()) + }) +} + /// Provides a [`sp_core::Get`] to access the `MEL` of a [`codec::MaxEncodedLen`] type. pub struct MaxEncodedLenOf(sp_std::marker::PhantomData); impl Get for MaxEncodedLenOf { @@ -1407,35 +1528,40 @@ impl ServiceQueues for Pallet { Weight::zero() }); - let mut next = match Self::bump_service_head(&mut weight) { - Some(h) => h, - None => return weight.consumed(), - }; - // The last queue that did not make any progress. - // The loop aborts as soon as it arrives at this queue again without making any progress - // on other queues in between. - let mut last_no_progress = None; - - loop { - let (progressed, n) = Self::service_queue(next.clone(), &mut weight, max_weight); - next = match n { - Some(n) => - if !progressed { - if last_no_progress == Some(n.clone()) { - break - } - if last_no_progress.is_none() { - last_no_progress = Some(next.clone()) - } - n - } else { - last_no_progress = None; - n - }, - None => break, + match with_service_mutex(|| { + let mut next = match Self::bump_service_head(&mut weight) { + Some(h) => h, + None => return weight.consumed(), + }; + // The last queue that did not make any progress. + // The loop aborts as soon as it arrives at this queue again without making any progress + // on other queues in between. 
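For readers unfamiliar with the `environmental` crate (added to this pallet's dependencies earlier in this diff), the guard used here amounts to a context-local token that the outermost call takes and holds for the duration of its closure, so any nested call finds the slot empty and errors. A condensed, self-contained sketch of the same pattern (names are illustrative; the pallet's version also logs defensively):

use sp_core::defer;

/// Runs `f`, or returns `Err(())` if a call is already in progress.
fn with_mutex<F: FnOnce() -> R, R>(f: F) -> Result<R, ()> {
	// Context-local slot holding the single token.
	environmental::environmental!(token: Option<()>);

	token::using_once(&mut Some(()), || {
		// Take the token; a nested call sees `None` here and bails out.
		let hold = token::with(|t| t.take()).ok_or(())?.ok_or(())?;

		// Put the token back once `f` is done, even on early return.
		defer! {
			token::with(|t| {
				*t = Some(hold);
			});
		}

		Ok(f())
	})
}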
+ let mut last_no_progress = None; + + loop { + let (progressed, n) = Self::service_queue(next.clone(), &mut weight, max_weight); + next = match n { + Some(n) => + if !progressed { + if last_no_progress == Some(n.clone()) { + break + } + if last_no_progress.is_none() { + last_no_progress = Some(next.clone()) + } + n + } else { + last_no_progress = None; + n + }, + None => break, + } } + weight.consumed() + }) { + Err(()) => weight.consumed(), + Ok(w) => w, } - weight.consumed() } /// Execute a single overweight message. @@ -1463,6 +1589,7 @@ impl ServiceQueues for Pallet { Error::::QueuePaused => ExecuteOverweightError::QueuePaused, Error::::NoPage | Error::::NoMessage | Error::::Queued => ExecuteOverweightError::NotFound, + Error::::RecursiveDisallowed => ExecuteOverweightError::RecursiveDisallowed, _ => ExecuteOverweightError::Other, }, ) diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index 55a6457435423f541b8f53c4cf003dadfd67bacf..89c6e8625109697556f6a1057f1238d2f1d99957 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -24,7 +24,7 @@ use super::*; use crate as pallet_message_queue; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_core::H256; @@ -43,6 +43,8 @@ frame_support::construct_runtime!( MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, } ); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -106,7 +108,10 @@ impl MockedWeightInfo { impl crate::weights::WeightInfo for MockedWeightInfo { fn reap_page() -> Weight { - WeightForCall::get().get("reap_page").copied().unwrap_or_default() + WeightForCall::get() + .get("reap_page") + .copied() + .unwrap_or(DefaultWeightForCall::get()) } fn execute_overweight_page_updated() -> Weight { WeightForCall::get() @@ -205,6 +210,10 @@ impl ProcessMessage for RecordingMessageProcessor { let required = Weight::from_parts(weight, weight); if meter.try_consume(required).is_ok() { + if let Some(p) = message.strip_prefix(&b"callback="[..]) { + let s = String::from_utf8(p.to_vec()).expect("Need valid UTF8"); + Callback::get()(&origin, s.parse().expect("Expected an u32")); + } let mut m = MessagesProcessed::get(); m.push((message.to_vec(), origin)); MessagesProcessed::set(m); @@ -215,6 +224,10 @@ impl ProcessMessage for RecordingMessageProcessor { } } +parameter_types! { + pub static Callback: Box = Box::new(|_, _| {}); +} + /// Processed a mocked message. Messages that end with `badformat`, `corrupt`, `unsupported` or /// `yield` will fail with an error respectively. 
fn processing_message(msg: &[u8], origin: &MessageOrigin) -> Result<(), ProcessMessageError> { @@ -262,6 +275,10 @@ impl ProcessMessage for CountingMessageProcessor { let required = Weight::from_parts(1, 1); if meter.try_consume(required).is_ok() { + if let Some(p) = message.strip_prefix(&b"callback="[..]) { + let s = String::from_utf8(p.to_vec()).expect("Need valid UTF8"); + Callback::get()(&origin, s.parse().expect("Expected an u32")); + } NumMessagesProcessed::set(NumMessagesProcessed::get() + 1); Ok(true) } else { @@ -370,3 +387,16 @@ pub fn num_overweight_enqueued_events() -> u32 { pub fn fp(pages: u32, count: u64, size: u64) -> QueueFootprint { QueueFootprint { storage: Footprint { count, size }, pages } } + +/// A random seed that can be overwritten with `MQ_SEED`. +pub fn gen_seed() -> u64 { + use rand::Rng; + let seed = if let Ok(seed) = std::env::var("MQ_SEED") { + seed.parse().expect("Need valid u64 as MQ_SEED env variable") + } else { + rand::thread_rng().gen::() + }; + + println!("Using seed: {}", seed); + seed +} diff --git a/substrate/frame/message-queue/src/mock_helpers.rs b/substrate/frame/message-queue/src/mock_helpers.rs index f6109c127be123c427e075d78bf80fbe04baada3..28395e27cdd2aaffcda2650fbc0988f764b03f92 100644 --- a/substrate/frame/message-queue/src/mock_helpers.rs +++ b/substrate/frame/message-queue/src/mock_helpers.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +#![allow(missing_docs)] + //! Std setup helpers for testing and benchmarking. //! //! Cannot be put into mock.rs since benchmarks require no-std and mock.rs is std. @@ -88,10 +90,12 @@ pub fn page(msg: &[u8]) -> PageOf { PageOf::::from_message::(msg.try_into().unwrap()) } +/// Create a book with a single message of one byte. pub fn single_page_book() -> BookStateOf { BookState { begin: 0, end: 1, count: 1, message_count: 1, size: 1, ..Default::default() } } +/// Create an empty book. 
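Two test hooks tie the pieces above together: the thread-local `Callback` is invoked by both mocked processors for messages of the form `callback=<u32>`, and `gen_seed` makes the randomized stress tests reproducible via the `MQ_SEED` environment variable. A sketch of installing such a hook in the mock's context; the boxed fn-pointer type is an assumption inferred from how `Callback::get()(&origin, ..)` is invoked:

use frame_support::parameter_types;

parameter_types! {
	// Thread-local hook; `MessageOrigin` is the mock helper's origin enum.
	pub static Callback: Box<fn(&MessageOrigin, u32)> = Box::new(|_, _| {});
}

fn install_hook() {
	// A test swaps in its own hook before enqueueing `callback=<i>` messages.
	Callback::set(Box::new(|origin, index| {
		println!("callback {} fired for {:?}", index, origin);
	}));
}

// To replay a failing stress-test run, pin the seed via the environment,
// e.g. (command illustrative): MQ_SEED=42 cargo test --release -- --ignored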
pub fn empty_book() -> BookStateOf { BookState { begin: 0, end: 1, count: 1, ..Default::default() } } diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index d94ad581ea0d53a4ee445df25b806dd3ac3417fb..9198e65e2f9c0a5c2ee3bac97a63c1dd844be63f 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -181,7 +181,7 @@ fn service_queues_failing_messages_works() { assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( Event::ProcessingFailed { - id: blake2_256(b"badformat"), + id: blake2_256(b"badformat").into(), origin: MessageOrigin::Here, error: ProcessMessageError::BadFormat, } @@ -190,7 +190,7 @@ fn service_queues_failing_messages_works() { assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( Event::ProcessingFailed { - id: blake2_256(b"corrupt"), + id: blake2_256(b"corrupt").into(), origin: MessageOrigin::Here, error: ProcessMessageError::Corrupt, } @@ -199,7 +199,7 @@ fn service_queues_failing_messages_works() { assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( Event::ProcessingFailed { - id: blake2_256(b"unsupported"), + id: blake2_256(b"unsupported").into(), origin: MessageOrigin::Here, error: ProcessMessageError::Unsupported, } @@ -1264,7 +1264,7 @@ fn permanently_overweight_limit_is_valid_basic() { RuntimeEvent::MessageQueue(Event::Processed { origin: Here, weight_used: 200.into(), - id: blake2_256(m.as_bytes()), + id: blake2_256(m.as_bytes()).into(), success: true, }) ); @@ -1321,7 +1321,7 @@ fn permanently_overweight_limit_is_valid_fuzzy() { RuntimeEvent::MessageQueue(Event::Processed { origin: Here, weight_used: 200.into(), - id: blake2_256(m.as_bytes()), + id: blake2_256(m.as_bytes()).into(), success: true, }) ); @@ -1592,7 +1592,7 @@ fn execute_overweight_respects_suspension() { assert_last_event::( Event::Processed { - id: blake2_256(b"weight=5"), + id: blake2_256(b"weight=5").into(), origin, weight_used: 5.into_weight(), success: true, @@ -1619,7 +1619,7 @@ fn service_queue_suspension_ready_ring_works() { MessageQueue::service_queues(Weight::MAX); assert_last_event::( Event::Processed { - id: blake2_256(b"weight=5"), + id: blake2_256(b"weight=5").into(), origin, weight_used: 5.into_weight(), success: true, @@ -1662,3 +1662,174 @@ fn integrity_test_checks_service_weight() { } }); } + +/// Test for . +#[test] +fn regression_issue_2319() { + build_and_execute::(|| { + Callback::set(Box::new(|_, _| { + MessageQueue::enqueue_message(mock_helpers::msg("anothermessage"), There); + })); + + use MessageOrigin::*; + MessageQueue::enqueue_message(msg("callback=0"), Here); + + // while servicing queue Here, "anothermessage" of origin There is enqueued in + // "firstmessage"'s process_message + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(b"callback=0".to_vec(), Here)]); + + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + // It used to fail here but got fixed. + assert_eq!(MessagesProcessed::take(), vec![(b"anothermessage".to_vec(), There)]); + }); +} + +/// Enqueueing a message from within `service_queues` works. 
+#[test] +fn recursive_enqueue_works() { + build_and_execute::(|| { + Callback::set(Box::new(|o, i| match i { + 0 => { + MessageQueue::enqueue_message(msg(&format!("callback={}", 1)), *o); + }, + 1 => { + for _ in 0..100 { + MessageQueue::enqueue_message(msg(&format!("callback={}", 2)), *o); + } + for i in 0..100 { + MessageQueue::enqueue_message(msg(&format!("callback={}", 3)), i.into()); + } + }, + 2 | 3 => { + MessageQueue::enqueue_message(msg(&format!("callback={}", 4)), *o); + }, + 4 => (), + _ => unreachable!(), + })); + + MessageQueue::enqueue_message(msg("callback=0"), MessageOrigin::Here); + + for _ in 0..402 { + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + } + assert_eq!(MessageQueue::service_queues(Weight::MAX), Weight::zero()); + + assert_eq!(MessagesProcessed::take().len(), 402); + }); +} + +/// Calling `service_queues` from within `service_queues` is forbidden. +#[test] +fn recursive_service_is_forbidden() { + use MessageOrigin::*; + build_and_execute::(|| { + Callback::set(Box::new(|_, _| { + MessageQueue::enqueue_message(msg("m1"), There); + // This call will fail since it is recursive. But it will not mess up the state. + assert_storage_noop!(MessageQueue::service_queues(10.into_weight())); + MessageQueue::enqueue_message(msg("m2"), There); + })); + + for _ in 0..5 { + MessageQueue::enqueue_message(msg("callback=0"), Here); + MessageQueue::service_queues(3.into_weight()); + + // All three messages are correctly processed. + assert_eq!( + MessagesProcessed::take(), + vec![ + (b"callback=0".to_vec(), Here), + (b"m1".to_vec(), There), + (b"m2".to_vec(), There) + ] + ); + } + }); +} + +/// Calling `service_queues` from within `service_queues` is forbidden. +#[test] +fn recursive_overweight_while_service_is_forbidden() { + use MessageOrigin::*; + build_and_execute::(|| { + Callback::set(Box::new(|_, _| { + // Check that the message was permanently overweight. + assert_last_event::( + Event::OverweightEnqueued { + id: blake2_256(b"weight=10"), + origin: There, + message_index: 0, + page_index: 0, + } + .into(), + ); + // This call will fail since it is recursive. But it will not mess up the state. + assert_noop!( + ::execute_overweight( + 10.into_weight(), + (There, 0, 0) + ), + ExecuteOverweightError::RecursiveDisallowed + ); + })); + + MessageQueue::enqueue_message(msg("weight=10"), There); + MessageQueue::enqueue_message(msg("callback=0"), Here); + + // Mark it as permanently overweight. + MessageQueue::service_queues(5.into_weight()); + assert_ok!(::execute_overweight( + 10.into_weight(), + (There, 0, 0) + )); + }); +} + +/// Calling `reap_page` from within `service_queues` is forbidden. +#[test] +fn recursive_reap_page_is_forbidden() { + use MessageOrigin::*; + build_and_execute::(|| { + Callback::set(Box::new(|_, _| { + // This call will fail since it is recursive. But it will not mess up the state. + assert_noop!(MessageQueue::do_reap_page(&Here, 0), Error::::RecursiveDisallowed); + })); + + // Create 10 pages more than the stale limit. + let n = (MaxStale::get() + 10) as usize; + for _ in 0..n { + MessageQueue::enqueue_message(msg("weight=2"), Here); + } + + // Mark all pages as stale since their message is permanently overweight. 
+ MessageQueue::service_queues(1.into_weight()); + assert_ok!(MessageQueue::do_reap_page(&Here, 0)); + + assert_last_event::(Event::PageReaped { origin: Here, index: 0 }.into()); + }); +} + +#[test] +fn with_service_mutex_works() { + let mut called = 0; + with_service_mutex(|| called = 1).unwrap(); + assert_eq!(called, 1); + + // The outer one is fine but the inner one errors. + with_service_mutex(|| with_service_mutex(|| unreachable!())) + .unwrap() + .unwrap_err(); + with_service_mutex(|| with_service_mutex(|| unreachable!()).unwrap_err()).unwrap(); + with_service_mutex(|| { + with_service_mutex(|| unreachable!()).unwrap_err(); + with_service_mutex(|| unreachable!()).unwrap_err(); + called = 2; + }) + .unwrap(); + assert_eq!(called, 2); + + // Still works. + with_service_mutex(|| called = 3).unwrap(); + assert_eq!(called, 3); +} diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 665c606fc374cce9efe2849d58da058b241b4ad5..949003864a2c4ff05712d2f9ec7b3e4a9ca02b7e 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,7 +22,7 @@ frame-support = { default-features = false, path = "../support" } frame-system = { default-features = false, path = "../system" } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive"] } +serde = { version = "1.0.193", default-features = false, features = ["derive"] } sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" } sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" } sp-io = { default-features = false, path = "../../primitives/io" } @@ -28,7 +31,7 @@ sp-runtime = { default-features = false, path = "../../primitives/runtime" } sp-std = { default-features = false, path = "../../primitives/std" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index a2ee608c33cd03916d9993451737e3ef20381c57..40b0f4973a8db275cfc964fe2083e84dd0b76569 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -9,18 +9,21 @@ repository.workspace = true description = "FRAME multi-signature dispatch pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false 
} +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } # third party log = { version = "0.4.17", default-features = false } @@ -29,7 +32,7 @@ log = { version = "0.4.17", default-features = false } pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index a2cb9a4aec9ad822fc63efafad029dc2fbd17b7d..355bb2a5d3e7ffe65f40c54c01457f29929cea0a 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet to convert non-fungible to fungible tokens." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-assets = { path = "../assets", default-features = false} -pallet-nfts = { path = "../nfts", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-assets = { path = "../assets", default-features = false } +pallet-nfts = { path = "../nfts", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -31,7 +34,7 @@ sp-io = { path = "../../primitives/io" } sp-std = { path = "../../primitives/std" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index 987c65a8954f60d12c3669b204a2bdc32ca4227b..855109adcbee08d5e7cfc6897370be85582c73e9 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_nft_fractionalization; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, BoundedVec, PalletId, }; @@ -49,6 +49,8 @@ construct_runtime!( Nfts: pallet_nfts, } ); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git 
a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index 2a3b2921c75f539b768dc38d704bcdea9cd953b2..0d3f542c55266c0bf68333119d533860f840e945 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME NFTs pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,20 +20,20 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = enumflags2 = { version = "0.7.7" } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-keystore = { path = "../../primitives/keystore" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "enumflags2/std", diff --git a/substrate/frame/nfts/runtime-api/Cargo.toml b/substrate/frame/nfts/runtime-api/Cargo.toml index 483c4bd323423e02bf538f670d045e0ed98391d8..8eb6726552bb6c3142a375401dac9473746aa4a9 100644 --- a/substrate/frame/nfts/runtime-api/Cargo.toml +++ b/substrate/frame/nfts/runtime-api/Cargo.toml @@ -9,14 +9,18 @@ repository.workspace = true description = "Runtime API for the FRAME NFTs pallet." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -pallet-nfts = { path = "..", default-features = false} -sp-api = { path = "../../../primitives/api", default-features = false} +pallet-nfts = { path = "..", default-features = false } +sp-api = { path = "../../../primitives/api", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "pallet-nfts/std", "sp-api/std" ] +default = ["std"] +std = ["codec/std", "pallet-nfts/std", "sp-api/std", "sp-std/std"] diff --git a/substrate/frame/nfts/runtime-api/src/lib.rs b/substrate/frame/nfts/runtime-api/src/lib.rs index 77535c64069ccbae8f62c6a556e2de8a71d39968..816088f1b716adb25a83455ee35f0af8e961811f 100644 --- a/substrate/frame/nfts/runtime-api/src/lib.rs +++ b/substrate/frame/nfts/runtime-api/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; -use sp_api::vec::Vec; +use sp_std::vec::Vec; sp_api::decl_runtime_apis! 
{ pub trait NftsApi diff --git a/substrate/frame/nfts/src/features/create_delete_collection.rs b/substrate/frame/nfts/src/features/create_delete_collection.rs index e343ad18e504f68991bbbac2df1ffa077e836222..f03df7fdd4f089509c87374164b5e1dc0d31e093 100644 --- a/substrate/frame/nfts/src/features/create_delete_collection.rs +++ b/substrate/frame/nfts/src/features/create_delete_collection.rs @@ -66,7 +66,13 @@ impl, I: 'static> Pallet { CollectionConfigOf::::insert(&collection, config); CollectionAccount::::insert(&owner, &collection, ()); + Self::deposit_event(event); + + if let Some(max_supply) = config.max_supply { + Self::deposit_event(Event::CollectionMaxSupplySet { collection, max_supply }); + } + Ok(()) } diff --git a/substrate/frame/nfts/src/features/transfer.rs b/substrate/frame/nfts/src/features/transfer.rs index 0471bd67b29164358069d27c2239a2010795b7d0..bba834483e15f64f0c26b3fb7d1ff230ed6db45a 100644 --- a/substrate/frame/nfts/src/features/transfer.rs +++ b/substrate/frame/nfts/src/features/transfer.rs @@ -124,10 +124,10 @@ impl, I: 'static> Pallet { pub(crate) fn do_transfer_ownership( origin: T::AccountId, collection: T::CollectionId, - owner: T::AccountId, + new_owner: T::AccountId, ) -> DispatchResult { // Check if the new owner is acceptable based on the collection's acceptance settings. - let acceptable_collection = OwnershipAcceptance::::get(&owner); + let acceptable_collection = OwnershipAcceptance::::get(&new_owner); ensure!(acceptable_collection.as_ref() == Some(&collection), Error::::Unaccepted); // Try to retrieve and mutate the collection details. @@ -135,27 +135,28 @@ impl, I: 'static> Pallet { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; // Check if the `origin` is the current owner of the collection. ensure!(origin == details.owner, Error::::NoPermission); - if details.owner == owner { + if details.owner == new_owner { return Ok(()) } // Move the deposit to the new owner. T::Currency::repatriate_reserved( &details.owner, - &owner, + &new_owner, details.owner_deposit, Reserved, )?; // Update account ownership information. CollectionAccount::::remove(&details.owner, &collection); - CollectionAccount::::insert(&owner, &collection, ()); + CollectionAccount::::insert(&new_owner, &collection, ()); - details.owner = owner.clone(); - OwnershipAcceptance::::remove(&owner); + details.owner = new_owner.clone(); + OwnershipAcceptance::::remove(&new_owner); + frame_system::Pallet::::dec_consumers(&new_owner); // Emit `OwnerChanged` event. 
- Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); + Self::deposit_event(Event::OwnerChanged { collection, new_owner }); Ok(()) }) } @@ -172,8 +173,8 @@ impl, I: 'static> Pallet { who: T::AccountId, maybe_collection: Option, ) -> DispatchResult { - let old = OwnershipAcceptance::::get(&who); - match (old.is_some(), maybe_collection.is_some()) { + let exists = OwnershipAcceptance::::contains_key(&who); + match (exists, maybe_collection.is_some()) { (false, true) => { frame_system::Pallet::::inc_consumers(&who)?; }, diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index 92b27432ab215ced08a8dd92ddff40f3db38415f..a7d505e2e397dbca5f547ed9ed1a7c3cc4235aa5 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -1153,11 +1153,11 @@ pub mod pallet { pub fn transfer_ownership( origin: OriginFor, collection: T::CollectionId, - owner: AccountIdLookupOf, + new_owner: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; - let owner = T::Lookup::lookup(owner)?; - Self::do_transfer_ownership(origin, collection, owner) + let new_owner = T::Lookup::lookup(new_owner)?; + Self::do_transfer_ownership(origin, collection, new_owner) } /// Change the Issuer, Admin and Freezer of a collection. diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 248522aafffc3d219364f850d4ce4b8b0e6b95b8..4363eccc7ff5a0a5c47c97e8ef3209d86dca7dde 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_nfts; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; @@ -46,6 +46,7 @@ pub type Signature = MultiSignature; pub type AccountPublic = ::Signer; pub type AccountId = ::AccountId; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/nfts/src/tests.rs b/substrate/frame/nfts/src/tests.rs index aeebf51b7c78a05e1303dfed0722bd9095aafe32..9e521537534fab31c4626c21ecf06e3df366c01b 100644 --- a/substrate/frame/nfts/src/tests.rs +++ b/substrate/frame/nfts/src/tests.rs @@ -614,8 +614,13 @@ fn transfer_owner_should_work() { Nfts::transfer_ownership(RuntimeOrigin::signed(account(1)), 0, account(2)), Error::::Unaccepted ); + assert_eq!(System::consumers(&account(2)), 0); + assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(account(2)), Some(0))); + assert_eq!(System::consumers(&account(2)), 1); + assert_ok!(Nfts::transfer_ownership(RuntimeOrigin::signed(account(1)), 0, account(2))); + assert_eq!(System::consumers(&account(2)), 1); // one consumer is added due to deposit repatriation assert_eq!(collections(), vec![(account(2), 0)]); assert_eq!(Balances::total_balance(&account(1)), 98); @@ -2191,6 +2196,10 @@ fn max_supply_should_work() { default_collection_config() )); assert_eq!(CollectionConfigOf::::get(collection_id).unwrap().max_supply, None); + assert!(!events().contains(&Event::::CollectionMaxSupplySet { + collection: collection_id, + max_supply, + })); assert_ok!(Nfts::set_collection_max_supply( RuntimeOrigin::signed(user_id.clone()), @@ -2242,9 +2251,31 @@ fn max_supply_should_work() { None )); assert_noop!( - Nfts::mint(RuntimeOrigin::signed(user_id.clone()), collection_id, 2, user_id, 
None), + Nfts::mint( + RuntimeOrigin::signed(user_id.clone()), + collection_id, + 2, + user_id.clone(), + None + ), Error::::MaxSupplyReached ); + + // validate the event gets emitted when we set the max supply on collection create + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + user_id.clone(), + CollectionConfig { max_supply: Some(max_supply), ..default_collection_config() } + )); + assert_eq!( + CollectionConfigOf::::get(collection_id).unwrap().max_supply, + Some(max_supply) + ); + assert!(events().contains(&Event::::CollectionMaxSupplySet { + collection: collection_id, + max_supply, + })); }); } diff --git a/substrate/frame/nicks/Cargo.toml b/substrate/frame/nicks/Cargo.toml index b8100d07435c8f4b01f1673b181f43a472934d1a..7d43f64cfe23c0ad37b0c75c312b9e2f56bb0c93 100644 --- a/substrate/frame/nicks/Cargo.toml +++ b/substrate/frame/nicks/Cargo.toml @@ -9,24 +9,27 @@ repository.workspace = true description = "FRAME pallet for nick management" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/nicks/src/lib.rs b/substrate/frame/nicks/src/lib.rs index ad30c628adfbd4f20317faec3caec534b932e21d..540777f87cab739d787e4950d03b440cffad138e 100644 --- a/substrate/frame/nicks/src/lib.rs +++ b/substrate/frame/nicks/src/lib.rs @@ -253,7 +253,7 @@ mod tests { use crate as pallet_nicks; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, traits::{ConstU32, ConstU64}, }; use frame_system::EnsureSignedBy; @@ -274,6 +274,7 @@ mod tests { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index 986568ea722a269631687264ea3099e03d67c5d3..f95ebc5864c889a73e82c9f878f33e95b157a4a3 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -9,26 +9,29 @@ repository.workspace = true description = "FRAME pallet for rewarding account freezing." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/nis/src/mock.rs b/substrate/frame/nis/src/mock.rs index 30f7ef95f331b29966beda4224a33e813d9690e5..be6e79ac6f663c89a80cee89556a51093791741c 100644 --- a/substrate/frame/nis/src/mock.rs +++ b/substrate/frame/nis/src/mock.rs @@ -20,7 +20,7 @@ use crate::{self as pallet_nis, Perquintill, WithMaximumOf}; use frame_support::{ - ord_parameter_types, parameter_types, + derive_impl, ord_parameter_types, parameter_types, traits::{ fungible::Inspect, ConstU16, ConstU32, ConstU64, Everything, OnFinalize, OnInitialize, StorageMapShim, @@ -50,6 +50,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index e5a504e2a0ff2643c69acf98e535dec77fe80a07..46fc0b34514e46275a1361b8e0dee8186ec0a2c4 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for node authorization" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,15 +18,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", 
default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/node-authorization/src/mock.rs b/substrate/frame/node-authorization/src/mock.rs index 84e3336b3bd68b2e60ba15e9df06f48b8f299e98..3c99d41b89ef16f19391e4a77b2bdafe3117bfff 100644 --- a/substrate/frame/node-authorization/src/mock.rs +++ b/substrate/frame/node-authorization/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_node_authorization; use frame_support::{ - ord_parameter_types, + derive_impl, ord_parameter_types, traits::{ConstU32, ConstU64}, }; use frame_system::EnsureSignedBy; @@ -43,6 +43,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type DbWeight = (); diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index 3c55822b9a530f5509e9694dfad9570fe19c4c06..00c90b414dece3ae9fbc3b7a9551f4bcbb746aff 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME nomination pools pallet" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,13 +20,13 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # FRAME -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } log = { version = "0.4.0", default-features = false } # Optional: use for testing and/or fuzzing @@ -35,8 +38,8 @@ pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } [features] -default = [ "std" ] -fuzzing = [ "pallet-balances", "sp-tracing" ] +default = ["std"] +fuzzing = ["pallet-balances", "sp-tracing"] std = [ "codec/std", "frame-support/std", diff --git 
a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index e8b18666815e8b3cbd51cbf44eaad67f4c4c752e..8a4ee07dd744947fa28fc3cf664f422fd0ca598e 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME nomination pools pallet benchmarking" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,29 +21,29 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # FRAME -frame-benchmarking = { path = "../../benchmarking", default-features = false} -frame-election-provider-support = { path = "../../election-provider-support", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-bags-list = { path = "../../bags-list", default-features = false} -pallet-staking = { path = "../../staking", default-features = false} -pallet-nomination-pools = { path = "..", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false } +frame-election-provider-support = { path = "../../election-provider-support", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-bags-list = { path = "../../bags-list", default-features = false } +pallet-staking = { path = "../../staking", default-features = false } +pallet-nomination-pools = { path = "..", default-features = false } # Substrate Primitives -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false} -sp-staking = { path = "../../../primitives/staking", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false } +sp-staking = { path = "../../../primitives/staking", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] -pallet-balances = { path = "../../balances", default-features = false} +pallet-balances = { path = "../../balances", default-features = false } pallet-timestamp = { path = "../../timestamp" } pallet-staking-reward-curve = { path = "../../staking/reward-curve" } sp-core = { path = "../../../primitives/core" } sp-io = { path = "../../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index fc86a6f56c0bed09cd86963cc4d26b97219f76e6..48d7dae29ef03f9e6615df19d0092f662ce3cda2 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -35,9 +35,9 @@ use frame_support::{ use frame_system::RawOrigin as RuntimeOrigin; use pallet_nomination_pools::{ BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, - Commission, CommissionChangeRate, 
ConfigOp, GlobalMaxCommission, MaxPoolMembers, - MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, Pallet as Pools, - PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, + Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, + MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, + Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, }; use pallet_staking::MaxNominationsOf; use sp_runtime::{ @@ -706,17 +706,24 @@ frame_benchmarking::benchmarks! { max_increase: Perbill::from_percent(20), min_delay: 0u32.into(), }).unwrap(); + // set a commission claim permission to an account. + Pools::::set_commission_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Some(CommissionClaimPermission::Account(depositor.clone())) + ).unwrap(); }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some((Perbill::from_percent(20), depositor.clone()))) verify { assert_eq!(BondedPools::::get(1).unwrap().commission, Commission { - current: Some((Perbill::from_percent(20), depositor)), + current: Some((Perbill::from_percent(20), depositor.clone())), max: Some(Perbill::from_percent(50)), change_rate: Some(CommissionChangeRate { max_increase: Perbill::from_percent(20), min_delay: 0u32.into() }), throttle_from: Some(1u32.into()), + claim_permission: Some(CommissionClaimPermission::Account(depositor)), }); } @@ -731,6 +738,7 @@ frame_benchmarking::benchmarks! { max: Some(Perbill::from_percent(50)), change_rate: None, throttle_from: Some(0u32.into()), + claim_permission: None, }); } @@ -751,9 +759,25 @@ frame_benchmarking::benchmarks! { min_delay: 1000u32.into(), }), throttle_from: Some(1_u32.into()), + claim_permission: None, }); } + set_commission_claim_permission { + // Create a pool. + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some(CommissionClaimPermission::Account(depositor.clone()))) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: None, + max: None, + change_rate: None, + throttle_from: None, + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + }); + } + set_claim_permission { // Create a pool let min_create_bond = Pools::::depositor_min_bond(); @@ -786,8 +810,13 @@ frame_benchmarking::benchmarks! { CurrencyOf::::set_balance(&reward_account, ed + origin_weight); // member claims a payout to make some commission available. - let _ = Pools::::claim_payout(RuntimeOrigin::Signed(claimer).into()); - + let _ = Pools::::claim_payout(RuntimeOrigin::Signed(claimer.clone()).into()); + // set a commission claim permission to an account.
+ let _ = Pools::::set_commission_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Some(CommissionClaimPermission::Account(claimer)) + ); whitelist_account!(depositor); }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()) verify { diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 9a7f2197a7b234e643c067af9633d2a2b100a584..c58a66f6163aff113ab934c00b495e951052164e 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -17,7 +17,7 @@ use crate::VoterBagsListInstance; use frame_election_provider_support::VoteWeight; -use frame_support::{pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; +use frame_support::{derive_impl, pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; use sp_runtime::{ traits::{Convert, IdentityLookup}, BuildStorage, FixedU128, Perbill, @@ -28,6 +28,7 @@ type Nonce = u32; type BlockNumber = u64; type Balance = u128; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -118,6 +119,7 @@ impl pallet_staking::Config for Runtime { type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; type EventListeners = Pools; diff --git a/substrate/frame/nomination-pools/fuzzer/Cargo.toml b/substrate/frame/nomination-pools/fuzzer/Cargo.toml index b9d0a6197f8de8fbc1699ddadb2f66d5c29e1160..52f49b28457c26c3c247dd4c137565cd9564dcc8 100644 --- a/substrate/frame/nomination-pools/fuzzer/Cargo.toml +++ b/substrate/frame/nomination-pools/fuzzer/Cargo.toml @@ -10,6 +10,9 @@ description = "Fuzzer for fixed point arithmetic primitives." 
documentation = "https://docs.rs/sp-arithmetic-fuzzer" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/frame/nomination-pools/runtime-api/Cargo.toml b/substrate/frame/nomination-pools/runtime-api/Cargo.toml index c3aa8035c95aaa78063480f1158c32ae6efc1bee..12a897cc6b6fa16761c94f5d2a3c3e5b5892d790 100644 --- a/substrate/frame/nomination-pools/runtime-api/Cargo.toml +++ b/substrate/frame/nomination-pools/runtime-api/Cargo.toml @@ -9,15 +9,18 @@ repository.workspace = true description = "Runtime API for nomination-pools FRAME pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} -pallet-nomination-pools = { path = "..", default-features = false} +sp-api = { path = "../../../primitives/api", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +pallet-nomination-pools = { path = "..", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "pallet-nomination-pools/std", "sp-api/std", "sp-std/std" ] +default = ["std"] +std = ["codec/std", "pallet-nomination-pools/std", "sp-api/std", "sp-std/std"] diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 909a930e3821f8611b30ed39415eaab3f4b38636..3a23b894ec8035d019841bd6b74f1f205b4be636 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -460,7 +460,7 @@ pub enum ClaimPermission { PermissionlessCompound, /// Anyone can withdraw rewards on a pool member's behalf. PermissionlessWithdraw, - /// Anyone can withdraw and compound rewards on a member's behalf. + /// Anyone can withdraw and compound rewards on a pool member's behalf. PermissionlessAll, } @@ -676,6 +676,13 @@ pub struct PoolRoles { pub bouncer: Option, } +// A pool's possible commission claiming permissions. +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum CommissionClaimPermission { + Permissionless, + Account(AccountId), +} + /// Pool commission. /// /// The pool `root` can set commission configuration after pool creation. By default, all commission @@ -705,6 +712,9 @@ pub struct Commission { /// The block from where throttling should be checked from. This value will be updated on all /// commission updates and when setting an initial `change_rate`. pub throttle_from: Option>, + // Whether commission can be claimed permissionlessly, or whether an account can claim + // commission. `Root` role can always claim. + pub claim_permission: Option>, } impl Commission { @@ -1078,6 +1088,17 @@ impl BondedPool { self.is_root(who) } + fn can_claim_commission(&self, who: &T::AccountId) -> bool { + if let Some(permission) = self.commission.claim_permission.as_ref() { + match permission { + CommissionClaimPermission::Permissionless => true, + CommissionClaimPermission::Account(account) => account == who || self.is_root(who), + } + } else { + self.is_root(who) + } + } + fn is_destroying(&self) -> bool { matches!(self.state, PoolState::Destroying) } @@ -1572,7 +1593,7 @@ pub mod pallet { use sp_runtime::Perbill; /// The current storage version. 
- const STORAGE_VERSION: StorageVersion = StorageVersion::new(7); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -1626,6 +1647,10 @@ pub mod pallet { #[pallet::constant] type MaxPointsToBalance: Get; + /// The maximum number of simultaneous unbonding chunks that can exist per member. + #[pallet::constant] + type MaxUnbonding: Get; + /// Infallible method for converting `Currency::Balance` to `U256`. type BalanceToU256: Convert, U256>; @@ -1644,9 +1669,6 @@ pub mod pallet { /// The maximum length, in bytes, that a pools metadata maybe. type MaxMetadataLen: Get; - - /// The maximum number of simultaneous unbonding chunks that can exist per member. - type MaxUnbonding: Get; } /// The sum of funds across all pools. @@ -1849,6 +1871,11 @@ pub mod pallet { pool_id: PoolId, change_rate: CommissionChangeRate>, }, + /// Pool commission claim permission has been updated. + PoolCommissionClaimPermissionUpdated { + pool_id: PoolId, + permission: Option>, + }, /// Pool commission has been claimed. PoolCommissionClaimed { pool_id: PoolId, commission: BalanceOf }, /// Topped up deficit in frozen ED of the reward pool. @@ -2741,6 +2768,32 @@ pub mod pallet { let who = ensure_signed(origin)?; Self::do_adjust_pool_deposit(who, pool_id) } + + /// Set or remove a pool's commission claim permission. + /// + /// Determines who can claim the pool's pending commission. Only the `Root` role of the pool + /// is able to configure commission claim permissions. + #[pallet::call_index(22)] + #[pallet::weight(T::WeightInfo::set_commission_claim_permission())] + pub fn set_commission_claim_permission( + origin: OriginFor, + pool_id: PoolId, + permission: Option>, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + ensure!(bonded_pool.can_manage_commission(&who), Error::::DoesNotHavePermission); + + bonded_pool.commission.claim_permission = permission.clone(); + bonded_pool.put(); + + Self::deposit_event(Event::::PoolCommissionClaimPermissionUpdated { + pool_id, + permission, + }); + + Ok(()) + } } #[pallet::hooks] @@ -3105,12 +3158,12 @@ impl Pallet { fn do_claim_commission(who: T::AccountId, pool_id: PoolId) -> DispatchResult { let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; - ensure!(bonded_pool.can_manage_commission(&who), Error::::DoesNotHavePermission); + ensure!(bonded_pool.can_claim_commission(&who), Error::::DoesNotHavePermission); let mut reward_pool = RewardPools::::get(pool_id) .defensive_ok_or::>(DefensiveError::RewardPoolNotFound.into())?; - // IMPORTANT: make sure that any newly pending commission not yet processed is added to + // IMPORTANT: ensure newly pending commission not yet processed is added to // `total_commission_pending`. reward_pool.update_records( pool_id, @@ -3414,7 +3467,13 @@ impl Pallet { /// Check if any pool have an incorrect amount of ED frozen. /// /// This can happen if the ED has changed since the pool was created.
- #[cfg(any(feature = "try-runtime", feature = "runtime-benchmarks", test, debug_assertions))] + #[cfg(any( + feature = "try-runtime", + feature = "runtime-benchmarks", + feature = "fuzzing", + test, + debug_assertions + ))] pub fn check_ed_imbalance() -> Result<(), DispatchError> { let mut failed: u32 = 0; BondedPools::::iter_keys().for_each(|id| { diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index eef2a976f1a2550081fee5caa28ea36a9c52ee68..3adfd926d95cf7e90994597263db2e17d36f3978 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -24,9 +24,18 @@ use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; use sp_runtime::TryRuntimeError; /// Exports for versioned migration `type`s for this pallet. -pub mod versioned_migrations { +pub mod versioned { use super::*; + /// v8: Adds commission claim permissions to `BondedPools`. + pub type V7ToV8 = frame_support::migrations::VersionedMigration< + 7, + 8, + v8::VersionUncheckedMigrateV7ToV8, + crate::pallet::Pallet, + ::DbWeight, + >; + /// Migration V6 to V7 wrapped in a [`frame_support::migrations::VersionedMigration`], ensuring /// the migration is only performed when on-chain version is 6. pub type V6ToV7 = frame_support::migrations::VersionedMigration< @@ -47,6 +56,74 @@ pub mod versioned_migrations { >; } +pub mod v8 { + use super::*; + + #[derive(Decode)] + pub struct OldCommission { + pub current: Option<(Perbill, T::AccountId)>, + pub max: Option, + pub change_rate: Option>>, + pub throttle_from: Option>, + } + + #[derive(Decode)] + pub struct OldBondedPoolInner { + pub commission: OldCommission, + pub member_counter: u32, + pub points: BalanceOf, + pub roles: PoolRoles, + pub state: PoolState, + } + + impl OldBondedPoolInner { + fn migrate_to_v8(self) -> BondedPoolInner { + BondedPoolInner { + commission: Commission { + current: self.commission.current, + max: self.commission.max, + change_rate: self.commission.change_rate, + throttle_from: self.commission.throttle_from, + // `claim_permission` is a new field. + claim_permission: None, + }, + member_counter: self.member_counter, + points: self.points, + roles: self.roles, + state: self.state, + } + } + } + + pub struct VersionUncheckedMigrateV7ToV8(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + let mut translated = 0u64; + BondedPools::::translate::, _>(|_key, old_value| { + translated.saturating_inc(); + Some(old_value.migrate_to_v8()) + }); + T::DbWeight::get().reads_writes(translated, translated + 1) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { + // Check new `claim_permission` field is present. + ensure!( + BondedPools::::iter() + .all(|(_, inner)| inner.commission.claim_permission.is_none()), + "`claim_permission` value has not been set correctly." + ); + Ok(()) + } + } +} + /// This migration accumulates and initializes the [`TotalValueLocked`] for all pools. 
/// /// WARNING: This migration works under the assumption that the [`BondedPools`] cannot be inflated diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index 24bea0b87f22378339cb57c15467d05389ad8bdf..1bd969230da93d4157aec01cf7e46f21183581cd 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -17,7 +17,7 @@ use super::*; use crate::{self as pools}; -use frame_support::{assert_ok, parameter_types, traits::fungible::Mutate, PalletId}; +use frame_support::{assert_ok, derive_impl, parameter_types, traits::fungible::Mutate, PalletId}; use frame_system::RawOrigin; use sp_runtime::{BuildStorage, FixedU128}; use sp_staking::{OnStakingUpdate, Stake}; @@ -209,6 +209,7 @@ impl sp_staking::StakingInterface for StakingMock { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 2749e89ecff33659dc96d2ddae9f3195e557a645..7fe1e704bb13c3bae041e5fa44bd44545e58a204 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -5761,7 +5761,13 @@ mod commission { // Then: assert_eq!( BondedPool::::get(1).unwrap().commission, - Commission { current: None, max: None, change_rate: None, throttle_from: Some(1) } + Commission { + current: None, + max: None, + change_rate: None, + throttle_from: Some(1), + claim_permission: None, + } ); assert_eq!( pool_events_since_last_call(), @@ -5956,6 +5962,7 @@ mod commission { min_delay: 2_u64 }), throttle_from: Some(1_u64), + claim_permission: None, } ); assert_eq!( @@ -6007,6 +6014,7 @@ mod commission { min_delay: 2_u64 }), throttle_from: Some(3_u64), + claim_permission: None, } ); assert_eq!( @@ -6082,7 +6090,8 @@ mod commission { max_increase: Perbill::from_percent(1), min_delay: 2 }), - throttle_from: Some(7) + throttle_from: Some(7), + claim_permission: None, } ); assert_eq!( @@ -6183,6 +6192,7 @@ mod commission { max: Some(Perbill::from_percent(50)), change_rate: None, throttle_from: Some(1), + claim_permission: None, } ); @@ -6409,6 +6419,7 @@ mod commission { min_delay: 10_u64 }), throttle_from: Some(11), + claim_permission: None, } ); @@ -6502,7 +6513,8 @@ mod commission { max_increase: Perbill::from_percent(1), min_delay: 0 }), - throttle_from: Some(1) + throttle_from: Some(1), + claim_permission: None, } ); @@ -6885,6 +6897,13 @@ mod commission { #[test] fn claim_commission_works() { ExtBuilder::default().build_and_execute(|| { + /// Deposit rewards into the pool and claim payout. This will set up pending commission + /// to be tested in various scenarios. + fn deposit_rewards_and_claim_payout(caller: AccountId, points: u128) { + deposit_rewards(points); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(caller))); + } + let pool_id = 1; let _ = Currency::set_balance(&900, 5); @@ -6905,21 +6924,9 @@ mod commission { ] ); - // Pool earns 80 points, payout is triggered. 
- deposit_rewards(80); - assert_eq!( - PoolMembers::::get(10).unwrap(), - PoolMember:: { pool_id, points: 10, ..Default::default() } - ); - - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_eq!( - pool_events_since_last_call(), - vec![Event::PaidOut { member: 10, pool_id, payout: 40 }] - ); - // Given: - assert_eq!(RewardPool::::current_balance(pool_id), 40); + deposit_rewards_and_claim_payout(10, 100); + assert_eq!(RewardPool::::current_balance(pool_id), 50); // Pool does not exist assert_noop!( @@ -6944,6 +6951,176 @@ mod commission { Pools::claim_commission(RuntimeOrigin::signed(900), pool_id,), Error::::NoPendingCommission ); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 } + ] + ); + + // The pool commission's claim_permission field is updated to `Permissionless` by the + // root member, which means anyone can now claim commission for the pool. + + // Given: + // Some random non-pool member to claim commission. + let non_pool_member = 1001; + let _ = Currency::set_balance(&non_pool_member, 5); + + // Set up pending commission. + deposit_rewards_and_claim_payout(10, 100); + assert_ok!(Pools::set_commission_claim_permission( + RuntimeOrigin::signed(900), + pool_id, + Some(CommissionClaimPermission::Permissionless) + )); + + // When: + assert_ok!(Pools::claim_commission(RuntimeOrigin::signed(non_pool_member), pool_id)); + + // Then: + assert_eq!(RewardPool::::current_balance(pool_id), 0); + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimPermissionUpdated { + pool_id: 1, + permission: Some(CommissionClaimPermission::Permissionless) + }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 }, + ] + ); + + // The pool commission's claim_permission is updated to an adhoc account by the root + // member, which means now only that account (in addition to the root role) can claim + // commission for the pool. + + // Given: + // The account designated to claim commission. + let designated_commission_claimer = 2001; + let _ = Currency::set_balance(&designated_commission_claimer, 5); + + // Set up pending commission. + deposit_rewards_and_claim_payout(10, 100); + assert_ok!(Pools::set_commission_claim_permission( + RuntimeOrigin::signed(900), + pool_id, + Some(CommissionClaimPermission::Account(designated_commission_claimer)) + )); + + // When: + // Previous claimer can no longer claim commission. + assert_noop!( + Pools::claim_commission(RuntimeOrigin::signed(1001), pool_id,), + Error::::DoesNotHavePermission + ); + // Designated claimer can claim commission. + assert_ok!(Pools::claim_commission( + RuntimeOrigin::signed(designated_commission_claimer), + pool_id + )); + + // Then: + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimPermissionUpdated { + pool_id: 1, + permission: Some(CommissionClaimPermission::Account(2001)) + }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 }, + ] + ); + + // Even with an Account claim permission set, the `root` role of the pool can still + // claim commission. 
+ + // Given: + deposit_rewards_and_claim_payout(10, 100); + + // When: + assert_ok!(Pools::claim_commission(RuntimeOrigin::signed(900), pool_id)); + + // Then: + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 }, + ] + ); + + // The root role updates commission's claim_permission back to `None`, which results in + // only the root member being able to claim commission for the pool. + + // Given: + deposit_rewards_and_claim_payout(10, 100); + + // When: + assert_ok!(Pools::set_commission_claim_permission( + RuntimeOrigin::signed(900), + pool_id, + None + )); + // Previous claimer can no longer claim commission. + assert_noop!( + Pools::claim_commission( + RuntimeOrigin::signed(designated_commission_claimer), + pool_id, + ), + Error::::DoesNotHavePermission + ); + // Root can claim commission. + assert_ok!(Pools::claim_commission(RuntimeOrigin::signed(900), pool_id)); + + // Then: + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimPermissionUpdated { pool_id: 1, permission: None }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 }, + ] + ); + }) + } + + #[test] + fn set_commission_claim_permission_handles_errors() { + ExtBuilder::default().build_and_execute(|| { + let pool_id = 1; + + let _ = Currency::set_balance(&900, 5); + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id }, + Event::Bonded { member: 10, pool_id, bonded: 10, joined: true }, + ] + ); + + // Cannot operate on a non-existing pool. + assert_noop!( + Pools::set_commission_claim_permission( + RuntimeOrigin::signed(10), + 90, + Some(CommissionClaimPermission::Permissionless) + ), + Error::::PoolNotFound + ); + + // Only the root role can change the commission claim permission. + assert_noop!( + Pools::set_commission_claim_permission( + RuntimeOrigin::signed(10), + pool_id, + Some(CommissionClaimPermission::Permissionless) + ), + Error::::DoesNotHavePermission + ); }) } } diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 2cb414fc2a07f76f97a3c460c3db2a7450d10591..047a17c3f9a278ac9564562edf318f13ecc2f7ca 100644 --- a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_nomination_pools` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-guclnr1q-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -67,6 +67,7 @@ pub trait WeightInfo { fn set_commission() -> Weight; fn set_commission_max() -> Weight; fn set_commission_change_rate() -> Weight; + fn set_commission_claim_permission() -> Weight; fn set_claim_permission() -> Weight; fn claim_commission() -> Weight; fn adjust_pool_deposit() -> Weight; @@ -80,7 +81,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -105,19 +106,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3388` + // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 203_377_000 picoseconds. - Weight::from_parts(206_359_000, 8877) - .saturating_add(T::DbWeight::get().reads(19_u64)) - .saturating_add(T::DbWeight::get().writes(12_u64)) + // Minimum execution time: 184_295_000 picoseconds. 
+ Weight::from_parts(188_860_000, 8877) + .saturating_add(T::DbWeight::get().reads(20_u64)) + .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -136,21 +139,23 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3398` + // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 199_792_000 picoseconds. - Weight::from_parts(206_871_000, 8877) - .saturating_add(T::DbWeight::get().reads(16_u64)) - .saturating_add(T::DbWeight::get().writes(12_u64)) + // Minimum execution time: 188_777_000 picoseconds. + Weight::from_parts(192_646_000, 8877) + .saturating_add(T::DbWeight::get().reads(17_u64)) + .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -169,21 +174,23 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3463` + // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 246_362_000 picoseconds. 
- Weight::from_parts(253_587_000, 8877) - .saturating_add(T::DbWeight::get().reads(17_u64)) - .saturating_add(T::DbWeight::get().writes(13_u64)) + // Minimum execution time: 221_728_000 picoseconds. + Weight::from_parts(227_569_000, 8877) + .saturating_add(T::DbWeight::get().reads(18_u64)) + .saturating_add(T::DbWeight::get().writes(14_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -192,17 +199,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_payout() -> Weight { // Proof Size summary in bytes: - // Measured: `1171` - // Estimated: `3702` - // Minimum execution time: 81_115_000 picoseconds. - Weight::from_parts(83_604_000, 3702) + // Measured: `1172` + // Estimated: `3719` + // Minimum execution time: 75_310_000 picoseconds. + Weight::from_parts(77_709_000, 3719) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -233,15 +240,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3674` + // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 187_210_000 picoseconds. - Weight::from_parts(189_477_000, 27847) + // Minimum execution time: 170_656_000 picoseconds. 
+ Weight::from_parts(174_950_000, 27847) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -252,24 +259,26 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1743` + // Measured: `1817` // Estimated: `4764` - // Minimum execution time: 66_384_000 picoseconds. - Weight::from_parts(69_498_267, 4764) - // Standard Error: 2_566 - .saturating_add(Weight::from_parts(34_528, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Minimum execution time: 68_866_000 picoseconds. + Weight::from_parts(72_312_887, 4764) + // Standard Error: 1_635 + .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -282,6 +291,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ClaimPermissions` 
(r:0 w:1) @@ -289,21 +300,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2171` + // Measured: `2207` // Estimated: `27847` - // Minimum execution time: 137_474_000 picoseconds. - Weight::from_parts(142_341_215, 27847) - // Standard Error: 3_468 - .saturating_add(Weight::from_parts(66_597, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) + // Minimum execution time: 131_383_000 picoseconds. + Weight::from_parts(136_595_971, 27847) + // Standard Error: 2_715 + .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -312,16 +323,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Validators` (r:1 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:1 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) 
/// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) @@ -345,12 +358,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2526` + // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 249_135_000 picoseconds. - Weight::from_parts(263_632_571, 27847) - .saturating_add(T::DbWeight::get().reads(23_u64)) - .saturating_add(T::DbWeight::get().writes(19_u64)) + // Minimum execution time: 233_314_000 picoseconds. + Weight::from_parts(241_694_316, 27847) + .saturating_add(T::DbWeight::get().reads(24_u64)) + .saturating_add(T::DbWeight::get().writes(20_u64)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -376,14 +389,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) @@ -393,20 +404,22 @@ impl WeightInfo for SubstrateWeight { /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1289` + // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 214_207_000 picoseconds. 
- Weight::from_parts(221_588_000, 8538) - .saturating_add(T::DbWeight::get().reads(24_u64)) - .saturating_add(T::DbWeight::get().writes(16_u64)) + // Minimum execution time: 171_465_000 picoseconds. + Weight::from_parts(176_478_000, 8538) + .saturating_add(T::DbWeight::get().reads(23_u64)) + .saturating_add(T::DbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -432,34 +445,34 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1849` + // Measured: `1808` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 70_626_000 picoseconds. - Weight::from_parts(73_830_182, 4556) - // Standard Error: 24_496 - .saturating_add(Weight::from_parts(1_561_416, 0).saturating_mul(n.into())) + // Minimum execution time: 63_588_000 picoseconds. + Weight::from_parts(64_930_584, 4556) + // Standard Error: 9_167 + .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1438` + // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 36_542_000 picoseconds. - Weight::from_parts(37_644_000, 4556) + // Minimum execution time: 32_899_000 picoseconds. + Weight::from_parts(33_955_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::Metadata` (r:1 w:1) /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForMetadata` (r:1 w:1) @@ -467,12 +480,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 256]`. 
fn set_metadata(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `531` + // Measured: `532` // Estimated: `3735` - // Minimum execution time: 15_130_000 picoseconds. - Weight::from_parts(16_319_671, 3735) - // Standard Error: 351 - .saturating_add(Weight::from_parts(2_024, 0).saturating_mul(n.into())) + // Minimum execution time: 13_778_000 picoseconds. + Weight::from_parts(14_770_006, 3735) + // Standard Error: 151 + .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -492,23 +505,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_819_000 picoseconds. - Weight::from_parts(7_253_000, 0) + // Minimum execution time: 4_550_000 picoseconds. + Weight::from_parts(4_935_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn update_roles() -> Weight { // Proof Size summary in bytes: - // Measured: `531` - // Estimated: `3685` - // Minimum execution time: 19_596_000 picoseconds. - Weight::from_parts(20_828_000, 3685) + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_759_000 picoseconds. + Weight::from_parts(17_346_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -527,15 +540,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2012` + // Measured: `1971` // Estimated: `4556` - // Minimum execution time: 68_551_000 picoseconds. - Weight::from_parts(71_768_000, 4556) + // Minimum execution time: 61_970_000 picoseconds. 
+ Weight::from_parts(63_738_000, 4556) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -544,34 +557,45 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn set_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `3685` - // Minimum execution time: 36_128_000 picoseconds. - Weight::from_parts(38_547_000, 3685) + // Measured: `804` + // Estimated: `3719` + // Minimum execution time: 31_950_000 picoseconds. + Weight::from_parts(33_190_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_commission_max() -> Weight { // Proof Size summary in bytes: - // Measured: `571` - // Estimated: `3685` - // Minimum execution time: 20_067_000 picoseconds. - Weight::from_parts(21_044_000, 3685) + // Measured: `572` + // Estimated: `3719` + // Minimum execution time: 16_807_000 picoseconds. + Weight::from_parts(17_733_000, 3719) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn set_commission_change_rate() -> Weight { // Proof Size summary in bytes: - // Measured: `531` - // Estimated: `3685` - // Minimum execution time: 19_186_000 picoseconds. - Weight::from_parts(20_189_000, 3685) + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_710_000 picoseconds. + Weight::from_parts(17_563_000, 3719) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn set_commission_claim_permission() -> Weight { + // Proof Size summary in bytes: + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_493_000 picoseconds. 
+ Weight::from_parts(17_022_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -583,13 +607,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 15_275_000 picoseconds. - Weight::from_parts(15_932_000, 3702) + // Minimum execution time: 14_248_000 picoseconds. + Weight::from_parts(15_095_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -598,15 +622,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `968` - // Estimated: `3685` - // Minimum execution time: 67_931_000 picoseconds. - Weight::from_parts(72_202_000, 3685) + // Measured: `1002` + // Estimated: `3719` + // Minimum execution time: 61_969_000 picoseconds. + Weight::from_parts(63_965_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -615,10 +639,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) fn adjust_pool_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `900` + // Measured: `901` // Estimated: `4764` - // Minimum execution time: 72_783_000 picoseconds. - Weight::from_parts(75_841_000, 4764) + // Minimum execution time: 65_462_000 picoseconds. 
+ Weight::from_parts(67_250_000, 4764) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -631,7 +655,7 @@ impl WeightInfo for () { /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -656,19 +680,21 @@ impl WeightInfo for () { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3388` + // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 203_377_000 picoseconds. - Weight::from_parts(206_359_000, 8877) - .saturating_add(RocksDbWeight::get().reads(19_u64)) - .saturating_add(RocksDbWeight::get().writes(12_u64)) + // Minimum execution time: 184_295_000 picoseconds. + Weight::from_parts(188_860_000, 8877) + .saturating_add(RocksDbWeight::get().reads(20_u64)) + .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -687,21 +713,23 @@ impl WeightInfo for () { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3398` + // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 199_792_000 picoseconds. - Weight::from_parts(206_871_000, 8877) - .saturating_add(RocksDbWeight::get().reads(16_u64)) - .saturating_add(RocksDbWeight::get().writes(12_u64)) + // Minimum execution time: 188_777_000 picoseconds. 
+ Weight::from_parts(192_646_000, 8877) + .saturating_add(RocksDbWeight::get().reads(17_u64)) + .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -720,21 +748,23 @@ impl WeightInfo for () { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3463` + // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 246_362_000 picoseconds. - Weight::from_parts(253_587_000, 8877) - .saturating_add(RocksDbWeight::get().reads(17_u64)) - .saturating_add(RocksDbWeight::get().writes(13_u64)) + // Minimum execution time: 221_728_000 picoseconds. + Weight::from_parts(227_569_000, 8877) + .saturating_add(RocksDbWeight::get().reads(18_u64)) + .saturating_add(RocksDbWeight::get().writes(14_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -743,17 +773,17 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_payout() -> Weight { // Proof Size summary in bytes: - // Measured: `1171` - // Estimated: `3702` - // Minimum execution time: 81_115_000 picoseconds. - Weight::from_parts(83_604_000, 3702) + // Measured: `1172` + // Estimated: `3719` + // Minimum execution time: 75_310_000 picoseconds. 
+ Weight::from_parts(77_709_000, 3719) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -784,15 +814,15 @@ impl WeightInfo for () { /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3674` + // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 187_210_000 picoseconds. - Weight::from_parts(189_477_000, 27847) + // Minimum execution time: 170_656_000 picoseconds. + Weight::from_parts(174_950_000, 27847) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -803,24 +833,26 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1743` + // Measured: `1817` // Estimated: `4764` - // Minimum execution time: 66_384_000 picoseconds. - Weight::from_parts(69_498_267, 4764) - // Standard Error: 2_566 - .saturating_add(Weight::from_parts(34_528, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Minimum execution time: 68_866_000 picoseconds. 
+ Weight::from_parts(72_312_887, 4764) + // Standard Error: 1_635 + .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -833,6 +865,8 @@ impl WeightInfo for () { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) @@ -840,21 +874,21 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2171` + // Measured: `2207` // Estimated: `27847` - // Minimum execution time: 137_474_000 picoseconds. - Weight::from_parts(142_341_215, 27847) - // Standard Error: 3_468 - .saturating_add(Weight::from_parts(66_597, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) + // Minimum execution time: 131_383_000 picoseconds. 
+ Weight::from_parts(136_595_971, 27847) + // Standard Error: 2_715 + .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -863,16 +897,18 @@ impl WeightInfo for () { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Validators` (r:1 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:1 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) @@ -896,12 +932,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2526` + // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 249_135_000 picoseconds. 
- Weight::from_parts(263_632_571, 27847) - .saturating_add(RocksDbWeight::get().reads(23_u64)) - .saturating_add(RocksDbWeight::get().writes(19_u64)) + // Minimum execution time: 233_314_000 picoseconds. + Weight::from_parts(241_694_316, 27847) + .saturating_add(RocksDbWeight::get().reads(24_u64)) + .saturating_add(RocksDbWeight::get().writes(20_u64)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -927,14 +963,12 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) @@ -944,20 +978,22 @@ impl WeightInfo for () { /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1289` + // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 214_207_000 picoseconds. - Weight::from_parts(221_588_000, 8538) - .saturating_add(RocksDbWeight::get().reads(24_u64)) - .saturating_add(RocksDbWeight::get().writes(16_u64)) + // Minimum execution time: 171_465_000 picoseconds. 
+ Weight::from_parts(176_478_000, 8538) + .saturating_add(RocksDbWeight::get().reads(23_u64)) + .saturating_add(RocksDbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -983,34 +1019,34 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1849` + // Measured: `1808` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 70_626_000 picoseconds. - Weight::from_parts(73_830_182, 4556) - // Standard Error: 24_496 - .saturating_add(Weight::from_parts(1_561_416, 0).saturating_mul(n.into())) + // Minimum execution time: 63_588_000 picoseconds. + Weight::from_parts(64_930_584, 4556) + // Standard Error: 9_167 + .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1438` + // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 36_542_000 picoseconds. - Weight::from_parts(37_644_000, 4556) + // Minimum execution time: 32_899_000 picoseconds. + Weight::from_parts(33_955_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::Metadata` (r:1 w:1) /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForMetadata` (r:1 w:1) @@ -1018,12 +1054,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `531` + // Measured: `532` // Estimated: `3735` - // Minimum execution time: 15_130_000 picoseconds. - Weight::from_parts(16_319_671, 3735) - // Standard Error: 351 - .saturating_add(Weight::from_parts(2_024, 0).saturating_mul(n.into())) + // Minimum execution time: 13_778_000 picoseconds. 
+ Weight::from_parts(14_770_006, 3735) + // Standard Error: 151 + .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1043,23 +1079,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_819_000 picoseconds. - Weight::from_parts(7_253_000, 0) + // Minimum execution time: 4_550_000 picoseconds. + Weight::from_parts(4_935_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn update_roles() -> Weight { // Proof Size summary in bytes: - // Measured: `531` - // Estimated: `3685` - // Minimum execution time: 19_596_000 picoseconds. - Weight::from_parts(20_828_000, 3685) + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_759_000 picoseconds. + Weight::from_parts(17_346_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -1078,15 +1114,15 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2012` + // Measured: `1971` // Estimated: `4556` - // Minimum execution time: 68_551_000 picoseconds. - Weight::from_parts(71_768_000, 4556) + // Minimum execution time: 61_970_000 picoseconds. + Weight::from_parts(63_738_000, 4556) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -1095,34 +1131,45 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn set_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `3685` - // Minimum execution time: 36_128_000 picoseconds. - Weight::from_parts(38_547_000, 3685) + // Measured: `804` + // Estimated: `3719` + // Minimum execution time: 31_950_000 picoseconds. 
+ Weight::from_parts(33_190_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_commission_max() -> Weight { // Proof Size summary in bytes: - // Measured: `571` - // Estimated: `3685` - // Minimum execution time: 20_067_000 picoseconds. - Weight::from_parts(21_044_000, 3685) + // Measured: `572` + // Estimated: `3719` + // Minimum execution time: 16_807_000 picoseconds. + Weight::from_parts(17_733_000, 3719) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn set_commission_change_rate() -> Weight { // Proof Size summary in bytes: - // Measured: `531` - // Estimated: `3685` - // Minimum execution time: 19_186_000 picoseconds. - Weight::from_parts(20_189_000, 3685) + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_710_000 picoseconds. + Weight::from_parts(17_563_000, 3719) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn set_commission_claim_permission() -> Weight { + // Proof Size summary in bytes: + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_493_000 picoseconds. + Weight::from_parts(17_022_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1134,13 +1181,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 15_275_000 picoseconds. - Weight::from_parts(15_932_000, 3702) + // Minimum execution time: 14_248_000 picoseconds. 
+ Weight::from_parts(15_095_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -1149,15 +1196,15 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `968` - // Estimated: `3685` - // Minimum execution time: 67_931_000 picoseconds. - Weight::from_parts(72_202_000, 3685) + // Measured: `1002` + // Estimated: `3719` + // Minimum execution time: 61_969_000 picoseconds. + Weight::from_parts(63_965_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -1166,10 +1213,10 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) fn adjust_pool_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `900` + // Measured: `901` // Estimated: `4764` - // Minimum execution time: 72_783_000 picoseconds. - Weight::from_parts(75_841_000, 4764) + // Minimum execution time: 65_462_000 picoseconds. 
+ Weight::from_parts(67_250_000, 4764) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/nomination-pools/test-staking/Cargo.toml b/substrate/frame/nomination-pools/test-staking/Cargo.toml index f0558f8314258990b3501c48f3a9b193e74c56ee..845535ae04f567bbe5fa4c4c7f22854b077176b5 100644 --- a/substrate/frame/nomination-pools/test-staking/Cargo.toml +++ b/substrate/frame/nomination-pools/test-staking/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME nomination pools pallet tests with the staking pallet" publish = false +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-staking/src/mock.rs index 0db24e9c244181abaf7ed7b9d10404c909f20bd0..491cd619161981f6dd8e066c96a7a7141dff834b 100644 --- a/substrate/frame/nomination-pools/test-staking/src/mock.rs +++ b/substrate/frame/nomination-pools/test-staking/src/mock.rs @@ -17,7 +17,7 @@ use frame_election_provider_support::VoteWeight; use frame_support::{ - assert_ok, + assert_ok, derive_impl, pallet_prelude::*, parameter_types, traits::{ConstU64, ConstU8}, @@ -38,6 +38,7 @@ pub(crate) type T = Runtime; pub(crate) const POOL1_BONDED: AccountId = 20318131474730217858575332831085u128; pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -133,6 +134,7 @@ impl pallet_staking::Config for Runtime { type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type HistoryDepth = ConstU32<84>; type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index ac204a7813a50c7acd572d8fd9f664a7d1478c7c..df0fb015e9595025360cd8bcf4e4f090a2aa7c1f 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME offences pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,20 +19,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-balances = { path = "../balances", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +serde = { version = "1.0.193", optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-balances = 
{ path = "../balances", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index acd8447c054a3a052e250cb492dc43e8c86f8a55..cddbd6aa4d5ebebd1b259d2b8b18cc9b70ba1a6d 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -9,26 +9,29 @@ repository.workspace = true description = "FRAME offences pallet benchmarking" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false} -frame-election-provider-support = { path = "../../election-provider-support", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-babe = { path = "../../babe", default-features = false} -pallet-balances = { path = "../../balances", default-features = false} -pallet-grandpa = { path = "../../grandpa", default-features = false} -pallet-im-online = { path = "../../im-online", default-features = false} -pallet-offences = { path = "..", default-features = false} -pallet-session = { path = "../../session", default-features = false} -pallet-staking = { path = "../../staking", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-staking = { path = "../../../primitives/staking", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false } +frame-election-provider-support = { path = "../../election-provider-support", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-babe = { path = "../../babe", default-features = false } +pallet-balances = { path = "../../balances", default-features = false } +pallet-grandpa = { path = "../../grandpa", default-features = false } +pallet-im-online = { path = "../../im-online", default-features = false } +pallet-offences = { path = "..", default-features = false } +pallet-session = { path = "../../session", default-features = false } +pallet-staking = { path = "../../staking", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-staking = { path = "../../../primitives/staking", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -38,7 +41,7 @@ sp-core = { path = "../../../primitives/core" } sp-io = { path = "../../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", 
diff --git a/substrate/frame/offences/benchmarking/src/lib.rs b/substrate/frame/offences/benchmarking/src/lib.rs index c190927b84bf10d6456a66c4a5d6df4a791d0525..563aa4755cec08ece465236208495baf872e325d 100644 --- a/substrate/frame/offences/benchmarking/src/lib.rs +++ b/substrate/frame/offences/benchmarking/src/lib.rs @@ -25,30 +25,25 @@ mod mock; use sp_std::{prelude::*, vec}; use frame_benchmarking::v1::{account, benchmarks}; -use frame_support::traits::{Currency, Get, ValidatorSet, ValidatorSetWithIdentification}; +use frame_support::traits::{Currency, Get}; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; -#[cfg(test)] -use sp_runtime::traits::UniqueSaturatedInto; use sp_runtime::{ traits::{Convert, Saturating, StaticLookup}, Perbill, }; -use sp_staking::offence::{Offence, ReportOffence}; +use sp_staking::offence::ReportOffence; use pallet_babe::EquivocationOffence as BabeEquivocationOffence; use pallet_balances::Config as BalancesConfig; use pallet_grandpa::{ EquivocationOffence as GrandpaEquivocationOffence, TimeSlot as GrandpaTimeSlot, }; -use pallet_im_online::{Config as ImOnlineConfig, Pallet as ImOnline, UnresponsivenessOffence}; use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; use pallet_session::{ historical::{Config as HistoricalConfig, IdentificationTuple}, - Config as SessionConfig, SessionManager, + Config as SessionConfig, Pallet as Session, SessionManager, }; -#[cfg(test)] -use pallet_staking::Event as StakingEvent; use pallet_staking::{ Config as StakingConfig, Exposure, IndividualExposure, MaxNominationsOf, Pallet as Staking, RewardDestination, ValidatorPrefs, @@ -56,8 +51,6 @@ use pallet_staking::{ const SEED: u32 = 0; -const MAX_REPORTERS: u32 = 100; -const MAX_OFFENDERS: u32 = 100; const MAX_NOMINATORS: u32 = 100; pub struct Pallet(Offences); @@ -66,7 +59,6 @@ pub trait Config: SessionConfig + StakingConfig + OffencesConfig - + ImOnlineConfig + HistoricalConfig + BalancesConfig + IdTupleConvert @@ -184,220 +176,7 @@ fn make_offenders( Ok((id_tuples, offenders)) } -fn make_offenders_im_online( - num_offenders: u32, - num_nominators: u32, -) -> Result<(Vec>, Vec>), &'static str> { - Staking::::new_session(0); - - let mut offenders = vec![]; - for i in 0..num_offenders { - let offender = create_offender::(i + 1, num_nominators)?; - offenders.push(offender); - } - - Staking::::start_session(0); - - let id_tuples = offenders - .iter() - .map(|offender| { - < - ::ValidatorSet as ValidatorSet - >::ValidatorIdOf::convert(offender.controller.clone()) - .expect("failed to get validator id from account id") - }) - .map(|validator_id| { - < - ::ValidatorSet as ValidatorSetWithIdentification - >::IdentificationOf::convert(validator_id.clone()) - .map(|full_id| (validator_id, full_id)) - .expect("failed to convert validator id to full identification") - }) - .collect::>>(); - Ok((id_tuples, offenders)) -} - -#[cfg(test)] -fn check_events< - T: Config, - I: Iterator, - Item: sp_std::borrow::Borrow<::RuntimeEvent> + sp_std::fmt::Debug, ->( - expected: I, -) { - let events = System::::events() - .into_iter() - .map(|frame_system::EventRecord { event, .. 
}| event) - .collect::>(); - let expected = expected.collect::>(); - - fn pretty(header: &str, ev: &[D], offset: usize) { - log::info!("{}", header); - for (idx, ev) in ev.iter().enumerate() { - log::info!("\t[{:04}] {:?}", idx + offset, ev); - } - } - fn print_events( - idx: usize, - events: &[D], - expected: &[E], - ) { - let window = 10; - let start = idx.saturating_sub(window / 2); - let end_got = (idx + window / 2).min(events.len()); - pretty("Got(window):", &events[start..end_got], start); - let end_expected = (idx + window / 2).min(expected.len()); - pretty("Expected(window):", &expected[start..end_expected], start); - log::info!("---------------"); - let start_got = events.len().saturating_sub(window); - pretty("Got(end):", &events[start_got..], start_got); - let start_expected = expected.len().saturating_sub(window); - pretty("Expected(end):", &expected[start_expected..], start_expected); - } - - for (idx, (a, b)) in events.iter().zip(expected.iter()).enumerate() { - if a != sp_std::borrow::Borrow::borrow(b) { - print_events(idx, &events, &expected); - log::info!("Mismatch at: {}", idx); - log::info!(" Got: {:?}", b); - log::info!("Expected: {:?}", a); - if events.len() != expected.len() { - log::info!( - "Mismatching lengths. Got: {}, Expected: {}", - events.len(), - expected.len() - ) - } - panic!("Mismatching events."); - } - } - - if events.len() != expected.len() { - print_events(0, &events, &expected); - panic!("Mismatching lengths. Got: {}, Expected: {}", events.len(), expected.len(),) - } -} - benchmarks! { - report_offence_im_online { - let r in 1 .. MAX_REPORTERS; - // we skip 1 offender, because in such case there is no slashing - let o in 2 .. MAX_OFFENDERS; - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); - - // Make r reporters - let mut reporters = vec![]; - for i in 0 .. 
r { - let reporter = account("reporter", i, SEED); - reporters.push(reporter); - } - - // make sure reporters actually get rewarded - Staking::::set_slash_reward_fraction(Perbill::one()); - - let (offenders, raw_offenders) = make_offenders_im_online::(o, n)?; - let keys = ImOnline::::keys(); - let validator_set_count = keys.len() as u32; - let offenders_count = offenders.len() as u32; - let offence = UnresponsivenessOffence { - session_index: 0, - validator_set_count, - offenders, - }; - let slash_fraction = offence.slash_fraction(offenders_count); - assert_eq!(System::::event_count(), 0); - }: { - let _ = ::ReportUnresponsiveness::report_offence( - reporters.clone(), - offence - ); - } - verify { - #[cfg(test)] - { - let bond_amount: u32 = UniqueSaturatedInto::::unique_saturated_into(bond_amount::()); - let slash_amount = slash_fraction * bond_amount; - let reward_amount = slash_amount.saturating_mul(1 + n) / 2; - let reward = reward_amount / r; - let slash_report = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::SlashReported{ validator: id, fraction: slash_fraction, slash_era: 0}) - ); - let slash = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Slashed{ staker: id, amount: BalanceOf::::from(slash_amount) }) - ); - let balance_slash = |id| core::iter::once( - ::RuntimeEvent::from(pallet_balances::Event::::Slashed{ who: id, amount: slash_amount.into() }) - ); - let balance_locked = |id| core::iter::once( - ::RuntimeEvent::from(pallet_balances::Event::::Locked{ who: id, amount: slash_amount.into() }) - ); - let balance_unlocked = |id| core::iter::once( - ::RuntimeEvent::from(pallet_balances::Event::::Unlocked{ who: id, amount: slash_amount.into() }) - ); - let chill = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Chilled{ stash: id }) - ); - let balance_deposit = |id, amount: u32| - ::RuntimeEvent::from(pallet_balances::Event::::Deposit{ who: id, amount: amount.into() }); - let mut first = true; - - // We need to box all events to prevent running into too big allocations in wasm. - // The event in FRAME is represented as an enum and the size of the enum depends on the biggest variant. - // So, instead of requiring `size_of() * expected_events` we only need to - // allocate `size_of>() * expected_events`. 
- let slash_events = raw_offenders.into_iter() - .flat_map(|offender| { - let nom_slashes = offender.nominator_stashes.into_iter().flat_map(|nom| { - balance_slash(nom.clone()).map(Into::into) - .chain(balance_unlocked(nom.clone()).map(Into::into)) - .chain(slash(nom).map(Into::into)).map(Box::new) - }); - - let events = chill(offender.stash.clone()).map(Into::into).map(Box::new) - .chain(slash_report(offender.stash.clone()).map(Into::into).map(Box::new)) - .chain(balance_slash(offender.stash.clone()).map(Into::into).map(Box::new)) - .chain(balance_unlocked(offender.stash.clone()).map(Into::into).map(Box::new)) - .chain(slash(offender.stash).map(Into::into).map(Box::new)) - .chain(nom_slashes) - .collect::>(); - - // the first deposit creates endowed events, see `endowed_reward_events` - if first { - first = false; - let reward_events = reporters.iter() - .flat_map(|reporter| vec![ - Box::new(balance_deposit(reporter.clone(), reward).into()), - Box::new(frame_system::Event::::NewAccount { account: reporter.clone() }.into()), - Box::new(::RuntimeEvent::from( - pallet_balances::Event::::Endowed{ account: reporter.clone(), free_balance: reward.into() } - ).into()), - ]) - .collect::>(); - events.into_iter().chain(reward_events) - } else { - let reward_events = reporters.iter() - .map(|reporter| Box::new(balance_deposit(reporter.clone(), reward).into())) - .collect::>(); - events.into_iter().chain(reward_events) - } - }); - - // In case of error it's useful to see the inputs - log::info!("Inputs: r: {}, o: {}, n: {}", r, o, n); - // make sure that all slashes have been applied - check_events::( - sp_std::iter::empty() - .chain(slash_events) - .chain(sp_std::iter::once(Box::new(::RuntimeEvent::from( - pallet_offences::Event::Offence{ - kind: UnresponsivenessOffence::::ID, - timeslot: 0_u32.to_le_bytes().to_vec(), - } - ).into()))) - ); - } - } - report_offence_grandpa { let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); @@ -409,12 +188,12 @@ benchmarks! { Staking::::set_slash_reward_fraction(Perbill::one()); let (mut offenders, raw_offenders) = make_offenders::(1, n)?; - let keys = ImOnline::::keys(); + let validator_set_count = Session::::validators().len() as u32; let offence = GrandpaEquivocationOffence { time_slot: GrandpaTimeSlot { set_id: 0, round: 0 }, session_index: 0, - validator_set_count: keys.len() as u32, + validator_set_count, offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); @@ -446,12 +225,12 @@ benchmarks! 
{ Staking::::set_slash_reward_fraction(Perbill::one()); let (mut offenders, raw_offenders) = make_offenders::(1, n)?; - let keys = ImOnline::::keys(); + let validator_set_count = Session::::validators().len() as u32; let offence = BabeEquivocationOffence { slot: 0u64.into(), session_index: 0, - validator_set_count: keys.len() as u32, + validator_set_count, offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 1a458ec90d584d477d513199171d7fd983357c56..1d642b9b4982a9b3374018daa46214064178f35d 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -25,7 +25,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use frame_system as system; @@ -40,6 +40,7 @@ type AccountId = u64; type Nonce = u32; type Balance = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -184,6 +185,7 @@ impl pallet_staking::Config for Test { type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type HistoryDepth = ConstU32<84>; type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs index 990ceae5ac01e18e779d6cbb9af9c3249c46cb78..61f680f6db928594b2fef77fd7249f80dca2c201 100644 --- a/substrate/frame/offences/src/mock.rs +++ b/substrate/frame/offences/src/mock.rs @@ -23,7 +23,7 @@ use crate as offences; use crate::Config; use codec::Encode; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, weights::{constants::RocksDbWeight, Weight}, }; @@ -75,6 +75,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index 4bc3dd6a3c7a844ac2afc78dd811c1155a715e0b..2370f84898ba12e905f459d8e5c3a5455b806ef6 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -8,26 +8,29 @@ edition.workspace = true license = "Apache-2.0" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } docify = "0.2.6" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-benchmarking = { path = "../benchmarking", 
default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } -sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false, optional = true} +sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false, optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", @@ -55,4 +58,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -frame-metadata = [ "sp-metadata-ir" ] +frame-metadata = ["sp-metadata-ir"] diff --git a/substrate/frame/paged-list/fuzzer/Cargo.toml b/substrate/frame/paged-list/fuzzer/Cargo.toml index d96c0348cf43c44d9e8617833fff64a806769c0a..5c245cc72c713975459eb025bea3464e3b819bbb 100644 --- a/substrate/frame/paged-list/fuzzer/Cargo.toml +++ b/substrate/frame/paged-list/fuzzer/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Fuzz storage types of pallet-paged-list" publish = false +[lints] +workspace = true + [[bin]] name = "pallet-paged-list-fuzzer" path = "src/paged_list.rs" @@ -17,6 +20,6 @@ path = "src/paged_list.rs" arbitrary = "1.3.0" honggfuzz = "0.5.49" -frame-support = { path = "../../support", default-features = false, features = [ "std" ]} -sp-io = { path = "../../../primitives/io", default-features = false, features = [ "std" ] } -pallet-paged-list = { path = "..", default-features = false, features = [ "std" ] } +frame-support = { path = "../../support", default-features = false, features = ["std"] } +sp-io = { path = "../../../primitives/io", default-features = false, features = ["std"] } +pallet-paged-list = { path = "..", default-features = false, features = ["std"] } diff --git a/substrate/frame/paged-list/src/mock.rs b/substrate/frame/paged-list/src/mock.rs index 390b4a8530dce956ff281c9b7c752606a543d4e2..37bdc4f157cb9861eb0e387f7b0954c4068fa13d 100644 --- a/substrate/frame/paged-list/src/mock.rs +++ b/substrate/frame/paged-list/src/mock.rs @@ -20,7 +20,10 @@ #![cfg(feature = "std")] use crate::{paged_list::StoragePagedListMeta, Config, ListPrefix}; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU16, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -38,6 +41,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/paged-list/src/paged_list.rs b/substrate/frame/paged-list/src/paged_list.rs index beea8ecc64409f9f199a94b1f8c3e0a2782e4a11..75467f3ceeb582e04c8e02eb7629637358a760e1 100644 --- a/substrate/frame/paged-list/src/paged_list.rs +++ b/substrate/frame/paged-list/src/paged_list.rs @@ -407,13 +407,11 @@ where #[allow(dead_code)] pub(crate) mod mock { pub use super::*; - pub use frame_support::{ - parameter_types, - storage::{types::ValueQuery, StorageList as _}, - StorageNoopGuard, - }; - pub use 
sp_io::{hashing::twox_128, TestExternalities}; - pub use sp_metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}; + pub use frame_support::parameter_types; + #[cfg(test)] + pub use frame_support::{storage::StorageList as _, StorageNoopGuard}; + #[cfg(test)] + pub use sp_io::TestExternalities; parameter_types! { pub const ValuesPerNewPage: u32 = 5; diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index a80ccd5a40d29edcff841f1de90b04890e1902e1..2aa21d2a7136ec7e31bbb42fe7fe8e9d7010536f 100644 --- a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -8,24 +8,27 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for storing preimages of hashes" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false, optional = true} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false, optional = true } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking", "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/preimage/src/mock.rs b/substrate/frame/preimage/src/mock.rs index 0f966312d9e73560e02e6876f452f4593b76d56d..357f088f5ba24ab0f65b720de1887d1bd1a17718 100644 --- a/substrate/frame/preimage/src/mock.rs +++ b/substrate/frame/preimage/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_preimage; use frame_support::{ - ord_parameter_types, + derive_impl, ord_parameter_types, traits::{fungible::HoldConsideration, ConstU32, ConstU64, Everything}, weights::constants::RocksDbWeight, }; @@ -43,6 +43,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index 647193fad8af5eec1497ee930406b8d80c5e9838..fd163e71bc1b062848c9c3700c21bb101f208de1 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -9,18 +9,21 @@ repository.workspace = true 
description = "FRAME proxying pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -28,7 +31,7 @@ pallet-utility = { path = "../utility" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/proxy/README.md b/substrate/frame/proxy/README.md index bfe26d9aefbc4329955301f9cbb820d2b74c6ef7..c52a881c5909793d74ed11c8485f6c667445949b 100644 --- a/substrate/frame/proxy/README.md +++ b/substrate/frame/proxy/README.md @@ -2,7 +2,7 @@ A module allowing accounts to give permission to other accounts to dispatch types of calls from their signed origin. -The accounts to which permission is delegated may be requied to announce the action that they +The accounts to which permission is delegated may be required to announce the action that they wish to execute some duration prior to execution happens. In this case, the target account may reject the announcement and in doing so, veto the execution. diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index 236489c54b5bd846194ca84e4db234a7697d0d5d..39075b2abf911cc5e4c5a979d0df03e8a7d7eeef 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Ranked collective system: Members of a set of account IDs can make their collective feelings known through dispatched calls from one of two specialized origins." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,17 +19,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/ranked-collective/src/lib.rs b/substrate/frame/ranked-collective/src/lib.rs index deb1ccf23578b797ad0ce950b3b721609010c2ff..51ee7d7144b14d40e0bb97c1374dcdc8b39c8467 100644 --- a/substrate/frame/ranked-collective/src/lib.rs +++ b/substrate/frame/ranked-collective/src/lib.rs @@ -663,16 +663,21 @@ pub mod pallet { } fn remove_from_rank(who: &T::AccountId, rank: Rank) -> DispatchResult { - let last_index = MemberCount::::get(rank).saturating_sub(1); - let index = IdToIndex::::get(rank, &who).ok_or(Error::::Corruption)?; - if index != last_index { - let last = - IndexToId::::get(rank, last_index).ok_or(Error::::Corruption)?; - IdToIndex::::insert(rank, &last, index); - IndexToId::::insert(rank, index, &last); - } - MemberCount::::mutate(rank, |r| r.saturating_dec()); - Ok(()) + MemberCount::::try_mutate(rank, |last_index| { + last_index.saturating_dec(); + let index = IdToIndex::::get(rank, &who).ok_or(Error::::Corruption)?; + if index != *last_index { + let last = IndexToId::::get(rank, *last_index) + .ok_or(Error::::Corruption)?; + IdToIndex::::insert(rank, &last, index); + IndexToId::::insert(rank, index, &last); + } + + IdToIndex::::remove(rank, who); + IndexToId::::remove(rank, last_index); + + Ok(()) + }) } /// Adds a member into the ranked collective at level 0. 
diff --git a/substrate/frame/ranked-collective/src/tests.rs b/substrate/frame/ranked-collective/src/tests.rs index ba8c5a0f937badf8e51fff9a9fa897176c7e6207..60c0da3d7ac29d08c258a73e280c7cee8a469f03 100644 --- a/substrate/frame/ranked-collective/src/tests.rs +++ b/substrate/frame/ranked-collective/src/tests.rs @@ -20,16 +20,13 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, error::BadOrigin, parameter_types, - traits::{ConstU16, ConstU32, ConstU64, EitherOf, Everything, MapSuccess, Polling}, -}; -use sp_core::{Get, H256}; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup, ReduceBy}, - BuildStorage, + traits::{ConstU16, EitherOf, MapSuccess, Polling}, }; +use sp_core::Get; +use sp_runtime::{traits::ReduceBy, BuildStorage}; use super::*; use crate as pallet_ranked_collective; @@ -45,30 +42,9 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } #[derive(Clone, PartialEq, Eq, Debug)] @@ -441,6 +417,32 @@ fn cleanup_works() { }); } +#[test] +fn remove_member_cleanup_works() { + new_test_ext().execute_with(|| { + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + + assert_eq!(IdToIndex::::get(1, 2), Some(1)); + assert_eq!(IndexToId::::get(1, 1), Some(2)); + + assert_eq!(IdToIndex::::get(1, 3), Some(2)); + assert_eq!(IndexToId::::get(1, 2), Some(3)); + + assert_ok!(Club::remove_member(RuntimeOrigin::root(), 2, 1)); + + assert_eq!(IdToIndex::::get(1, 2), None); + assert_eq!(IndexToId::::get(1, 1), Some(3)); + + assert_eq!(IdToIndex::::get(1, 3), Some(1)); + assert_eq!(IndexToId::::get(1, 2), None); + }); +} + #[test] fn ensure_ranked_works() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 8e240546fddd2f05ea7334ec93827fcdc0240175..6afd494bf7e1cd27d5d3d8f7f8f0960b70092251 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -9,32 +9,35 @@ repository.workspace = true description = "FRAME account recovery pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = 
{ path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ - 'frame-benchmarking', "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + 'frame-benchmarking', ] std = [ "codec/std", diff --git a/substrate/frame/recovery/src/benchmarking.rs b/substrate/frame/recovery/src/benchmarking.rs index 2deb55bb69f24fb1ee3abeca44bbb356d682a7f4..72f77336212dd6b8c7fb64d7b12a32fd0d9dfa44 100644 --- a/substrate/frame/recovery/src/benchmarking.rs +++ b/substrate/frame/recovery/src/benchmarking.rs @@ -190,7 +190,7 @@ benchmarks! { let recovery_config = RecoveryConfig { delay_period: DEFAULT_DELAY.into(), - deposit: total_deposit.clone(), + deposit: total_deposit, friends: bounded_friends.clone(), threshold: n as u16, }; @@ -243,7 +243,7 @@ benchmarks! { let recovery_config = RecoveryConfig { delay_period: 0u32.into(), - deposit: total_deposit.clone(), + deposit: total_deposit, friends: bounded_friends.clone(), threshold: n as u16, }; @@ -294,7 +294,7 @@ benchmarks! { let recovery_config = RecoveryConfig { delay_period: DEFAULT_DELAY.into(), - deposit: total_deposit.clone(), + deposit: total_deposit, friends: bounded_friends.clone(), threshold: n as u16, }; @@ -342,7 +342,7 @@ benchmarks! 
{ let recovery_config = RecoveryConfig { delay_period: DEFAULT_DELAY.into(), - deposit: total_deposit.clone(), + deposit: total_deposit, friends: bounded_friends.clone(), threshold: n as u16, }; diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs index bc81d07bec236ad03d4cf8f22c28cff4102db956..44cbeec09862db0b83dd6817b9a5c28ace950dae 100644 --- a/substrate/frame/recovery/src/mock.rs +++ b/substrate/frame/recovery/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as recovery; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, }; use sp_core::H256; @@ -41,6 +41,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index 4f53e2bc002a7db2e461a44a6e91c3c22ef18bb5..f76dbece303f62c7d911f11953a28ec0f5ef432c 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet for inclusive on-chain decisions" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,14 +21,14 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +serde = { version = "1.0.193", features = ["derive"], optional = true } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -36,7 +39,7 @@ pallet-scheduler = { path = "../scheduler" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index 345accbe268f7014a05375cc9fbf0e2a6be03a89..b75558723e9bd36a366c980565faa35ef62ca67e 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_referenda; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - assert_ok, ord_parameter_types, 
parameter_types, + assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling, SortedMembers, @@ -59,6 +59,7 @@ impl Contains for BaseFilter { parameter_types! { pub MaxWeight: Weight = Weight::from_parts(2_000_000_000_000, u64::MAX); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = (); diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index ad04140ae9f148ad1160a7eeed130812767603b4..646563bdb0883bfe6a7d175fc31bbe16e45cc1de 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -9,26 +9,29 @@ repository.workspace = true description = "Remark storage pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +serde = { version = "1.0.193", optional = true } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/substrate/frame/remark/src/mock.rs b/substrate/frame/remark/src/mock.rs index e597a1ca4dfe80d09ee7b853a9cc22cf882d4531..0a385c30eac30d0287f323948fb1aa139458cc48 100644 --- a/substrate/frame/remark/src/mock.rs +++ b/substrate/frame/remark/src/mock.rs @@ -18,7 +18,10 @@ //! Test environment for remarks pallet. 
use crate as pallet_remark; -use frame_support::traits::{ConstU16, ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU16, ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -36,6 +39,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index 8e6fddb43352d581679b6c33a190f91ff988f21f..0f3d3a2883d5a5d92686e0f7fda014d146e4a420 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME root offences pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -pallet-session = { path = "../session", default-features = false , features = [ "historical" ]} -pallet-staking = { path = "../staking", default-features = false} +pallet-session = { path = "../session", default-features = false, features = ["historical"] } +pallet-staking = { path = "../staking", default-features = false } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime" } -sp-staking = { path = "../../primitives/staking", default-features = false} +sp-staking = { path = "../../primitives/staking", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -30,8 +33,8 @@ pallet-timestamp = { path = "../timestamp" } pallet-staking-reward-curve = { path = "../staking/reward-curve" } sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } frame-election-provider-support = { path = "../election-provider-support" } @@ -56,7 +59,7 @@ try-runtime = [ "pallet-timestamp/try-runtime", "sp-runtime/try-runtime", ] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-election-provider-support/std", diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 82da429e00a58ab317670fb4015cdc809de38d66..c0c83dd08d243777ab6353aade7a8c8859da45af 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -23,7 +23,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, Hooks, OneSessionHandler}, }; use pallet_staking::StakerStatus; @@ -84,6 +84,7 @@ impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { type Public = UintAuthorityId; } 
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -187,6 +188,7 @@ impl pallet_staking::Config for Test { type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index 7837289cec591ca8907d3da1cd228329c1e93c63..78aed99a56d712f65d458bd3956070b96c8e6f08 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -9,18 +9,21 @@ repository.workspace = true description = "FRAME root testing pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] try-runtime = [ @@ -28,7 +31,7 @@ try-runtime = [ "frame-system/try-runtime", "sp-runtime/try-runtime", ] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index ac469bb385c93cb045a69eea1ae253f4142eb43e..f86332483c4a740265a7f660b739700090fa567b 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -8,21 +8,25 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME safe-mode pallet" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +docify = "0.2.6" +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-arithmetic = { path = 
"../../primitives/arithmetic", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-utility = { path = "../utility", default-features = false, optional = true } -pallet-proxy = { path = "../proxy", default-features = false, optional = true } +pallet-utility = { path = "../utility", default-features = false, optional = true } +pallet-proxy = { path = "../proxy", default-features = false, optional = true } [dev-dependencies] sp-core = { path = "../../primitives/core" } @@ -33,7 +37,7 @@ pallet-proxy = { path = "../proxy" } frame-support = { path = "../support", features = ["experimental"] } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/safe-mode/src/lib.rs b/substrate/frame/safe-mode/src/lib.rs index b8e8378fa9e7cbd23c6c2f5f8d195c1648652273..554f509db63ea9c9bdfb6a9328dd3eaaecaa617a 100644 --- a/substrate/frame/safe-mode/src/lib.rs +++ b/substrate/frame/safe-mode/src/lib.rs @@ -15,6 +15,62 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! # Safe Mode +//! +//! Trigger for stopping all extrinsics outside of a specific whitelist. +//! +//! ## WARNING +//! +//! NOT YET AUDITED. DO NOT USE IN PRODUCTION. +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events, and errors. +//! +//! ## Overview +//! +//! Safe mode is entered via two paths (deposit or forced) until a set block number. +//! The mode is exited when the block number is reached or a call to one of the exit extrinsics is +//! made. A `WhitelistedCalls` configuration item contains all calls that can be executed while in +//! safe mode. +//! +//! ### Primary Features +//! +//! - Entering safe mode can be via privileged origin or anyone who places a deposit. +//! - Origin configuration items are separated for privileged entering and exiting safe mode. +//! - A configurable duration sets the number of blocks after which the system will exit safe mode. +//! - Safe mode may be extended beyond the configured exit by additional calls. +//! +//! ### Example +//! +//! Configuration of call filters: +//! +//! ```ignore +//! impl frame_system::Config for Runtime { +//! // … +//! type BaseCallFilter = InsideBoth; +//! // … +//! } +//! ``` +//! +//! Entering safe mode with deposit: +#![doc = docify::embed!("src/tests.rs", can_activate)] +//! +//! Entering safe mode via privileged origin: +#![doc = docify::embed!("src/tests.rs", can_force_activate_with_config_origin)] +//! +//! Exiting safe mode via privileged origin: +#![doc = docify::embed!("src/tests.rs", can_force_deactivate_with_config_origin)] +//! +//! ## Low Level / Implementation Details +//! +//! ### Use Cost +//! +//! A storage value (`EnteredUntil`) is used to store the block safe mode will be exited on. +//! Using the call filter will require a db read of that storage on the first extrinsic. +//! 
The storage will be added to the overlay and incur low cost for all additional calls. + #![cfg_attr(not(feature = "std"), no_std)] #![deny(rustdoc::broken_intra_doc_links)] diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index 10afe5bd4b5ec4457635ec7a7ac29f6d27f7ba73..7574d64d59ddba40cee17993ad9edb00f66d08d0 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -23,7 +23,7 @@ use super::*; use crate as pallet_safe_mode; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU64, Everything, InsideBoth, InstanceFilter, IsInVec, SafeModeNotify}, }; use frame_system::EnsureSignedBy; @@ -33,6 +33,7 @@ use sp_runtime::{ BuildStorage, }; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = InsideBoth; type BlockWeights = (); diff --git a/substrate/frame/safe-mode/src/tests.rs b/substrate/frame/safe-mode/src/tests.rs index b92c5b87a53081e9d51f916ebd78b1f6d1a4ace7..c0a2f45a3e7a602892f84f80681ca2a6e88ff82c 100644 --- a/substrate/frame/safe-mode/src/tests.rs +++ b/substrate/frame/safe-mode/src/tests.rs @@ -189,6 +189,7 @@ fn can_filter_balance_in_proxy_when_activated() { }); } +#[docify::export] #[test] fn can_activate() { new_test_ext().execute_with(|| { @@ -271,6 +272,7 @@ fn fails_force_deactivate_if_not_activated() { }); } +#[docify::export] #[test] fn can_force_activate_with_config_origin() { new_test_ext().execute_with(|| { @@ -288,6 +290,7 @@ fn can_force_activate_with_config_origin() { }); } +#[docify::export] #[test] fn can_force_deactivate_with_config_origin() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index 6c66f01082d268263b39950a2ebc6dc9dc7b9ebb..929151a9c2082c60419d55c47c892b15e26acf15 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Paymaster" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,17 +19,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { 
path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/salary/src/tests.rs b/substrate/frame/salary/src/tests.rs index 1136ea746f605cde30a4a09c651e90e8b4acae9c..fbca1be11883f5a99d336ef0e52f1ce3da10549e 100644 --- a/substrate/frame/salary/src/tests.rs +++ b/substrate/frame/salary/src/tests.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, pallet_prelude::Weight, parameter_types, traits::{tokens::ConvertRank, ConstU32, ConstU64, Everything}, @@ -49,6 +49,8 @@ parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1_000_000, 0)); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ad4c0ba12f0b10f60f18c853975637355b7172c6 --- /dev/null +++ b/substrate/frame/sassafras/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "pallet-sassafras" +version = "0.3.5-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Consensus extension module for Sassafras consensus." +readme = "README.md" +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +log = { version = "0.4.17", default-features = false } +sp-consensus-sassafras = { path = "../../primitives/consensus/sassafras", default-features = false, features = ["serde"] } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } + +[dev-dependencies] +array-bytes = "6.1" +sp-core = { path = "../../primitives/core" } + +[features] +default = ["std"] +std = [ + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-codec/std", + "scale-info/std", + "sp-consensus-sassafras/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] +# Construct dummy ring context on genesis. +# Mostly used for testing and development. 
+construct-dummy-ring-context = [] diff --git a/substrate/frame/sassafras/README.md b/substrate/frame/sassafras/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f0e24a0535578d133d5bbd347ae15ba29379e560 --- /dev/null +++ b/substrate/frame/sassafras/README.md @@ -0,0 +1,8 @@ +Runtime module for SASSAFRAS consensus. + +- Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41 +- Protocol RFC proposal: https://github.com/polkadot-fellows/RFCs/pull/26 + +# ⚠️ WARNING ⚠️ + +The crate interfaces and structures are experimental and may be subject to changes. diff --git a/substrate/frame/sassafras/src/benchmarking.rs b/substrate/frame/sassafras/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..921f2f0793d3ce9072aa762deeb97ad7b00799d5 --- /dev/null +++ b/substrate/frame/sassafras/src/benchmarking.rs @@ -0,0 +1,272 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the Sassafras pallet. + +use crate::*; +use sp_consensus_sassafras::{vrf::VrfSignature, EphemeralPublic, EpochConfiguration}; + +use frame_benchmarking::v2::*; +use frame_support::traits::Hooks; +use frame_system::RawOrigin; + +const LOG_TARGET: &str = "sassafras::benchmark"; + +const TICKETS_DATA: &[u8] = include_bytes!("data/25_tickets_100_auths.bin"); + +fn make_dummy_vrf_signature() -> VrfSignature { + // This leverages our knowledge about serialized vrf signature structure. + // Mostly to avoid to import all the bandersnatch primitive just for this test. + let buf = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xb5, 0x5f, 0x8e, 0xc7, 0x68, 0xf5, 0x05, 0x3f, 0xa9, + 0x18, 0xca, 0x07, 0x13, 0xc7, 0x4b, 0xa3, 0x9a, 0x97, 0xd3, 0x76, 0x8f, 0x0c, 0xbf, 0x2e, + 0xd4, 0xf9, 0x3a, 0xae, 0xc1, 0x96, 0x2a, 0x64, 0x80, + ]; + VrfSignature::decode(&mut &buf[..]).unwrap() +} + +#[benchmarks] +mod benchmarks { + use super::*; + + // For first block (#1) we do some extra operation. + // But is a one shot operation, so we don't account for it here. + // We use 0, as it will be the path used by all the blocks with n != 1 + #[benchmark] + fn on_initialize() { + let block_num = BlockNumberFor::::from(0u32); + + let slot_claim = SlotClaim { + authority_idx: 0, + slot: Default::default(), + vrf_signature: make_dummy_vrf_signature(), + ticket_claim: None, + }; + frame_system::Pallet::::deposit_log((&slot_claim).into()); + + // We currently don't account for the potential weight added by the `on_finalize` + // incremental sorting of the tickets. 
+ + #[block] + { + // According to `Hooks` trait docs, `on_finalize` `Weight` should be bundled + // together with `on_initialize` `Weight`. + Pallet::::on_initialize(block_num); + Pallet::::on_finalize(block_num) + } + } + + // Weight for the default internal epoch change trigger. + // + // Parameters: + // - `x`: number of authorities (1:100). + // - `y`: epoch length in slots (1000:5000) + // + // This accounts for the worst case which includes: + // - load the full ring context. + // - recompute the ring verifier. + // - sorting the epoch tickets in one shot + // (here we account for the very unluky scenario where we haven't done any sort work yet) + // - pending epoch change config. + // + // For this bench we assume a redundancy factor of 2 (suggested value to be used in prod). + #[benchmark] + fn enact_epoch_change(x: Linear<1, 100>, y: Linear<1000, 5000>) { + let authorities_count = x as usize; + let epoch_length = y as u32; + let redundancy_factor = 2; + + let unsorted_tickets_count = epoch_length * redundancy_factor; + + let mut meta = TicketsMetadata { unsorted_tickets_count, tickets_count: [0, 0] }; + let config = EpochConfiguration { redundancy_factor, attempts_number: 32 }; + + // Triggers ring verifier computation for `x` authorities + let mut raw_data = TICKETS_DATA; + let (authorities, _): (Vec, Vec) = + Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); + let next_authorities: Vec<_> = authorities[..authorities_count].to_vec(); + let next_authorities = WeakBoundedVec::force_from(next_authorities, None); + NextAuthorities::::set(next_authorities); + + // Triggers JIT sorting tickets + (0..meta.unsorted_tickets_count) + .collect::>() + .chunks(SEGMENT_MAX_SIZE as usize) + .enumerate() + .for_each(|(segment_id, chunk)| { + let segment = chunk + .iter() + .map(|i| { + let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); + TicketId::from_le_bytes(id_bytes) + }) + .collect::>(); + UnsortedSegments::::insert( + segment_id as u32, + BoundedVec::truncate_from(segment), + ); + }); + + // Triggers some code related to config change (dummy values) + NextEpochConfig::::set(Some(config)); + PendingEpochConfigChange::::set(Some(config)); + + // Triggers the cleanup of the "just elapsed" epoch tickets (i.e. the current one) + let epoch_tag = EpochIndex::::get() & 1; + meta.tickets_count[epoch_tag as usize] = epoch_length; + (0..epoch_length).for_each(|i| { + let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); + let id = TicketId::from_le_bytes(id_bytes); + TicketsIds::::insert((epoch_tag as u8, i), id); + let body = TicketBody { + attempt_idx: i, + erased_public: EphemeralPublic([i as u8; 32]), + revealed_public: EphemeralPublic([i as u8; 32]), + }; + TicketsData::::set(id, Some(body)); + }); + + TicketsMeta::::set(meta); + + #[block] + { + Pallet::::should_end_epoch(BlockNumberFor::::from(3u32)); + let next_authorities = Pallet::::next_authorities(); + // Using a different set of authorities triggers the recomputation of ring verifier. 
+ Pallet::::enact_epoch_change(Default::default(), next_authorities); + } + } + + #[benchmark] + fn submit_tickets(x: Linear<1, 25>) { + let tickets_count = x as usize; + + let mut raw_data = TICKETS_DATA; + let (authorities, tickets): (Vec, Vec) = + Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); + + log::debug!(target: LOG_TARGET, "PreBuiltTickets: {} tickets, {} authorities", tickets.len(), authorities.len()); + + // Set `NextRandomness` to the same value used for pre-built tickets + // (see `make_tickets_data` test). + NextRandomness::::set([0; 32]); + + Pallet::::update_ring_verifier(&authorities); + + // Set next epoch config to accept all the tickets + let next_config = EpochConfiguration { attempts_number: 1, redundancy_factor: u32::MAX }; + NextEpochConfig::::set(Some(next_config)); + + // Use the authorities in the pre-build tickets + let authorities = WeakBoundedVec::force_from(authorities, None); + NextAuthorities::::set(authorities); + + let tickets = tickets[..tickets_count].to_vec(); + let tickets = BoundedVec::truncate_from(tickets); + + log::debug!(target: LOG_TARGET, "Submitting {} tickets", tickets_count); + + #[extrinsic_call] + submit_tickets(RawOrigin::None, tickets); + } + + #[benchmark] + fn plan_config_change() { + let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 10 }; + + #[extrinsic_call] + plan_config_change(RawOrigin::Root, config); + } + + // Construction of ring verifier + #[benchmark] + fn update_ring_verifier(x: Linear<1, 100>) { + let authorities_count = x as usize; + + let mut raw_data = TICKETS_DATA; + let (authorities, _): (Vec, Vec) = + Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); + let authorities: Vec<_> = authorities[..authorities_count].to_vec(); + + #[block] + { + Pallet::::update_ring_verifier(&authorities); + } + } + + // Bare loading of ring context. + // + // It is interesting to see how this compares to 'update_ring_verifier', which + // also recomputes and stores the new verifier. + #[benchmark] + fn load_ring_context() { + #[block] + { + let _ring_ctx = RingContext::::get().unwrap(); + } + } + + // Tickets segments sorting function benchmark. 
+ #[benchmark] + fn sort_segments(x: Linear<1, 100>) { + let segments_count = x as u32; + let tickets_count = segments_count * SEGMENT_MAX_SIZE; + + // Construct a bunch of dummy tickets + let tickets: Vec<_> = (0..tickets_count) + .map(|i| { + let body = TicketBody { + attempt_idx: i, + erased_public: EphemeralPublic([i as u8; 32]), + revealed_public: EphemeralPublic([i as u8; 32]), + }; + let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); + let id = TicketId::from_le_bytes(id_bytes); + (id, body) + }) + .collect(); + + for (chunk_id, chunk) in tickets.chunks(SEGMENT_MAX_SIZE as usize).enumerate() { + let segment: Vec = chunk + .iter() + .map(|(id, body)| { + TicketsData::::set(id, Some(body.clone())); + *id + }) + .collect(); + let segment = BoundedVec::truncate_from(segment); + UnsortedSegments::::insert(chunk_id as u32, segment); + } + + // Update metadata + let mut meta = TicketsMeta::::get(); + meta.unsorted_tickets_count = tickets_count; + TicketsMeta::::set(meta); + + log::debug!(target: LOG_TARGET, "Before sort: {:?}", meta); + #[block] + { + Pallet::::sort_segments(u32::MAX, 0, &mut meta); + } + log::debug!(target: LOG_TARGET, "After sort: {:?}", meta); + } +} diff --git a/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin b/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin new file mode 100644 index 0000000000000000000000000000000000000000..6e81f216455ae9dc61be31a9edef583a652721a8 Binary files /dev/null and b/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin differ diff --git a/substrate/frame/sassafras/src/data/benchmark-results.md b/substrate/frame/sassafras/src/data/benchmark-results.md new file mode 100644 index 0000000000000000000000000000000000000000..8682f96cbe5a67328b6d494005cf03fff2030178 --- /dev/null +++ b/substrate/frame/sassafras/src/data/benchmark-results.md @@ -0,0 +1,99 @@ +# Benchmarks High Level Results + +- **Ring size**: the actual number of validators for an epoch +- **Domain size**: a value which bounds the max size of the ring (max_ring_size = domain_size - 256) + +## Verify Submitted Tickets (extrinsic) + +`x` = Number of tickets + +### Domain=1024, Uncompressed (~ 13 ms + 11·x ms) + + Time ~= 13400 + + x 11390 + µs + +### Domain=1024, Compressed (~ 13 ms + 11·x ms) + + Time ~= 13120 + + x 11370 + µs + +### Domain=2048, Uncompressed (~ 26 ms + 11·x ms) + + Time ~= 26210 + + x 11440 + µs + +### Domain=2048, Compressed (~ 26 ms + 11·x ms) + + Time ~= 26250 + + x 11460 + µs + +### Conclusions + +- Verification doesn't depend on ring size as verification key is already constructed. +- The call is fast as far as the max number of tickets which can be submitted in one shot + is appropriately bounded. +- Currently, the bound is set equal epoch length, which iirc for Polkadot is 3600. + In this case if all the tickets are submitted in one shot timing is expected to be + ~39 seconds, which is not acceptable. TODO: find a sensible bound + +--- + +## Recompute Ring Verifier Key (on epoch change) + +`x` = Ring size + +### Domain=1024, Uncompressed (~ 50 ms) + + Time ~= 54070 + + x 98.53 + µs + +### Domain=1024, Compressed (~ 700 ms) + + Time ~= 733700 + + x 90.49 + µs + +### Domain=2048, Uncompressed (~ 100 ms) + + Time ~= 107700 + + x 108.5 + µs + +### Domain=2048, Compressed (~ 1.5 s) + + Time ~= 1462400 + + x 65.14 + µs + +### Conclusions + +- Here we load the full ring context data to recompute verification key for the epoch +- Ring size influence is marginal (e.g. 
for 1500 validators → ~98 ms to be added to the base time) +- This step is performed at most once per epoch (if validator set changes). +- Domain size for ring context influence the PoV size (see next paragraph) +- Decompression heavily influence timings (1.5sec vs 100ms for same domain size) + +--- + +## Ring Context Data Size + +### Domain=1024, Uncompressed + + 295412 bytes = ~ 300 KiB + +### Domain=1024, Compressed + + 147716 bytes = ~ 150 KiB + +### Domain=2048, Uncompressed + + 590324 bytes = ~ 590 KiB + +### Domain=2048, Compressed + + 295172 bytes = ~ 300 KiB diff --git a/substrate/frame/sassafras/src/data/tickets-sort.md b/substrate/frame/sassafras/src/data/tickets-sort.md new file mode 100644 index 0000000000000000000000000000000000000000..4d96a6825c889b152bbf0471c006e0d85dbed635 --- /dev/null +++ b/substrate/frame/sassafras/src/data/tickets-sort.md @@ -0,0 +1,274 @@ +# Segments Incremental Sorting Strategy Empirical Results + +Parameters: +- 128 segments +- segment max length 128 +- 32767 random tickets ids +- epoch length 3600 (== max tickets to keep) + +The table shows the comparison between the segments left in the unsorted segments buffer +and the number of new tickets which are added from the last segment to the sorted tickets +buffer (i.e. how many tickets we retain from the last processed segment) + +| Segments Left | Tickets Pushed | +|-----|-----| +| 255 | 128 | +| 254 | 128 | +| 253 | 128 | +| 252 | 128 | +| 251 | 128 | +| 250 | 128 | +| 249 | 128 | +| 248 | 128 | +| 247 | 128 | +| 246 | 128 | +| 245 | 128 | +| 244 | 128 | +| 243 | 128 | +| 242 | 128 | +| 241 | 128 | +| 240 | 128 | +| 239 | 128 | +| 238 | 128 | +| 237 | 128 | +| 236 | 128 | +| 235 | 128 | +| 234 | 128 | +| 233 | 128 | +| 232 | 128 | +| 231 | 128 | +| 230 | 128 | +| 229 | 128 | +| 228 | 128 | +| 227 | 128 | +| 226 | 126 | +| 225 | 117 | +| 224 | 120 | +| 223 | 110 | +| 222 | 110 | +| 221 | 102 | +| 220 | 107 | +| 219 | 96 | +| 218 | 105 | +| 217 | 92 | +| 216 | 91 | +| 215 | 85 | +| 214 | 84 | +| 213 | 88 | +| 212 | 77 | +| 211 | 86 | +| 210 | 73 | +| 209 | 73 | +| 208 | 81 | +| 207 | 83 | +| 206 | 70 | +| 205 | 84 | +| 204 | 71 | +| 203 | 63 | +| 202 | 60 | +| 201 | 53 | +| 200 | 73 | +| 199 | 55 | +| 198 | 65 | +| 197 | 62 | +| 196 | 55 | +| 195 | 63 | +| 194 | 61 | +| 193 | 48 | +| 192 | 67 | +| 191 | 61 | +| 190 | 55 | +| 189 | 49 | +| 188 | 60 | +| 187 | 49 | +| 186 | 51 | +| 185 | 53 | +| 184 | 47 | +| 183 | 51 | +| 182 | 51 | +| 181 | 53 | +| 180 | 42 | +| 179 | 43 | +| 178 | 48 | +| 177 | 46 | +| 176 | 39 | +| 175 | 54 | +| 174 | 39 | +| 173 | 44 | +| 172 | 51 | +| 171 | 49 | +| 170 | 48 | +| 169 | 48 | +| 168 | 41 | +| 167 | 39 | +| 166 | 41 | +| 165 | 40 | +| 164 | 43 | +| 163 | 53 | +| 162 | 51 | +| 161 | 36 | +| 160 | 45 | +| 159 | 40 | +| 158 | 29 | +| 157 | 37 | +| 156 | 31 | +| 155 | 38 | +| 154 | 31 | +| 153 | 38 | +| 152 | 39 | +| 151 | 30 | +| 150 | 37 | +| 149 | 42 | +| 148 | 35 | +| 147 | 33 | +| 146 | 35 | +| 145 | 37 | +| 144 | 38 | +| 143 | 31 | +| 142 | 38 | +| 141 | 38 | +| 140 | 27 | +| 139 | 31 | +| 138 | 25 | +| 137 | 31 | +| 136 | 26 | +| 135 | 30 | +| 134 | 31 | +| 133 | 37 | +| 132 | 29 | +| 131 | 24 | +| 130 | 31 | +| 129 | 34 | +| 128 | 31 | +| 127 | 28 | +| 126 | 28 | +| 125 | 19 | +| 124 | 27 | +| 123 | 29 | +| 122 | 36 | +| 121 | 32 | +| 120 | 29 | +| 119 | 28 | +| 118 | 33 | +| 117 | 18 | +| 116 | 28 | +| 115 | 27 | +| 114 | 28 | +| 113 | 21 | +| 112 | 23 | +| 111 | 19 | +| 110 | 21 | +| 109 | 20 | +| 108 | 26 | +| 107 | 23 | +| 106 | 30 | +| 105 | 31 | +| 104 | 19 | 
+| 103 | 25 | +| 102 | 23 | +| 101 | 29 | +| 100 | 18 | +| 99 | 19 | +| 98 | 20 | +| 97 | 21 | +| 96 | 23 | +| 95 | 20 | +| 94 | 27 | +| 93 | 20 | +| 92 | 22 | +| 91 | 23 | +| 90 | 23 | +| 89 | 20 | +| 88 | 15 | +| 87 | 17 | +| 86 | 28 | +| 85 | 25 | +| 84 | 10 | +| 83 | 20 | +| 82 | 23 | +| 81 | 28 | +| 80 | 17 | +| 79 | 23 | +| 78 | 24 | +| 77 | 22 | +| 76 | 18 | +| 75 | 25 | +| 74 | 31 | +| 73 | 27 | +| 72 | 19 | +| 71 | 13 | +| 70 | 17 | +| 69 | 24 | +| 68 | 20 | +| 67 | 12 | +| 66 | 17 | +| 65 | 16 | +| 64 | 26 | +| 63 | 24 | +| 62 | 12 | +| 61 | 19 | +| 60 | 18 | +| 59 | 20 | +| 58 | 18 | +| 57 | 12 | +| 56 | 15 | +| 55 | 17 | +| 54 | 14 | +| 53 | 25 | +| 52 | 22 | +| 51 | 15 | +| 50 | 17 | +| 49 | 15 | +| 48 | 17 | +| 47 | 18 | +| 46 | 17 | +| 45 | 23 | +| 44 | 17 | +| 43 | 13 | +| 42 | 15 | +| 41 | 18 | +| 40 | 11 | +| 39 | 19 | +| 38 | 18 | +| 37 | 12 | +| 36 | 19 | +| 35 | 18 | +| 34 | 15 | +| 33 | 12 | +| 32 | 25 | +| 31 | 20 | +| 30 | 24 | +| 29 | 20 | +| 28 | 10 | +| 27 | 15 | +| 26 | 16 | +| 25 | 15 | +| 24 | 15 | +| 23 | 13 | +| 22 | 12 | +| 21 | 14 | +| 20 | 19 | +| 19 | 17 | +| 18 | 17 | +| 17 | 18 | +| 16 | 15 | +| 15 | 13 | +| 14 | 11 | +| 13 | 16 | +| 12 | 13 | +| 11 | 18 | +| 10 | 19 | +| 9 | 10 | +| 8 | 7 | +| 7 | 15 | +| 6 | 12 | +| 5 | 12 | +| 4 | 17 | +| 3 | 14 | +| 2 | 17 | +| 1 | 9 | +| 0 | 13 + +# Graph of the same data + +![graph](tickets-sort.png) diff --git a/substrate/frame/sassafras/src/data/tickets-sort.png b/substrate/frame/sassafras/src/data/tickets-sort.png new file mode 100644 index 0000000000000000000000000000000000000000..b34ce3f37ba9d39aa649cc6d5a216373048c0064 Binary files /dev/null and b/substrate/frame/sassafras/src/data/tickets-sort.png differ diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..0ee8657489b7f7321d24b209dfddcd086cf1cf37 --- /dev/null +++ b/substrate/frame/sassafras/src/lib.rs @@ -0,0 +1,1081 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Extension module for Sassafras consensus. +//! +//! [Sassafras](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS) +//! is a constant-time block production protocol that aims to ensure that there is +//! exactly one block produced with constant time intervals rather than multiple or none. +//! +//! We run a lottery to distribute block production slots in an epoch and to fix the +//! order validators produce blocks in, by the beginning of an epoch. +//! +//! Each validator signs the same VRF input and publishes the output on-chain. This +//! value is their lottery ticket that can be validated against their public key. +//! +//! We want to keep lottery winners secret, i.e. do not publish their public keys. +//! At the beginning of the epoch all the validators tickets are published but not +//! their public keys. +//! +//! 
A valid tickets is validated when an honest validator reclaims it on block +//! production. +//! +//! To prevent submission of fake tickets, resulting in empty slots, the validator +//! when submitting the ticket accompanies it with a SNARK of the statement: "Here's +//! my VRF output that has been generated using the given VRF input and my secret +//! key. I'm not telling you my keys, but my public key is among those of the +//! nominated validators", that is validated before the lottery. +//! +//! To anonymously publish the ticket to the chain a validator sends their tickets +//! to a random validator who later puts it on-chain as a transaction. + +#![deny(warnings)] +#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +use log::{debug, error, trace, warn}; +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +use frame_support::{ + dispatch::{DispatchResultWithPostInfo, Pays}, + traits::{Defensive, Get}, + weights::Weight, + BoundedVec, WeakBoundedVec, +}; +use frame_system::{ + offchain::{SendTransactionTypes, SubmitTransaction}, + pallet_prelude::BlockNumberFor, +}; +use sp_consensus_sassafras::{ + digests::{ConsensusLog, NextEpochDescriptor, SlotClaim}, + vrf, AuthorityId, Epoch, EpochConfiguration, Randomness, Slot, TicketBody, TicketEnvelope, + TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, +}; +use sp_io::hashing; +use sp_runtime::{ + generic::DigestItem, + traits::{One, Zero}, + BoundToRuntimeAppPublic, +}; +use sp_std::prelude::Vec; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(all(feature = "std", test))] +mod mock; +#[cfg(all(feature = "std", test))] +mod tests; + +pub mod weights; +pub use weights::WeightInfo; + +pub use pallet::*; + +const LOG_TARGET: &str = "sassafras::runtime"; + +// Contextual string used by the VRF to generate per-block randomness. +const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasOnChainRandomness"; + +// Max length for segments holding unsorted tickets. +const SEGMENT_MAX_SIZE: u32 = 128; + +/// Authorities bounded vector convenience type. +pub type AuthoritiesVec = WeakBoundedVec::MaxAuthorities>; + +/// Epoch length defined by the configuration. +pub type EpochLengthFor = ::EpochLength; + +/// Tickets metadata. +#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] +pub struct TicketsMetadata { + /// Number of outstanding next epoch tickets requiring to be sorted. + /// + /// These tickets are held by the [`UnsortedSegments`] storage map in segments + /// containing at most `SEGMENT_MAX_SIZE` items. + pub unsorted_tickets_count: u32, + + /// Number of tickets available for current and next epoch. + /// + /// These tickets are held by the [`TicketsIds`] storage map. + /// + /// The array entry to be used for the current epoch is computed as epoch index modulo 2. + pub tickets_count: [u32; 2], +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The Sassafras pallet. + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration parameters. + #[pallet::config] + pub trait Config: frame_system::Config + SendTransactionTypes> { + /// Amount of slots that each epoch should last. + #[pallet::constant] + type EpochLength: Get; + + /// Max number of authorities allowed. + #[pallet::constant] + type MaxAuthorities: Get; + + /// Epoch change trigger. 
+ /// + /// Logic to be triggered on every block to query for whether an epoch has ended + /// and to perform the transition to the next epoch. + type EpochChangeTrigger: EpochChangeTrigger; + + /// Weight information for all calls of this pallet. + type WeightInfo: WeightInfo; + } + + /// Sassafras runtime errors. + #[pallet::error] + pub enum Error { + /// Submitted configuration is invalid. + InvalidConfiguration, + } + + /// Current epoch index. + #[pallet::storage] + #[pallet::getter(fn epoch_index)] + pub type EpochIndex = StorageValue<_, u64, ValueQuery>; + + /// Current epoch authorities. + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub type Authorities = StorageValue<_, AuthoritiesVec, ValueQuery>; + + /// Next epoch authorities. + #[pallet::storage] + #[pallet::getter(fn next_authorities)] + pub type NextAuthorities = StorageValue<_, AuthoritiesVec, ValueQuery>; + + /// First block slot number. + /// + /// As the slots may not be zero-based, we record the slot value for the fist block. + /// This allows to always compute relative indices for epochs and slots. + #[pallet::storage] + #[pallet::getter(fn genesis_slot)] + pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; + + /// Current block slot number. + #[pallet::storage] + #[pallet::getter(fn current_slot)] + pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; + + /// Current epoch randomness. + #[pallet::storage] + #[pallet::getter(fn randomness)] + pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; + + /// Next epoch randomness. + #[pallet::storage] + #[pallet::getter(fn next_randomness)] + pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; + + /// Randomness accumulator. + /// + /// Excluded the first imported block, its value is updated on block finalization. + #[pallet::storage] + #[pallet::getter(fn randomness_accumulator)] + pub(crate) type RandomnessAccumulator = StorageValue<_, Randomness, ValueQuery>; + + /// The configuration for the current epoch. + #[pallet::storage] + #[pallet::getter(fn config)] + pub type EpochConfig = StorageValue<_, EpochConfiguration, ValueQuery>; + + /// The configuration for the next epoch. + #[pallet::storage] + #[pallet::getter(fn next_config)] + pub type NextEpochConfig = StorageValue<_, EpochConfiguration>; + + /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next + /// epoch is enacted. + /// + /// In other words, a configuration change submitted during epoch N will be enacted on epoch + /// N+2. This is to maintain coherence for already submitted tickets for epoch N+1 that where + /// computed using configuration parameters stored for epoch N+1. + #[pallet::storage] + pub type PendingEpochConfigChange = StorageValue<_, EpochConfiguration>; + + /// Stored tickets metadata. + #[pallet::storage] + pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; + + /// Tickets identifiers map. + /// + /// The map holds tickets ids for the current and next epoch. + /// + /// The key is a tuple composed by: + /// - `u8` equal to epoch's index modulo 2; + /// - `u32` equal to the ticket's index in a sorted list of epoch's tickets. + /// + /// Epoch X first N-th ticket has key (X mod 2, N) + /// + /// Note that the ticket's index doesn't directly correspond to the slot index within the epoch. + /// The assigment is computed dynamically using an *outside-in* strategy. + /// + /// Be aware that entries within this map are never removed, only overwritten. 
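+	/// For example (illustrative): during epoch 42 the strongest tickets are stored under
+	/// keys `(0, 0)`, `(0, 1)`, `(0, 2)`, ... (42 % 2 == 0), while the candidates for
+	/// epoch 43 live under `(1, 0)`, `(1, 1)`, ... The two halves of the key space are
+	/// reused alternately by successive epochs.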
+ /// Last element index should be fetched from the [`TicketsMeta`] value. + #[pallet::storage] + pub type TicketsIds = StorageMap<_, Identity, (u8, u32), TicketId>; + + /// Tickets to be used for current and next epoch. + #[pallet::storage] + pub type TicketsData = StorageMap<_, Identity, TicketId, TicketBody>; + + /// Next epoch tickets unsorted segments. + /// + /// Contains lists of tickets where each list represents a batch of tickets + /// received via the `submit_tickets` extrinsic. + /// + /// Each segment has max length [`SEGMENT_MAX_SIZE`]. + #[pallet::storage] + pub type UnsortedSegments = + StorageMap<_, Identity, u32, BoundedVec>, ValueQuery>; + + /// The most recently set of tickets which are candidates to become the next + /// epoch tickets. + #[pallet::storage] + pub type SortedCandidates = + StorageValue<_, BoundedVec>, ValueQuery>; + + /// Parameters used to construct the epoch's ring verifier. + /// + /// In practice: Updatable Universal Reference String and the seed. + #[pallet::storage] + #[pallet::getter(fn ring_context)] + pub type RingContext = StorageValue<_, vrf::RingContext>; + + /// Ring verifier data for the current epoch. + #[pallet::storage] + pub type RingVerifierData = StorageValue<_, vrf::RingVerifierData>; + + /// Slot claim VRF pre-output used to generate per-slot randomness. + /// + /// The value is ephemeral and is cleared on block finalization. + #[pallet::storage] + pub(crate) type ClaimTemporaryData = StorageValue<_, vrf::VrfPreOutput>; + + /// Genesis configuration for Sassafras protocol. + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + /// Genesis authorities. + pub authorities: Vec, + /// Genesis epoch configuration. + pub epoch_config: EpochConfiguration, + /// Phantom config + #[serde(skip)] + pub _phantom: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + EpochConfig::::put(self.epoch_config); + Pallet::::genesis_authorities_initialize(&self.authorities); + + #[cfg(feature = "construct-dummy-ring-context")] + { + debug!(target: LOG_TARGET, "Constructing dummy ring context"); + let ring_ctx = vrf::RingContext::new_testing(); + RingContext::::put(ring_ctx); + Pallet::::update_ring_verifier(&self.authorities); + } + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(block_num: BlockNumberFor) -> Weight { + debug_assert_eq!(block_num, frame_system::Pallet::::block_number()); + + let claim = >::digest() + .logs + .iter() + .find_map(|item| item.pre_runtime_try_to::(&SASSAFRAS_ENGINE_ID)) + .expect("Valid block must have a slot claim. qed"); + + CurrentSlot::::put(claim.slot); + + if block_num == One::one() { + Self::post_genesis_initialize(claim.slot); + } + + let randomness_pre_output = claim + .vrf_signature + .pre_outputs + .get(0) + .expect("Valid claim must have VRF signature; qed"); + ClaimTemporaryData::::put(randomness_pre_output); + + let trigger_weight = T::EpochChangeTrigger::trigger::(block_num); + + T::WeightInfo::on_initialize() + trigger_weight + } + + fn on_finalize(_: BlockNumberFor) { + // At the end of the block, we can safely include the current slot randomness + // to the accumulator. If we've determined that this block was the first in + // a new epoch, the changeover logic has already occurred at this point + // (i.e. `enact_epoch_change` has already been called). 
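+			// In short, the two hashing steps implemented by `deposit_slot_randomness` and
+			// `update_epoch_randomness` below are:
+			//
+			//   accumulator'    = blake2_256(accumulator || slot_randomness)
+			//   next_randomness = blake2_256(accumulator || next_epoch_index.to_le_bytes())
+			//
+			// where the second step runs only on epoch enactment (and once right after genesis).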
+ let randomness_input = vrf::slot_claim_input( + &Self::randomness(), + CurrentSlot::::get(), + EpochIndex::::get(), + ); + let randomness_pre_output = ClaimTemporaryData::::take() + .expect("Unconditionally populated in `on_initialize`; `on_finalize` is always called after; qed"); + let randomness = randomness_pre_output + .make_bytes::(RANDOMNESS_VRF_CONTEXT, &randomness_input); + Self::deposit_slot_randomness(&randomness); + + // Check if we are in the epoch's second half. + // If so, start sorting the next epoch tickets. + let epoch_length = T::EpochLength::get(); + let current_slot_idx = Self::current_slot_index(); + if current_slot_idx >= epoch_length / 2 { + let mut metadata = TicketsMeta::::get(); + if metadata.unsorted_tickets_count != 0 { + let next_epoch_idx = EpochIndex::::get() + 1; + let next_epoch_tag = (next_epoch_idx & 1) as u8; + let slots_left = epoch_length.checked_sub(current_slot_idx).unwrap_or(1); + Self::sort_segments( + metadata + .unsorted_tickets_count + .div_ceil(SEGMENT_MAX_SIZE * slots_left as u32), + next_epoch_tag, + &mut metadata, + ); + TicketsMeta::::set(metadata); + } + } + } + } + + #[pallet::call] + impl Pallet { + /// Submit next epoch tickets candidates. + /// + /// The number of tickets allowed to be submitted in one call is equal to the epoch length. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::submit_tickets(tickets.len() as u32))] + pub fn submit_tickets( + origin: OriginFor, + tickets: BoundedVec>, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); + + let epoch_length = T::EpochLength::get(); + let current_slot_idx = Self::current_slot_index(); + if current_slot_idx > epoch_length / 2 { + warn!(target: LOG_TARGET, "Tickets shall be submitted in the first epoch half",); + return Err("Tickets shall be submitted in the first epoch half".into()) + } + + let Some(verifier) = RingVerifierData::::get().map(|v| v.into()) else { + warn!(target: LOG_TARGET, "Ring verifier key not initialized"); + return Err("Ring verifier key not initialized".into()) + }; + + let next_authorities = Self::next_authorities(); + + // Compute tickets threshold + let next_config = Self::next_config().unwrap_or_else(|| Self::config()); + let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold( + next_config.redundancy_factor, + epoch_length as u32, + next_config.attempts_number, + next_authorities.len() as u32, + ); + + // Get next epoch params + let randomness = NextRandomness::::get(); + let epoch_idx = EpochIndex::::get() + 1; + + let mut valid_tickets = BoundedVec::with_bounded_capacity(tickets.len()); + + for ticket in tickets { + debug!(target: LOG_TARGET, "Checking ring proof"); + + let Some(ticket_id_pre_output) = ticket.signature.pre_outputs.get(0) else { + debug!(target: LOG_TARGET, "Missing ticket VRF pre-output from ring signature"); + continue + }; + let ticket_id_input = + vrf::ticket_id_input(&randomness, ticket.body.attempt_idx, epoch_idx); + + // Check threshold constraint + let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_pre_output); + if ticket_id >= ticket_threshold { + debug!(target: LOG_TARGET, "Ignoring ticket over threshold ({:032x} >= {:032x})", ticket_id, ticket_threshold); + continue + } + + // Check for duplicates + if TicketsData::::contains_key(ticket_id) { + debug!(target: LOG_TARGET, "Ignoring duplicate ticket ({:032x})", ticket_id); + continue + } + + // Check ring signature + let sign_data = 
vrf::ticket_body_sign_data(&ticket.body, ticket_id_input); + if !ticket.signature.ring_vrf_verify(&sign_data, &verifier) { + debug!(target: LOG_TARGET, "Proof verification failure for ticket ({:032x})", ticket_id); + continue + } + + if let Ok(_) = valid_tickets.try_push(ticket_id).defensive_proof( + "Input segment has same length as bounded destination vector; qed", + ) { + TicketsData::::set(ticket_id, Some(ticket.body)); + } + } + + if !valid_tickets.is_empty() { + Self::append_tickets(valid_tickets); + } + + Ok(Pays::No.into()) + } + + /// Plan an epoch configuration change. + /// + /// The epoch configuration change is recorded and will be announced at the begining + /// of the next epoch together with next epoch authorities information. + /// In other words, the configuration will be enacted one epoch later. + /// + /// Multiple calls to this method will replace any existing planned config change + /// that has not been enacted yet. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::plan_config_change())] + pub fn plan_config_change( + origin: OriginFor, + config: EpochConfiguration, + ) -> DispatchResult { + ensure_root(origin)?; + + ensure!( + config.redundancy_factor != 0 && config.attempts_number != 0, + Error::::InvalidConfiguration + ); + PendingEpochConfigChange::::put(config); + Ok(()) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + let Call::submit_tickets { tickets } = call else { + return InvalidTransaction::Call.into() + }; + + // Discard tickets not coming from the local node or that are not included in a block + if source == TransactionSource::External { + warn!( + target: LOG_TARGET, + "Rejecting unsigned `submit_tickets` transaction from external source", + ); + return InvalidTransaction::BadSigner.into() + } + + // Current slot should be less than half of epoch length. + let epoch_length = T::EpochLength::get(); + let current_slot_idx = Self::current_slot_index(); + if current_slot_idx > epoch_length / 2 { + warn!(target: LOG_TARGET, "Tickets shall be proposed in the first epoch half",); + return InvalidTransaction::Stale.into() + } + + // This should be set such that it is discarded after the first epoch half + let tickets_longevity = epoch_length / 2 - current_slot_idx; + let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes)); + + ValidTransaction::with_tag_prefix("Sassafras") + .priority(TransactionPriority::max_value()) + .longevity(tickets_longevity as u64) + .and_provides(tickets_tag) + .propagate(true) + .build() + } + } +} + +// Inherent methods +impl Pallet { + /// Determine whether an epoch change should take place at this block. + /// + /// Assumes that initialization has already taken place. + pub(crate) fn should_end_epoch(block_num: BlockNumberFor) -> bool { + // The epoch has technically ended during the passage of time between this block and the + // last, but we have to "end" the epoch now, since there is no earlier possible block we + // could have done it. + // + // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having + // started at the slot of block 1. We want to use the same randomness and validator set as + // signalled in the genesis, so we don't rotate the epoch. + block_num > One::one() && Self::current_slot_index() >= T::EpochLength::get() + } + + /// Current slot index relative to the current epoch. 
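+	/// For example (illustrative): with `GenesisSlot == 100`, `EpochLength == 10` and
+	/// `EpochIndex == 2`, the current epoch starts at slot 120, so a current slot of 123
+	/// maps to index 3. Slots before the epoch start saturate to `u32::MAX`.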
+ fn current_slot_index() -> u32 { + Self::slot_index(CurrentSlot::::get()) + } + + /// Slot index relative to the current epoch. + fn slot_index(slot: Slot) -> u32 { + slot.checked_sub(*Self::current_epoch_start()) + .and_then(|v| v.try_into().ok()) + .unwrap_or(u32::MAX) + } + + /// Finds the start slot of the current epoch. + /// + /// Only guaranteed to give correct results after `initialize` of the first + /// block in the chain (as its result is based off of `GenesisSlot`). + fn current_epoch_start() -> Slot { + Self::epoch_start(EpochIndex::::get()) + } + + /// Get the epoch's first slot. + fn epoch_start(epoch_index: u64) -> Slot { + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + + let epoch_start = epoch_index.checked_mul(T::EpochLength::get() as u64).expect(PROOF); + GenesisSlot::::get().checked_add(epoch_start).expect(PROOF).into() + } + + pub(crate) fn update_ring_verifier(authorities: &[AuthorityId]) { + debug!(target: LOG_TARGET, "Loading ring context"); + let Some(ring_ctx) = RingContext::::get() else { + debug!(target: LOG_TARGET, "Ring context not initialized"); + return + }; + + let pks: Vec<_> = authorities.iter().map(|auth| *auth.as_ref()).collect(); + + debug!(target: LOG_TARGET, "Building ring verifier (ring size: {})", pks.len()); + let verifier_data = ring_ctx + .verifier_data(&pks) + .expect("Failed to build ring verifier. This is a bug"); + + RingVerifierData::::put(verifier_data); + } + + /// Enact an epoch change. + /// + /// WARNING: Should be called on every block once and if and only if [`should_end_epoch`] + /// has returned `true`. + /// + /// If we detect one or more skipped epochs the policy is to use the authorities and values + /// from the first skipped epoch. The tickets data is invalidated. + pub(crate) fn enact_epoch_change( + authorities: WeakBoundedVec, + next_authorities: WeakBoundedVec, + ) { + if next_authorities != authorities { + Self::update_ring_verifier(&next_authorities); + } + + // Update authorities + Authorities::::put(&authorities); + NextAuthorities::::put(&next_authorities); + + // Update epoch index + let mut epoch_idx = EpochIndex::::get() + 1; + + let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); + if slot_idx >= T::EpochLength::get() { + // Detected one or more skipped epochs, clear tickets data and recompute epoch index. + Self::reset_tickets_data(); + let skipped_epochs = *slot_idx / T::EpochLength::get() as u64; + epoch_idx += skipped_epochs; + warn!( + target: LOG_TARGET, + "Detected {} skipped epochs, resuming from epoch {}", + skipped_epochs, + epoch_idx + ); + } + + let mut metadata = TicketsMeta::::get(); + let mut metadata_dirty = false; + + EpochIndex::::put(epoch_idx); + + let next_epoch_idx = epoch_idx + 1; + + // Updates current epoch randomness and computes the *next* epoch randomness. + let next_randomness = Self::update_epoch_randomness(next_epoch_idx); + + if let Some(config) = NextEpochConfig::::take() { + EpochConfig::::put(config); + } + + let next_config = PendingEpochConfigChange::::take(); + if let Some(next_config) = next_config { + NextEpochConfig::::put(next_config); + } + + // After we update the current epoch, we signal the *next* epoch change + // so that nodes can track changes. 
+ let next_epoch = NextEpochDescriptor { + randomness: next_randomness, + authorities: next_authorities.into_inner(), + config: next_config, + }; + Self::deposit_next_epoch_descriptor_digest(next_epoch); + + let epoch_tag = (epoch_idx & 1) as u8; + + // Optionally finish sorting + if metadata.unsorted_tickets_count != 0 { + Self::sort_segments(u32::MAX, epoch_tag, &mut metadata); + metadata_dirty = true; + } + + // Clear the "prev ≡ next (mod 2)" epoch tickets counter and bodies. + // Ids are left since are just cyclically overwritten on-the-go. + let prev_epoch_tag = epoch_tag ^ 1; + let prev_epoch_tickets_count = &mut metadata.tickets_count[prev_epoch_tag as usize]; + if *prev_epoch_tickets_count != 0 { + for idx in 0..*prev_epoch_tickets_count { + if let Some(ticket_id) = TicketsIds::::get((prev_epoch_tag, idx)) { + TicketsData::::remove(ticket_id); + } + } + *prev_epoch_tickets_count = 0; + metadata_dirty = true; + } + + if metadata_dirty { + TicketsMeta::::set(metadata); + } + } + + // Call this function on epoch change to enact current epoch randomness. + // + // Returns the next epoch randomness. + fn update_epoch_randomness(next_epoch_index: u64) -> Randomness { + let curr_epoch_randomness = NextRandomness::::get(); + CurrentRandomness::::put(curr_epoch_randomness); + + let accumulator = RandomnessAccumulator::::get(); + + let mut buf = [0; RANDOMNESS_LENGTH + 8]; + buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]); + buf[RANDOMNESS_LENGTH..].copy_from_slice(&next_epoch_index.to_le_bytes()); + + let next_randomness = hashing::blake2_256(&buf); + NextRandomness::::put(&next_randomness); + + next_randomness + } + + // Deposit per-slot randomness. + fn deposit_slot_randomness(randomness: &Randomness) { + let accumulator = RandomnessAccumulator::::get(); + + let mut buf = [0; 2 * RANDOMNESS_LENGTH]; + buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]); + buf[RANDOMNESS_LENGTH..].copy_from_slice(&randomness[..]); + + let accumulator = hashing::blake2_256(&buf); + RandomnessAccumulator::::put(accumulator); + } + + // Deposit next epoch descriptor in the block header digest. + fn deposit_next_epoch_descriptor_digest(desc: NextEpochDescriptor) { + let item = ConsensusLog::NextEpochData(desc); + let log = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, item.encode()); + >::deposit_log(log) + } + + // Initialize authorities on genesis phase. + // + // Genesis authorities may have been initialized via other means (e.g. via session pallet). + // + // If this function has already been called with some authorities, then the new list + // should match the previously set one. + fn genesis_authorities_initialize(authorities: &[AuthorityId]) { + let prev_authorities = Authorities::::get(); + + if !prev_authorities.is_empty() { + // This function has already been called. + if prev_authorities.as_slice() == authorities { + return + } else { + panic!("Authorities were already initialized"); + } + } + + let authorities = WeakBoundedVec::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&authorities); + NextAuthorities::::put(&authorities); + } + + // Method to be called on first block `on_initialize` to properly populate some key parameters. + fn post_genesis_initialize(slot: Slot) { + // Keep track of the actual first slot used (may not be zero based). + GenesisSlot::::put(slot); + + // Properly initialize randomness using genesis hash and current slot. 
+ // This is important to guarantee that a different set of tickets are produced for: + // - different chains which share the same ring parameters and + // - same chain started with a different slot base. + let genesis_hash = frame_system::Pallet::::parent_hash(); + let mut buf = genesis_hash.as_ref().to_vec(); + buf.extend_from_slice(&slot.to_le_bytes()); + let randomness = hashing::blake2_256(buf.as_slice()); + RandomnessAccumulator::::put(randomness); + + let next_randoness = Self::update_epoch_randomness(1); + + // Deposit a log as this is the first block in first epoch. + let next_epoch = NextEpochDescriptor { + randomness: next_randoness, + authorities: Self::next_authorities().into_inner(), + config: None, + }; + Self::deposit_next_epoch_descriptor_digest(next_epoch); + } + + /// Current epoch information. + pub fn current_epoch() -> Epoch { + let index = EpochIndex::::get(); + Epoch { + index, + start: Self::epoch_start(index), + length: T::EpochLength::get(), + authorities: Self::authorities().into_inner(), + randomness: Self::randomness(), + config: Self::config(), + } + } + + /// Next epoch information. + pub fn next_epoch() -> Epoch { + let index = EpochIndex::::get() + 1; + Epoch { + index, + start: Self::epoch_start(index), + length: T::EpochLength::get(), + authorities: Self::next_authorities().into_inner(), + randomness: Self::next_randomness(), + config: Self::next_config().unwrap_or_else(|| Self::config()), + } + } + + /// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy. + /// + /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, + /// with n >= k, then the tickets are assigned to the slots according to the following + /// strategy: + /// + /// slot-index : [ 0, 1, 2, ............ , n ] + /// tickets : [ t1, t3, t5, ... , t4, t2, t0 ]. + /// + /// With slot-index computed as `epoch_start() - slot`. + /// + /// If `slot` value falls within the current epoch then we fetch tickets from the current epoch + /// tickets list. + /// + /// If `slot` value falls within the next epoch then we fetch tickets from the next epoch + /// tickets ids list. Note that in this case we may have not finished receiving all the tickets + /// for that epoch yet. The next epoch tickets should be considered "stable" only after the + /// current epoch first half slots were elapsed (see `submit_tickets_unsigned_extrinsic`). + /// + /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the + /// specified slot-index (happens if a ticket falls in the middle of an epoch and n > k), + /// or if the slot falls beyond the next epoch. + /// + /// Before importing the first block this returns `None`. + pub fn slot_ticket_id(slot: Slot) -> Option { + if frame_system::Pallet::::block_number().is_zero() { + return None + } + let epoch_idx = EpochIndex::::get(); + let epoch_len = T::EpochLength::get(); + let mut slot_idx = Self::slot_index(slot); + let mut metadata = TicketsMeta::::get(); + + let get_ticket_idx = |slot_idx| { + let ticket_idx = if slot_idx < epoch_len / 2 { + 2 * slot_idx + 1 + } else { + 2 * (epoch_len - (slot_idx + 1)) + }; + debug!( + target: LOG_TARGET, + "slot-idx {} <-> ticket-idx {}", + slot_idx, + ticket_idx + ); + ticket_idx as u32 + }; + + let mut epoch_tag = (epoch_idx & 1) as u8; + + if epoch_len <= slot_idx && slot_idx < 2 * epoch_len { + // Try to get a ticket for the next epoch. 
Since its state values were not enacted yet, + // we may have to finish sorting the tickets. + epoch_tag ^= 1; + slot_idx -= epoch_len; + if metadata.unsorted_tickets_count != 0 { + Self::sort_segments(u32::MAX, epoch_tag, &mut metadata); + TicketsMeta::::set(metadata); + } + } else if slot_idx >= 2 * epoch_len { + return None + } + + let ticket_idx = get_ticket_idx(slot_idx); + if ticket_idx < metadata.tickets_count[epoch_tag as usize] { + TicketsIds::::get((epoch_tag, ticket_idx)) + } else { + None + } + } + + /// Returns ticket id and data associated with the given `slot`. + /// + /// Refer to the `slot_ticket_id` documentation for the slot-ticket association + /// criteria. + pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> { + Self::slot_ticket_id(slot).and_then(|id| TicketsData::::get(id).map(|body| (id, body))) + } + + // Sort and truncate candidate tickets, cleanup storage. + fn sort_and_truncate(candidates: &mut Vec, max_tickets: usize) -> u128 { + candidates.sort_unstable(); + candidates.drain(max_tickets..).for_each(TicketsData::::remove); + candidates[max_tickets - 1] + } + + /// Sort the tickets which belong to the epoch with the specified `epoch_tag`. + /// + /// At most `max_segments` are taken from the `UnsortedSegments` structure. + /// + /// The tickets of the removed segments are merged with the tickets on the `SortedCandidates` + /// which is then sorted an truncated to contain at most `MaxTickets` entries. + /// + /// If all the entries in `UnsortedSegments` are consumed, then `SortedCandidates` is elected + /// as the next epoch tickets, else it is saved to be used by next calls of this function. + pub(crate) fn sort_segments(max_segments: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { + let unsorted_segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE); + let max_segments = max_segments.min(unsorted_segments_count); + let max_tickets = Self::epoch_length() as usize; + + // Fetch the sorted candidates (if any). + let mut candidates = SortedCandidates::::take().into_inner(); + + // There is an upper bound to check only if we already sorted the max number + // of allowed tickets. + let mut upper_bound = *candidates.get(max_tickets - 1).unwrap_or(&TicketId::MAX); + + let mut require_sort = false; + + // Consume at most `max_segments` segments. + // During the process remove every stale ticket from `TicketsData` storage. + for segment_idx in (0..unsorted_segments_count).rev().take(max_segments as usize) { + let segment = UnsortedSegments::::take(segment_idx); + metadata.unsorted_tickets_count -= segment.len() as u32; + + // Push only ids with a value less than the current `upper_bound`. + let prev_len = candidates.len(); + for ticket_id in segment { + if ticket_id < upper_bound { + candidates.push(ticket_id); + } else { + TicketsData::::remove(ticket_id); + } + } + require_sort = candidates.len() != prev_len; + + // As we approach the tail of the segments buffer the `upper_bound` value is expected + // to decrease (fast). We thus expect the number of tickets pushed into the + // `candidates` vector to follow an exponential drop. + // + // Given this, sorting and truncating after processing each segment may be an overkill + // as we may find pushing few tickets more and more often. Is preferable to perform + // the sort and truncation operations only when we reach some bigger threshold + // (currently set as twice the capacity of `SortCandidate`). + // + // The more is the protocol's redundancy factor (i.e. 
the ratio between tickets allowed + // to be submitted and the epoch length) the more this check becomes relevant. + if candidates.len() > 2 * max_tickets { + upper_bound = Self::sort_and_truncate(&mut candidates, max_tickets); + require_sort = false; + } + } + + if candidates.len() > max_tickets { + Self::sort_and_truncate(&mut candidates, max_tickets); + } else if require_sort { + candidates.sort_unstable(); + } + + if metadata.unsorted_tickets_count == 0 { + // Sorting is over, write to next epoch map. + candidates.iter().enumerate().for_each(|(i, id)| { + TicketsIds::::insert((epoch_tag, i as u32), id); + }); + metadata.tickets_count[epoch_tag as usize] = candidates.len() as u32; + } else { + // Keep the partial result for the next calls. + SortedCandidates::::set(BoundedVec::truncate_from(candidates)); + } + } + + /// Append a set of tickets to the segments map. + pub(crate) fn append_tickets(mut tickets: BoundedVec>) { + debug!(target: LOG_TARGET, "Appending batch with {} tickets", tickets.len()); + tickets.iter().for_each(|t| trace!(target: LOG_TARGET, " + {t:032x}")); + + let mut metadata = TicketsMeta::::get(); + let mut segment_idx = metadata.unsorted_tickets_count / SEGMENT_MAX_SIZE; + + while !tickets.is_empty() { + let rem = metadata.unsorted_tickets_count % SEGMENT_MAX_SIZE; + let to_be_added = tickets.len().min((SEGMENT_MAX_SIZE - rem) as usize); + + let mut segment = UnsortedSegments::::get(segment_idx); + let _ = segment + .try_extend(tickets.drain(..to_be_added)) + .defensive_proof("We don't add more than `SEGMENT_MAX_SIZE` and this is the maximum bound for the vector."); + UnsortedSegments::::insert(segment_idx, segment); + + metadata.unsorted_tickets_count += to_be_added as u32; + segment_idx += 1; + } + + TicketsMeta::::set(metadata); + } + + /// Remove all tickets related data. + /// + /// May not be efficient as the calling places may repeat some of this operations + /// but is a very extraordinary operation (hopefully never happens in production) + /// and better safe than sorry. + fn reset_tickets_data() { + let metadata = TicketsMeta::::get(); + + // Remove even/odd-epoch data. + for epoch_tag in 0..=1 { + for idx in 0..metadata.tickets_count[epoch_tag] { + if let Some(id) = TicketsIds::::get((epoch_tag as u8, idx)) { + TicketsData::::remove(id); + } + } + } + + // Remove all unsorted tickets segments. + let segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE); + (0..segments_count).for_each(UnsortedSegments::::remove); + + // Reset sorted candidates + SortedCandidates::::kill(); + + // Reset tickets metadata + TicketsMeta::::kill(); + } + + /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to + /// `submit_unsigned_transaction`. + /// + /// The submitted tickets are added to the next epoch outstanding tickets as long as the + /// extrinsic is called within the first half of the epoch. Tickets received during the + /// second half are dropped. + pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { + let tickets = BoundedVec::truncate_from(tickets); + let call = Call::submit_tickets { tickets }; + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + Ok(_) => true, + Err(e) => { + error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); + false + }, + } + } + + /// Epoch length + pub fn epoch_length() -> u32 { + T::EpochLength::get() + } +} + +/// Trigger an epoch change, if any should take place. 
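+///
+/// A runtime wires an implementation into [`Config::EpochChangeTrigger`], typically either
+/// [`EpochChangeExternalTrigger`] (rotation driven by an external component such as a
+/// session manager) or [`EpochChangeInternalTrigger`] (self-contained rotation), both
+/// defined below.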
+pub trait EpochChangeTrigger { + /// May trigger an epoch change, if any should take place. + /// + /// Returns an optional `Weight` if epoch change has been triggered. + /// + /// This should be called during every block, after initialization is done. + fn trigger(_: BlockNumberFor) -> Weight; +} + +/// An `EpochChangeTrigger` which does nothing. +/// +/// In practice this means that the epoch change logic is left to some external component +/// (e.g. pallet-session). +pub struct EpochChangeExternalTrigger; + +impl EpochChangeTrigger for EpochChangeExternalTrigger { + fn trigger(_: BlockNumberFor) -> Weight { + // nothing - trigger is external. + Weight::zero() + } +} + +/// An `EpochChangeTrigger` which recycle the same authorities set forever. +/// +/// The internal trigger should only be used when no other module is responsible for +/// changing authority set. +pub struct EpochChangeInternalTrigger; + +impl EpochChangeTrigger for EpochChangeInternalTrigger { + fn trigger(block_num: BlockNumberFor) -> Weight { + if Pallet::::should_end_epoch(block_num) { + let authorities = Pallet::::next_authorities(); + let next_authorities = authorities.clone(); + let len = next_authorities.len() as u32; + Pallet::::enact_epoch_change(authorities, next_authorities); + T::WeightInfo::enact_epoch_change(len, T::EpochLength::get()) + } else { + Weight::zero() + } + } +} + +impl BoundToRuntimeAppPublic for Pallet { + type Public = AuthorityId; +} diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..5aca815cc2140a1f04f05c55e60c2d77d69bcb7e --- /dev/null +++ b/substrate/frame/sassafras/src/mock.rs @@ -0,0 +1,343 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities for Sassafras pallet. 
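+//!
+//! A typical test built on these helpers looks roughly like the following sketch
+//! (illustrative only; see `tests.rs` for real usage):
+//!
+//! ```ignore
+//! let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+//! ext.execute_with(|| {
+//!     let _digest = initialize_block(1, 100.into(), Default::default(), &pairs[0]);
+//!     // ... exercise the pallet ...
+//!     let _header = finalize_block(1);
+//! });
+//! ```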
+ +use crate::{self as pallet_sassafras, EpochChangeInternalTrigger, *}; + +use frame_support::{ + derive_impl, + traits::{ConstU32, OnFinalize, OnInitialize}, +}; +use sp_consensus_sassafras::{ + digests::SlotClaim, + vrf::{RingProver, VrfSignature}, + AuthorityIndex, AuthorityPair, EpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, +}; +use sp_core::{ + crypto::{ByteArray, Pair, UncheckedFrom, VrfSecret, Wraps}, + ed25519::Public as EphemeralPublic, + H256, U256, +}; +use sp_runtime::{ + testing::{Digest, DigestItem, Header, TestXt}, + BuildStorage, +}; + +const LOG_TARGET: &str = "sassafras::tests"; + +const EPOCH_LENGTH: u32 = 10; +const MAX_AUTHORITIES: u32 = 100; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type Block = frame_system::mocking::MockBlock; +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + RuntimeCall: From, +{ + type OverarchingCall = RuntimeCall; + type Extrinsic = TestXt; +} + +impl pallet_sassafras::Config for Test { + type EpochLength = ConstU32; + type MaxAuthorities = ConstU32; + type EpochChangeTrigger = EpochChangeInternalTrigger; + type WeightInfo = (); +} + +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + Sassafras: pallet_sassafras, + } +); + +// Default used for most of the tests. +// +// The redundancy factor has been set to max value to accept all submitted +// tickets without worrying about the threshold. +pub const TEST_EPOCH_CONFIGURATION: EpochConfiguration = + EpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 5 }; + +/// Build and returns test storage externalities +pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { + new_test_ext_with_pairs(authorities_len, false).1 +} + +/// Build and returns test storage externalities and authority set pairs used +/// by Sassafras genesis configuration. +pub fn new_test_ext_with_pairs( + authorities_len: usize, + with_ring_context: bool, +) -> (Vec, sp_io::TestExternalities) { + let pairs = (0..authorities_len) + .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .collect::>(); + + let authorities: Vec<_> = pairs.iter().map(|p| p.public()).collect(); + + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_sassafras::GenesisConfig:: { + authorities: authorities.clone(), + epoch_config: TEST_EPOCH_CONFIGURATION, + _phantom: sp_std::marker::PhantomData, + } + .assimilate_storage(&mut storage) + .unwrap(); + + let mut ext: sp_io::TestExternalities = storage.into(); + + if with_ring_context { + ext.execute_with(|| { + log::debug!(target: LOG_TARGET, "Building testing ring context"); + let ring_ctx = vrf::RingContext::new_testing(); + RingContext::::set(Some(ring_ctx.clone())); + Sassafras::update_ring_verifier(&authorities); + }); + } + + (pairs, ext) +} + +fn make_ticket_with_prover( + attempt: u32, + pair: &AuthorityPair, + prover: &RingProver, +) -> TicketEnvelope { + log::debug!("attempt: {}", attempt); + + // Values are referring to the next epoch + let epoch = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); + + // Make a dummy ephemeral public that hopefully is unique within one test instance. + // In the tests, the values within the erased public are just used to compare + // ticket bodies, so it is not important to be a valid key. 
+ let mut raw: [u8; 32] = [0; 32]; + raw.copy_from_slice(&pair.public().as_slice()[0..32]); + let erased_public = EphemeralPublic::unchecked_from(raw); + let revealed_public = erased_public; + + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt, epoch); + + let body = TicketBody { attempt_idx: attempt, erased_public, revealed_public }; + let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); + + let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); + + // Ticket-id can be generated via vrf-preout. + // We don't care that much about its value here. + TicketEnvelope { body, signature } +} + +pub fn make_prover(pair: &AuthorityPair) -> RingProver { + let public = pair.public(); + let mut prover_idx = None; + + let ring_ctx = Sassafras::ring_context().unwrap(); + + let pks: Vec = Sassafras::authorities() + .iter() + .enumerate() + .map(|(idx, auth)| { + if public == *auth { + prover_idx = Some(idx); + } + *auth.as_ref() + }) + .collect(); + + log::debug!("Building prover. Ring size: {}", pks.len()); + let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); + log::debug!("Done"); + + prover +} + +/// Construct `attempts` tickets envelopes for the next epoch. +/// +/// E.g. by passing an optional threshold +pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec { + let prover = make_prover(pair); + (0..attempts) + .into_iter() + .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) + .collect() +} + +pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, TicketBody) { + // Values are referring to the next epoch + let epoch = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); + + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt_idx, epoch); + let ticket_id_pre_output = pair.as_inner_ref().vrf_pre_output(&ticket_id_input); + + let id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_pre_output); + + // Make a dummy ephemeral public that hopefully is unique within one test instance. + // In the tests, the values within the erased public are just used to compare + // ticket bodies, so it is not important to be a valid key. + let mut raw: [u8; 32] = [0; 32]; + raw[..16].copy_from_slice(&pair.public().as_slice()[0..16]); + raw[16..].copy_from_slice(&id.to_le_bytes()); + let erased_public = EphemeralPublic::unchecked_from(raw); + let revealed_public = erased_public; + + let body = TicketBody { attempt_idx, erased_public, revealed_public }; + + (id, body) +} + +pub fn make_dummy_ticket_body(attempt_idx: u32) -> (TicketId, TicketBody) { + let hash = sp_core::hashing::blake2_256(&attempt_idx.to_le_bytes()); + + let erased_public = EphemeralPublic::unchecked_from(hash); + let revealed_public = erased_public; + + let body = TicketBody { attempt_idx, erased_public, revealed_public }; + + let mut bytes = [0u8; 16]; + bytes.copy_from_slice(&hash[..16]); + let id = TicketId::from_le_bytes(bytes); + + (id, body) +} + +pub fn make_ticket_bodies( + number: u32, + pair: Option<&AuthorityPair>, +) -> Vec<(TicketId, TicketBody)> { + (0..number) + .into_iter() + .map(|i| match pair { + Some(pair) => make_ticket_body(i, pair), + None => make_dummy_ticket_body(i), + }) + .collect() +} + +/// Persist the given tickets in the unsorted segments buffer. +/// +/// This function skips all the checks performed by the `submit_tickets` extrinsic and +/// directly appends the tickets to the `UnsortedSegments` structure. 
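+///
+/// Tickets are chunked in groups of at most `epoch_length` ids, so a single call may
+/// produce several unsorted segments.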
+pub fn persist_next_epoch_tickets_as_segments(tickets: &[(TicketId, TicketBody)]) { + let mut ids = Vec::with_capacity(tickets.len()); + tickets.iter().for_each(|(id, body)| { + TicketsData::::set(id, Some(body.clone())); + ids.push(*id); + }); + let max_chunk_size = Sassafras::epoch_length() as usize; + ids.chunks(max_chunk_size).for_each(|chunk| { + Sassafras::append_tickets(BoundedVec::truncate_from(chunk.to_vec())); + }) +} + +/// Calls the [`persist_next_epoch_tickets_as_segments`] and then proceeds to the +/// sorting of the candidates. +/// +/// Only "winning" tickets are left. +pub fn persist_next_epoch_tickets(tickets: &[(TicketId, TicketBody)]) { + persist_next_epoch_tickets_as_segments(tickets); + // Force sorting of next epoch tickets (enactment) by explicitly querying the first of them. + let next_epoch = Sassafras::next_epoch(); + assert_eq!(TicketsMeta::::get().unsorted_tickets_count, tickets.len() as u32); + Sassafras::slot_ticket(next_epoch.start).unwrap(); + assert_eq!(TicketsMeta::::get().unsorted_tickets_count, 0); +} + +fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { + let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization. + let epoch_start = Sassafras::current_epoch_start(); + let epoch_length = EPOCH_LENGTH.into(); + if epoch_start != 0_u64 && slot >= epoch_start + epoch_length { + epoch += slot.saturating_sub(epoch_start).saturating_div(epoch_length); + randomness = crate::NextRandomness::::get(); + } + + let data = vrf::slot_claim_sign_data(&randomness, slot, epoch); + pair.as_ref().vrf_sign(&data) +} + +/// Construct a `PreDigest` instance for the given parameters. +pub fn make_slot_claim( + authority_idx: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> SlotClaim { + let vrf_signature = slot_claim_vrf_signature(slot, pair); + SlotClaim { authority_idx, slot, vrf_signature, ticket_claim: None } +} + +/// Construct a `Digest` with a `SlotClaim` item. +pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest { + let claim = make_slot_claim(authority_idx, slot, pair); + Digest { logs: vec![DigestItem::from(&claim)] } +} + +pub fn initialize_block( + number: u64, + slot: Slot, + parent_hash: H256, + pair: &AuthorityPair, +) -> Digest { + let digest = make_digest(0, slot, pair); + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + digest +} + +pub fn finalize_block(number: u64) -> Header { + Sassafras::on_finalize(number); + System::finalize() +} + +/// Progress the pallet state up to the given block `number` and `slot`. +pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { + Sassafras::on_finalize(System::block_number()); + let parent_hash = System::finalize().hash(); + + let digest = make_digest(0, slot, pair); + + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + + digest +} + +/// Progress the pallet state up to the given block `number`. +/// Slots will grow linearly accordingly to blocks. 
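+///
+/// For example (illustrative): starting from block 1 at slot 100, `progress_to_block(3, &pair)`
+/// imports blocks 2 and 3 at slots 101 and 102 and returns the digest of block 3.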
+pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { + let mut slot = Sassafras::current_slot() + 1; + let mut digest = None; + for i in System::block_number() + 1..=number { + let dig = go_to_block(i, slot, pair); + digest = Some(dig); + slot = slot + 1; + } + digest +} diff --git a/substrate/frame/sassafras/src/tests.rs b/substrate/frame/sassafras/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..ec3425cce7bf61e299bb811ac41edda3d9761ae5 --- /dev/null +++ b/substrate/frame/sassafras/src/tests.rs @@ -0,0 +1,874 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Sassafras pallet. + +use crate::*; +use mock::*; + +use sp_consensus_sassafras::Slot; + +fn h2b(hex: &str) -> [u8; N] { + array_bytes::hex2array_unchecked(hex) +} + +fn b2h(bytes: [u8; N]) -> String { + array_bytes::bytes2hex("", &bytes) +} + +#[test] +fn genesis_values_assumptions_check() { + new_test_ext(3).execute_with(|| { + assert_eq!(Sassafras::authorities().len(), 3); + assert_eq!(Sassafras::config(), TEST_EPOCH_CONFIGURATION); + }); +} + +#[test] +fn post_genesis_randomness_initialization() { + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); + let pair = &pairs[0]; + + ext.execute_with(|| { + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(Sassafras::next_randomness(), [0; 32]); + assert_eq!(Sassafras::randomness_accumulator(), [0; 32]); + + // Test the values with a zero genesis block hash + let _ = initialize_block(1, 123.into(), [0x00; 32].into(), pair); + + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("b9497550deeeb4adc134555930de61968a0558f8947041eb515b2f5fa68ffaf7") + ); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("febcc7fe9539fe17ed29f525831394edfb30b301755dc9bd91584a1f065faf87") + ); + let (id1, _) = make_ticket_bodies(1, Some(pair))[0]; + + // Reset what is relevant + NextRandomness::::set([0; 32]); + RandomnessAccumulator::::set([0; 32]); + + // Test the values with a non-zero genesis block hash + let _ = initialize_block(1, 123.into(), [0xff; 32].into(), pair); + + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("51c1e3b3a73d2043b3cabae98ff27bdd4aad8967c21ecda7b9465afaa0e70f37") + ); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("466bf3007f2e17bffee0b3c42c90f33d654f5ff61eff28b0cc650825960abd52") + ); + let (id2, _) = make_ticket_bodies(1, Some(pair))[0]; + + // Ticket ids should be different when next epoch randomness is different + assert_ne!(id1, id2); + + // Reset what is relevant + NextRandomness::::set([0; 
32]); + RandomnessAccumulator::::set([0; 32]); + + // Test the values with a non-zero genesis block hash + let _ = initialize_block(1, 321.into(), [0x00; 32].into(), pair); + + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("d85d84a54f79453000eb62e8a17b30149bd728d3232bc2787a89d51dc9a36008") + ); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("8a035eed02b5b8642b1515ed19752df8df156627aea45c4ef6e3efa88be9a74d") + ); + let (id2, _) = make_ticket_bodies(1, Some(pair))[0]; + + // Ticket ids should be different when next epoch randomness is different + assert_ne!(id1, id2); + }); +} + +// Tests if the sorted tickets are assigned to each slot outside-in. +#[test] +fn slot_ticket_id_outside_in_fetch() { + let genesis_slot = Slot::from(100); + let tickets_count = 6; + + // Current epoch tickets + let curr_tickets: Vec = (0..tickets_count).map(|i| i as TicketId).collect(); + + // Next epoch tickets + let next_tickets: Vec = + (0..tickets_count - 1).map(|i| (i + tickets_count) as TicketId).collect(); + + new_test_ext(0).execute_with(|| { + // Some corner cases + TicketsIds::::insert((0, 0_u32), 1_u128); + + // Cleanup + (0..3).for_each(|i| TicketsIds::::remove((0, i as u32))); + + curr_tickets + .iter() + .enumerate() + .for_each(|(i, id)| TicketsIds::::insert((0, i as u32), id)); + + next_tickets + .iter() + .enumerate() + .for_each(|(i, id)| TicketsIds::::insert((1, i as u32), id)); + + TicketsMeta::::set(TicketsMetadata { + tickets_count: [curr_tickets.len() as u32, next_tickets.len() as u32], + unsorted_tickets_count: 0, + }); + + // Before importing the first block the pallet always return `None` + // This is a kind of special hardcoded case that should never happen in practice + // as the first thing the pallet does is to initialize the genesis slot. + + assert_eq!(Sassafras::slot_ticket_id(0.into()), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 100), None); + + // Initialize genesis slot.. + GenesisSlot::::set(genesis_slot); + frame_system::Pallet::::set_block_number(One::one()); + + // Try to fetch a ticket for a slot before current epoch. + assert_eq!(Sassafras::slot_ticket_id(0.into()), None); + + // Current epoch tickets. 
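+		// With 6 tickets and an epoch length of 10, the outside-in assignment documented
+		// in `slot_ticket_id` yields ("x" = no ticket):
+		//
+		//   slot offset: 0   1   2   3   4   5   6   7   8   9
+		//   ticket     : t1  t3  t5  x   x   x   x   t4  t2  t0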
+ assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[3])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 2), Some(curr_tickets[5])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 3), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 4), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 5), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 6), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 7), Some(curr_tickets[4])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 8), Some(curr_tickets[2])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 9), Some(curr_tickets[0])); + + // Next epoch tickets (note that only 5 tickets are available) + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 10), Some(next_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 11), Some(next_tickets[3])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 12), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 13), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 14), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 15), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 16), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 17), Some(next_tickets[4])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 18), Some(next_tickets[2])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 19), Some(next_tickets[0])); + + // Try to fetch the tickets for slots beyond the next epoch. + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 20), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 42), None); + }); +} + +// Different test for outside-in test with more focus on corner case correctness. 
+#[test] +fn slot_ticket_id_outside_in_fetch_corner_cases() { + new_test_ext(0).execute_with(|| { + frame_system::Pallet::::set_block_number(One::one()); + + let mut meta = TicketsMetadata { tickets_count: [0, 0], unsorted_tickets_count: 0 }; + let curr_epoch_idx = EpochIndex::::get(); + + let mut epoch_test = |epoch_idx| { + let tag = (epoch_idx & 1) as u8; + let epoch_start = Sassafras::epoch_start(epoch_idx); + + // cleanup + meta.tickets_count = [0, 0]; + TicketsMeta::::set(meta); + assert!((0..10).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); + + meta.tickets_count[tag as usize] += 1; + TicketsMeta::::set(meta); + TicketsIds::::insert((tag, 0_u32), 1_u128); + assert_eq!(Sassafras::slot_ticket_id((epoch_start + 9).into()), Some(1_u128)); + assert!((0..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); + + meta.tickets_count[tag as usize] += 1; + TicketsMeta::::set(meta); + TicketsIds::::insert((tag, 1_u32), 2_u128); + assert_eq!(Sassafras::slot_ticket_id((epoch_start + 0).into()), Some(2_u128)); + assert!((1..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); + + meta.tickets_count[tag as usize] += 2; + TicketsMeta::::set(meta); + TicketsIds::::insert((tag, 2_u32), 3_u128); + assert_eq!(Sassafras::slot_ticket_id((epoch_start + 8).into()), Some(3_u128)); + assert!((1..8).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); + }; + + // Even epoch + epoch_test(curr_epoch_idx); + epoch_test(curr_epoch_idx + 1); + }); +} + +#[test] +fn on_first_block_after_genesis() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + let common_assertions = || { + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") + ); + }; + + // Post-initialization status + + assert!(ClaimTemporaryData::::exists()); + common_assertions(); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("f0d42f6b7c0d157ecbd788be44847b80a96c290c04b5dfa5d1d40c98aa0c04ed") + ); + + let header = finalize_block(start_block); + + // Post-finalization status + + assert!(!ClaimTemporaryData::::exists()); + common_assertions(); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + + // Genesis epoch start deposits consensus + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: Sassafras::next_authorities().into_inner(), + randomness: Sassafras::next_randomness(), + config: None, + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + 
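+		// The second header digest item must be exactly the `NextEpochData` consensus log
+		// rebuilt above from the on-chain next-epoch values.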
+
+#[test]
+fn on_normal_block() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+	let end_block = start_block + 1;
+
+	ext.execute_with(|| {
+		initialize_block(start_block, start_slot, Default::default(), &pairs[0]);
+
+		// We don't want to trigger an epoch change in this test.
+		let epoch_length = Sassafras::epoch_length() as u64;
+		assert!(epoch_length > end_block);
+
+		// Progress to block 2
+		let digest = progress_to_block(end_block, &pairs[0]).unwrap();
+
+		let common_assertions = || {
+			assert_eq!(Sassafras::genesis_slot(), start_slot);
+			assert_eq!(Sassafras::current_slot(), start_slot + 1);
+			assert_eq!(Sassafras::epoch_index(), 0);
+			assert_eq!(Sassafras::current_epoch_start(), start_slot);
+			assert_eq!(Sassafras::current_slot_index(), 1);
+			assert_eq!(Sassafras::randomness(), [0; 32]);
+			println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+			assert_eq!(
+				Sassafras::next_randomness(),
+				h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e")
+			);
+		};
+
+		// Post-initialization status
+
+		assert!(ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"),
+		);
+
+		let header = finalize_block(end_block);
+
+		// Post-finalization status
+
+		assert!(!ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("be9261adb9686dfd3f23f8a276b7acc7f4beb3137070beb64c282ac22d84cbf0"),
+		);
+
+		// Header data check
+
+		assert_eq!(header.digest.logs.len(), 1);
+		assert_eq!(header.digest.logs[0], digest.logs[0]);
+	});
+}
+
+#[test]
+fn produce_epoch_change_digest_no_config() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+
+	ext.execute_with(|| {
+		let start_slot = Slot::from(100);
+		let start_block = 1;
+
+		initialize_block(start_block, start_slot, Default::default(), &pairs[0]);
+
+		// We want to trigger an epoch change in this test.
+		let epoch_length = Sassafras::epoch_length() as u64;
+		let end_block = start_block + epoch_length;
+
+		let digest = progress_to_block(end_block, &pairs[0]).unwrap();
+
+		let common_assertions = || {
+			assert_eq!(Sassafras::genesis_slot(), start_slot);
+			assert_eq!(Sassafras::current_slot(), start_slot + epoch_length);
+			assert_eq!(Sassafras::epoch_index(), 1);
+			assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_length);
+			assert_eq!(Sassafras::current_slot_index(), 0);
+			println!("[DEBUG] {}", b2h(Sassafras::randomness()));
+			assert_eq!(
+				Sassafras::randomness(),
+				h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e")
+			);
+		};
+
+		// Post-initialization status
+
+		assert!(ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+		assert_eq!(
+			Sassafras::next_randomness(),
+			h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"),
+		);
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("bf0f1228f4ff953c8c1bda2cceb668bf86ea05d7ae93e26d021c9690995d5279"),
+		);
+
+		let header = finalize_block(end_block);
+
+		// Post-finalization status
+
+		assert!(!ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+		assert_eq!(
+			Sassafras::next_randomness(),
+			h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"),
+		);
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("8a1ceb346036c386d021264b10912c8b656799668004c4a487222462b394cd89"),
+		);
+
+		// Header data check
+
+		assert_eq!(header.digest.logs.len(), 2);
+		assert_eq!(header.digest.logs[0], digest.logs[0]);
+		// Deposits consensus log on epoch change
+		let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData(
+			sp_consensus_sassafras::digests::NextEpochDescriptor {
+				authorities: Sassafras::next_authorities().into_inner(),
+				randomness: Sassafras::next_randomness(),
+				config: None,
+			},
+		);
+		let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode());
+		assert_eq!(header.digest.logs[1], consensus_digest)
+	})
+}
+
+#[test]
+fn produce_epoch_change_digest_with_config() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+
+	ext.execute_with(|| {
+		let start_slot = Slot::from(100);
+		let start_block = 1;
+
+		initialize_block(start_block, start_slot, Default::default(), &pairs[0]);
+
+		let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 123 };
+		Sassafras::plan_config_change(RuntimeOrigin::root(), config).unwrap();
+
+		// We want to trigger an epoch change in this test.
+		let epoch_length = Sassafras::epoch_length() as u64;
+		let end_block = start_block + epoch_length;
+
+		let digest = progress_to_block(end_block, &pairs[0]).unwrap();
+
+		let header = finalize_block(end_block);
+
+		// Header data check.
+		// Skip pallet status checks that were already performed by other tests.
+
+		assert_eq!(header.digest.logs.len(), 2);
+		assert_eq!(header.digest.logs[0], digest.logs[0]);
+		// Deposits consensus log on epoch change
+		let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData(
+			sp_consensus_sassafras::digests::NextEpochDescriptor {
+				authorities: Sassafras::next_authorities().into_inner(),
+				randomness: Sassafras::next_randomness(),
+				config: Some(config),
+			},
+		);
+		let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode());
+		assert_eq!(header.digest.logs[1], consensus_digest)
+	})
+}
+
+#[test]
+fn segments_incremental_sort_works() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(1, false);
+	let pair = &pairs[0];
+	let segments_count = 14;
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+
+	ext.execute_with(|| {
+		let epoch_length = Sassafras::epoch_length() as u64;
+		// -3 just to have the last segment not full...
+		let submitted_tickets_count = segments_count * SEGMENT_MAX_SIZE - 3;
+
+		initialize_block(start_block, start_slot, Default::default(), pair);
+
+		// Manually populate the segments to skip the threshold check
+		let mut tickets = make_ticket_bodies(submitted_tickets_count, None);
+		persist_next_epoch_tickets_as_segments(&tickets);
+
+		// Proceed to half of the epoch (sortition should not have been started yet)
+		let half_epoch_block = start_block + epoch_length / 2;
+		progress_to_block(half_epoch_block, pair);
+
+		let mut unsorted_tickets_count = submitted_tickets_count;
+
+		// Check that next epoch tickets sortition is not started yet
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		// Follow the incremental sortition block by block
+
+		progress_to_block(half_epoch_block + 1, pair);
+		unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE - 3;
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		progress_to_block(half_epoch_block + 2, pair);
+		unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE;
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		progress_to_block(half_epoch_block + 3, pair);
+		unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE;
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		progress_to_block(half_epoch_block + 4, pair);
+		unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE;
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		let header = finalize_block(half_epoch_block + 4);
+
+		// Sort should be finished now.
+		// Check that the next epoch tickets count has the correct value.
+		// Bigger ticket ids were discarded during sortition.
+		unsorted_tickets_count -= 2 * SEGMENT_MAX_SIZE;
+		assert_eq!(unsorted_tickets_count, 0);
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, epoch_length as u32]);
+		// Epoch change log should have been pushed as well
+		assert_eq!(header.digest.logs.len(), 1);
+		// No tickets for the current epoch
+		assert_eq!(TicketsIds::<Test>::get((0, 0)), None);
+
+		// Check persistence of "winning" tickets
+		tickets.sort_by_key(|t| t.0);
+		(0..epoch_length as usize).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((1, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), tickets[i]);
+		});
+		// Check removal of "losing" tickets
+		(epoch_length as usize..tickets.len()).into_iter().for_each(|i| {
+			assert!(TicketsIds::<Test>::get((1, i as u32)).is_none());
+			assert!(TicketsData::<Test>::get(tickets[i].0).is_none());
+		});
+
+		// The next block will be the first one produced in the new epoch.
+		// At this point the tickets are found already sorted and ready to be used.
+		let slot = Sassafras::current_slot() + 1;
+		let number = System::block_number() + 1;
+		initialize_block(number, slot, header.hash(), pair);
+		let header = finalize_block(number);
+		// Epoch change digest is also produced
+		assert_eq!(header.digest.logs.len(), 2);
+	});
+}
+
+#[test]
+fn tickets_fetch_works_after_epoch_change() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+	let pair = &pairs[0];
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+	let submitted_tickets = 300;
+
+	ext.execute_with(|| {
+		initialize_block(start_block, start_slot, Default::default(), pair);
+
+		// We don't want to trigger an epoch change in this test.
+		let epoch_length = Sassafras::epoch_length() as u64;
+		assert!(epoch_length > 2);
+		progress_to_block(2, &pairs[0]).unwrap();
+
+		// Persist tickets as three different segments.
+		let tickets = make_ticket_bodies(submitted_tickets, None);
+		persist_next_epoch_tickets_as_segments(&tickets);
+
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, submitted_tickets);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		// Progress up to the last epoch slot (do not enact epoch change)
+		progress_to_block(epoch_length, &pairs[0]).unwrap();
+
+		// At this point next epoch tickets should have been sorted and ready to be used
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, 0);
+		assert_eq!(meta.tickets_count, [0, epoch_length as u32]);
+
+		// Compute and sort the tickets ids (aka tickets scores)
+		let mut expected_ids: Vec<_> = tickets.into_iter().map(|(id, _)| id).collect();
+		expected_ids.sort();
+		expected_ids.truncate(epoch_length as usize);
+
+		// Check if we can fetch next epoch tickets ids (outside-in).
+		let slot = Sassafras::current_slot();
+		assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[1]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[3]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[5]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 4).unwrap(), expected_ids[7]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[6]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[4]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[2]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 10).unwrap(), expected_ids[0]);
+		assert!(Sassafras::slot_ticket_id(slot + 11).is_none());
+
+		// Enact epoch change by progressing one more block
+
+		progress_to_block(epoch_length + 1, &pairs[0]).unwrap();
+
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, 0);
+		assert_eq!(meta.tickets_count, [0, 10]);
+
+		// Check if we can fetch current epoch tickets ids (outside-in).
+		let slot = Sassafras::current_slot();
+		assert_eq!(Sassafras::slot_ticket_id(slot).unwrap(), expected_ids[1]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[3]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[5]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[7]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 6).unwrap(), expected_ids[6]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[4]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[2]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[0]);
+		assert!(Sassafras::slot_ticket_id(slot + 10).is_none());
+
+		// Enact another epoch change, for which we don't have any ticket
+		progress_to_block(2 * epoch_length + 1, &pairs[0]).unwrap();
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, 0);
+		assert_eq!(meta.tickets_count, [0, 0]);
+	});
+}
+
+#[test]
+fn block_allowed_to_skip_epochs() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+	let pair = &pairs[0];
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+
+	ext.execute_with(|| {
+		let epoch_length = Sassafras::epoch_length() as u64;
+
+		initialize_block(start_block, start_slot, Default::default(), pair);
+
+		let tickets = make_ticket_bodies(3, Some(pair));
+		persist_next_epoch_tickets(&tickets);
+
+		let next_random = Sassafras::next_randomness();
+
+		// We want to skip 3 epochs in this test.
+		let offset = 4 * epoch_length;
+		go_to_block(start_block + offset, start_slot + offset, &pairs[0]);
+
+		// Post-initialization status
+
+		assert!(ClaimTemporaryData::<Test>::exists());
+		assert_eq!(Sassafras::genesis_slot(), start_slot);
+		assert_eq!(Sassafras::current_slot(), start_slot + offset);
+		assert_eq!(Sassafras::epoch_index(), 4);
+		assert_eq!(Sassafras::current_epoch_start(), start_slot + offset);
+		assert_eq!(Sassafras::current_slot_index(), 0);
+
+		// Tickets data has been discarded
+		assert_eq!(TicketsMeta::<Test>::get(), TicketsMetadata::default());
+		assert!(tickets.iter().all(|(id, _)| TicketsData::<Test>::get(id).is_none()));
+		assert_eq!(SortedCandidates::<Test>::get().len(), 0);
+
+		// We used the last known next epoch randomness as a fallback
+		assert_eq!(next_random, Sassafras::randomness());
+	});
+}
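The epoch index and epoch start asserted above are plain integer bookkeeping over `genesis_slot` and `epoch_length`: skipping whole epochs simply lands the chain at a later epoch index. A small illustrative sketch of that arithmetic (plain `u64` values stand in for the pallet's `Slot` type; the epoch length of 10 is inferred from the other tests in this file):

struct EpochMath {
	genesis_slot: u64,
	epoch_length: u64,
}

impl EpochMath {
	fn epoch_index(&self, slot: u64) -> u64 {
		(slot - self.genesis_slot) / self.epoch_length
	}
	fn epoch_start(&self, epoch_index: u64) -> u64 {
		self.genesis_slot + epoch_index * self.epoch_length
	}
	fn slot_index(&self, slot: u64) -> u64 {
		(slot - self.genesis_slot) % self.epoch_length
	}
}

// With `genesis_slot = 100`, `epoch_length = 10` and the test jumping to slot
// `100 + 4 * 10 = 140`, the epoch index is 4, the epoch start is slot 140 and the
// slot index is 0, matching the post-initialization assertions above.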
+
+#[test]
+fn obsolete_tickets_are_removed_on_epoch_change() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+	let pair = &pairs[0];
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+
+	ext.execute_with(|| {
+		let epoch_length = Sassafras::epoch_length() as u64;
+
+		initialize_block(start_block, start_slot, Default::default(), pair);
+
+		let tickets = make_ticket_bodies(10, Some(pair));
+		let mut epoch1_tickets = tickets[..4].to_vec();
+		let mut epoch2_tickets = tickets[4..].to_vec();
+
+		// Persist some tickets for next epoch (N)
+		persist_next_epoch_tickets(&epoch1_tickets);
+		assert_eq!(TicketsMeta::<Test>::get().tickets_count, [0, 4]);
+		// Check next epoch tickets presence
+		epoch1_tickets.sort_by_key(|t| t.0);
+		(0..epoch1_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((1, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), epoch1_tickets[i]);
+		});
+
+		// Advance one epoch to enact the tickets
+		go_to_block(start_block + epoch_length, start_slot + epoch_length, pair);
+		assert_eq!(TicketsMeta::<Test>::get().tickets_count, [0, 4]);
+
+		// Persist some tickets for next epoch (N+1)
+		persist_next_epoch_tickets(&epoch2_tickets);
+		assert_eq!(TicketsMeta::<Test>::get().tickets_count, [6, 4]);
+		epoch2_tickets.sort_by_key(|t| t.0);
+		// Check for this epoch and next epoch tickets presence
+		(0..epoch1_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((1, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), epoch1_tickets[i]);
+		});
+		(0..epoch2_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((0, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), epoch2_tickets[i]);
+		});
+
+		// Advance to epoch 2 and check for cleanup
+
+		go_to_block(start_block + 2 * epoch_length, start_slot + 2 * epoch_length, pair);
+		assert_eq!(TicketsMeta::<Test>::get().tickets_count, [6, 0]);
+
+		(0..epoch1_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((1, i as u32)).unwrap();
+			assert!(TicketsData::<Test>::get(id).is_none());
+		});
+		(0..epoch2_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((0, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), epoch2_tickets[i]);
+		});
+	})
+}
+
+const TICKETS_FILE: &str = "src/data/25_tickets_100_auths.bin";
+
+fn data_read<T: Decode>(filename: &str) -> T {
+	use std::{fs::File, io::Read};
+	let mut file = File::open(filename).unwrap();
+	let mut buf = Vec::new();
+	file.read_to_end(&mut buf).unwrap();
+	T::decode(&mut &buf[..]).unwrap()
+}
+
+fn data_write<T: Encode>(filename: &str, data: T) {
+	use std::{fs::File, io::Write};
+	let mut file = File::create(filename).unwrap();
+	let buf = data.encode();
+	file.write_all(&buf).unwrap();
+}
+
+// We don't want to implement anything secure here.
+// Just a trivial shuffle for the tests.
+fn trivial_fisher_yates_shuffle<T>(vector: &mut Vec<T>, random_seed: u64) {
+	let mut rng = random_seed as usize;
+	for i in (1..vector.len()).rev() {
+		let j = rng % (i + 1);
+		vector.swap(i, j);
+		rng = (rng.wrapping_mul(6364793005) + 1) as usize; // Some random number generation
+	}
+}
+
+// For this test we use a set of pre-constructed tickets from a file.
+// Creating a large set of tickets on the fly takes time, and may be annoying
+// for test execution.
+//
+// A valid ring-context is required for this test since we are passing through the
+// `submit_ticket` call which tests for ticket validity.
+#[test]
+fn submit_tickets_with_ring_proof_check_works() {
+	use sp_core::Pair as _;
+	// env_logger::init();
+
+	let (authorities, mut tickets): (Vec<AuthorityId>, Vec<TicketEnvelope>) =
+		data_read(TICKETS_FILE);
+
+	// Also checks that duplicates are discarded
+	tickets.extend(tickets.clone());
+	trivial_fisher_yates_shuffle(&mut tickets, 321);
+
+	let (pairs, mut ext) = new_test_ext_with_pairs(authorities.len(), true);
+	let pair = &pairs[0];
+	// Check if deserialized data has been generated for the correct set of authorities...
+	assert!(authorities.iter().zip(pairs.iter()).all(|(auth, pair)| auth == &pair.public()));
+
+	ext.execute_with(|| {
+		let start_slot = Slot::from(0);
+		let start_block = 1;
+
+		// Tweak the config to discard ~half of the tickets.
+		let mut config = EpochConfig::<Test>::get();
+		config.redundancy_factor = 25;
+		EpochConfig::<Test>::set(config);
+
+		initialize_block(start_block, start_slot, Default::default(), pair);
+		NextRandomness::<Test>::set([0; 32]);
+
+		// Check state before tickets submission
+		assert_eq!(
+			TicketsMeta::<Test>::get(),
+			TicketsMetadata { unsorted_tickets_count: 0, tickets_count: [0, 0] },
+		);
+
+		// Submit the tickets
+		let max_tickets_per_call = Sassafras::epoch_length() as usize;
+		tickets.chunks(max_tickets_per_call).for_each(|chunk| {
+			let chunk = BoundedVec::truncate_from(chunk.to_vec());
+			Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap();
+		});
+
+		// Check state after submission
+		assert_eq!(
+			TicketsMeta::<Test>::get(),
+			TicketsMetadata { unsorted_tickets_count: 16, tickets_count: [0, 0] },
+		);
+		assert_eq!(UnsortedSegments::<Test>::get(0).len(), 16);
+		assert_eq!(UnsortedSegments::<Test>::get(1).len(), 0);
+
+		finalize_block(start_block);
+	})
+}
+
+#[test]
+#[ignore = "test tickets data generator"]
+fn make_tickets_data() {
+	use super::*;
+	use sp_core::crypto::Pair;
+
+	// Number of authorities who produce tickets (for the sake of this test)
+	let tickets_authors_count = 5;
+	// Total number of authorities (the ring)
+	let authorities_count = 100;
+	let (pairs, mut ext) = new_test_ext_with_pairs(authorities_count, true);
+
+	let authorities: Vec<_> = pairs.iter().map(|sk| sk.public()).collect();
+
+	ext.execute_with(|| {
+		let config = EpochConfig::<Test>::get();
+
+		let tickets_count = tickets_authors_count * config.attempts_number as usize;
+		let mut tickets = Vec::with_capacity(tickets_count);
+
+		// Construct pre-built tickets with a well known `NextRandomness` value.
+ NextRandomness::::set([0; 32]); + + println!("Constructing {} tickets", tickets_count); + pairs.iter().take(tickets_authors_count).enumerate().for_each(|(i, pair)| { + let t = make_tickets(config.attempts_number, pair); + tickets.extend(t); + println!("{:.2}%", 100f32 * ((i + 1) as f32 / tickets_authors_count as f32)); + }); + + data_write(TICKETS_FILE, (authorities, tickets)); + }); +} diff --git a/substrate/frame/sassafras/src/weights.rs b/substrate/frame/sassafras/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..32ea2d29a180b310c944d50014fa4b61f7d7d88b --- /dev/null +++ b/substrate/frame/sassafras/src/weights.rs @@ -0,0 +1,425 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_sassafras` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-16, STEPS: `20`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `behemoth`, CPU: `AMD Ryzen Threadripper 3970X 32-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/release/node-template +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_sassafras +// --extrinsic +// * +// --steps +// 20 +// --repeat +// 3 +// --output +// weights.rs +// --template +// substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_sassafras`. +pub trait WeightInfo { + fn on_initialize() -> Weight; + fn enact_epoch_change(x: u32, y: u32, ) -> Weight; + fn submit_tickets(x: u32, ) -> Weight; + fn plan_config_change() -> Weight; + fn update_ring_verifier(x: u32, ) -> Weight; + fn load_ring_context() -> Weight; + fn sort_segments(x: u32, ) -> Weight; +} + +/// Weights for `pallet_sassafras` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `System::Digest` (r:1 w:1)
+	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Sassafras::NextRandomness` (r:1 w:0)
+	/// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextAuthorities` (r:1 w:0)
+	/// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::CurrentRandomness` (r:1 w:0)
+	/// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochIndex` (r:1 w:0)
+	/// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1)
+	/// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::CurrentSlot` (r:0 w:1)
+	/// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1)
+	/// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::GenesisSlot` (r:0 w:1)
+	/// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	fn on_initialize() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `302`
+		//  Estimated: `4787`
+		// Minimum execution time: 438_039_000 picoseconds.
+ Weight::from_parts(439_302_000, 4787) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:1) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:1) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) + /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:1) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0) + /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextEpochConfig` (r:1 w:1) + /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1) + /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) + /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:79 w:79) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsIds` (r:5000 w:200) + /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::Authorities` (r:0 w:1) + /// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:0 w:9896) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochConfig` (r:0 w:1) + /// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentRandomness` (r:0 w:1) + /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + /// The range of component `y` is `[1000, 5000]`. 
+ fn enact_epoch_change(x: u32, y: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `594909 + x * (33 ±0) + y * (53 ±0)` + // Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)` + // Minimum execution time: 121_279_846_000 picoseconds. + Weight::from_parts(94_454_851_972, 593350) + // Standard Error: 24_177_301 + .saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into())) + // Standard Error: 601_053 + .saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) + .saturating_add(T::DbWeight::get().writes(112_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(y.into()))) + .saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into())) + } + /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:0) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:1 w:0) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextEpochConfig` (r:1 w:0) + /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:0) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:25 w:25) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) + /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:1 w:1) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 25]`. + fn submit_tickets(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3869` + // Estimated: `5519 + x * (2559 ±0)` + // Minimum execution time: 36_904_934_000 picoseconds. 
+ Weight::from_parts(25_822_957_295, 5519) + // Standard Error: 11_047_832 + .saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into())) + } + /// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1) + /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn plan_config_change() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_038_000 picoseconds. + Weight::from_parts(4_499_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + fn update_ring_verifier(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `590485` + // Estimated: `591809` + // Minimum execution time: 105_121_424_000 picoseconds. + Weight::from_parts(105_527_334_385, 591809) + // Standard Error: 2_933_910 + .saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + fn load_ring_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `590485` + // Estimated: `591809` + // Minimum execution time: 44_005_681_000 picoseconds. + Weight::from_parts(44_312_079_000, 591809) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) + /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:100 w:100) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsIds` (r:0 w:200) + /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:0 w:12600) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + fn sort_segments(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `222 + x * (2060 ±0)` + // Estimated: `4687 + x * (4529 ±0)` + // Minimum execution time: 183_501_000 picoseconds. 
+ Weight::from_parts(183_501_000, 4687) + // Standard Error: 1_426_363 + .saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(T::DbWeight::get().writes((129_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into())) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:0) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentRandomness` (r:1 w:0) + /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:0) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1) + /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentSlot` (r:0 w:1) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1) + /// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:0 w:1) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn on_initialize() -> Weight { + // Proof Size summary in bytes: + // Measured: `302` + // Estimated: `4787` + // Minimum execution time: 438_039_000 picoseconds. 
+ Weight::from_parts(439_302_000, 4787) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:1) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:1) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) + /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:1) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0) + /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextEpochConfig` (r:1 w:1) + /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1) + /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) + /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:79 w:79) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsIds` (r:5000 w:200) + /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::Authorities` (r:0 w:1) + /// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:0 w:9896) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochConfig` (r:0 w:1) + /// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentRandomness` (r:0 w:1) + /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + /// The range of component `y` is `[1000, 5000]`. 
+ fn enact_epoch_change(x: u32, y: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `594909 + x * (33 ±0) + y * (53 ±0)` + // Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)` + // Minimum execution time: 121_279_846_000 picoseconds. + Weight::from_parts(94_454_851_972, 593350) + // Standard Error: 24_177_301 + .saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into())) + // Standard Error: 601_053 + .saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) + .saturating_add(RocksDbWeight::get().writes(112_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(y.into()))) + .saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into())) + } + /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:0) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:1 w:0) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextEpochConfig` (r:1 w:0) + /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:0) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:25 w:25) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) + /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:1 w:1) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 25]`. + fn submit_tickets(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3869` + // Estimated: `5519 + x * (2559 ±0)` + // Minimum execution time: 36_904_934_000 picoseconds. 
+ Weight::from_parts(25_822_957_295, 5519) + // Standard Error: 11_047_832 + .saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into())) + } + /// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1) + /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn plan_config_change() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_038_000 picoseconds. + Weight::from_parts(4_499_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + fn update_ring_verifier(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `590485` + // Estimated: `591809` + // Minimum execution time: 105_121_424_000 picoseconds. + Weight::from_parts(105_527_334_385, 591809) + // Standard Error: 2_933_910 + .saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + fn load_ring_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `590485` + // Estimated: `591809` + // Minimum execution time: 44_005_681_000 picoseconds. + Weight::from_parts(44_312_079_000, 591809) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) + /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:100 w:100) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsIds` (r:0 w:200) + /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:0 w:12600) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + fn sort_segments(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `222 + x * (2060 ±0)` + // Estimated: `4687 + x * (4529 ±0)` + // Minimum execution time: 183_501_000 picoseconds. 
+ Weight::from_parts(183_501_000, 4687) + // Standard Error: 1_426_363 + .saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(RocksDbWeight::get().writes((129_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into())) + } +} diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 6aa81baf7ac69e90db864228ac6004a178251f62..c27276c607e6c44828388e5e4346cfe82bc051af 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -9,26 +9,29 @@ repository.workspace = true description = "FRAME Scheduler pallet" readme = "README.md" +[lints] +workspace = true + [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-weights = { path = "../../primitives/weights", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-weights = { path = "../../primitives/weights", default-features = false } docify = "0.2.6" [dev-dependencies] pallet-preimage = { path = "../preimage" } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } substrate-test-utils = { path = "../../test-utils" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking", "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/scheduler/src/migration.rs b/substrate/frame/scheduler/src/migration.rs index 9c8b0da03fc0b6368b2734c07db164a4ccc3b683..76e2e04b49cc6fc7d9f886090de4df44911c0333 100644 --- a/substrate/frame/scheduler/src/migration.rs +++ b/substrate/frame/scheduler/src/migration.rs @@ -105,7 +105,7 @@ pub mod v3 { // Check that no agenda overflows `MaxScheduledPerBlock`. let max_scheduled_per_block = T::MaxScheduledPerBlock::get() as usize; for (block_number, agenda) in Agenda::::iter() { - if agenda.iter().cloned().filter_map(|s| s).count() > max_scheduled_per_block { + if agenda.iter().cloned().flatten().count() > max_scheduled_per_block { log::error!( target: TARGET, "Would truncate agenda of block {:?} from {} items to {} items.", @@ -119,7 +119,7 @@ pub mod v3 { // Check that bounding the calls will not overflow `MAX_LENGTH`. 
let max_length = T::Preimages::MAX_LENGTH as usize; for (block_number, agenda) in Agenda::::iter() { - for schedule in agenda.iter().cloned().filter_map(|s| s) { + for schedule in agenda.iter().cloned().flatten() { match schedule.call { frame_support::traits::schedule::MaybeHashed::Value(call) => { let l = call.using_encoded(|c| c.len()); @@ -362,7 +362,7 @@ mod test { Some(ScheduledV3Of:: { maybe_id: Some(vec![i as u8; 320]), priority: 123, - call: MaybeHashed::Hash(undecodable_hash.clone()), + call: MaybeHashed::Hash(undecodable_hash), maybe_periodic: Some((4u64, 20)), origin: root(), _phantom: PhantomData::::default(), diff --git a/substrate/frame/scheduler/src/mock.rs b/substrate/frame/scheduler/src/mock.rs index b6eb1d044fa2321935b0bbd74ae6e89b1114bf3c..4edcfa0a7bfd8b8b48d408a99567e20deb5c0853 100644 --- a/substrate/frame/scheduler/src/mock.rs +++ b/substrate/frame/scheduler/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as scheduler; use frame_support::{ - ord_parameter_types, parameter_types, + derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EitherOfDiverse, EqualPrivilegeOnly, OnFinalize, OnInitialize, }, @@ -118,6 +118,8 @@ parameter_types! { Weight::from_parts(2_000_000_000_000, u64::MAX), ); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = BlockWeights; diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index 81707382693fbbdc9d4aa8797b32031ead10b834..7a534ddd79d1a54264cb39493cae9c165068c9f5 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -9,24 +9,27 @@ repository.workspace = true description = "FRAME pallet for scored pools" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/scored-pool/src/mock.rs b/substrate/frame/scored-pool/src/mock.rs index 32a66c0cdc5cbd9abc43ac31a5f4f8d22567202b..6c032ab808ccab337e1da6c03e6ff14d79fa66f8 100644 --- a/substrate/frame/scored-pool/src/mock.rs +++ b/substrate/frame/scored-pool/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_scored_pool; use frame_support::{ - construct_runtime, ord_parameter_types, parameter_types, + construct_runtime, derive_impl, ord_parameter_types, 
parameter_types, traits::{ConstU32, ConstU64}, }; use frame_system::EnsureSignedBy; @@ -51,6 +51,7 @@ ord_parameter_types! { pub const ScoreOrigin: u64 = 3; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index 246dec63bbab5250762f89e3ac3db0762601e1b0..4589dbb427a01697fc9749df0f65ae04c80b637c 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME sessions pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,21 +20,21 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-timestamp = { path = "../timestamp", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-timestamp = { path = "../timestamp", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false} +sp-session = { path = "../../primitives/session", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} -sp-trie = { path = "../../primitives/trie", default-features = false, optional = true} -sp-state-machine = { path = "../../primitives/state-machine", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +sp-trie = { path = "../../primitives/trie", default-features = false, optional = true } +sp-state-machine = { path = "../../primitives/state-machine", default-features = false } [features] -default = [ "historical", "std" ] -historical = [ "sp-trie" ] +default = ["historical", "std"] +historical = ["sp-trie"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index 87f08985138d4a3f25aaba9dc526deffd0013466..16f85048d8d281366776205b15dccb304a7a1464 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -9,20 +9,23 @@ repository.workspace = true description = "FRAME sessions pallet benchmarking" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } rand = { version = "0.8.5", default-features = false, features = ["std_rng"] } -frame-benchmarking = { path = "../../benchmarking", default-features = 
false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-session = { path = "..", default-features = false} -pallet-staking = { path = "../../staking", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-session = { path = "../../../primitives/session", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-session = { path = "..", default-features = false } +pallet-staking = { path = "../../staking", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-session = { path = "../../../primitives/session", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } @@ -35,7 +38,7 @@ sp-core = { path = "../../../primitives/core" } sp-io = { path = "../../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-benchmarking/std", "frame-election-provider-support/std", diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 47c337569a0274dc95d885173ccf27f87ffb71e3..e1744fa43abbb7e109c9a30d2466b36f6c06fb89 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -24,7 +24,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_runtime::{traits::IdentityLookup, BuildStorage}; @@ -45,6 +45,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -178,6 +179,7 @@ impl pallet_staking::Config for Test { type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; type HistoryDepth = ConstU32<84>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/substrate/frame/session/src/historical/offchain.rs b/substrate/frame/session/src/historical/offchain.rs index 1b4d53b74b45e64e958bc2703ca8aab6297c7ef4..95f4d762949eeaa022a998b7d54693189a757c27 100644 --- a/substrate/frame/session/src/historical/offchain.rs +++ b/substrate/frame/session/src/historical/offchain.rs @@ -17,13 +17,11 @@ //! Off-chain logic for creating a proof based data provided by on-chain logic. //! -//! Validator-set extracting an iterator from an off-chain worker stored list containing -//! historical validator-sets. -//! Based on the logic of historical slashing, but the validation is done off-chain. +//! Validator-set extracting an iterator from an off-chain worker stored list containing historical +//! validator-sets. Based on the logic of historical slashing, but the validation is done off-chain. //! 
Use [`fn store_current_session_validator_set_to_offchain()`](super::onchain) to store the -//! required data to the offchain validator set. -//! This is used in conjunction with [`ProvingTrie`](super::ProvingTrie) and -//! the off-chain indexing API. +//! required data to the offchain validator set. This is used in conjunction with [`ProvingTrie`] +//! and the off-chain indexing API. use sp_runtime::{ offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, diff --git a/substrate/frame/session/src/mock.rs b/substrate/frame/session/src/mock.rs index 2db54e1a59756c7915e9e8b03c8a5fa8728afcb7..f3f18fde168c4a2dedcdeccc6880430ce0a12b42 100644 --- a/substrate/frame/session/src/mock.rs +++ b/substrate/frame/session/src/mock.rs @@ -35,7 +35,7 @@ use sp_staking::SessionIndex; use sp_state_machine::BasicExternalities; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; @@ -232,6 +232,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { sp_io::TestExternalities::new(t) } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index 654447e6893bbf748554bf50b4a6892ffff83e63..46b4f7a7d6621b1d44d3edc92968168d9960fa2f 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME society pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,13 +21,13 @@ rand_chacha = { version = "0.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } [dev-dependencies] frame-support-test = { path = "../support/test" } @@ -33,7 +36,7 @@ sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/society/src/mock.rs b/substrate/frame/society/src/mock.rs index 0bee08236f74a1d1e11ab8969047a5bb6966da44..3e29d01ca8e36b7512e96cb46280a5bf3b7f1a41 100644 --- a/substrate/frame/society/src/mock.rs +++ 
b/substrate/frame/society/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_society; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64}, }; use frame_support_test::TestRandomness; @@ -58,6 +58,7 @@ ord_parameter_types! { pub const MaxBids: u32 = 10; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index 1a8350405a890e32cb32ce862bc76da2569148eb..ddf4e7ea2c4fc3a4468ad2b07fdc196d280809aa 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -15,10 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! > Made for [![polkadot]](https://polkadot.network) -//! -//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white -//! //! # FRAME //! //! ```no_compile @@ -34,14 +30,21 @@ //! > **F**ramework for **R**untime **A**ggregation of **M**odularized **E**ntities: Substrate's //! > State Transition Function (Runtime) Framework. //! +//! ## Documentation +//! +//! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). +//! //! ## Warning: Experimental //! //! This crate and all of its content is experimental, and should not yet be used in production. //! -//! ## Getting Started +//! ## Underlying dependencies //! -//! TODO: link to `developer_hub::polkadot_sdk::frame`. The `developer_hub` hasn't been published -//! yet, this can be updated once it is linkable. +//! This crate is an amalgamation of multiple other crates that are often used together to compose a +//! pallet. It is not necessary to use it, and it may fall short for certain purposes. +//! +//! In short, this crate only re-exports types and traits from multiple sources. All of these +//! sources are listed (and re-exported again) in [`deps`]. #![cfg_attr(not(feature = "std"), no_std)] #![cfg(feature = "experimental")] @@ -54,9 +57,19 @@ /// `#[pallet::bar]` inside the mod. pub use frame_support::pallet; +pub use frame_support::pallet_macros::{import_section, pallet_section}; + /// The logging library of the runtime. Can normally be the classic `log` crate. pub use log; +/// A list of all macros used within the main [`pallet`] macro. +/// +/// Note: All of these macros are "stubs" and not really usable outside `#[pallet] mod pallet { .. +/// }`. They are mainly provided for documentation and IDE support. +pub mod pallet_macros { + pub use frame_support::{derive_impl, pallet, pallet_macros::*}; +} + /// The main prelude of FRAME. /// /// This prelude should almost always be the first line of code in any pallet or runtime. @@ -78,9 +91,6 @@ pub mod prelude { /// Pallet prelude of `frame-support`. /// /// Note: this needs to revised once `frame-support` evolves. - // `frame-support` will be break down https://github.com/paritytech/polkadot-sdk/issues/127 and its reexports will - // most likely change. These wildcard reexportings can be optimized once `frame-support` has - // changed. #[doc(no_inline)] pub use frame_support::pallet_prelude::*; @@ -156,6 +166,9 @@ pub mod runtime { /// Types to define your runtime version. 
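The `derive_impl` macro re-exported through `pallet_macros` above is the same attribute the mock runtimes elsewhere in this diff now place on their `frame_system::Config` impls. A minimal sketch of the pattern, assuming a hypothetical `Test` runtime and `Block` type rather than any particular mock in this patch:

```rust
// Sketch only: `Test` and `Block` are assumed to come from a `construct_runtime!`-based mock,
// and `pallet_balances` is only used to show a typical override.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
	// Most items not listed here can be filled in from `TestDefaultConfig`;
	// only items without a sensible default, or deliberate overrides, remain.
	type Block = Block;
	type AccountData = pallet_balances::AccountData<u64>;
}
```

The mocks touched in this diff keep their full list of associated types for now; the attribute only has to be in place so that later cleanups can start dropping the defaulted items.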
pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion}; + /// Macro to implement runtime APIs. + pub use sp_api::impl_runtime_apis; + #[cfg(feature = "std")] pub use sp_version::NativeVersion; } @@ -180,9 +193,6 @@ pub mod runtime { pub use sp_inherents::{CheckInherentsResult, InherentData}; pub use sp_runtime::ApplyExtrinsicResult; - /// Macro to implement runtime APIs. - pub use sp_api::impl_runtime_apis; - pub use frame_system_rpc_runtime_api::*; pub use sp_api::{self, *}; pub use sp_block_builder::*; diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index c5cac9fefa792cc648e13312827c8ffbc5079577..31831fd7ed2278567f306f973408486ea63e4f30 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -9,27 +9,30 @@ repository.workspace = true description = "FRAME pallet staking" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"]} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } pallet-session = { path = "../session", default-features = false, features = [ "historical", -]} -pallet-authorship = { path = "../authorship", default-features = false} +] } +pallet-authorship = { path = "../authorship", default-features = false } sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -frame-election-provider-support = { path = "../election-provider-support", default-features = false} +frame-election-provider-support = { path = "../election-provider-support", default-features = false } log = { version = "0.4.17", default-features = false } # Optional imports for benchmarking @@ -50,7 +53,7 @@ frame-election-provider-support = { path = "../election-provider-support" } rand_chacha = { version = "0.2" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/staking/README.md b/substrate/frame/staking/README.md index 8c91cfcaa7fa386a2e7f403e7a7854e196888910..2938e2fe77066032e3f8d87fe126214b49ea7cbe 100644 --- a/substrate/frame/staking/README.md +++ b/substrate/frame/staking/README.md @@ -24,7 +24,7 @@ be found not to be discharging its duties properly. - Nominating: The process of placing staked funds behind one or more validators in order to share in any reward, and punishment, they take. 
- Stash account: The account holding an owner's funds used for staking. -- Controller account: The account that controls an owner's funds for staking. +- Controller account (being deprecated): The account that controls an owner's funds for staking. - Era: A (whole) number of sessions, which is the period that the validator set (and each validator's active nominator set) is recalculated and where rewards are paid out. - Slash: The punishment of a staker by reducing its funds. @@ -45,10 +45,10 @@ The staking system in Substrate NPoS is designed to make the following possible: Almost any interaction with the Staking module requires a process of _**bonding**_ (also known as being a _staker_). To become *bonded*, a fund-holding account known as the _stash account_, which holds some or all of the funds that become -frozen in place as part of the staking process, is paired with an active **controller** account, which issues -instructions on how they shall be used. +frozen in place as part of the staking process, is required. The controller account, which this pallet now assigns to be the stash account itself, +issues instructions on how the funds shall be used. -An account pair can become bonded using the +An account can become a bonded stash account using the [`bond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.bond) call. Stash accounts can update their associated controller back to their stash account using the @@ -231,8 +231,8 @@ following: Any funds already placed into stash can be the target of the following operations: The controller account can free a portion (or all) of the funds using the -[`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond) call. Note that the funds -are not immediately accessible. Instead, a duration denoted by +[`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond) call. Note that the +funds are not immediately accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.BondingDuration) (in number of eras) must pass until the funds can actually be removed.
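A sketch of that call sequence in the style of the pallet's tests (the account id, amounts and the `Staking`/`RuntimeOrigin` names are assumptions about a mock runtime, not part of this README):

```rust
// Sketch only, assuming a test externalities environment with a funded account `11`.
let stash = 11_u64;
assert_ok!(Staking::bond(RuntimeOrigin::signed(stash), 1_000, RewardDestination::Stash));
// Later, schedule part of the bond for unlock...
assert_ok!(Staking::unbond(RuntimeOrigin::signed(stash), 400));
// ...and only after `BondingDuration` eras have passed can the chunk actually be withdrawn.
assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(stash), 0));
```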
Once the `BondingDuration` is over, the [`withdraw_unbonded`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.withdraw_unbonded) diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index 0a725996115990b36e81b7f538f35e2f551dfe6d..c21b79bc2e57584fa6e5a4f0bb36bd8814e92d8b 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Reward Curve for FRAME staking pallet" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.1" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "visit"] } +syn = { version = "2.0.41", features = ["full", "visit"] } [dev-dependencies] sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/staking/reward-fn/Cargo.toml b/substrate/frame/staking/reward-fn/Cargo.toml index 25f4c33dd62bb7af47ae9666a7b41cce4d07b72b..80a27cc0f5340cdfc849503ec0399f39062769f4 100644 --- a/substrate/frame/staking/reward-fn/Cargo.toml +++ b/substrate/frame/staking/reward-fn/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Reward function for FRAME staking pallet" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,8 +18,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { version = "0.4.17", default-features = false } -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false} +sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } [features] -default = [ "std" ] -std = [ "log/std", "sp-arithmetic/std" ] +default = ["std"] +std = ["log/std", "sp-arithmetic/std"] diff --git a/substrate/frame/staking/runtime-api/Cargo.toml b/substrate/frame/staking/runtime-api/Cargo.toml index 746b463b8ce25d1f72980e269e1fa5e57dab2237..b3fd4cfda017f2dff72aee122c44906d7db59d0e 100644 --- a/substrate/frame/staking/runtime-api/Cargo.toml +++ b/substrate/frame/staking/runtime-api/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "RPC runtime API for transaction payment FRAME pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,5 +21,5 @@ sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../pri sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } [features] -default = [ "std" ] -std = [ "codec/std", "sp-api/std", "sp-staking/std" ] +default = ["std"] +std = ["codec/std", "sp-api/std", "sp-staking/std"] diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index 05c6bc39709779c39cd761a68544a81d31a6946c..f1159c06aa11da252f4c448e47d8a968183ad93d 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -25,6 +25,7 @@ use codec::Decode; use frame_election_provider_support::{bounds::DataProviderBounds, SortedListProvider}; use frame_support::{ pallet_prelude::*, + storage::bounded_vec::BoundedVec, traits::{Currency, Get, Imbalance, UnfilteredDispatchable}, }; use sp_runtime::{ @@ -249,7 +250,7 @@ 
benchmarks! { let original_bonded: BalanceOf = Ledger::::get(&controller).map(|l| l.active).ok_or("ledger not created after")?; - T::Currency::deposit_into_existing(&stash, max_additional).unwrap(); + let _ = T::Currency::deposit_into_existing(&stash, max_additional).unwrap(); whitelist_account!(stash); }: _(RawOrigin::Signed(stash), max_additional) @@ -464,16 +465,28 @@ benchmarks! { } set_payee { - let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + let (stash, controller) = create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; assert_eq!(Payee::::get(&stash), RewardDestination::Staked); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller), RewardDestination::Controller) + }: _(RawOrigin::Signed(controller.clone()), RewardDestination::Account(controller.clone())) verify { - assert_eq!(Payee::::get(&stash), RewardDestination::Controller); + assert_eq!(Payee::::get(&stash), RewardDestination::Account(controller)); + } + + update_payee { + let (stash, controller) = create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; + Payee::::insert(&stash, { + #[allow(deprecated)] + RewardDestination::Controller + }); + whitelist_account!(controller); + }: _(RawOrigin::Signed(controller.clone()), controller.clone()) + verify { + assert_eq!(Payee::::get(&stash), RewardDestination::Account(controller)); } set_controller { - let (stash, ctlr) = create_unique_stash_controller::(9000, 100, Default::default(), false)?; + let (stash, ctlr) = create_unique_stash_controller::(9000, 100, RewardDestination::Staked, false)?; // ensure `ctlr` is the currently stored controller. assert!(!Ledger::::contains_key(&stash)); assert!(Ledger::::contains_key(&ctlr)); @@ -513,6 +526,39 @@ benchmarks! { assert_eq!(Invulnerables::::get().len(), v as usize); } + deprecate_controller_batch { + // We pass a dynamic number of controllers to the benchmark, up to + // `MaxControllersInDeprecationBatch`. + let i in 0 .. T::MaxControllersInDeprecationBatch::get(); + + let mut controllers: Vec<_> = vec![]; + let mut stashes: Vec<_> = vec![]; + for n in 0..i as u32 { + let (stash, controller) = create_unique_stash_controller::( + n, + 100, + RewardDestination::Staked, + false + )?; + controllers.push(controller); + stashes.push(stash); + } + let bounded_controllers: BoundedVec<_, T::MaxControllersInDeprecationBatch> = + BoundedVec::try_from(controllers.clone()).unwrap(); + }: _(RawOrigin::Root, bounded_controllers) + verify { + for n in 0..i as u32 { + let stash = &stashes[n as usize]; + let controller = &controllers[n as usize]; + // Ledger no longer keyed by controller. + assert_eq!(Ledger::::get(controller), None); + // Bonded now maps to the stash. + assert_eq!(Bonded::::get(stash), Some(stash.clone())); + // Ledger is now keyed by stash. + assert_eq!(Ledger::::get(stash).unwrap().stash, *stash); + } + } + force_unstake { // Slashing Spans let s in 0 .. MAX_SPANS; @@ -551,40 +597,6 @@ benchmarks! { assert_eq!(UnappliedSlashes::::get(&era).len(), (MAX_SLASHES - s) as usize); } - payout_stakers_dead_controller { - let n in 0 .. T::MaxExposurePageSize::get() as u32; - let (validator, nominators) = create_validator_with_nominators::( - n, - T::MaxExposurePageSize::get() as u32, - true, - true, - RewardDestination::Controller, - )?; - - let current_era = CurrentEra::::get().unwrap(); - // set the commission for this particular era as well. 
- >::insert(current_era, validator.clone(), >::validators(&validator)); - - let caller = whitelisted_caller(); - let validator_controller = >::get(&validator).unwrap(); - let balance_before = T::Currency::free_balance(&validator_controller); - for (_, controller) in &nominators { - let balance = T::Currency::free_balance(controller); - ensure!(balance.is_zero(), "Controller has balance, but should be dead."); - } - }: payout_stakers_by_page(RawOrigin::Signed(caller), validator, current_era, 0) - verify { - let balance_after = T::Currency::free_balance(&validator_controller); - ensure!( - balance_before < balance_after, - "Balance of validator controller should have increased after payout.", - ); - for (_, controller) in &nominators { - let balance = T::Currency::free_balance(controller); - ensure!(!balance.is_zero(), "Payout not given to controller."); - } - } - payout_stakers_alive_staked { let n in 0 .. T::MaxExposurePageSize::get() as u32; let (validator, nominators) = create_validator_with_nominators::( @@ -895,7 +907,7 @@ benchmarks! { )?; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), controller) + }: _(RawOrigin::Signed(caller), stash.clone()) verify { assert!(!T::VoterList::contains(&stash)); } diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 9e4697e845b61c572ee1798e17a8c187b157b3c4..41cb2a12c3a32861c1d9f093c5126e58076221ad 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -41,7 +41,7 @@ //! - Nominating: The process of placing staked funds behind one or more validators in order to //! share in any reward, and punishment, they take. //! - Stash account: The account holding an owner's funds used for staking. -//! - Controller account: The account that controls an owner's funds for staking. +//! - Controller account (being deprecated): The account that controls an owner's funds for staking. //! - Era: A (whole) number of sessions, which is the period that the validator set (and each //! validator's active nominator set) is recalculated and where rewards are paid out. //! - Slash: The punishment of a staker by reducing its funds. @@ -61,20 +61,20 @@ //! //! Almost any interaction with the Staking pallet requires a process of _**bonding**_ (also known //! as being a _staker_). To become *bonded*, a fund-holding register known as the _stash account_, -//! which holds some or all of the funds that become frozen in place as part of the staking process, -//! is paired with an active **controller** account, which issues instructions on how they shall be -//! used. +//! which holds some or all of the funds that become frozen in place as part of the staking process, +//! is required. The controller account, which this pallet now assigns to be the stash account itself, +//! issues instructions on how the funds shall be used. //! -//! An account pair can become bonded using the [`bond`](Call::bond) call. +//! An account can become a bonded stash account using the [`bond`](Call::bond) call. //! -//! Stash accounts can update their associated controller back to the stash account using the +//! Stash accounts that registered a unique controller account before the controller deprecation can +//! update their associated controller back to the stash account using the //! [`set_controller`](Call::set_controller) call. //! //! There are three possible roles that any staked account pair can be in: `Validator`, `Nominator` -//! and `Idle` (defined in [`StakerStatus`]). There are three -//!
corresponding instructions to change between roles, namely: -//! [`validate`](Call::validate), -//! [`nominate`](Call::nominate), and [`chill`](Call::chill). +//! and `Idle` (defined in [`StakerStatus`]). There are three corresponding instructions to change +//! between roles, namely: [`validate`](Call::validate), [`nominate`](Call::nominate), and +//! [`chill`](Call::chill). //! //! #### Validating //! @@ -85,14 +85,13 @@ //! _might_ get elected at the _next era_ as a validator. The result of the election is determined //! by nominators and their votes. //! -//! An account can become a validator candidate via the -//! [`validate`](Call::validate) call. +//! An account can become a validator candidate via the [`validate`](Call::validate) call. //! //! #### Nomination //! //! A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on -//! a set of validators to be elected. Once interest in nomination is stated by an account, it -//! takes effect at the next election round. The funds in the nominator's stash account indicate the +//! a set of validators to be elected. Once interest in nomination is stated by an account, it takes +//! effect at the next election round. The funds in the nominator's stash account indicate the //! _weight_ of its vote. Both the rewards and any punishment that a validator earns are shared //! between the validator and its nominators. This rule incentivizes the nominators to NOT vote for //! the misbehaving/offline validators as much as possible, simply because the nominators will also @@ -104,8 +103,8 @@ //! //! Staking is closely related to elections; actual validators are chosen from among all potential //! validators via election by the potential validators and nominators. To reduce use of the phrase -//! "potential validators and nominators", we often use the term **voters**, who are simply -//! the union of potential validators and nominators. +//! "potential validators and nominators", we often use the term **voters**, who are simply the +//! union of potential validators and nominators. //! //! #### Rewards and Slash //! @@ -117,10 +116,9 @@ //! `payout_stakers`, which pays the reward to the validator as well as its nominators. Only //! [`Config::MaxExposurePageSize`] nominator rewards can be claimed in a single call. When the //! number of nominators exceeds [`Config::MaxExposurePageSize`], then the exposed nominators are -//! stored in multiple pages, with each page containing up to -//! [`Config::MaxExposurePageSize`] nominators. To pay out all nominators, `payout_stakers` must be -//! called once for each available page. Paging exists to limit the i/o cost to mutate storage for -//! each nominator's account. +//! stored in multiple pages, with each page containing up to [`Config::MaxExposurePageSize`] +//! nominators. To pay out all nominators, `payout_stakers` must be called once for each available +//! page. Paging exists to limit the i/o cost to mutate storage for each nominator's account. //! //! Slashing can occur at any point in time, once misbehavior is reported. Once slashing is //! determined, a value is deducted from the balance of the validator and all the nominators who @@ -165,18 +163,18 @@ //! //! #[frame_support::pallet(dev_mode)] //! pub mod pallet { -//! use super::*; -//! use frame_support::pallet_prelude::*; -//! use frame_system::pallet_prelude::*; +//! use super::*; +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; //! -//! #[pallet::pallet] -//! 
pub struct Pallet(_); +//! #[pallet::pallet] +//! pub struct Pallet(_); //! -//! #[pallet::config] -//! pub trait Config: frame_system::Config + staking::Config {} +//! #[pallet::config] +//! pub trait Config: frame_system::Config + staking::Config {} //! -//! #[pallet::call] -//! impl Pallet { +//! #[pallet::call] +//! impl Pallet { //! /// Reward a validator. //! #[pallet::weight(0)] //! pub fn reward_myself(origin: OriginFor) -> DispatchResult { @@ -193,8 +191,8 @@ //! //! ### Era payout //! -//! The era payout is computed using yearly inflation curve defined at -//! [`Config::EraPayout`] as such: +//! The era payout is computed using the yearly inflation curve defined at [`Config::EraPayout`] as +//! such: //! //! ```nocompile //! staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year //! ``` //! This payout is used to reward stakers as defined in next section //! //! ```nocompile //! remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout //! ``` -//! The remaining reward is send to the configurable end-point -//! [`Config::RewardRemainder`]. +//! The remaining reward is sent to the configurable end-point [`Config::RewardRemainder`]. //! //! ### Reward Calculation //! @@ -219,9 +216,8 @@ //! they received during the era. Points are added to a validator using //! [`reward_by_ids`](Pallet::reward_by_ids). //! -//! [`Pallet`] implements -//! [`pallet_authorship::EventHandler`] to add reward -//! points to block producer and block producer of referenced uncles. +//! [`Pallet`] implements [`pallet_authorship::EventHandler`] to add reward points to the block producer +//! and the producers of referenced uncles. //! //! The validator and its nominator split their reward as following: //! @@ -232,29 +228,26 @@ //! validator, proportional to the value staked behind the validator (_i.e._ dividing the //! [`own`](Exposure::own) or [`others`](Exposure::others) by [`total`](Exposure::total) in //! [`Exposure`]). Note that payouts are made in pages with each page capped at -//! [`Config::MaxExposurePageSize`] nominators. The distribution of nominators across -//! pages may be unsorted. The total commission is paid out proportionally across pages based on the -//! total stake of the page. +//! [`Config::MaxExposurePageSize`] nominators. The distribution of nominators across pages may be +//! unsorted. The total commission is paid out proportionally across pages based on the total stake +//! of the page. //! //! All entities who receive a reward have the option to choose their reward destination through the -//! [`Payee`] storage item (see -//! [`set_payee`](Call::set_payee)), to be one of the following: +//! [`Payee`] storage item (see [`set_payee`](Call::set_payee)), to be one of the following: //! -//! - Controller account, (obviously) not increasing the staked value. //! - Stash account, not increasing the staked value. //! - Stash account, also increasing the staked value. +//! - Any other account, sent as free balance. //! //! ### Additional Fund Management Operations //! //! Any funds already placed into stash can be the target of the following operations: //! //! The controller account can free a portion (or all) of the funds using the -//! [`unbond`](Call::unbond) call. Note that the funds are not immediately -//! accessible. Instead, a duration denoted by -//! [`Config::BondingDuration`] (in number of eras) must -//! pass until the funds can actually be removed. Once the `BondingDuration` is over, the -//! [`withdraw_unbonded`](Call::withdraw_unbonded) call can be used to actually -//!
withdraw the funds. +//! [`unbond`](Call::unbond) call. Note that the funds are not immediately accessible. Instead, a +//! duration denoted by [`Config::BondingDuration`] (in number of eras) must pass until the funds +//! can actually be removed. Once the `BondingDuration` is over, the +//! [`withdraw_unbonded`](Call::withdraw_unbonded) call can be used to actually withdraw the funds. //! //! Note that there is a limitation to the number of fund-chunks that can be scheduled to be //! unlocked in the future via [`unbond`](Call::unbond). In case this maximum @@ -274,8 +267,8 @@ //! //! ## GenesisConfig //! -//! The Staking pallet depends on the [`GenesisConfig`]. The -//! `GenesisConfig` is optional and allow to set some initial stakers. +//! The Staking pallet depends on the [`GenesisConfig`]. The `GenesisConfig` is optional and allows +//! setting some initial stakers. //! //! ## Related Modules //! @@ -404,7 +397,9 @@ pub enum RewardDestination { Staked, /// Pay into the stash account, not increasing the amount at stake. Stash, - /// Pay into the controller account. + #[deprecated( + note = "`Controller` will be removed after January 2024. Use `Account(controller)` instead." + )] Controller, /// Pay into a specified account. Account(AccountId), @@ -535,12 +530,12 @@ impl StakingLedger { let mut unlocking_balance = BalanceOf::::zero(); while let Some(last) = self.unlocking.last_mut() { - if unlocking_balance + last.value <= value { + if unlocking_balance.defensive_saturating_add(last.value) <= value { unlocking_balance += last.value; self.active += last.value; self.unlocking.pop(); } else { - let diff = value - unlocking_balance; + let diff = value.defensive_saturating_sub(unlocking_balance); unlocking_balance += diff; self.active += diff; @@ -594,7 +589,7 @@ impl StakingLedger { // for a `slash_era = x`, any chunk that is scheduled to be unlocked at era `x + 28` // (assuming 28 is the bonding duration) onwards should be slashed. - let slashable_chunks_start = slash_era + T::BondingDuration::get(); + let slashable_chunks_start = slash_era.saturating_add(T::BondingDuration::get()); // `Some(ratio)` if this is proportional, with `ratio`, `None` otherwise. In both cases, we // slash first the active chunk, and then `slash_chunks_priority`. @@ -729,7 +724,7 @@ pub struct Nominations { /// This is useful where we need to take into account the validator's own stake and total exposure /// in consideration, in addition to the individual nominators backing them. #[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Eq)] -struct PagedExposure { +pub struct PagedExposure { exposure_metadata: PagedExposureMetadata, exposure_page: ExposurePage, } @@ -1022,7 +1017,7 @@ where /// Wrapper struct for Era related information. It is not a pure encapsulation as these storage /// items can be accessed directly but nevertheless, its recommended to use `EraInfo` where we /// can and add more functions to it as needed. -pub(crate) struct EraInfo(sp_std::marker::PhantomData); +pub struct EraInfo(sp_std::marker::PhantomData); impl EraInfo { /// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy /// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be @@ -1052,7 +1047,7 @@ impl EraInfo { /// /// This builds a paged exposure from `PagedExposureMetadata` and `ExposurePage` of the /// validator. For older non-paged exposure, it returns the clipped exposure directly.
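	// Worked example of the paging arithmetic used by `EraInfo` (illustrative numbers only, not
	// taken from this codebase): with `MaxExposurePageSize = 64` and 130 exposed nominators, the
	// exposure is split into ceil(130 / 64) = (130 + 63) / 64 = 3 pages, so `payout_stakers` has
	// to be called three times (pages 0, 1 and 2) to reward every nominator for that era.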
- pub(crate) fn get_paged_exposure( + pub fn get_paged_exposure( era: EraIndex, validator: &T::AccountId, page: Page, @@ -1087,7 +1082,7 @@ impl EraInfo { } /// Get full exposure of the validator at a given era. - pub(crate) fn get_full_exposure( + pub fn get_full_exposure( era: EraIndex, validator: &T::AccountId, ) -> Exposure> { @@ -1181,7 +1176,7 @@ impl EraInfo { } /// Store exposure for elected validators at start of an era. - pub(crate) fn set_exposure( + pub fn set_exposure( era: EraIndex, validator: &T::AccountId, exposure: Exposure>, @@ -1190,8 +1185,9 @@ impl EraInfo { let nominator_count = exposure.others.len(); // expected page count is the number of nominators divided by the page size, rounded up. - let expected_page_count = - nominator_count.defensive_saturating_add(page_size as usize - 1) / page_size as usize; + let expected_page_count = nominator_count + .defensive_saturating_add((page_size as usize).defensive_saturating_sub(1)) + .saturating_div(page_size as usize); let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size); defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count"); diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 84b00254126f7e1898861aad977850bfd045d0bf..311e9667cebc9734156194f528d3e18c7827ebfc 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -81,20 +81,10 @@ pub mod v14 { } } - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, TryRuntimeError> { - frame_support::ensure!( - Pallet::::on_chain_storage_version() == 13, - "Required v13 before upgrading to v14." - ); - - Ok(Default::default()) - } - #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { frame_support::ensure!( - Pallet::::on_chain_storage_version() == 14, + Pallet::::on_chain_storage_version() >= 14, "v14 not applied" ); Ok(()) diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index d2afd8f26e241853624875dcc87448e2661896a1..5332dbfdd5b2d0b1203d71a72fc1e0698becd37f 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -23,7 +23,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, VoteWeight, }; use frame_support::{ - assert_ok, ord_parameter_types, parameter_types, + assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, OnUnbalanced, OneSessionHandler, @@ -122,8 +122,10 @@ parameter_types! 
{ pub static SlashDeferDuration: EraIndex = 0; pub static Period: BlockNumber = 5; pub static Offset: BlockNumber = 0; + pub static MaxControllersInDeprecationBatch: u32 = 5900; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -315,6 +317,7 @@ impl crate::pallet::pallet::Config for Test { type NominationsQuota = WeightedNominationsQuota<16>; type MaxUnlockingChunks = MaxUnlockingChunks; type HistoryDepth = HistoryDepth; + type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type EventListeners = EventListenerMock; type BenchmarkingConfig = TestBenchmarkingConfig; type WeightInfo = (); @@ -594,7 +597,7 @@ pub(crate) fn current_era() -> EraIndex { pub(crate) fn bond(who: AccountId, val: Balance) { let _ = Balances::make_free_balance_be(&who, val); - assert_ok!(Staking::bond(RuntimeOrigin::signed(who), val, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(who), val, RewardDestination::Stash)); } pub(crate) fn bond_validator(who: AccountId, val: Balance) { diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 9c36c94b87b4739d2fda466da586bffa41c9b73a..093cdfdb9cb9c266f474c86a0086f5ba092bb166 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -27,8 +27,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, Defensive, EstimateNextNewSession, Get, Imbalance, Len, OnUnbalanced, TryCollect, - UnixTime, + Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, Len, + OnUnbalanced, TryCollect, UnixTime, }, weights::Weight, }; @@ -148,7 +148,7 @@ impl Pallet { // `consolidate_unlocked` strictly subtracts balance. if new_total < old_total { // Already checked that this won't overflow by entry condition. - let value = old_total - new_total; + let value = old_total.defensive_saturating_sub(new_total); Self::deposit_event(Event::::Withdrawn { stash, amount: value }); } @@ -262,7 +262,8 @@ impl Pallet { // total commission validator takes across all nominator pages let validator_total_commission_payout = validator_commission * validator_total_payout; - let validator_leftover_payout = validator_total_payout - validator_total_commission_payout; + let validator_leftover_payout = + validator_total_payout.defensive_saturating_sub(validator_total_commission_payout); // Now let's calculate how this is split to the validator. 
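		// Illustrative numbers only: with a payout of 1_000 units and a 10% commission, the
		// commission cut is 100 and the leftover is 900; if the validator's own stake is 200 out
		// of an exposed total of 1_000, its staking share is 20% of 900 = 180, and the remaining
		// 720 goes to this page's nominators in proportion to their exposure.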
let validator_exposure_part = Perbill::from_rational(exposure.own(), exposure.total()); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; @@ -338,9 +339,8 @@ impl Pallet { let dest = Self::payee(StakingAccount::Stash(stash.clone())); let maybe_imbalance = match dest { - RewardDestination::Controller => Self::bonded(stash) - .map(|controller| T::Currency::deposit_creating(&controller, amount)), - RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), + RewardDestination::Stash => + T::Currency::deposit_into_existing(stash, amount).ok(), RewardDestination::Staked => Self::ledger(Stash(stash.clone())) .and_then(|mut ledger| { ledger.active += amount; @@ -357,6 +357,14 @@ impl Pallet { RewardDestination::Account(dest_account) => Some(T::Currency::deposit_creating(&dest_account, amount)), RewardDestination::None => None, + #[allow(deprecated)] + RewardDestination::Controller => Self::bonded(stash) + .map(|controller| { + defensive!("Paying out controller as reward destination which is deprecated and should be migrated."); + // This should never happen once payees with a `Controller` variant have been migrated. + // But if it does, just pay the controller account. + T::Currency::deposit_creating(&controller, amount) + }), }; maybe_imbalance .map(|imbalance| (imbalance, Self::payee(StakingAccount::Stash(stash.clone())))) @@ -468,7 +476,7 @@ impl Pallet { bonded.push((active_era, start_session)); if active_era > bonding_duration { - let first_kept = active_era - bonding_duration; + let first_kept = active_era.defensive_saturating_sub(bonding_duration); // Prune out everything that's from before the first-kept index. let n_to_prune = @@ -494,7 +502,8 @@ impl Pallet { if let Some(active_era_start) = active_era.start { let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); - let era_duration = (now_as_millis_u64 - active_era_start).saturated_into::(); + let era_duration = (now_as_millis_u64.defensive_saturating_sub(active_era_start)) + .saturated_into::(); let staked = Self::eras_total_stake(&active_era.index); let issuance = T::Currency::total_issuance(); let (validator_payout, remainder) = @@ -794,7 +803,7 @@ impl Pallet { stash: T::AccountId, exposure: Exposure>, ) { - >::insert(¤t_era, &stash, &exposure); + EraInfo::::set_exposure(current_era, &stash, exposure); } #[cfg(feature = "runtime-benchmarks")] @@ -1745,9 +1754,16 @@ impl StakingInterface for Pallet { } fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { + // look in the non paged exposures + // FIXME: Can be cleaned up once non paged exposures are cleared (https://github.com/paritytech/polkadot-sdk/issues/433) ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { validator == *who || exposures.others.iter().any(|i| i.who == *who) }) + || + // look in the paged exposures + ErasStakersPaged::::iter_prefix((era,)).any(|((validator, _), exposure_page)| { + validator == *who || exposure_page.others.iter().any(|i| i.who == *who) + }) } fn status( who: &Self::AccountId, @@ -1785,7 +1801,7 @@ impl StakingInterface for Pallet { ) { let others = exposures .iter() - .map(|(who, value)| IndividualExposure { who: who.clone(), value: value.clone() }) + .map(|(who, value)| IndividualExposure { who: who.clone(), value: *value }) .collect::>(); let exposure = Exposure { total: Default::default(), own: Default::default(), others }; EraInfo::::set_exposure(*current_era, stash, exposure); @@ -1812,6 +1828,7 @@ impl Pallet { 
Self::check_nominators()?; Self::check_exposures()?; + Self::check_paged_exposures()?; Self::check_ledgers()?; Self::check_count() } @@ -1860,6 +1877,70 @@ impl Pallet { .collect::>() } + fn check_paged_exposures() -> Result<(), TryRuntimeError> { + use sp_staking::PagedExposureMetadata; + use sp_std::collections::btree_map::BTreeMap; + + // Sanity check for the paged exposure of the active era. + let mut exposures: BTreeMap>> = + BTreeMap::new(); + let era = Self::active_era().unwrap().index; + let accumulator_default = PagedExposureMetadata { + total: Zero::zero(), + own: Zero::zero(), + nominator_count: 0, + page_count: 0, + }; + + ErasStakersPaged::::iter_prefix((era,)) + .map(|((validator, _page), expo)| { + ensure!( + expo.page_total == + expo.others.iter().map(|e| e.value).fold(Zero::zero(), |acc, x| acc + x), + "wrong total exposure for the page.", + ); + + let metadata = exposures.get(&validator).unwrap_or(&accumulator_default); + exposures.insert( + validator, + PagedExposureMetadata { + total: metadata.total + expo.page_total, + own: metadata.own, + nominator_count: metadata.nominator_count + expo.others.len() as u32, + page_count: metadata.page_count + 1, + }, + ); + + Ok(()) + }) + .collect::>()?; + + exposures + .iter() + .map(|(validator, metadata)| { + let actual_overview = ErasStakersOverview::::get(era, validator); + + ensure!(actual_overview.is_some(), "No overview found for a paged exposure"); + let actual_overview = actual_overview.unwrap(); + + ensure!( + actual_overview.total == metadata.total + actual_overview.own, + "Exposure metadata does not have correct total exposed stake." + ); + ensure!( + actual_overview.nominator_count == metadata.nominator_count, + "Exposure metadata does not have correct count of nominators." + ); + ensure!( + actual_overview.page_count == metadata.page_count, + "Exposure metadata does not have correct count of pages." + ); + + Ok(()) + }) + .collect::>() + } + fn check_nominators() -> Result<(), TryRuntimeError> { // a check per nominator to ensure their entire stake is correctly distributed. Will only // kick-in if the nomination was submitted before the current era. diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 18ad3e4a6cf1df1a504a60b42711d90c0fed33e6..b914545a76b989069476f14360cb0314167ede94 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -269,6 +269,9 @@ pub mod pallet { #[pallet::constant] type MaxUnlockingChunks: Get; + /// The maximum amount of controller accounts that can be deprecated in one call. + type MaxControllersInDeprecationBatch: Get; + /// Something that listens to staking updates and performs actions based on the data it /// receives. /// @@ -842,6 +845,8 @@ pub mod pallet { CommissionTooLow, /// Some bound is not met. BoundNotMet, + /// Used when attempting to use deprecated controller account logic. + ControllerDeprecated, } #[pallet::hooks] @@ -1066,7 +1071,9 @@ pub mod pallet { ensure!(ledger.active >= min_active_bond, Error::::InsufficientBond); // Note: in case there is no current era it is fine to bond one era more. - let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); + let era = Self::current_era() + .unwrap_or(0) + .defensive_saturating_add(T::BondingDuration::get()); if let Some(chunk) = ledger.unlocking.last_mut().filter(|chunk| chunk.era == era) { // To keep the chunk count down, we only keep one chunk per era. 
Since // `unlocking` is a FiFo queue, if a chunk exists for `era` we know that it will @@ -1283,10 +1290,19 @@ pub mod pallet { payee: RewardDestination, ) -> DispatchResult { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(Controller(controller))?; + let ledger = Self::ledger(Controller(controller.clone()))?; + + ensure!( + (payee != { + #[allow(deprecated)] + RewardDestination::Controller + }), + Error::::ControllerDeprecated + ); + let _ = ledger .set_payee(payee) - .defensive_proof("ledger was retrieved from storage, thus its bonded; qed."); + .defensive_proof("ledger was retrieved from storage, thus its bonded; qed.")?; Ok(()) } @@ -1310,18 +1326,17 @@ pub mod pallet { pub fn set_controller(origin: OriginFor) -> DispatchResult { let stash = ensure_signed(origin)?; - // the bonded map and ledger are mutated directly as this extrinsic is related to a + // The bonded map and ledger are mutated directly as this extrinsic is related to a // (temporary) passive migration. Self::ledger(StakingAccount::Stash(stash.clone())).map(|ledger| { let controller = ledger.controller() - .defensive_proof("ledger was fetched used the StakingInterface, so controller field must exist; qed.") + .defensive_proof("Ledger's controller field didn't exist. The controller should have been fetched using StakingLedger.") .ok_or(Error::::NotController)?; if controller == stash { - // stash is already its own controller. + // Stash is already its own controller. return Err(Error::::AlreadyPaired.into()) } - // update bond and ledger. >::remove(controller); >::insert(&stash, &stash); >::insert(&stash, ledger); @@ -1751,11 +1766,16 @@ pub mod pallet { /// who do not satisfy these requirements. #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::chill_other())] - pub fn chill_other(origin: OriginFor, controller: T::AccountId) -> DispatchResult { + pub fn chill_other(origin: OriginFor, stash: T::AccountId) -> DispatchResult { // Anyone can call this function. let caller = ensure_signed(origin)?; - let ledger = Self::ledger(Controller(controller.clone()))?; - let stash = ledger.stash; + let ledger = Self::ledger(Stash(stash.clone()))?; + let controller = ledger + .controller() + .defensive_proof( + "Ledger's controller field didn't exist. The controller should have been fetched using StakingLedger.", + ) + .ok_or(Error::::NotController)?; // In order for one user to chill another user, the following conditions must be met: // @@ -1872,6 +1892,84 @@ pub mod pallet { ensure_signed(origin)?; Self::do_payout_stakers_by_page(validator_stash, era, page) } + + /// Migrates an account's `RewardDestination::Controller` to + /// `RewardDestination::Account(controller)`. + /// + /// Effects will be felt instantly (as soon as this function is completed successfully). + /// + /// This will waive the transaction fee if the `payee` is successfully migrated. 
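A sketch of how this migration is exercised, in the style of this pallet's tests (the `Test` runtime, `ExtBuilder` and account ids are assumed from the existing mock; illustrative only):

```rust
// Sketch only: assumes the staking mock, where stash 11 is bonded with controller 11.
ExtBuilder::default().build_and_execute(|| {
	#[allow(deprecated)]
	Payee::<Test>::insert(&11, RewardDestination::Controller);

	// Any signed origin may submit the migration; the fee is waived when it succeeds.
	assert_ok!(Staking::update_payee(RuntimeOrigin::signed(1), 11));
	assert_eq!(Payee::<Test>::get(&11), RewardDestination::Account(11));
});
```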
+ #[pallet::call_index(27)] + #[pallet::weight(T::WeightInfo::update_payee())] + pub fn update_payee( + origin: OriginFor, + controller: T::AccountId, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + let ledger = Self::ledger(StakingAccount::Controller(controller.clone()))?; + + ensure!( + (Payee::::get(&ledger.stash) == { + #[allow(deprecated)] + RewardDestination::Controller + }), + Error::::NotController + ); + + let _ = ledger + .set_payee(RewardDestination::Account(controller)) + .defensive_proof("ledger should have been previously retrieved from storage.")?; + + Ok(Pays::No.into()) + } + + /// Updates a batch of controller accounts to their corresponding stash account if they are + /// not the same. Ignores any controller accounts that do not exist, and does not operate if + /// the stash and controller are already the same. + /// + /// Effects will be felt instantly (as soon as this function is completed successfully). + /// + /// The dispatch origin must be `T::AdminOrigin`. + #[pallet::call_index(28)] + #[pallet::weight(T::WeightInfo::deprecate_controller_batch(controllers.len() as u32))] + pub fn deprecate_controller_batch( + origin: OriginFor, + controllers: BoundedVec, + ) -> DispatchResultWithPostInfo { + T::AdminOrigin::ensure_origin(origin)?; + + // Ignore controllers that do not exist or are already the same as stash. + let filtered_batch_with_ledger: Vec<_> = controllers + .iter() + .filter_map(|controller| { + let ledger = Self::ledger(StakingAccount::Controller(controller.clone())); + ledger.ok().map_or(None, |ledger| { + // If the controller `RewardDestination` is still the deprecated + // `Controller` variant, skip deprecating this account. + let payee_deprecated = Payee::::get(&ledger.stash) == { + #[allow(deprecated)] + RewardDestination::Controller + }; + + if ledger.stash != *controller && !payee_deprecated { + Some((controller.clone(), ledger)) + } else { + None + } + }) + }) + .collect(); + + // Update unique pairs. + for (controller, ledger) in filtered_batch_with_ledger { + let stash = ledger.stash.clone(); + + >::insert(&stash, &stash); + >::remove(controller); + >::insert(stash, ledger); + } + Ok(Some(T::WeightInfo::deprecate_controller_batch(controllers.len() as u32)).into()) + } } } diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 0d84d503733e6451feb69f287d69f8655fe0b20e..709fd1441ec3af313220486aa8000c959e30fd07 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -57,7 +57,7 @@ use crate::{ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, - traits::{Currency, Defensive, Get, Imbalance, OnUnbalanced}, + traits::{Currency, Defensive, DefensiveSaturating, Get, Imbalance, OnUnbalanced}, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -85,7 +85,7 @@ pub(crate) struct SlashingSpan { impl SlashingSpan { fn contains_era(&self, era: EraIndex) -> bool { - self.start <= era && self.length.map_or(true, |l| self.start + l > era) + self.start <= era && self.length.map_or(true, |l| self.start.saturating_add(l) > era) } } @@ -123,15 +123,15 @@ impl SlashingSpans { // returns `true` if a new span was started, `false` otherwise. `false` indicates // that internal state is unchanged. 
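	// Worked example (illustrative): if the ongoing span began at era 10 (`last_start == 10`),
	// calling `end_span(14)` records a prior span of length 5 (covering eras 10..=14), starts the
	// new ongoing span at era 15, bumps `span_index` and returns `true`; calling `end_span(9)`
	// instead returns `false` and leaves the state untouched.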
pub(crate) fn end_span(&mut self, now: EraIndex) -> bool { - let next_start = now + 1; + let next_start = now.defensive_saturating_add(1); if next_start <= self.last_start { return false } - let last_length = next_start - self.last_start; + let last_length = next_start.defensive_saturating_sub(self.last_start); self.prior.insert(0, last_length); self.last_start = next_start; - self.span_index += 1; + self.span_index.defensive_saturating_accrue(1); true } @@ -141,9 +141,9 @@ impl SlashingSpans { let mut index = self.span_index; let last = SlashingSpan { index, start: last_start, length: None }; let prior = self.prior.iter().cloned().map(move |length| { - let start = last_start - length; + let start = last_start.defensive_saturating_sub(length); last_start = start; - index -= 1; + index.defensive_saturating_reduce(1); SlashingSpan { index, start, length: Some(length) } }); @@ -164,13 +164,18 @@ impl SlashingSpans { let old_idx = self .iter() .skip(1) // skip ongoing span. - .position(|span| span.length.map_or(false, |len| span.start + len <= window_start)); + .position(|span| { + span.length + .map_or(false, |len| span.start.defensive_saturating_add(len) <= window_start) + }); - let earliest_span_index = self.span_index - self.prior.len() as SpanIndex; + let earliest_span_index = + self.span_index.defensive_saturating_sub(self.prior.len() as SpanIndex); let pruned = match old_idx { Some(o) => { self.prior.truncate(o); - let new_earliest = self.span_index - self.prior.len() as SpanIndex; + let new_earliest = + self.span_index.defensive_saturating_sub(self.prior.len() as SpanIndex); Some((earliest_span_index, new_earliest)) }, None => None, @@ -500,7 +505,7 @@ impl<'a, T: 'a + Config> InspectingSpans<'a, T> { let reward = if span_record.slashed < slash { // new maximum span slash. apply the difference. - let difference = slash - span_record.slashed; + let difference = slash.defensive_saturating_sub(span_record.slashed); span_record.slashed = slash; // compute reward. diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index ee6f67adf14c97fe819e692a72915496c02ab759..0e9be70ee7d2799621f1cad69d9138506eb4fb3c 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -298,9 +298,9 @@ fn rewards_should_work() { let init_balance_101 = Balances::total_balance(&101); // Set payees - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(21, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); + Payee::::insert(11, RewardDestination::Account(11)); + Payee::::insert(21, RewardDestination::Account(21)); + Payee::::insert(101, RewardDestination::Account(101)); Pallet::::reward_by_ids(vec![(11, 50)]); Pallet::::reward_by_ids(vec![(11, 50)]); @@ -417,7 +417,7 @@ fn staking_should_work() { // --- Block 2: start_session(2); // add a new candidate for being a validator. account 3 controlled by 4. - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Account(3))); assert_ok!(Staking::validate(RuntimeOrigin::signed(3), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(3), @@ -585,22 +585,10 @@ fn nominating_and_rewards_should_work() { assert_ok!(Staking::validate(RuntimeOrigin::signed(31), Default::default())); // Set payee to controller. 
- assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(11), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(21), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(31), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(41), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(21), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(31), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(41), RewardDestination::Stash)); // give the man some money let initial_balance = 1000; @@ -612,14 +600,14 @@ fn nominating_and_rewards_should_work() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 21, 31])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![11, 21, 41])); @@ -1116,8 +1104,8 @@ fn reward_destination_works() { // (era 1, page 0) is claimed assert_eq!(Staking::claimed_rewards(1, &11), vec![0]); - // Change RewardDestination to Controller - >::insert(&11, RewardDestination::Controller); + // Change RewardDestination to Account + >::insert(&11, RewardDestination::Account(11)); // Check controller balance assert_eq!(Balances::free_balance(11), 23150); @@ -1129,8 +1117,8 @@ fn reward_destination_works() { mock::start_active_era(3); mock::make_all_reward_payment(2); - // Check that RewardDestination is Controller - assert_eq!(Staking::payee(11.into()), RewardDestination::Controller); + // Check that RewardDestination is Account(11) + assert_eq!(Staking::payee(11.into()), RewardDestination::Account(11)); // Check that reward went to the controller account assert_eq!(Balances::free_balance(11), recorded_stash_balance + total_payout_2); // Check that amount at stake is NOT increased @@ -1159,9 +1147,9 @@ fn validator_payment_prefs_work() { let commission = Perbill::from_percent(40); >::insert(&11, ValidatorPrefs { commission, ..Default::default() }); - // Reward controller so staked ratio doesn't change. - >::insert(&11, RewardDestination::Controller); - >::insert(&101, RewardDestination::Controller); + // Reward stash so staked ratio doesn't change. + >::insert(&11, RewardDestination::Stash); + >::insert(&101, RewardDestination::Stash); mock::start_active_era(1); mock::make_all_reward_payment(0); @@ -1250,8 +1238,8 @@ fn bond_extra_and_withdraw_unbonded_works() { // * it can unbond a portion of its funds from the stash account. // * Once the unbonding period is done, it can actually take the funds out of the stash. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1461,8 +1449,8 @@ fn rebond_works() { // * it can unbond a portion of its funds from the stash account. // * it can re-bond a portion of the funds scheduled to unlock. 
ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1587,8 +1575,8 @@ fn rebond_works() { fn rebond_is_fifo() { // Rebond should proceed by reversing the most recent bond operations. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1683,8 +1671,8 @@ fn rebond_emits_right_value_in_event() { // When a user calls rebond with more than can be rebonded, things succeed, // and the rebond event emits the actual value rebonded. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1836,10 +1824,7 @@ fn switching_roles() { ExtBuilder::default().nominate(false).build_and_execute(|| { // Reset reward destination for i in &[11, 21] { - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(*i), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(*i), RewardDestination::Stash)); } assert_eq_uvec!(validator_controllers(), vec![21, 11]); @@ -1850,14 +1835,14 @@ fn switching_roles() { } // add 2 nominators - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2000, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2000, RewardDestination::Account(1))); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 5])); - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Account(3))); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21, 1])); // add a new validator candidate - assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1000, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1000, RewardDestination::Account(5))); assert_ok!(Staking::validate(RuntimeOrigin::signed(5), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(5), @@ -1928,11 +1913,11 @@ fn bond_with_no_staked_value() { .build_and_execute(|| { // Can't bond with 1 assert_noop!( - Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Controller), + Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1)), Error::::InsufficientBond, ); // bonded with absolute minimum value possible. 
- assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 5, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 5, RewardDestination::Account(1))); assert_eq!(Balances::locks(&1)[0].amount, 5); // unbonding even 1 will cause all to be unbonded. @@ -1974,15 +1959,12 @@ fn bond_with_little_staked_value_bounded() { .build_and_execute(|| { // setup assert_ok!(Staking::chill(RuntimeOrigin::signed(31))); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(11), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); let init_balance_1 = Balances::free_balance(&1); let init_balance_11 = Balances::free_balance(&11); // Stingy validator. - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1))); assert_ok!(Staking::validate(RuntimeOrigin::signed(1), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(1), @@ -2061,14 +2043,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 11, 11, 21, 31])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21, 31])); @@ -2114,14 +2096,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 11, 11, 21])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21])); @@ -3530,8 +3512,8 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { let part_for_101 = Perbill::from_rational::(125, 1125); // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); + Payee::::insert(11, RewardDestination::Account(11)); + Payee::::insert(101, RewardDestination::Account(101)); Pallet::::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change @@ -3820,8 +3802,8 @@ fn test_multi_page_payout_stakers_by_page() { staking_events_since_last_call().as_slice(), &[ .., - Event::Rewarded { stash: 1063, dest: RewardDestination::Controller, amount: 111 }, - Event::Rewarded { stash: 1064, dest: RewardDestination::Controller, amount: 111 }, + Event::Rewarded { stash: 1063, dest: RewardDestination::Stash, amount: 111 }, + Event::Rewarded { stash: 1064, dest: RewardDestination::Stash, amount: 111 }, ] )); @@ -3843,8 +3825,8 @@ fn test_multi_page_payout_stakers_by_page() { events.as_slice(), &[ Event::PayoutStarted { era_index: 1, validator_stash: 11 }, - Event::Rewarded { stash: 1065, dest: RewardDestination::Controller, amount: 111 }, - Event::Rewarded { stash: 1066, dest: RewardDestination::Controller, amount: 111 }, + Event::Rewarded { stash: 1065, dest: RewardDestination::Stash, amount: 111 }, + Event::Rewarded { stash: 1066, dest: RewardDestination::Stash, amount: 111 }, .. 
] )); @@ -4685,40 +4667,6 @@ fn offences_weight_calculated_correctly() { }); } -#[test] -fn payout_creates_controller() { - ExtBuilder::default().has_stakers(false).build_and_execute(|| { - let balance = 1000; - // Create a validator: - bond_validator(11, balance); - - // create a stash/controller pair and nominate - let (stash, controller) = testing_utils::create_unique_stash_controller::( - 0, - 100, - RewardDestination::Controller, - false, - ) - .unwrap(); - - assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![11])); - - // kill controller - assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(controller), stash, 100)); - assert_eq!(Balances::free_balance(controller), 0); - - mock::start_active_era(1); - Staking::reward_by_ids(vec![(11, 1)]); - // compute and ensure the reward amount is greater than zero. - let _ = current_total_payout_for_duration(reward_time_per_era()); - mock::start_active_era(2); - assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); - - // Controller is created - assert!(Balances::free_balance(controller) > 0); - }) -} - #[test] fn payout_to_any_account_works() { ExtBuilder::default().has_stakers(false).build_and_execute(|| { @@ -5462,7 +5410,7 @@ fn min_bond_checks_work() { .min_validator_bond(1_500) .build_and_execute(|| { // 500 is not enough for any role - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Stash)); assert_noop!( Staking::nominate(RuntimeOrigin::signed(3), vec![1]), Error::::InsufficientBond @@ -5524,19 +5472,11 @@ fn chill_other_works() { Balances::make_free_balance_be(&c, 100_000); // Nominator - assert_ok!(Staking::bond( - RuntimeOrigin::signed(a), - 1000, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(RuntimeOrigin::signed(a), 1000, RewardDestination::Stash)); assert_ok!(Staking::nominate(RuntimeOrigin::signed(a), vec![1])); // Validator - assert_ok!(Staking::bond( - RuntimeOrigin::signed(b), - 1500, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(RuntimeOrigin::signed(b), 1500, RewardDestination::Stash)); assert_ok!(Staking::validate(RuntimeOrigin::signed(b), ValidatorPrefs::default())); } @@ -5683,7 +5623,7 @@ fn capped_stakers_works() { let (_, controller) = testing_utils::create_stash_controller::( i + 10_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_ok!(Staking::validate( @@ -5694,12 +5634,9 @@ fn capped_stakers_works() { } // but no more - let (_, last_validator) = testing_utils::create_stash_controller::( - 1337, - 100, - RewardDestination::Controller, - ) - .unwrap(); + let (_, last_validator) = + testing_utils::create_stash_controller::(1337, 100, RewardDestination::Stash) + .unwrap(); assert_noop!( Staking::validate(RuntimeOrigin::signed(last_validator), ValidatorPrefs::default()), @@ -5712,7 +5649,7 @@ fn capped_stakers_works() { let (_, controller) = testing_utils::create_stash_controller::( i + 20_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![1])); @@ -5723,7 +5660,7 @@ fn capped_stakers_works() { let (_, last_nominator) = testing_utils::create_stash_controller::( 30_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_noop!( @@ -6269,7 +6206,7 @@ fn proportional_ledger_slash_works() { #[test] fn 
reducing_max_unlocking_chunks_abrupt() { // Concern is on validators only - // By Default 11, 10 are stash and ctrl and 21,20 + // By Default 11, 10 are stash and ctlr and 21,20 ExtBuilder::default().build_and_execute(|| { // given a staker at era=10 and MaxUnlockChunks set to 2 MaxUnlockingChunks::set(2); @@ -6637,6 +6574,14 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( ); assert_eq!(EraInfo::::get_page_count(1, &11), 2); + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + // case 2: exposure exist in ErasStakers and ErasStakersClipped (legacy). // delete paged storage and add exposure to clipped storage >::remove((1, 11, 0)); @@ -6672,6 +6617,14 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( assert_eq!(actual_exposure_full.own, 1000); assert_eq!(actual_exposure_full.total, total_exposure); + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + // for pages other than 0, clipped storage returns empty exposure assert_eq!(EraInfo::::get_paged_exposure(1, &11, 1), None); // page size is 1 for clipped storage @@ -6771,7 +6724,7 @@ mod ledger { assert_ok!(Staking::bond( RuntimeOrigin::signed(10), 100, - RewardDestination::Controller + RewardDestination::Account(10) )); assert_eq!(>::get(&10), Some(10)); @@ -6880,4 +6833,215 @@ mod ledger { >::remove(42); // ensures try-state checks pass. }) } + + #[test] + #[allow(deprecated)] + fn set_payee_errors_on_controller_destination() { + ExtBuilder::default().build_and_execute(|| { + Payee::::insert(11, RewardDestination::Staked); + assert_noop!( + Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller), + Error::::ControllerDeprecated + ); + assert_eq!(Payee::::get(&11), RewardDestination::Staked); + }) + } + + #[test] + #[allow(deprecated)] + fn update_payee_migration_works() { + ExtBuilder::default().build_and_execute(|| { + // migrate a `Controller` variant to `Account` variant. + Payee::::insert(11, RewardDestination::Controller); + assert_eq!(Payee::::get(&11), RewardDestination::Controller); + assert_ok!(Staking::update_payee(RuntimeOrigin::signed(11), 11)); + assert_eq!(Payee::::get(&11), RewardDestination::Account(11)); + + // Do not migrate a variant if not `Controller`. 
+ Payee::::insert(21, RewardDestination::Stash); + assert_eq!(Payee::::get(&21), RewardDestination::Stash); + assert_noop!( + Staking::update_payee(RuntimeOrigin::signed(11), 21), + Error::::NotController + ); + assert_eq!(Payee::::get(&21), RewardDestination::Stash); + }) + } + + #[test] + fn deprecate_controller_batch_works_full_weight() { + ExtBuilder::default().build_and_execute(|| { + // Given: + + let start = 1001; + let mut controllers: Vec<_> = vec![]; + for n in start..(start + MaxControllersInDeprecationBatch::get()).into() { + let ctlr: u64 = n.into(); + let stash: u64 = (n + 10000).into(); + + Ledger::::insert( + ctlr, + StakingLedger { + controller: None, + total: (10 + ctlr).into(), + active: (10 + ctlr).into(), + ..StakingLedger::default_from(stash) + }, + ); + Bonded::::insert(stash, ctlr); + Payee::::insert(stash, RewardDestination::Staked); + + controllers.push(ctlr); + } + + // When: + + let bounded_controllers: BoundedVec< + _, + ::MaxControllersInDeprecationBatch, + > = BoundedVec::try_from(controllers).unwrap(); + + // Only `AdminOrigin` can sign. + assert_noop!( + Staking::deprecate_controller_batch( + RuntimeOrigin::signed(2), + bounded_controllers.clone() + ), + BadOrigin + ); + + let result = + Staking::deprecate_controller_batch(RuntimeOrigin::root(), bounded_controllers); + assert_ok!(result); + assert_eq!( + result.unwrap().actual_weight.unwrap(), + ::WeightInfo::deprecate_controller_batch( + ::MaxControllersInDeprecationBatch::get() + ) + ); + + // Then: + + for n in start..(start + MaxControllersInDeprecationBatch::get()).into() { + let ctlr: u64 = n.into(); + let stash: u64 = (n + 10000).into(); + + // Ledger no longer keyed by controller. + assert_eq!(Ledger::::get(ctlr), None); + // Bonded now maps to the stash. + assert_eq!(Bonded::::get(stash), Some(stash)); + + // Ledger is now keyed by stash. + let ledger_updated = Ledger::::get(stash).unwrap(); + assert_eq!(ledger_updated.stash, stash); + + // Check `active` and `total` values match the original ledger set by controller. + assert_eq!(ledger_updated.active, (10 + ctlr).into()); + assert_eq!(ledger_updated.total, (10 + ctlr).into()); + } + }) + } + + #[test] + fn deprecate_controller_batch_works_half_weight() { + ExtBuilder::default().build_and_execute(|| { + // Given: + + let start = 1001; + let mut controllers: Vec<_> = vec![]; + for n in start..(start + MaxControllersInDeprecationBatch::get()).into() { + let ctlr: u64 = n.into(); + + // Only half of entries are unique pairs. + let stash: u64 = if n % 2 == 0 { (n + 10000).into() } else { ctlr }; + + Ledger::::insert( + ctlr, + StakingLedger { controller: None, ..StakingLedger::default_from(stash) }, + ); + Bonded::::insert(stash, ctlr); + Payee::::insert(stash, RewardDestination::Staked); + + controllers.push(ctlr); + } + + // When: + let bounded_controllers: BoundedVec< + _, + ::MaxControllersInDeprecationBatch, + > = BoundedVec::try_from(controllers.clone()).unwrap(); + + let result = + Staking::deprecate_controller_batch(RuntimeOrigin::root(), bounded_controllers); + assert_ok!(result); + assert_eq!( + result.unwrap().actual_weight.unwrap(), + ::WeightInfo::deprecate_controller_batch(controllers.len() as u32) + ); + + // Then: + + for n in start..(start + MaxControllersInDeprecationBatch::get()).into() { + let unique_pair = n % 2 == 0; + let ctlr: u64 = n.into(); + let stash: u64 = if unique_pair { (n + 10000).into() } else { ctlr }; + + // Side effect of migration for unique pair. 
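// Summary of the storage transition asserted below for a unique
// (stash, controller) pair (descriptive comment only; the keys and outcomes
// are those checked by these tests and by the dispatchable itself):
//   before the call:  Bonded[stash] == controller, Ledger[controller] == ledger(stash)
//   after the call:   Bonded[stash] == stash,      Ledger[stash] == ledger(stash),
//                     Ledger[controller] == None
// Pairs where stash == controller, or whose Payee is still the deprecated
// `Controller` variant, are deliberately left untouched.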
+ if unique_pair { + assert_eq!(Ledger::::get(ctlr), None); + } + // Bonded maps to the stash. + assert_eq!(Bonded::::get(stash), Some(stash)); + + // Ledger is keyed by stash. + let ledger_updated = Ledger::::get(stash).unwrap(); + assert_eq!(ledger_updated.stash, stash); + } + }) + } + + #[test] + fn deprecate_controller_batch_skips_unmigrated_controller_payees() { + ExtBuilder::default().build_and_execute(|| { + // Given: + + let stash: u64 = 1000; + let ctlr: u64 = 1001; + + Ledger::::insert( + ctlr, + StakingLedger { controller: None, ..StakingLedger::default_from(stash) }, + ); + Bonded::::insert(stash, ctlr); + #[allow(deprecated)] + Payee::::insert(stash, RewardDestination::Controller); + + // When: + + let bounded_controllers: BoundedVec< + _, + ::MaxControllersInDeprecationBatch, + > = BoundedVec::try_from(vec![ctlr]).unwrap(); + + let result = + Staking::deprecate_controller_batch(RuntimeOrigin::root(), bounded_controllers); + assert_ok!(result); + assert_eq!( + result.unwrap().actual_weight.unwrap(), + ::WeightInfo::deprecate_controller_batch(1 as u32) + ); + + // Then: + + // Esure deprecation did not happen. + assert_eq!(Ledger::::get(ctlr).is_some(), true); + + // Bonded still keyed by controller. + assert_eq!(Bonded::::get(stash), Some(ctlr)); + + // Ledger is still keyed by controller. + let ledger_updated = Ledger::::get(ctlr).unwrap(); + assert_eq!(ledger_updated.stash, stash); + }) + } } diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index ad6dbccde9f853ccafa82452d842460abd3c0747..7c9a050016406a5ae5b8f98bcd98b0093858628a 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -59,15 +59,16 @@ pub trait WeightInfo { fn nominate(n: u32, ) -> Weight; fn chill() -> Weight; fn set_payee() -> Weight; + fn update_payee() -> Weight; fn set_controller() -> Weight; fn set_validator_count() -> Weight; fn force_no_eras() -> Weight; fn force_new_era() -> Weight; fn force_new_era_always() -> Weight; fn set_invulnerables(v: u32, ) -> Weight; + fn deprecate_controller_batch(i: u32, ) -> Weight; fn force_unstake(s: u32, ) -> Weight; fn cancel_deferred_slash(s: u32, ) -> Weight; - fn payout_stakers_dead_controller(n: u32, ) -> Weight; fn payout_stakers_alive_staked(n: u32, ) -> Weight; fn rebond(l: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; @@ -98,8 +99,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 42_811_000 picoseconds. - Weight::from_parts(44_465_000, 4764) + // Minimum execution time: 42_491_000 picoseconds. 
+ Weight::from_parts(44_026_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -119,8 +120,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 87_628_000 picoseconds. - Weight::from_parts(90_020_000, 8877) + // Minimum execution time: 88_756_000 picoseconds. + Weight::from_parts(91_000_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -146,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 91_655_000 picoseconds. - Weight::from_parts(94_146_000, 8877) + // Minimum execution time: 91_331_000 picoseconds. + Weight::from_parts(94_781_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -166,10 +167,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 42_953_000 picoseconds. - Weight::from_parts(44_648_505, 4764) - // Standard Error: 937 - .saturating_add(Weight::from_parts(51_090, 0).saturating_mul(s.into())) + // Minimum execution time: 42_495_000 picoseconds. + Weight::from_parts(44_189_470, 4764) + // Standard Error: 1_389 + .saturating_add(Weight::from_parts(47_484, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -206,10 +207,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 89_218_000 picoseconds. - Weight::from_parts(97_761_884, 6248) - // Standard Error: 3_888 - .saturating_add(Weight::from_parts(1_346_441, 0).saturating_mul(s.into())) + // Minimum execution time: 89_004_000 picoseconds. + Weight::from_parts(96_677_570, 6248) + // Standard Error: 4_635 + .saturating_add(Weight::from_parts(1_387_718, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -241,8 +242,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 51_200_000 picoseconds. - Weight::from_parts(53_403_000, 4556) + // Minimum execution time: 51_532_000 picoseconds. + Weight::from_parts(53_308_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -255,10 +256,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 28_963_000 picoseconds. - Weight::from_parts(29_884_371, 4556) - // Standard Error: 9_063 - .saturating_add(Weight::from_parts(6_532_967, 0).saturating_mul(k.into())) + // Minimum execution time: 28_955_000 picoseconds. 
+ Weight::from_parts(29_609_869, 4556) + // Standard Error: 6_793 + .saturating_add(Weight::from_parts(6_412_124, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -291,10 +292,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 64_644_000 picoseconds. - Weight::from_parts(62_855_016, 6248) - // Standard Error: 17_528 - .saturating_add(Weight::from_parts(3_993_850, 0).saturating_mul(n.into())) + // Minimum execution time: 64_080_000 picoseconds. + Weight::from_parts(61_985_382, 6248) + // Standard Error: 13_320 + .saturating_add(Weight::from_parts(4_030_513, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -318,8 +319,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 54_505_000 picoseconds. - Weight::from_parts(56_026_000, 6248) + // Minimum execution time: 54_194_000 picoseconds. + Weight::from_parts(55_578_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -333,11 +334,26 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_639_000 picoseconds. - Weight::from_parts(17_342_000, 4556) + // Minimum execution time: 16_597_000 picoseconds. + Weight::from_parts(16_980_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `969` + // Estimated: `4556` + // Minimum execution time: 20_626_000 picoseconds. + Weight::from_parts(21_242_000, 4556) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:2) @@ -346,8 +362,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 20_334_000 picoseconds. - Weight::from_parts(21_067_000, 4556) + // Minimum execution time: 19_972_000 picoseconds. + Weight::from_parts(20_470_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -357,8 +373,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_680_000 picoseconds. - Weight::from_parts(2_774_000, 0) + // Minimum execution time: 2_571_000 picoseconds. 
+ Weight::from_parts(2_720_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -367,8 +383,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_613_000 picoseconds. - Weight::from_parts(8_922_000, 0) + // Minimum execution time: 8_056_000 picoseconds. + Weight::from_parts(8_413_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -377,8 +393,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_657_000 picoseconds. - Weight::from_parts(9_020_000, 0) + // Minimum execution time: 8_162_000 picoseconds. + Weight::from_parts(8_497_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -387,8 +403,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_600_000 picoseconds. - Weight::from_parts(9_157_000, 0) + // Minimum execution time: 8_320_000 picoseconds. + Weight::from_parts(8_564_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -398,12 +414,31 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(3_293_694, 0) - // Standard Error: 31 - .saturating_add(Weight::from_parts(10_668, 0).saturating_mul(v.into())) + // Minimum execution time: 2_470_000 picoseconds. + Weight::from_parts(3_110_242, 0) + // Standard Error: 63 + .saturating_add(Weight::from_parts(11_786, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Staking::Ledger` (r:5900 w:11800) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:5900 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:0 w:5900) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5900]`. + fn deprecate_controller_batch(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1356 + i * (151 ±0)` + // Estimated: `990 + i * (3566 ±0)` + // Minimum execution time: 2_101_000 picoseconds. + Weight::from_parts(2_238_000, 990) + // Standard Error: 56_753 + .saturating_add(Weight::from_parts(18_404_902, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -437,10 +472,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_537_000 picoseconds. 
- Weight::from_parts(95_127_637, 6248) - // Standard Error: 3_902 - .saturating_add(Weight::from_parts(1_336_182, 0).saturating_mul(s.into())) + // Minimum execution time: 86_765_000 picoseconds. + Weight::from_parts(95_173_565, 6248) + // Standard Error: 4_596 + .saturating_add(Weight::from_parts(1_354_849, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -453,54 +488,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 100_777_000 picoseconds. - Weight::from_parts(896_540_406, 70137) - // Standard Error: 57_788 - .saturating_add(Weight::from_parts(4_870_910, 0).saturating_mul(s.into())) + // Minimum execution time: 104_490_000 picoseconds. + Weight::from_parts(1_162_956_951, 70137) + // Standard Error: 76_760 + .saturating_add(Weight::from_parts(6_485_569, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) - /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) - /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:257 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:258 w:258) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::ClaimedRewards` (r:1 w:1) - /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) - /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) - /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) - /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:257 w:0) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 256]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `21644 + n * (155 ±0)` - // Estimated: `21412 + n * (2603 ±0)` - // Minimum execution time: 133_129_000 picoseconds. 
- Weight::from_parts(190_983_630, 21412) - // Standard Error: 17_497 - .saturating_add(Weight::from_parts(24_723_153, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(14_u64)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(5_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } /// Storage: `Staking::Bonded` (r:257 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:257 w:257) @@ -533,11 +527,11 @@ impl WeightInfo for SubstrateWeight { fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` - // Estimated: `30944 + n * (3774 ±3)` - // Minimum execution time: 149_773_000 picoseconds. - Weight::from_parts(151_527_124, 30944) - // Standard Error: 24_152 - .saturating_add(Weight::from_parts(46_124_074, 0).saturating_mul(n.into())) + // Estimated: `30944 + n * (3774 ±0)` + // Minimum execution time: 144_790_000 picoseconds. + Weight::from_parts(36_764_791, 30944) + // Standard Error: 89_592 + .saturating_add(Weight::from_parts(49_620_105, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -561,10 +555,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 81_618_000 picoseconds. - Weight::from_parts(85_245_630, 8877) - // Standard Error: 5_049 - .saturating_add(Weight::from_parts(39_811, 0).saturating_mul(l.into())) + // Minimum execution time: 81_768_000 picoseconds. + Weight::from_parts(85_332_982, 8877) + // Standard Error: 5_380 + .saturating_add(Weight::from_parts(70_298, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -599,10 +593,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 95_395_000 picoseconds. - Weight::from_parts(100_459_234, 6248) - // Standard Error: 3_781 - .saturating_add(Weight::from_parts(1_333_607, 0).saturating_mul(s.into())) + // Minimum execution time: 96_123_000 picoseconds. + Weight::from_parts(100_278_672, 6248) + // Standard Error: 3_487 + .saturating_add(Weight::from_parts(1_326_503, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -648,12 +642,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 571_337_000 picoseconds. - Weight::from_parts(578_857_000, 512390) - // Standard Error: 2_090_511 - .saturating_add(Weight::from_parts(68_626_083, 0).saturating_mul(v.into())) - // Standard Error: 208_307 - .saturating_add(Weight::from_parts(18_645_374, 0).saturating_mul(n.into())) + // Minimum execution time: 572_893_000 picoseconds. 
+ Weight::from_parts(578_010_000, 512390) + // Standard Error: 2_094_268 + .saturating_add(Weight::from_parts(68_419_710, 0).saturating_mul(v.into())) + // Standard Error: 208_682 + .saturating_add(Weight::from_parts(18_826_175, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -684,12 +678,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_590_734_000 picoseconds. - Weight::from_parts(35_238_091_000, 512390) - // Standard Error: 427_974 - .saturating_add(Weight::from_parts(5_084_196, 0).saturating_mul(v.into())) - // Standard Error: 427_974 - .saturating_add(Weight::from_parts(4_503_420, 0).saturating_mul(n.into())) + // Minimum execution time: 33_836_205_000 picoseconds. + Weight::from_parts(34_210_443_000, 512390) + // Standard Error: 441_692 + .saturating_add(Weight::from_parts(6_122_533, 0).saturating_mul(v.into())) + // Standard Error: 441_692 + .saturating_add(Weight::from_parts(4_418_264, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -706,10 +700,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_509_588_000 picoseconds. - Weight::from_parts(89_050_539, 3510) - // Standard Error: 11_803 - .saturating_add(Weight::from_parts(5_031_416, 0).saturating_mul(v.into())) + // Minimum execution time: 2_454_689_000 picoseconds. + Weight::from_parts(161_771_064, 3510) + // Standard Error: 31_022 + .saturating_add(Weight::from_parts(4_820_158, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -730,8 +724,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_347_000 picoseconds. - Weight::from_parts(5_562_000, 0) + // Minimum execution time: 5_073_000 picoseconds. + Weight::from_parts(5_452_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) @@ -750,10 +744,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_725_000 picoseconds. - Weight::from_parts(5_075_000, 0) + // Minimum execution time: 4_465_000 picoseconds. 
+ Weight::from_parts(4_832_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -776,11 +772,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1773` + // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 67_204_000 picoseconds. - Weight::from_parts(69_197_000, 6248) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Minimum execution time: 71_239_000 picoseconds. + Weight::from_parts(74_649_000, 6248) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -791,8 +787,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_497_000 picoseconds. - Weight::from_parts(12_943_000, 3510) + // Minimum execution time: 12_525_000 picoseconds. + Weight::from_parts(13_126_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -802,8 +798,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_245_000 picoseconds. - Weight::from_parts(3_352_000, 0) + // Minimum execution time: 2_918_000 picoseconds. + Weight::from_parts(3_176_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -824,8 +820,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 42_811_000 picoseconds. - Weight::from_parts(44_465_000, 4764) + // Minimum execution time: 42_491_000 picoseconds. + Weight::from_parts(44_026_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -845,8 +841,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 87_628_000 picoseconds. - Weight::from_parts(90_020_000, 8877) + // Minimum execution time: 88_756_000 picoseconds. + Weight::from_parts(91_000_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -872,8 +868,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 91_655_000 picoseconds. - Weight::from_parts(94_146_000, 8877) + // Minimum execution time: 91_331_000 picoseconds. + Weight::from_parts(94_781_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -892,10 +888,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 42_953_000 picoseconds. - Weight::from_parts(44_648_505, 4764) - // Standard Error: 937 - .saturating_add(Weight::from_parts(51_090, 0).saturating_mul(s.into())) + // Minimum execution time: 42_495_000 picoseconds. 
+ Weight::from_parts(44_189_470, 4764) + // Standard Error: 1_389 + .saturating_add(Weight::from_parts(47_484, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -932,10 +928,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 89_218_000 picoseconds. - Weight::from_parts(97_761_884, 6248) - // Standard Error: 3_888 - .saturating_add(Weight::from_parts(1_346_441, 0).saturating_mul(s.into())) + // Minimum execution time: 89_004_000 picoseconds. + Weight::from_parts(96_677_570, 6248) + // Standard Error: 4_635 + .saturating_add(Weight::from_parts(1_387_718, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -967,8 +963,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 51_200_000 picoseconds. - Weight::from_parts(53_403_000, 4556) + // Minimum execution time: 51_532_000 picoseconds. + Weight::from_parts(53_308_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -981,10 +977,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 28_963_000 picoseconds. - Weight::from_parts(29_884_371, 4556) - // Standard Error: 9_063 - .saturating_add(Weight::from_parts(6_532_967, 0).saturating_mul(k.into())) + // Minimum execution time: 28_955_000 picoseconds. + Weight::from_parts(29_609_869, 4556) + // Standard Error: 6_793 + .saturating_add(Weight::from_parts(6_412_124, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1017,10 +1013,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 64_644_000 picoseconds. - Weight::from_parts(62_855_016, 6248) - // Standard Error: 17_528 - .saturating_add(Weight::from_parts(3_993_850, 0).saturating_mul(n.into())) + // Minimum execution time: 64_080_000 picoseconds. + Weight::from_parts(61_985_382, 6248) + // Standard Error: 13_320 + .saturating_add(Weight::from_parts(4_030_513, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1044,8 +1040,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 54_505_000 picoseconds. - Weight::from_parts(56_026_000, 6248) + // Minimum execution time: 54_194_000 picoseconds. + Weight::from_parts(55_578_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1059,11 +1055,26 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_639_000 picoseconds. - Weight::from_parts(17_342_000, 4556) + // Minimum execution time: 16_597_000 picoseconds. 
+ Weight::from_parts(16_980_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `969` + // Estimated: `4556` + // Minimum execution time: 20_626_000 picoseconds. + Weight::from_parts(21_242_000, 4556) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:2) @@ -1072,8 +1083,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 20_334_000 picoseconds. - Weight::from_parts(21_067_000, 4556) + // Minimum execution time: 19_972_000 picoseconds. + Weight::from_parts(20_470_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1083,8 +1094,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_680_000 picoseconds. - Weight::from_parts(2_774_000, 0) + // Minimum execution time: 2_571_000 picoseconds. + Weight::from_parts(2_720_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1093,8 +1104,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_613_000 picoseconds. - Weight::from_parts(8_922_000, 0) + // Minimum execution time: 8_056_000 picoseconds. + Weight::from_parts(8_413_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1103,8 +1114,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_657_000 picoseconds. - Weight::from_parts(9_020_000, 0) + // Minimum execution time: 8_162_000 picoseconds. + Weight::from_parts(8_497_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1113,8 +1124,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_600_000 picoseconds. - Weight::from_parts(9_157_000, 0) + // Minimum execution time: 8_320_000 picoseconds. + Weight::from_parts(8_564_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -1124,12 +1135,31 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(3_293_694, 0) - // Standard Error: 31 - .saturating_add(Weight::from_parts(10_668, 0).saturating_mul(v.into())) + // Minimum execution time: 2_470_000 picoseconds. 
+ Weight::from_parts(3_110_242, 0) + // Standard Error: 63 + .saturating_add(Weight::from_parts(11_786, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Staking::Ledger` (r:5900 w:11800) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:5900 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:0 w:5900) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5900]`. + fn deprecate_controller_batch(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1356 + i * (151 ±0)` + // Estimated: `990 + i * (3566 ±0)` + // Minimum execution time: 2_101_000 picoseconds. + Weight::from_parts(2_238_000, 990) + // Standard Error: 56_753 + .saturating_add(Weight::from_parts(18_404_902, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(i.into()))) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -1163,10 +1193,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_537_000 picoseconds. - Weight::from_parts(95_127_637, 6248) - // Standard Error: 3_902 - .saturating_add(Weight::from_parts(1_336_182, 0).saturating_mul(s.into())) + // Minimum execution time: 86_765_000 picoseconds. + Weight::from_parts(95_173_565, 6248) + // Standard Error: 4_596 + .saturating_add(Weight::from_parts(1_354_849, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1179,54 +1209,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 100_777_000 picoseconds. - Weight::from_parts(896_540_406, 70137) - // Standard Error: 57_788 - .saturating_add(Weight::from_parts(4_870_910, 0).saturating_mul(s.into())) + // Minimum execution time: 104_490_000 picoseconds. 
+ Weight::from_parts(1_162_956_951, 70137) + // Standard Error: 76_760 + .saturating_add(Weight::from_parts(6_485_569, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) - /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) - /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:257 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:258 w:258) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::ClaimedRewards` (r:1 w:1) - /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) - /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) - /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) - /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:257 w:0) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 256]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `21644 + n * (155 ±0)` - // Estimated: `21412 + n * (2603 ±0)` - // Minimum execution time: 133_129_000 picoseconds. 
- Weight::from_parts(190_983_630, 21412) - // Standard Error: 17_497 - .saturating_add(Weight::from_parts(24_723_153, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(14_u64)) - .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(5_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } /// Storage: `Staking::Bonded` (r:257 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:257 w:257) @@ -1259,11 +1248,11 @@ impl WeightInfo for () { fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` - // Estimated: `30944 + n * (3774 ±3)` - // Minimum execution time: 149_773_000 picoseconds. - Weight::from_parts(151_527_124, 30944) - // Standard Error: 24_152 - .saturating_add(Weight::from_parts(46_124_074, 0).saturating_mul(n.into())) + // Estimated: `30944 + n * (3774 ±0)` + // Minimum execution time: 144_790_000 picoseconds. + Weight::from_parts(36_764_791, 30944) + // Standard Error: 89_592 + .saturating_add(Weight::from_parts(49_620_105, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1287,10 +1276,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 81_618_000 picoseconds. - Weight::from_parts(85_245_630, 8877) - // Standard Error: 5_049 - .saturating_add(Weight::from_parts(39_811, 0).saturating_mul(l.into())) + // Minimum execution time: 81_768_000 picoseconds. + Weight::from_parts(85_332_982, 8877) + // Standard Error: 5_380 + .saturating_add(Weight::from_parts(70_298, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1325,10 +1314,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 95_395_000 picoseconds. - Weight::from_parts(100_459_234, 6248) - // Standard Error: 3_781 - .saturating_add(Weight::from_parts(1_333_607, 0).saturating_mul(s.into())) + // Minimum execution time: 96_123_000 picoseconds. + Weight::from_parts(100_278_672, 6248) + // Standard Error: 3_487 + .saturating_add(Weight::from_parts(1_326_503, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1374,12 +1363,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 571_337_000 picoseconds. - Weight::from_parts(578_857_000, 512390) - // Standard Error: 2_090_511 - .saturating_add(Weight::from_parts(68_626_083, 0).saturating_mul(v.into())) - // Standard Error: 208_307 - .saturating_add(Weight::from_parts(18_645_374, 0).saturating_mul(n.into())) + // Minimum execution time: 572_893_000 picoseconds. 
+ Weight::from_parts(578_010_000, 512390) + // Standard Error: 2_094_268 + .saturating_add(Weight::from_parts(68_419_710, 0).saturating_mul(v.into())) + // Standard Error: 208_682 + .saturating_add(Weight::from_parts(18_826_175, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1410,12 +1399,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_590_734_000 picoseconds. - Weight::from_parts(35_238_091_000, 512390) - // Standard Error: 427_974 - .saturating_add(Weight::from_parts(5_084_196, 0).saturating_mul(v.into())) - // Standard Error: 427_974 - .saturating_add(Weight::from_parts(4_503_420, 0).saturating_mul(n.into())) + // Minimum execution time: 33_836_205_000 picoseconds. + Weight::from_parts(34_210_443_000, 512390) + // Standard Error: 441_692 + .saturating_add(Weight::from_parts(6_122_533, 0).saturating_mul(v.into())) + // Standard Error: 441_692 + .saturating_add(Weight::from_parts(4_418_264, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1432,10 +1421,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_509_588_000 picoseconds. - Weight::from_parts(89_050_539, 3510) - // Standard Error: 11_803 - .saturating_add(Weight::from_parts(5_031_416, 0).saturating_mul(v.into())) + // Minimum execution time: 2_454_689_000 picoseconds. + Weight::from_parts(161_771_064, 3510) + // Standard Error: 31_022 + .saturating_add(Weight::from_parts(4_820_158, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1456,8 +1445,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_347_000 picoseconds. - Weight::from_parts(5_562_000, 0) + // Minimum execution time: 5_073_000 picoseconds. + Weight::from_parts(5_452_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) @@ -1476,10 +1465,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_725_000 picoseconds. - Weight::from_parts(5_075_000, 0) + // Minimum execution time: 4_465_000 picoseconds. 
+ Weight::from_parts(4_832_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -1502,11 +1493,11 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1773` + // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 67_204_000 picoseconds. - Weight::from_parts(69_197_000, 6248) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Minimum execution time: 71_239_000 picoseconds. + Weight::from_parts(74_649_000, 6248) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -1517,8 +1508,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_497_000 picoseconds. - Weight::from_parts(12_943_000, 3510) + // Minimum execution time: 12_525_000 picoseconds. + Weight::from_parts(13_126_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1528,8 +1519,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_245_000 picoseconds. - Weight::from_parts(3_352_000, 0) + // Minimum execution time: 2_918_000 picoseconds. 
+ Weight::from_parts(3_176_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 9e81397fadd5c2238794f7561541f5e359779151..46f86d203c3d6779730c90a47c901b3cb480026c 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet migration of trie" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,18 +18,18 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -remote-externalities = { package = "frame-remote-externalities" , path = "../../utils/frame/remote-externalities", optional = true} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -substrate-state-trie-migration-rpc = { path = "../../utils/frame/rpc/state-trie-migration-rpc", optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +remote-externalities = { package = "frame-remote-externalities", path = "../../utils/frame/remote-externalities", optional = true } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +substrate-state-trie-migration-rpc = { path = "../../utils/frame/rpc/state-trie-migration-rpc", optional = true } [dev-dependencies] parking_lot = "0.12.1" @@ -35,7 +38,7 @@ pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index ac3996459cd4ac2b88178c18a537f9f972b20046..8652e8e9561c83c003f163a99b805c97115c79df 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -1051,7 +1051,7 @@ mod mock { use super::*; use crate as pallet_state_trie_migration; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, Hooks}, weights::Weight, }; @@ -1081,6 +1081,7 @@ mod mock { pub const SS58Prefix: u8 = 42; } + 
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -1636,7 +1637,7 @@ pub(crate) mod remote_tests { weight_sum += StateTrieMigration::::on_initialize(System::::block_number()); - root = System::::finalize().state_root().clone(); + root = *System::::finalize().state_root(); System::::on_finalize(System::::block_number()); } (root, weight_sum) @@ -1686,7 +1687,7 @@ pub(crate) mod remote_tests { ); loop { - let last_state_root = ext.backend.root().clone(); + let last_state_root = *ext.backend.root(); let ((finished, weight), proof) = ext.execute_and_prove(|| { let weight = run_to_block::(now + One::one()).1; if StateTrieMigration::::migration_process().finished() { diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index a5c8cf5b8de78caa988f89a3798ea10f4962bee1..d41afc3244b4fd36a0583128ee182ca26d0dcbda 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -8,27 +8,30 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for statement store" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"]} +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-statement-store = { path = "../../primitives/statement-store", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-statement-store = { path = "../../primitives/statement-store", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/statement/src/mock.rs b/substrate/frame/statement/src/mock.rs index 10a74e100df842cf1321c8343a1127fdcca679a1..192baa1f218602bec663174e965b75c5f4995d92 100644 --- a/substrate/frame/statement/src/mock.rs +++ b/substrate/frame/statement/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_statement; use frame_support::{ - ord_parameter_types, + derive_impl, ord_parameter_types, traits::{ConstU32, ConstU64, Everything}, weights::constants::RocksDbWeight, }; @@ -47,6 +47,7 @@ 
frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index ef507a953164f7ff1d9aac57089ab01fd51536cd..027716ce3179fd5e7eecac5912c3e1b4ade2a3ef 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -9,18 +9,21 @@ repository.workspace = true description = "FRAME pallet for sudo" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } docify = "0.2.6" @@ -28,7 +31,7 @@ docify = "0.2.6" sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index d556c5eb6ae638988307a6d14955051b4f2c0870..4f14c32ff76b0e0038597c9d715d632985eceeea 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -349,12 +349,16 @@ pub mod pallet { impl Pallet { /// Ensure that the caller is the sudo key. 
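// Editorial note, not part of the diff: the rewrite of `ensure_sudo` below widens the check from
// "signed by the sudo key" to "signed by the sudo key, or dispatched from the root origin", via
// `ensure_signed_or_root`. In practice anything that can produce `RuntimeOrigin::root()` (for
// example governance) can now drive the sudo pallet directly, which the new
// `using_root_origin_works` test further down exercises, roughly:
//
//     assert_ok!(Sudo::set_key(RuntimeOrigin::root(), 1));
//     assert_eq!(Some(1), Sudo::key());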
pub(crate) fn ensure_sudo(origin: OriginFor<T>) -> DispatchResult { - let sender = ensure_signed(origin)?; - - if Self::key().map_or(false, |k| k == sender) { - Ok(()) + let sender = ensure_signed_or_root(origin)?; + + if let Some(sender) = sender { + if Self::key().map_or(false, |k| k == sender) { + Ok(()) + } else { + Err(Error::<T>::RequireSudo.into()) + } } else { - Err(Error::<T>::RequireSudo.into()) + Ok(()) } } } diff --git a/substrate/frame/sudo/src/mock.rs b/substrate/frame/sudo/src/mock.rs index 6f123b7c82b2bc4ac576bfe68b39597b10a33fa9..878e9239080997287fb5c27ce5581b770a440e5e 100644 --- a/substrate/frame/sudo/src/mock.rs +++ b/substrate/frame/sudo/src/mock.rs @@ -19,7 +19,10 @@ use super::*; use crate as sudo; -use frame_support::traits::{ConstU32, Contains}; +use frame_support::{ + derive_impl, + traits::{ConstU32, Contains}, +}; use sp_core::{ConstU64, H256}; use sp_io; use sp_runtime::{ @@ -108,6 +111,7 @@ impl Contains<RuntimeCall> for BlockEverything { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = BlockEverything; type BlockWeights = (); diff --git a/substrate/frame/sudo/src/tests.rs b/substrate/frame/sudo/src/tests.rs index 13dc069ddef1cbbcc1b94b25606bdc519d4b44c6..73689415a737fd3e2acba663099f9042f88ace4c 100644 --- a/substrate/frame/sudo/src/tests.rs +++ b/substrate/frame/sudo/src/tests.rs @@ -169,6 +169,18 @@ fn remove_key_works() { }); } +#[test] +fn using_root_origin_works() { + new_test_ext(1).execute_with(|| { + assert_ok!(Sudo::remove_key(RuntimeOrigin::root())); + assert!(Sudo::key().is_none()); + System::assert_has_event(TestEvent::Sudo(Event::KeyRemoved {})); + + assert_ok!(Sudo::set_key(RuntimeOrigin::root(), 1)); + assert_eq!(Some(1), Sudo::key()); + }); +} + #[test] fn sudo_as_basics() { new_test_ext(1).execute_with(|| { diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index b8e21e60761ae30f51a94560e4a345ef98f5db4a..07f9075c82b3c535fe4f4379b2eeaaa95b2a47d2 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -9,32 +9,35 @@ repository.workspace = true description = "Support code for the runtime."
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", default-features = false } -serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -sp-api = { path = "../../primitives/api", default-features = false, features = [ "frame-metadata" ] } -sp-std = { path = "../../primitives/std", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-tracing = { path = "../../primitives/tracing", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-inherents = { path = "../../primitives/inherents", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} -sp-weights = { path = "../../primitives/weights", default-features = false} -sp-debug-derive = { path = "../../primitives/debug-derive", default-features = false} -sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false} +sp-api = { path = "../../primitives/api", default-features = false, features = ["frame-metadata"] } +sp-std = { path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-tracing = { path = "../../primitives/tracing", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-inherents = { path = "../../primitives/inherents", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } +sp-weights = { path = "../../primitives/weights", default-features = false } +sp-debug-derive = { path = "../../primitives/debug-derive", default-features = false } +sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false } tt-call = "1.0.8" macro_magic = "0.5.0" -frame-support-procedural = { path = "procedural", default-features = false} +frame-support-procedural = { path = "procedural", default-features = false } paste = "1.0" -sp-state-machine = { path = "../../primitives/state-machine", default-features = false, optional = true} +sp-state-machine = { path = "../../primitives/state-machine", default-features = false, optional = true } bitflags = "1.3" impl-trait-for-tuples = "0.2.2" smallvec = "1.11.0" @@ -42,7 +45,7 @@ log = { version = "0.4.17", default-features = false } sp-core-hashing-proc-macro = { path = "../../primitives/core/hashing/proc-macro" } k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } -sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features=false} +sp-genesis-builder = { path = "../../primitives/genesis-builder", 
default-features = false } serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } docify = "0.2.6" static_assertions = "1.1.0" @@ -55,7 +58,7 @@ pretty_assertions = "1.2.1" frame-system = { path = "../system" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "environmental/std", @@ -101,8 +104,8 @@ no-metadata-docs = [ ] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. -full-metadata-docs = [ "scale-info/docs" ] +full-metadata-docs = ["scale-info/docs"] # Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of # pallets in a runtime grows. Does increase the compile time! -tuples-96 = [ "frame-support-procedural/tuples-96" ] -tuples-128 = [ "frame-support-procedural/tuples-128" ] +tuples-96 = ["frame-support-procedural/tuples-96"] +tuples-128 = ["frame-support-procedural/tuples-128"] diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index 45ed1750a52871f0d43d6e040def2fa8867d4ddd..dd75f6b4ac17255afc79cb3085f186f65fa588e1 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Proc macro of Support code for the runtime." +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -21,15 +24,18 @@ cfg-expr = "0.15.5" itertools = "0.10.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full"] } +syn = { version = "2.0.41", features = ["full"] } frame-support-procedural-tools = { path = "tools" } macro_magic = { version = "0.5.0", features = ["proc_support"] } proc-macro-warning = { version = "1.0.0", default-features = false } expander = "2.0.0" sp-core-hashing = { path = "../../../primitives/core/hashing" } +[dev-dependencies] +regex = "1" + [features] -default = [ "std" ] +default = ["std"] std = [] no-metadata-docs = [] # Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index fb55e8c9f662cfd93a35e592e57bb2fc42f6f96f..6ded82d91aa5cd9bb9cceb4594ae9768d4948483 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -517,7 +517,7 @@ pub fn benchmarks( components, // TODO: Not supported by V2 syntax as of yet. 
// https://github.com/paritytech/substrate/issues/13132 - pov_modes: vec![], + pov_modes: #krate::__private::vec![], } }).collect::<#krate::__private::Vec<_>>() } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs index 859b9a327e48dc7ee8cdce9c7ff3caa8c7621457..ce2aa0942794d40cc3bfe0f4d65fdd80f8140d52 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -124,6 +124,18 @@ pub fn expand_outer_dispatch( } } + impl #scrate::dispatch::CheckIfFeeless for RuntimeCall { + type Origin = #system_path::pallet_prelude::OriginFor<#runtime>; + fn is_feeless(&self, origin: &Self::Origin) -> bool { + match self { + #( + #pallet_attrs + #variant_patterns => call.is_feeless(origin), + )* + } + } + } + impl #scrate::traits::GetCallMetadata for RuntimeCall { fn get_call_metadata(&self) -> #scrate::traits::CallMetadata { use #scrate::traits::GetCallName; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs index a0fc6b8130b3c0b5b8fc7ce76768bbc9b0dc9908..88f9a3c6e33fd3fc99b2f4e511d5a6c0afd9263a 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs @@ -26,6 +26,7 @@ mod metadata; mod origin; mod outer_enums; mod slash_reason; +mod task; mod unsigned; pub use call::expand_outer_dispatch; @@ -38,4 +39,5 @@ pub use metadata::expand_runtime_metadata; pub use origin::expand_outer_origin; pub use outer_enums::{expand_outer_enum, OuterEnumType}; pub use slash_reason::expand_outer_slash_reason; +pub use task::expand_outer_task; pub use unsigned::expand_outer_validate_unsigned; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs new file mode 100644 index 0000000000000000000000000000000000000000..bd952202bbbea263a67d52930a99bb20ac9722c3 --- /dev/null +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::{Ident, TokenStream as TokenStream2}; +use quote::quote; + +/// Expands aggregate `RuntimeTask` enum. 
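// Editorial aside, not part of the diff: for orientation, `expand_outer_task` below emits one
// aggregate `RuntimeTask` variant per pallet that declares a `Task` part in `construct_runtime!`,
// plus `From`/`TryInto` conversions and a `frame_support::traits::Task` impl that forwards
// `is_valid`, `run`, `weight` and `task_index` to the matching variant. A stripped-down,
// self-contained sketch of the generated shape (hypothetical pallet name, and without the
// codec/TypeInfo derives and runtime generics the real expansion adds):
mod runtime_task_illustration {
    /// Stand-in for the `Task` enum a tasks-enabled pallet would define.
    pub mod pallet_foo {
        #[derive(Clone, Eq, PartialEq, Debug)]
        pub enum Task {
            AddNumberIntoTotal { i: u32 },
        }
    }

    /// Aggregate enum, one variant per `Task`-enabled pallet, as produced by `expand_outer_task`.
    #[derive(Clone, Eq, PartialEq, Debug)]
    pub enum RuntimeTask {
        PalletFoo(pallet_foo::Task),
    }

    impl From<pallet_foo::Task> for RuntimeTask {
        fn from(task: pallet_foo::Task) -> Self {
            RuntimeTask::PalletFoo(task)
        }
    }
}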
+pub fn expand_outer_task( + runtime_name: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream2, +) -> TokenStream2 { + let mut from_impls = Vec::new(); + let mut task_variants = Vec::new(); + let mut variant_names = Vec::new(); + let mut task_paths = Vec::new(); + for decl in pallet_decls { + if decl.find_part("Task").is_none() { + continue; + } + + let variant_name = &decl.name; + let path = &decl.path; + let index = decl.index; + + from_impls.push(quote! { + impl From<#path::Task<#runtime_name>> for RuntimeTask { + fn from(hr: #path::Task<#runtime_name>) -> Self { + RuntimeTask::#variant_name(hr) + } + } + + impl TryInto<#path::Task<#runtime_name>> for RuntimeTask { + type Error = (); + + fn try_into(self) -> Result<#path::Task<#runtime_name>, Self::Error> { + match self { + RuntimeTask::#variant_name(hr) => Ok(hr), + _ => Err(()), + } + } + } + }); + + task_variants.push(quote! { + #[codec(index = #index)] + #variant_name(#path::Task<#runtime_name>), + }); + + variant_names.push(quote!(#variant_name)); + + task_paths.push(quote!(#path::Task)); + } + + let prelude = quote!(#scrate::traits::tasks::__private); + + const INCOMPLETE_MATCH_QED: &'static str = + "cannot have an instantiated RuntimeTask without some Task variant in the runtime. QED"; + + let output = quote! { + /// An aggregation of all `Task` enums across all pallets included in the current runtime. + #[derive( + Clone, Eq, PartialEq, + #scrate::__private::codec::Encode, + #scrate::__private::codec::Decode, + #scrate::__private::scale_info::TypeInfo, + #scrate::__private::RuntimeDebug, + )] + pub enum RuntimeTask { + #( #task_variants )* + } + + #[automatically_derived] + impl #scrate::traits::Task for RuntimeTask { + type Enumeration = #prelude::IntoIter<RuntimeTask>; + + fn is_valid(&self) -> bool { + match self { + #(RuntimeTask::#variant_names(val) => val.is_valid(),)* + _ => unreachable!(#INCOMPLETE_MATCH_QED), + } + } + + fn run(&self) -> Result<(), #scrate::traits::tasks::__private::DispatchError> { + match self { + #(RuntimeTask::#variant_names(val) => val.run(),)* + _ => unreachable!(#INCOMPLETE_MATCH_QED), + } + } + + fn weight(&self) -> #scrate::pallet_prelude::Weight { + match self { + #(RuntimeTask::#variant_names(val) => val.weight(),)* + _ => unreachable!(#INCOMPLETE_MATCH_QED), + } + } + + fn task_index(&self) -> u32 { + match self { + #(RuntimeTask::#variant_names(val) => val.task_index(),)* + _ => unreachable!(#INCOMPLETE_MATCH_QED), + } + } + + fn iter() -> Self::Enumeration { + let mut all_tasks = Vec::new(); + #(all_tasks.extend(#task_paths::iter().map(RuntimeTask::from).collect::<Vec<_>>());)* + all_tasks.into_iter() + } + } + + #( #from_impls )* + }; + + output +} diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index ce34694275b386e029ef5c928569220c770752cf..7a9c4d89a74943ac23ceae8e03ccbf0a0469ebdd 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -253,7 +253,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let res = res.unwrap_or_else(|e| e.to_compile_error()); let res = expander::Expander::new("construct_runtime") - .dry(std::env::var("FRAME_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(res) .expect("Does not fail because of IO in OUT_DIR; qed"); @@ -386,6 +386,7 @@ fn construct_runtime_final_expansion( let pallet_to_index =
decl_pallet_runtime_setup(&name, &pallets, &scrate); let dispatch = expand::expand_outer_dispatch(&name, system_pallet, &pallets, &scrate); + let tasks = expand::expand_outer_task(&name, &pallets, &scrate); let metadata = expand::expand_runtime_metadata( &name, &pallets, @@ -475,6 +476,8 @@ fn construct_runtime_final_expansion( #dispatch + #tasks + #metadata #outer_config @@ -600,66 +603,12 @@ fn decl_all_pallets<'a>( } }); - let all_pallets_without_system_reversed = attribute_to_names.iter().map(|(attr, names)| { - let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).rev(); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// Excludes the System pallet. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); - } - }); - - let all_pallets_with_system_reversed = attribute_to_names.iter().map(|(attr, names)| { - let names = names.iter().rev(); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithSystemReversed = ( #(#names,)* ); - } - }); - - let all_pallets_reversed_with_system_first = attribute_to_names.iter().map(|(attr, names)| { - let system = quote::format_ident!("{}", SYSTEM_PALLET_NAME); - let names = std::iter::once(&system) - .chain(names.iter().rev().filter(|n| **n != SYSTEM_PALLET_NAME).cloned()); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// With the system pallet first. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); - } - }); - quote!( #types - /// All pallets included in the runtime as a nested tuple of types. - #[deprecated(note = "The type definition has changed from representing all pallets \ - excluding system, in reversed order to become the representation of all pallets \ - including system pallet in regular order. For this reason it is encouraged to use \ - explicitly one of `AllPalletsWithSystem`, `AllPalletsWithoutSystem`, \ - `AllPalletsWithSystemReversed`, `AllPalletsWithoutSystemReversed`. \ - Note that the type `frame_executive::Executive` expects one of `AllPalletsWithSystem` \ - , `AllPalletsWithSystemReversed`, `AllPalletsReversedWithSystemFirst`. 
More details in \ - https://github.com/paritytech/substrate/pull/10043")] - pub type AllPallets = AllPalletsWithSystem; - #( #all_pallets_with_system )* #( #all_pallets_without_system )* - - #( #all_pallets_with_system_reversed )* - - #( #all_pallets_without_system_reversed )* - - #( #all_pallets_reversed_with_system_first )* ) } fn decl_pallet_runtime_setup( diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 9b08e16469754a98c8cc089176b973c7d1ce1fae..88f3f14dc86c541ef949beced9feaf222e9a3146 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -42,6 +42,7 @@ mod keyword { syn::custom_keyword!(ValidateUnsigned); syn::custom_keyword!(FreezeReason); syn::custom_keyword!(HoldReason); + syn::custom_keyword!(Task); syn::custom_keyword!(LockId); syn::custom_keyword!(SlashReason); syn::custom_keyword!(exclude_parts); @@ -404,6 +405,7 @@ pub enum PalletPartKeyword { ValidateUnsigned(keyword::ValidateUnsigned), FreezeReason(keyword::FreezeReason), HoldReason(keyword::HoldReason), + Task(keyword::Task), LockId(keyword::LockId), SlashReason(keyword::SlashReason), } @@ -434,6 +436,8 @@ impl Parse for PalletPartKeyword { Ok(Self::FreezeReason(input.parse()?)) } else if lookahead.peek(keyword::HoldReason) { Ok(Self::HoldReason(input.parse()?)) + } else if lookahead.peek(keyword::Task) { + Ok(Self::Task(input.parse()?)) } else if lookahead.peek(keyword::LockId) { Ok(Self::LockId(input.parse()?)) } else if lookahead.peek(keyword::SlashReason) { @@ -459,6 +463,7 @@ impl PalletPartKeyword { Self::ValidateUnsigned(_) => "ValidateUnsigned", Self::FreezeReason(_) => "FreezeReason", Self::HoldReason(_) => "HoldReason", + Self::Task(_) => "Task", Self::LockId(_) => "LockId", Self::SlashReason(_) => "SlashReason", } @@ -471,7 +476,7 @@ impl PalletPartKeyword { /// Returns the names of all pallet parts that allow to have a generic argument. fn all_generic_arg() -> &'static [&'static str] { - &["Event", "Error", "Origin", "Config"] + &["Event", "Error", "Origin", "Config", "Task"] } } @@ -489,6 +494,7 @@ impl ToTokens for PalletPartKeyword { Self::ValidateUnsigned(inner) => inner.to_tokens(tokens), Self::FreezeReason(inner) => inner.to_tokens(tokens), Self::HoldReason(inner) => inner.to_tokens(tokens), + Self::Task(inner) => inner.to_tokens(tokens), Self::LockId(inner) => inner.to_tokens(tokens), Self::SlashReason(inner) => inner.to_tokens(tokens), } diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index 8b5e334f1f5513cafc1fc5e58d1cbcb44d3232a8..d6d5bf68efd5689af2e96250e24cef3cd7faf47b 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -46,11 +46,15 @@ pub struct PalletAttr { typ: PalletAttrType, } -fn get_first_item_pallet_attr(item: &syn::ImplItemType) -> syn::Result> -where - Attr: syn::parse::Parse, -{ - item.attrs.get(0).map(|a| syn::parse2(a.into_token_stream())).transpose() +fn is_runtime_type(item: &syn::ImplItemType) -> bool { + item.attrs.iter().any(|attr| { + if let Ok(PalletAttr { typ: PalletAttrType::RuntimeType(_), .. 
}) = + parse2::<PalletAttr>(attr.into_token_stream()) + { + return true + } + false + }) } #[derive(Parse, Debug)] @@ -132,12 +136,15 @@ fn combine_impls( return None } if let ImplItem::Type(typ) = item.clone() { - let mut typ = typ.clone(); - if let Ok(Some(PalletAttr { typ: PalletAttrType::RuntimeType(_), .. })) = - get_first_item_pallet_attr::<PalletAttr>(&mut typ) - { + let cfg_attrs = typ + .attrs + .iter() + .filter(|attr| attr.path().get_ident().map_or(false, |ident| ident == "cfg")) + .map(|attr| attr.to_token_stream()); + if is_runtime_type(&typ) { let item: ImplItem = if inject_runtime_types { parse_quote! { + #( #cfg_attrs )* type #ident = #ident; } } else { @@ -147,6 +154,7 @@ fn combine_impls( } // modify and insert uncolliding type items let modified_item: ImplItem = parse_quote! { + #( #cfg_attrs )* type #ident = <#default_impl_path as #disambiguation_path>::#ident; }; return Some(modified_item) @@ -227,3 +235,25 @@ fn test_derive_impl_attr_args_parsing() { assert!(parse2::<DeriveImplAttrArgs>(quote!()).is_err()); assert!(parse2::<DeriveImplAttrArgs>(quote!(Config Config)).is_err()); } + +#[test] +fn test_runtime_type_with_doc() { + trait TestTrait { + type Test; + } + #[allow(unused)] + struct TestStruct; + let p = parse2::<ItemImpl>(quote!( + impl TestTrait for TestStruct { + /// Some doc + #[inject_runtime_type] + type Test = u32; + } + )) + .unwrap(); + for item in p.items { + if let ImplItem::Type(typ) = item { + assert_eq!(is_runtime_type(&typ), true); + } + } +} diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 68bf3e4874bee3fedfea7a923e78225ca8328466..349b6ee6599c27539f9fc11d50521b64cc14ee86 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -646,7 +646,6 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream /// ``` /// /// where `TestDefaultConfig` was defined and registered as follows: -/// /// ```ignore /// pub struct TestDefaultConfig; /// @@ -673,7 +672,6 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream /// ``` /// /// The above call to `derive_impl` would expand to roughly the following: -/// /// ```ignore /// impl frame_system::Config for Test { /// use frame_system::config_preludes::TestDefaultConfig; @@ -881,6 +879,7 @@ pub fn inject_runtime_type(_: TokenStream, tokens: TokenStream) -> TokenStream { let item = syn::parse_macro_input!(item as TraitItemType); if item.ident != "RuntimeCall" && item.ident != "RuntimeEvent" && + item.ident != "RuntimeTask" && item.ident != "RuntimeOrigin" && item.ident != "RuntimeHoldReason" && item.ident != "RuntimeFreezeReason" && @@ -888,10 +887,11 @@ pub fn inject_runtime_type(_: TokenStream, tokens: TokenStream) -> TokenStream { { return syn::Error::new_spanned( item, - "`#[inject_runtime_type]` can only be attached to `RuntimeCall`, `RuntimeEvent`, `RuntimeOrigin` or `PalletInfo`", + "`#[inject_runtime_type]` can only be attached to `RuntimeCall`, `RuntimeEvent`, \ + `RuntimeTask`, `RuntimeOrigin` or `PalletInfo`", ) .to_compile_error() - .into(); + .into() } tokens } @@ -1097,8 +1097,11 @@ pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must -/// return a `DispatchResultWithPostInfo` or `DispatchResult`.
+/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). #[proc_macro_attribute] pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } @@ -1108,7 +1111,7 @@ pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { /// --- /// /// **Rust-Analyzer users**: See the documentation of the Rust item in -/// `frame_support::pallet_macros::call`. +/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). #[proc_macro_attribute] pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } @@ -1117,43 +1120,42 @@ pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { /// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, /// which explicitly defines the codec index for the dispatchable function in the `Call` enum. /// -/// All call indexes start from 0, until it encounters a dispatchable function with a defined -/// call index. The dispatchable function that lexically follows the function with a defined -/// call index will have that call index, but incremented by 1, e.g. if there are 3 -/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` -/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +#[proc_macro_attribute] +pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` attribute, +/// which explicitly defines the condition for the dispatchable to be feeless. /// -/// All arguments must implement [`Debug`], [`PartialEq`], [`Eq`], `Decode`, `Encode`, and -/// [`Clone`]. For ease of use, bound by the trait `frame_support::pallet_prelude::Member`. +/// The arguments for the closure must be the referenced arguments of the dispatchable function. /// -/// If no `#[pallet::call]` exists, then a default implementation corresponding to the -/// following code is automatically generated: +/// The closure must return `bool`. /// +/// ### Example /// ```ignore -/// #[pallet::call] -/// impl<T: Config> Pallet<T> {} +/// #[pallet::feeless_if(|_origin: &OriginFor<T>, something: &u32| -> bool { +/// *something == 0 +/// })] +/// pub fn do_something(origin: OriginFor<T>, something: u32) -> DispatchResult { +/// .... +/// } /// ``` /// -/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be -/// done with care. Indeed this will change the outer runtime call type (which is an enum with -/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in -/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the -/// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so -/// that the `Call` enum encoding does not change after modification. As a general rule of -/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain -/// the existing order of calls.
+/// Please note that this only works for signed dispatchables and requires a signed extension +/// such as `SkipCheckIfFeeless` as defined in `pallet-skip-feeless-payment` to wrap the existing +/// payment extension. Else, this is completely ignored and the dispatchable is still charged. /// /// ### Macro expansion /// -/// The macro creates an enum `Call` with one variant per dispatchable. This enum implements: -/// [`Clone`], [`Eq`], [`PartialEq`], [`Debug`] (with stripped implementation in `not("std")`), -/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, `GetCallIndex` and -/// `UnfilteredDispatchable`. -/// -/// The macro implements the `Callable` trait on `Pallet` and a function `call_functions` -/// which returns the dispatchable metadata. +/// The macro implements the `CheckIfFeeless` trait on the dispatchable and calls the corresponding +/// closure in the implementation. #[proc_macro_attribute] -pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { +pub fn feeless_if(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } @@ -1516,6 +1518,56 @@ pub fn composite_enum(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::tasks_experimental`. +#[proc_macro_attribute] +pub fn tasks_experimental(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::task_list`. +#[proc_macro_attribute] +pub fn task_list(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::task_condition`. +#[proc_macro_attribute] +pub fn task_condition(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::task_weight`. +#[proc_macro_attribute] +pub fn task_weight(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::task_index`. +#[proc_macro_attribute] +pub fn task_index(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// Can be attached to a module. Doing so will declare that module as importable into a pallet /// via [`#[import_section]`](`macro@import_section`). 
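// Editorial aside, not part of the diff: the `SkipCheckIfFeeless` extension referenced above lives
// in `pallet-skip-feeless-payment` and is wired in at the runtime level by wrapping the
// fee-charging signed extension, so calls whose `#[pallet::feeless_if]` closure returns `true`
// skip it. A hedged sketch of that wiring (type parameters assumed, not taken from this diff):
//
//     pallet_skip_feeless_payment::SkipCheckIfFeeless<
//         Runtime,
//         pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
//     >
//
// placed in the runtime's `SignedExtra` tuple where `ChargeTransactionPayment` would otherwise sit.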
/// diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index ed6335159cd6edae4bf435ed60b00da655d5bda0..624cde018dc40258cf12014c5923a43996f496bd 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -241,6 +241,25 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }) .collect::<Vec<_>>(); + let cfg_attrs = methods + .iter() + .map(|method| { + let attrs = + method.cfg_attrs.iter().map(|attr| attr.to_token_stream()).collect::<Vec<_>>(); + quote::quote!( #( #attrs )* ) + }) + .collect::<Vec<_>>(); + + let feeless_check = methods.iter().map(|method| &method.feeless_check).collect::<Vec<_>>(); + let feeless_check_result = + feeless_check.iter().zip(args_name.iter()).map(|(feeless_check, arg_name)| { + if let Some(feeless_check) = feeless_check { + quote::quote!(#feeless_check(origin, #( #arg_name, )*)) + } else { + quote::quote!(false) + } + }); + quote::quote_spanned!(span => mod warnings { #( @@ -287,6 +306,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::Never, ), #( + #cfg_attrs #[doc = #fn_doc] #[codec(index = #call_index)] #fn_name { @@ -300,6 +320,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> #call_ident<#type_use_gen> #where_clause { #( + #cfg_attrs #[doc = #new_call_variant_doc] pub fn #new_call_variant_fn_name( #( #args_name_stripped: #args_type ),* @@ -318,6 +339,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { fn get_dispatch_info(&self) -> #frame_support::dispatch::DispatchInfo { match *self { #( + #cfg_attrs Self::#fn_name { #( #args_name_pattern_ref, )* } => { let __pallet_base_weight = #fn_weight; @@ -347,18 +369,36 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> #frame_support::dispatch::CheckIfFeeless for #call_ident<#type_use_gen> + #where_clause + { + type Origin = #frame_system::pallet_prelude::OriginFor<T>; + #[allow(unused_variables)] + fn is_feeless(&self, origin: &Self::Origin) -> bool { + match *self { + #( + #cfg_attrs + Self::#fn_name { #( #args_name_pattern_ref, )* } => { + #feeless_check_result + }, + )* + Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), + } + } + } + impl<#type_impl_gen> #frame_support::traits::GetCallName for #call_ident<#type_use_gen> #where_clause { fn get_call_name(&self) -> &'static str { match *self { - #( Self::#fn_name { .. } => stringify!(#fn_name), )* + #( #cfg_attrs Self::#fn_name { .. } => stringify!(#fn_name), )* Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."), } } fn get_call_names() -> &'static [&'static str] { - &[ #( stringify!(#fn_name), )* ] + &[ #( #cfg_attrs stringify!(#fn_name), )* ] } } @@ -367,13 +407,13 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { { fn get_call_index(&self) -> u8 { match *self { - #( Self::#fn_name { .. } => #call_index, )* + #( #cfg_attrs Self::#fn_name { ..
} => #call_index, )* Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."), } } fn get_call_indices() -> &'static [u8] { - &[ #( #call_index, )* ] + &[ #( #cfg_attrs #call_index, )* ] } } @@ -389,6 +429,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::dispatch_context::run_in_context(|| { match self { #( + #cfg_attrs Self::#fn_name { #( #args_name_pattern, )* } => { #frame_support::__private::sp_tracing::enter_span!( #frame_support::__private::sp_tracing::trace_span!(stringify!(#fn_name)) diff --git a/substrate/frame/support/procedural/src/pallet/expand/error.rs b/substrate/frame/support/procedural/src/pallet/expand/error.rs index 877489fd6057b85d06ba2477020776c91bf92d3c..72fb6e923572387622ef2ea820dc6931c32468ef 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/error.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/error.rs @@ -16,10 +16,14 @@ // limitations under the License. use crate::{ - pallet::{parse::error::VariantField, Def}, + pallet::{ + parse::error::{VariantDef, VariantField}, + Def, + }, COUNTER, }; use frame_support_procedural_tools::get_doc_literals; +use quote::ToTokens; use syn::spanned::Spanned; /// @@ -67,20 +71,23 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { ) ); - let as_str_matches = error.variants.iter().map(|(variant, field_ty, _)| { - let variant_str = variant.to_string(); - match field_ty { - Some(VariantField { is_named: true }) => { - quote::quote_spanned!(error.attr_span => Self::#variant { .. } => #variant_str,) - }, - Some(VariantField { is_named: false }) => { - quote::quote_spanned!(error.attr_span => Self::#variant(..) => #variant_str,) - }, - None => { - quote::quote_spanned!(error.attr_span => Self::#variant => #variant_str,) - }, - } - }); + let as_str_matches = error.variants.iter().map( + |VariantDef { ident: variant, field: field_ty, docs: _, cfg_attrs }| { + let variant_str = variant.to_string(); + let cfg_attrs = cfg_attrs.iter().map(|attr| attr.to_token_stream()); + match field_ty { + Some(VariantField { is_named: true }) => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant { .. } => #variant_str,) + }, + Some(VariantField { is_named: false }) => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant(..) => #variant_str,) + }, + None => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant => #variant_str,) + }, + } + }, + ); let error_item = { let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[error.index]; diff --git a/substrate/frame/support/procedural/src/pallet/expand/mod.rs b/substrate/frame/support/procedural/src/pallet/expand/mod.rs index 6f32e5697512f7e87a0130ec31e69eb5201c4d89..db242df781b124f86e14eb6bd084578fda723e73 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/mod.rs @@ -31,6 +31,7 @@ mod origin; mod pallet_struct; mod storage; mod store_trait; +mod tasks; mod tt_default_parts; mod type_value; mod validate_unsigned; @@ -60,6 +61,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { let pallet_struct = pallet_struct::expand_pallet_struct(&mut def); let config = config::expand_config(&mut def); let call = call::expand_call(&mut def); + let tasks = tasks::expand_tasks(&mut def); let error = error::expand_error(&mut def); let event = event::expand_event(&mut def); let storages = storage::expand_storages(&mut def); @@ -100,6 +102,7 @@ storage item. 
Otherwise, all storage items are listed among [*Type Definitions*] #pallet_struct #config #call + #tasks #error #event #storages diff --git a/substrate/frame/support/procedural/src/pallet/expand/tasks.rs b/substrate/frame/support/procedural/src/pallet/expand/tasks.rs new file mode 100644 index 0000000000000000000000000000000000000000..6697e5c822a31b1ef719f03b47eb1ae32169fd03 --- /dev/null +++ b/substrate/frame/support/procedural/src/pallet/expand/tasks.rs @@ -0,0 +1,267 @@ +//! Contains logic for expanding task-related items. + +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Home of the expansion code for the Tasks API + +use crate::pallet::{parse::tasks::*, Def}; +use derive_syn_parse::Parse; +use inflector::Inflector; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote, ToTokens}; +use syn::{parse_quote, spanned::Spanned, ItemEnum, ItemImpl}; + +impl TaskEnumDef { + /// Since we optionally allow users to manually specify a `#[pallet::task_enum]`, in the + /// event they _don't_ specify one (which is actually the most common behavior) we have to + /// generate one based on the existing [`TasksDef`]. This method performs that generation. + pub fn generate( + tasks: &TasksDef, + type_decl_bounded_generics: TokenStream2, + type_use_generics: TokenStream2, + ) -> Self { + let variants = if tasks.tasks_attr.is_some() { + tasks + .tasks + .iter() + .map(|task| { + let ident = &task.item.sig.ident; + let ident = + format_ident!("{}", ident.to_string().to_class_case(), span = ident.span()); + + let args = task.item.sig.inputs.iter().collect::>(); + + if args.is_empty() { + quote!(#ident) + } else { + quote!(#ident { + #(#args),* + }) + } + }) + .collect::>() + } else { + Vec::new() + }; + let mut task_enum_def: TaskEnumDef = parse_quote! { + /// Auto-generated enum that encapsulates all tasks defined by this pallet. + /// + /// Conceptually similar to the [`Call`] enum, but for tasks. This is only + /// generated if there are tasks present in this pallet. + #[pallet::task_enum] + pub enum Task<#type_decl_bounded_generics> { + #( + #variants, + )* + } + }; + task_enum_def.type_use_generics = type_use_generics; + task_enum_def + } +} + +impl ToTokens for TaskEnumDef { + fn to_tokens(&self, tokens: &mut TokenStream2) { + let item_enum = &self.item_enum; + let ident = &item_enum.ident; + let vis = &item_enum.vis; + let attrs = &item_enum.attrs; + let generics = &item_enum.generics; + let variants = &item_enum.variants; + let scrate = &self.scrate; + let type_use_generics = &self.type_use_generics; + if self.attr.is_some() { + // `item_enum` is short-hand / generated enum + tokens.extend(quote! 
{ + #(#attrs)* + #[derive( + #scrate::CloneNoBound, + #scrate::EqNoBound, + #scrate::PartialEqNoBound, + #scrate::pallet_prelude::Encode, + #scrate::pallet_prelude::Decode, + #scrate::pallet_prelude::TypeInfo, + )] + #[codec(encode_bound())] + #[codec(decode_bound())] + #[scale_info(skip_type_params(#type_use_generics))] + #vis enum #ident #generics { + #variants + #[doc(hidden)] + #[codec(skip)] + __Ignore(core::marker::PhantomData, #scrate::Never), + } + + impl core::fmt::Debug for #ident<#type_use_generics> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct(stringify!(#ident)).field("value", self).finish() + } + } + }); + } else { + // `item_enum` is a manually specified enum (no attribute) + tokens.extend(item_enum.to_token_stream()); + } + } +} + +/// Represents an already-expanded [`TasksDef`]. +#[derive(Parse)] +pub struct ExpandedTasksDef { + pub task_item_impl: ItemImpl, + pub task_trait_impl: ItemImpl, +} + +impl ToTokens for TasksDef { + fn to_tokens(&self, tokens: &mut TokenStream2) { + let scrate = &self.scrate; + let enum_ident = syn::Ident::new("Task", self.enum_ident.span()); + let enum_arguments = &self.enum_arguments; + let enum_use = quote!(#enum_ident #enum_arguments); + + let task_fn_idents = self + .tasks + .iter() + .map(|task| { + format_ident!( + "{}", + &task.item.sig.ident.to_string().to_class_case(), + span = task.item.sig.ident.span() + ) + }) + .collect::>(); + let task_indices = self.tasks.iter().map(|task| &task.index_attr.meta.index); + let task_conditions = self.tasks.iter().map(|task| &task.condition_attr.meta.expr); + let task_weights = self.tasks.iter().map(|task| &task.weight_attr.meta.expr); + let task_iters = self.tasks.iter().map(|task| &task.list_attr.meta.expr); + + let task_fn_impls = self.tasks.iter().map(|task| { + let mut task_fn_impl = task.item.clone(); + task_fn_impl.attrs = vec![]; + task_fn_impl + }); + + let task_fn_names = self.tasks.iter().map(|task| &task.item.sig.ident); + let task_arg_names = self.tasks.iter().map(|task| &task.arg_names).collect::>(); + + let sp_std = quote!(#scrate::__private::sp_std); + let impl_generics = &self.item_impl.generics; + tokens.extend(quote! { + impl #impl_generics #enum_use + { + #(#task_fn_impls)* + } + + impl #impl_generics #scrate::traits::Task for #enum_use + { + type Enumeration = #sp_std::vec::IntoIter<#enum_use>; + + fn iter() -> Self::Enumeration { + let mut all_tasks = #sp_std::vec![]; + #(all_tasks + .extend(#task_iters.map(|(#(#task_arg_names),*)| #enum_ident::#task_fn_idents { #(#task_arg_names: #task_arg_names.clone()),* }) + .collect::<#sp_std::vec::Vec<_>>()); + )* + all_tasks.into_iter() + } + + fn task_index(&self) -> u32 { + match self.clone() { + #(#enum_ident::#task_fn_idents { .. 
} => #task_indices,)* + Task::__Ignore(_, _) => unreachable!(), + } + } + + fn is_valid(&self) -> bool { + match self.clone() { + #(#enum_ident::#task_fn_idents { #(#task_arg_names),* } => (#task_conditions)(#(#task_arg_names),* ),)* + Task::__Ignore(_, _) => unreachable!(), + } + } + + fn run(&self) -> Result<(), #scrate::pallet_prelude::DispatchError> { + match self.clone() { + #(#enum_ident::#task_fn_idents { #(#task_arg_names),* } => { + <#enum_use>::#task_fn_names(#( #task_arg_names, )* ) + },)* + Task::__Ignore(_, _) => unreachable!(), + } + } + + #[allow(unused_variables)] + fn weight(&self) -> #scrate::pallet_prelude::Weight { + match self.clone() { + #(#enum_ident::#task_fn_idents { #(#task_arg_names),* } => #task_weights,)* + Task::__Ignore(_, _) => unreachable!(), + } + } + } + }); + } +} + +/// Expands the [`TasksDef`] in the enclosing [`Def`], if present, and returns its tokens. +/// +/// This modifies the underlying [`Def`] in addition to returning any tokens that were added. +pub fn expand_tasks_impl(def: &mut Def) -> TokenStream2 { + let Some(tasks) = &mut def.tasks else { return quote!() }; + let ExpandedTasksDef { task_item_impl, task_trait_impl } = parse_quote!(#tasks); + quote! { + #task_item_impl + #task_trait_impl + } +} + +/// Represents a fully-expanded [`TaskEnumDef`]. +#[derive(Parse)] +pub struct ExpandedTaskEnum { + pub item_enum: ItemEnum, + pub debug_impl: ItemImpl, +} + +/// Modifies a [`Def`] to expand the underlying [`TaskEnumDef`] if present, and also returns +/// its tokens. A blank [`TokenStream2`] is returned if no [`TaskEnumDef`] has been generated +/// or defined. +pub fn expand_task_enum(def: &mut Def) -> TokenStream2 { + let Some(task_enum) = &mut def.task_enum else { return quote!() }; + let ExpandedTaskEnum { item_enum, debug_impl } = parse_quote!(#task_enum); + quote! { + #item_enum + #debug_impl + } +} + +/// Modifies a [`Def`] to expand the underlying [`TasksDef`] and also generate a +/// [`TaskEnumDef`] if applicable. The tokens for these items are returned if they are created. +pub fn expand_tasks(def: &mut Def) -> TokenStream2 { + if let Some(tasks_def) = &def.tasks { + if def.task_enum.is_none() { + def.task_enum = Some(TaskEnumDef::generate( + &tasks_def, + def.type_decl_bounded_generics(tasks_def.item_impl.span()), + def.type_use_generics(tasks_def.item_impl.span()), + )); + } + } + let tasks_extra_output = expand_tasks_impl(def); + let task_enum_extra_output = expand_task_enum(def); + quote! 
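The generated `frame_support::traits::Task` implementation above gives the runtime a uniform surface for enumerating, validating, and executing tasks. A minimal consumption sketch, assuming a concrete `Runtime` and a `pallet_example_tasks` pallet path that are placeholders for illustration:

```rust
use frame_support::{pallet_prelude::DispatchError, traits::Task as _};

fn run_all_valid_tasks() -> Result<(), DispatchError> {
	// `iter()` evaluates every `#[pallet::task_list(..)]` expression and maps
	// each element to the corresponding generated enum variant.
	for task in pallet_example_tasks::Task::<Runtime>::iter() {
		// `is_valid()` evaluates the task's `#[pallet::task_condition(..)]` closure.
		if task.is_valid() {
			// `run()` dispatches to the original task function; `weight()` and
			// `task_index()` are available in the same way.
			task.run()?;
		}
	}
	Ok(())
}
```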
{ + #tasks_extra_output + #task_enum_extra_output + } +} diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index c9a776ee247527b8e76508144ece3708d110194b..7cc1415dfddf1514cf398555c650f4892097aefd 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -31,6 +31,8 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { let call_part = def.call.as_ref().map(|_| quote::quote!(Call,)); + let task_part = def.task_enum.as_ref().map(|_| quote::quote!(Task,)); + let storage_part = (!def.storages.is_empty()).then(|| quote::quote!(Storage,)); let event_part = def.event.as_ref().map(|event| { @@ -99,7 +101,7 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { tokens = [{ expanded::{ Pallet, #call_part #storage_part #event_part #error_part #origin_part #config_part - #inherent_part #validate_unsigned_part #freeze_reason_part + #inherent_part #validate_unsigned_part #freeze_reason_part #task_part #hold_reason_part #lock_id_part #slash_reason_part } }] diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 90631f264b92aba30a60059f0d992583b6253938..4e09b86fddec171cdbcd9d4a9c79fe9c6d922960 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -17,19 +17,22 @@ use super::{helper, InheritedCallWeightAttr}; use frame_support_procedural_tools::get_doc_literals; +use proc_macro2::Span; use quote::ToTokens; use std::collections::HashMap; -use syn::spanned::Spanned; +use syn::{spanned::Spanned, ExprClosure}; /// List of additional token to be used for parsing. mod keyword { syn::custom_keyword!(Call); syn::custom_keyword!(OriginFor); + syn::custom_keyword!(RuntimeOrigin); syn::custom_keyword!(weight); syn::custom_keyword!(call_index); syn::custom_keyword!(compact); syn::custom_keyword!(T); syn::custom_keyword!(pallet); + syn::custom_keyword!(feeless_if); } /// Definition of dispatchables typically `impl Pallet { ... }` @@ -82,13 +85,20 @@ pub struct CallVariantDef { pub docs: Vec, /// Attributes annotated at the top of the dispatchable function. pub attrs: Vec, + /// The `cfg` attributes. + pub cfg_attrs: Vec, + /// The optional `feeless_if` attribute on the `pallet::call`. + pub feeless_check: Option, } /// Attributes for functions in call impl block. 
-/// Parse for `#[pallet::weight(expr)]` or `#[pallet::call_index(expr)] pub enum FunctionAttr { + /// Parse for `#[pallet::call_index(expr)]` CallIndex(u8), + /// Parse for `#[pallet::weight(expr)]` Weight(syn::Expr), + /// Parse for `#[pallet::feeless_if(expr)]` + FeelessIf(Span, syn::ExprClosure), } impl syn::parse::Parse for FunctionAttr { @@ -115,6 +125,19 @@ impl syn::parse::Parse for FunctionAttr { return Err(syn::Error::new(index.span(), msg)) } Ok(FunctionAttr::CallIndex(index.base10_parse()?)) + } else if lookahead.peek(keyword::feeless_if) { + content.parse::()?; + let closure_content; + syn::parenthesized!(closure_content in content); + Ok(FunctionAttr::FeelessIf( + closure_content.span(), + closure_content.parse::().map_err(|e| { + let msg = "Invalid feeless_if attribute: expected a closure"; + let mut err = syn::Error::new(closure_content.span(), msg); + err.combine(e); + err + })?, + )) } else { Err(lookahead.error()) } @@ -138,28 +161,46 @@ impl syn::parse::Parse for ArgAttrIsCompact { } } -/// Check the syntax is `OriginFor` -pub fn check_dispatchable_first_arg_type(ty: &syn::Type) -> syn::Result<()> { - pub struct CheckDispatchableFirstArg; - impl syn::parse::Parse for CheckDispatchableFirstArg { +/// Check the syntax is `OriginFor`, `&OriginFor` or `T::RuntimeOrigin`. +pub fn check_dispatchable_first_arg_type(ty: &syn::Type, is_ref: bool) -> syn::Result<()> { + pub struct CheckOriginFor(bool); + impl syn::parse::Parse for CheckOriginFor { fn parse(input: syn::parse::ParseStream) -> syn::Result { + let is_ref = input.parse::().is_ok(); input.parse::()?; input.parse::()?; input.parse::()?; input.parse::]>()?; - Ok(Self) + Ok(Self(is_ref)) } } - syn::parse2::(ty.to_token_stream()).map_err(|e| { - let msg = "Invalid type: expected `OriginFor`"; - let mut err = syn::Error::new(ty.span(), msg); - err.combine(e); - err - })?; + pub struct CheckRuntimeOrigin; + impl syn::parse::Parse for CheckRuntimeOrigin { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + input.parse::()?; + input.parse::()?; - Ok(()) + Ok(Self) + } + } + + let result_origin_for = syn::parse2::(ty.to_token_stream()); + let result_runtime_origin = syn::parse2::(ty.to_token_stream()); + return match (result_origin_for, result_runtime_origin) { + (Ok(CheckOriginFor(has_ref)), _) if is_ref == has_ref => Ok(()), + (_, Ok(_)) => Ok(()), + (_, _) => { + let msg = if is_ref { + "Invalid type: expected `&OriginFor`" + } else { + "Invalid type: expected `OriginFor` or `T::RuntimeOrigin`" + }; + return Err(syn::Error::new(ty.span(), msg)) + }, + } } impl CallDef { @@ -215,7 +256,7 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Typed(arg)) => { - check_dispatchable_first_arg_type(&arg.ty)?; + check_dispatchable_first_arg_type(&arg.ty, false)?; }, } @@ -227,22 +268,28 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) } - let (mut weight_attrs, mut call_idx_attrs): (Vec, Vec) = - helper::take_item_pallet_attrs(&mut method.attrs)?.into_iter().partition( - |attr| { - if let FunctionAttr::Weight(_) = attr { - true - } else { - false - } + let cfg_attrs: Vec = helper::get_item_cfg_attrs(&method.attrs); + let mut call_idx_attrs = vec![]; + let mut weight_attrs = vec![]; + let mut feeless_attrs = vec![]; + for attr in helper::take_item_pallet_attrs(&mut method.attrs)?.into_iter() { + match attr { + FunctionAttr::CallIndex(_) => { + call_idx_attrs.push(attr); }, - ); + FunctionAttr::Weight(_) => { + weight_attrs.push(attr); + }, + 
FunctionAttr::FeelessIf(span, _) => { + feeless_attrs.push((span, attr)); + }, + } + } if weight_attrs.is_empty() && dev_mode { // inject a default O(1) weight when dev mode is enabled and no weight has // been specified on the call - let empty_weight: syn::Expr = syn::parse(quote::quote!(0).into()) - .expect("we are parsing a quoted string; qed"); + let empty_weight: syn::Expr = syn::parse_quote!(0); weight_attrs.push(FunctionAttr::Weight(empty_weight)); } @@ -251,8 +298,8 @@ impl CallDef { 0 if dev_mode => CallWeightDef::DevModeDefault, 0 => return Err(syn::Error::new( method.sig.span(), - "A pallet::call requires either a concrete `#[pallet::weight($expr)]` or an - inherited weight from the `#[pallet:call(weight($type))]` attribute, but + "A pallet::call requires either a concrete `#[pallet::weight($expr)]` or an + inherited weight from the `#[pallet:call(weight($type))]` attribute, but none were given.", )), 1 => match weight_attrs.pop().unwrap() { @@ -323,6 +370,73 @@ impl CallDef { let docs = get_doc_literals(&method.attrs); + if feeless_attrs.len() > 1 { + let msg = "Invalid pallet::call, there can only be one feeless_if attribute"; + return Err(syn::Error::new(feeless_attrs[1].0, msg)) + } + let feeless_check: Option = + feeless_attrs.pop().map(|(_, attr)| match attr { + FunctionAttr::FeelessIf(_, closure) => closure, + _ => unreachable!("checked during creation of the let binding"), + }); + + if let Some(ref feeless_check) = feeless_check { + if feeless_check.inputs.len() != args.len() + 1 { + let msg = "Invalid pallet::call, feeless_if closure must have same \ + number of arguments as the dispatchable function"; + return Err(syn::Error::new(feeless_check.span(), msg)) + } + + match feeless_check.inputs.first() { + None => { + let msg = "Invalid pallet::call, feeless_if closure must have at least origin arg"; + return Err(syn::Error::new(feeless_check.span(), msg)) + }, + Some(syn::Pat::Type(arg)) => { + check_dispatchable_first_arg_type(&arg.ty, true)?; + }, + _ => { + let msg = "Invalid pallet::call, feeless_if closure first argument must be a typed argument, \ + e.g. `origin: OriginFor`"; + return Err(syn::Error::new(feeless_check.span(), msg)) + }, + } + + for (feeless_arg, arg) in feeless_check.inputs.iter().skip(1).zip(args.iter()) { + let feeless_arg_type = + if let syn::Pat::Type(syn::PatType { ty, .. }) = feeless_arg.clone() { + if let syn::Type::Reference(pat) = *ty { + pat.elem.clone() + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a reference"; + return Err(syn::Error::new(ty.span(), msg)) + } + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a type ascription pattern"; + return Err(syn::Error::new(feeless_arg.span(), msg)) + }; + + if feeless_arg_type != arg.2 { + let msg = + "Invalid pallet::call, feeless_if closure argument must have \ + a reference to the same type as the dispatchable function argument"; + return Err(syn::Error::new(feeless_arg.span(), msg)) + } + } + + let valid_return = match &feeless_check.output { + syn::ReturnType::Type(_, type_) => match *(type_.clone()) { + syn::Type::Path(syn::TypePath { path, .. 
}) => path.is_ident("bool"), + _ => false, + }, + _ => false, + }; + if !valid_return { + let msg = "Invalid pallet::call, feeless_if closure must return `bool`"; + return Err(syn::Error::new(feeless_check.output.span(), msg)) + } + } + methods.push(CallVariantDef { name: method.sig.ident.clone(), weight, @@ -331,6 +445,8 @@ impl CallDef { args, docs, attrs: method.attrs.clone(), + cfg_attrs, + feeless_check, }); } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/substrate/frame/support/procedural/src/pallet/parse/composite.rs b/substrate/frame/support/procedural/src/pallet/parse/composite.rs index 6e6ea6a795c1403b64891c00507cf0f7d8dffc25..fa5f47dfdfa184d25a15ba05842f14c63fdbede3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/composite.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/composite.rs @@ -26,11 +26,14 @@ pub mod keyword { syn::custom_keyword!(HoldReason); syn::custom_keyword!(LockId); syn::custom_keyword!(SlashReason); + syn::custom_keyword!(Task); + pub enum CompositeKeyword { FreezeReason(FreezeReason), HoldReason(HoldReason), LockId(LockId), SlashReason(SlashReason), + Task(Task), } impl ToTokens for CompositeKeyword { @@ -41,6 +44,7 @@ pub mod keyword { HoldReason(inner) => inner.to_tokens(tokens), LockId(inner) => inner.to_tokens(tokens), SlashReason(inner) => inner.to_tokens(tokens), + Task(inner) => inner.to_tokens(tokens), } } } @@ -56,6 +60,8 @@ pub mod keyword { Ok(Self::LockId(input.parse()?)) } else if lookahead.peek(SlashReason) { Ok(Self::SlashReason(input.parse()?)) + } else if lookahead.peek(Task) { + Ok(Self::Task(input.parse()?)) } else { Err(lookahead.error()) } @@ -71,6 +77,7 @@ pub mod keyword { match self { FreezeReason(_) => "FreezeReason", HoldReason(_) => "HoldReason", + Task(_) => "Task", LockId(_) => "LockId", SlashReason(_) => "SlashReason", } @@ -80,7 +87,7 @@ pub mod keyword { } pub struct CompositeDef { - /// The index of the HoldReason item in the pallet module. + /// The index of the CompositeDef item in the pallet module. pub index: usize, /// The composite keyword used (contains span). pub composite_keyword: keyword::CompositeKeyword, diff --git a/substrate/frame/support/procedural/src/pallet/parse/error.rs b/substrate/frame/support/procedural/src/pallet/parse/error.rs index 6f82ce61fc93fc118ec4a7017fb7ccc87275a71b..362df8d7340ce0caad72cf85df88378569329672 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/error.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/error.rs @@ -25,19 +25,31 @@ mod keyword { syn::custom_keyword!(Error); } -/// Records information about the error enum variants. +/// Records information about the error enum variant field. pub struct VariantField { /// Whether or not the field is named, i.e. whether it is a tuple variant or struct variant. pub is_named: bool, } +/// Records information about the error enum variants. +pub struct VariantDef { + /// The variant ident. + pub ident: syn::Ident, + /// The variant field, if any. + pub field: Option, + /// The variant doc literals. + pub docs: Vec, + /// The `cfg` attributes. + pub cfg_attrs: Vec, +} + /// This checks error declaration as a enum declaration with only variants without fields nor /// discriminant. pub struct ErrorDef { /// The index of error item in pallet module. pub index: usize, - /// Variants ident, optional field and doc literals (ordered as declaration order) - pub variants: Vec<(syn::Ident, Option, Vec)>, + /// Variant definitions. 
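Stepping back to the `#[pallet::feeless_if(..)]` checks in `parse/call.rs` above: taken together they pin down the required shape of the closure, namely one `&OriginFor<T>` (or `T::RuntimeOrigin`) parameter, followed by a reference to each dispatchable argument in order, with an explicit `-> bool` return type. A hedged sketch of a conforming call (pallet and argument names are made up):

```rust
#[pallet::call]
impl<T: Config> Pallet<T> {
	#[pallet::call_index(0)]
	#[pallet::weight(Weight::from_parts(10_000, 0))]
	// The closure mirrors the dispatchable's signature: origin by reference,
	// then `&u32` for `value`, and it must explicitly return `bool`.
	#[pallet::feeless_if(|_origin: &OriginFor<T>, value: &u32| -> bool { *value == 0 })]
	pub fn do_something(origin: OriginFor<T>, value: u32) -> DispatchResult {
		let _who = frame_system::ensure_signed(origin)?;
		Ok(())
	}
}
```

The attribute's name suggests the call is treated as feeless when the closure evaluates to `true`, but this part of the diff only enforces the closure's shape; the fee handling itself lives elsewhere.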
+ pub variants: Vec, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, /// The keyword error used (contains span). @@ -87,8 +99,14 @@ impl ErrorDef { let span = variant.discriminant.as_ref().unwrap().0.span(); return Err(syn::Error::new(span, msg)) } + let cfg_attrs: Vec = helper::get_item_cfg_attrs(&variant.attrs); - Ok((variant.ident.clone(), field_ty, get_doc_literals(&variant.attrs))) + Ok(VariantDef { + ident: variant.ident.clone(), + field: field_ty, + docs: get_doc_literals(&variant.attrs), + cfg_attrs, + }) }) .collect::>()?; diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index 83a881751ef308a948e9fc69a684cd90862af145..e1efdbcc2027975d89a00b1037bd5d0af68998a3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -33,11 +33,16 @@ pub mod inherent; pub mod origin; pub mod pallet_struct; pub mod storage; +pub mod tasks; pub mod type_value; pub mod validate_unsigned; +#[cfg(test)] +pub mod tests; + use composite::{keyword::CompositeKeyword, CompositeDef}; use frame_support_procedural_tools::generate_access_from_frame_or_crate; +use quote::ToTokens; use syn::spanned::Spanned; /// Parsed definition of a pallet. @@ -49,6 +54,8 @@ pub struct Def { pub pallet_struct: pallet_struct::PalletStructDef, pub hooks: Option, pub call: Option, + pub tasks: Option, + pub task_enum: Option, pub storages: Vec, pub error: Option, pub event: Option, @@ -84,6 +91,8 @@ impl Def { let mut pallet_struct = None; let mut hooks = None; let mut call = None; + let mut tasks = None; + let mut task_enum = None; let mut error = None; let mut event = None; let mut origin = None; @@ -118,6 +127,32 @@ impl Def { }, Some(PalletAttr::RuntimeCall(cw, span)) if call.is_none() => call = Some(call::CallDef::try_from(span, index, item, dev_mode, cw)?), + Some(PalletAttr::Tasks(_)) if tasks.is_none() => { + let item_tokens = item.to_token_stream(); + // `TasksDef::parse` needs to know if attr was provided so we artificially + // re-insert it here + tasks = Some(syn::parse2::(quote::quote! { + #[pallet::tasks_experimental] + #item_tokens + })?); + + // replace item with a no-op because it will be handled by the expansion of tasks + *item = syn::Item::Verbatim(quote::quote!()); + } + Some(PalletAttr::TaskCondition(span)) => return Err(syn::Error::new( + span, + "`#[pallet::task_condition]` can only be used on items within an `impl` statement." + )), + Some(PalletAttr::TaskIndex(span)) => return Err(syn::Error::new( + span, + "`#[pallet::task_index]` can only be used on items within an `impl` statement." + )), + Some(PalletAttr::TaskList(span)) => return Err(syn::Error::new( + span, + "`#[pallet::task_list]` can only be used on items within an `impl` statement." 
+ )), + Some(PalletAttr::RuntimeTask(_)) if task_enum.is_none() => + task_enum = Some(syn::parse2::(item.to_token_stream())?), Some(PalletAttr::Error(span)) if error.is_none() => error = Some(error::ErrorDef::try_from(span, index, item)?), Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => @@ -190,6 +225,8 @@ impl Def { return Err(syn::Error::new(item_span, msg)) } + Self::resolve_tasks(&item_span, &mut tasks, &mut task_enum, items)?; + let def = Def { item, config: config @@ -198,6 +235,8 @@ impl Def { .ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::pallet]`"))?, hooks, call, + tasks, + task_enum, extra_constants, genesis_config, genesis_build, @@ -220,6 +259,99 @@ impl Def { Ok(def) } + /// Performs extra logic checks necessary for the `#[pallet::tasks_experimental]` feature. + fn resolve_tasks( + item_span: &proc_macro2::Span, + tasks: &mut Option, + task_enum: &mut Option, + items: &mut Vec, + ) -> syn::Result<()> { + // fallback for manual (without macros) definition of tasks impl + Self::resolve_manual_tasks_impl(tasks, task_enum, items)?; + + // fallback for manual (without macros) definition of task enum + Self::resolve_manual_task_enum(tasks, task_enum, items)?; + + // ensure that if `task_enum` is specified, `tasks` is also specified + match (&task_enum, &tasks) { + (Some(_), None) => + return Err(syn::Error::new( + *item_span, + "Missing `#[pallet::tasks_experimental]` impl", + )), + (None, Some(tasks)) => + if tasks.tasks_attr.is_none() { + return Err(syn::Error::new( + tasks.item_impl.impl_token.span(), + "A `#[pallet::tasks_experimental]` attribute must be attached to your `Task` impl if the \ + task enum has been omitted", + )) + } else { + }, + _ => (), + } + + Ok(()) + } + + /// Tries to locate task enum based on the tasks impl target if attribute is not specified + /// but impl is present. If one is found, `task_enum` is set appropriately. + fn resolve_manual_task_enum( + tasks: &Option, + task_enum: &mut Option, + items: &mut Vec, + ) -> syn::Result<()> { + let (None, Some(tasks)) = (&task_enum, &tasks) else { return Ok(()) }; + let syn::Type::Path(type_path) = &*tasks.item_impl.self_ty else { return Ok(()) }; + let type_path = type_path.path.segments.iter().collect::>(); + let (Some(seg), None) = (type_path.get(0), type_path.get(1)) else { return Ok(()) }; + let mut result = None; + for item in items { + let syn::Item::Enum(item_enum) = item else { continue }; + if item_enum.ident == seg.ident { + result = Some(syn::parse2::(item_enum.to_token_stream())?); + // replace item with a no-op because it will be handled by the expansion of + // `task_enum`. 
We use a no-op instead of simply removing it from the vec + // so that any indices collected by `Def::try_from` remain accurate + *item = syn::Item::Verbatim(quote::quote!()); + break + } + } + *task_enum = result; + Ok(()) + } + + /// Tries to locate a manual tasks impl (an impl impling a trait whose last path segment is + /// `Task`) in the event that one has not been found already via the attribute macro + pub fn resolve_manual_tasks_impl( + tasks: &mut Option, + task_enum: &Option, + items: &Vec, + ) -> syn::Result<()> { + let None = tasks else { return Ok(()) }; + let mut result = None; + for item in items { + let syn::Item::Impl(item_impl) = item else { continue }; + let Some((_, path, _)) = &item_impl.trait_ else { continue }; + let Some(trait_last_seg) = path.segments.last() else { continue }; + let syn::Type::Path(target_path) = &*item_impl.self_ty else { continue }; + let target_path = target_path.path.segments.iter().collect::>(); + let (Some(target_ident), None) = (target_path.get(0), target_path.get(1)) else { + continue + }; + let matches_task_enum = match task_enum { + Some(task_enum) => task_enum.item_enum.ident == target_ident.ident, + None => true, + }; + if trait_last_seg.ident == "Task" && matches_task_enum { + result = Some(syn::parse2::(item_impl.to_token_stream())?); + break + } + } + *tasks = result; + Ok(()) + } + /// Check that usage of trait `Event` is consistent with the definition, i.e. it is declared /// and trait defines type RuntimeEvent, or not declared and no trait associated type. fn check_event_usage(&self) -> syn::Result<()> { @@ -408,6 +540,11 @@ impl GenericKind { mod keyword { syn::custom_keyword!(origin); syn::custom_keyword!(call); + syn::custom_keyword!(tasks_experimental); + syn::custom_keyword!(task_enum); + syn::custom_keyword!(task_list); + syn::custom_keyword!(task_condition); + syn::custom_keyword!(task_index); syn::custom_keyword!(weight); syn::custom_keyword!(event); syn::custom_keyword!(config); @@ -472,6 +609,11 @@ enum PalletAttr { /// instead of the zero weight. So to say: it works together with `dev_mode`. 
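The `resolve_manual_task_enum` and `resolve_manual_tasks_impl` fallbacks above mean the enum-side attribute is not strictly required: an unannotated enum is picked up when the `#[pallet::tasks_experimental]` impl names it as its `Self` type, and a hand-written `impl ... Task for ...` is detected by its trait's last path segment. A rough sketch of the first of these manual styles, with a hypothetical enum name and the item bodies elided:

```rust
// No `#[pallet::task_enum]` needed here: because the impl below names this enum
// as its `Self` type, `resolve_manual_task_enum` associates it with the tasks
// impl and replaces the original item with a no-op for later expansion.
pub enum MyCustomTaskEnum<T: Config> {
	Something,
}

#[pallet::tasks_experimental]
impl<T: Config> frame_support::traits::Task for MyCustomTaskEnum<T> {
	// ... manual `Enumeration` type plus `iter()`, `task_index()`, `is_valid()`,
	// `run()` and `weight()` items ...
}
```

Dropping the `#[pallet::tasks_experimental]` attribute entirely is also accepted via `resolve_manual_tasks_impl`, but only if the enum carries `#[pallet::task_enum]`; otherwise `resolve_tasks` raises the "attribute must be attached to your `Task` impl" error shown above.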
RuntimeCall(Option, proc_macro2::Span), Error(proc_macro2::Span), + Tasks(proc_macro2::Span), + TaskList(proc_macro2::Span), + TaskCondition(proc_macro2::Span), + TaskIndex(proc_macro2::Span), + RuntimeTask(proc_macro2::Span), RuntimeEvent(proc_macro2::Span), RuntimeOrigin(proc_macro2::Span), Inherent(proc_macro2::Span), @@ -490,8 +632,13 @@ impl PalletAttr { Self::Config(span, _) => *span, Self::Pallet(span) => *span, Self::Hooks(span) => *span, - Self::RuntimeCall(_, span) => *span, + Self::Tasks(span) => *span, + Self::TaskCondition(span) => *span, + Self::TaskIndex(span) => *span, + Self::TaskList(span) => *span, Self::Error(span) => *span, + Self::RuntimeTask(span) => *span, + Self::RuntimeCall(_, span) => *span, Self::RuntimeEvent(span) => *span, Self::RuntimeOrigin(span) => *span, Self::Inherent(span) => *span, @@ -535,6 +682,16 @@ impl syn::parse::Parse for PalletAttr { false => Some(InheritedCallWeightAttr::parse(&content)?), }; Ok(PalletAttr::RuntimeCall(attr, span)) + } else if lookahead.peek(keyword::tasks_experimental) { + Ok(PalletAttr::Tasks(content.parse::()?.span())) + } else if lookahead.peek(keyword::task_enum) { + Ok(PalletAttr::RuntimeTask(content.parse::()?.span())) + } else if lookahead.peek(keyword::task_condition) { + Ok(PalletAttr::TaskCondition(content.parse::()?.span())) + } else if lookahead.peek(keyword::task_index) { + Ok(PalletAttr::TaskIndex(content.parse::()?.span())) + } else if lookahead.peek(keyword::task_list) { + Ok(PalletAttr::TaskList(content.parse::()?.span())) } else if lookahead.peek(keyword::error) { Ok(PalletAttr::Error(content.parse::()?.span())) } else if lookahead.peek(keyword::event) { diff --git a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs new file mode 100644 index 0000000000000000000000000000000000000000..6405bb415a6f1eed18cab8d4f6c40f8a37f2048b --- /dev/null +++ b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs @@ -0,0 +1,968 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Home of the parsing code for the Tasks API + +use std::collections::HashSet; + +#[cfg(test)] +use crate::assert_parse_error_matches; + +#[cfg(test)] +use crate::pallet::parse::tests::simulate_manifest_dir; + +use derive_syn_parse::Parse; +use frame_support_procedural_tools::generate_access_from_frame_or_crate; +use proc_macro2::TokenStream as TokenStream2; +use quote::{quote, ToTokens}; +use syn::{ + parse::ParseStream, + parse2, + spanned::Spanned, + token::{Bracket, Paren, PathSep, Pound}, + Attribute, Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, + PathArguments, Result, TypePath, +}; + +pub mod keywords { + use syn::custom_keyword; + + custom_keyword!(tasks_experimental); + custom_keyword!(task_enum); + custom_keyword!(task_list); + custom_keyword!(task_condition); + custom_keyword!(task_index); + custom_keyword!(task_weight); + custom_keyword!(pallet); +} + +/// Represents the `#[pallet::tasks_experimental]` attribute and its attached item. Also includes +/// metadata about the linked [`TaskEnumDef`] if applicable. +#[derive(Clone, Debug)] +pub struct TasksDef { + pub tasks_attr: Option, + pub tasks: Vec, + pub item_impl: ItemImpl, + /// Path to `frame_support` + pub scrate: Path, + pub enum_ident: Ident, + pub enum_arguments: PathArguments, +} + +impl syn::parse::Parse for TasksDef { + fn parse(input: ParseStream) -> Result { + let item_impl: ItemImpl = input.parse()?; + let (tasks_attrs, normal_attrs) = partition_tasks_attrs(&item_impl); + let tasks_attr = match tasks_attrs.first() { + Some(attr) => Some(parse2::(attr.to_token_stream())?), + None => None, + }; + if let Some(extra_tasks_attr) = tasks_attrs.get(1) { + return Err(Error::new( + extra_tasks_attr.span(), + "unexpected extra `#[pallet::tasks_experimental]` attribute", + )) + } + let tasks: Vec = if tasks_attr.is_some() { + item_impl + .items + .clone() + .into_iter() + .filter(|impl_item| matches!(impl_item, ImplItem::Fn(_))) + .map(|item| parse2::(item.to_token_stream())) + .collect::>()? + } else { + Vec::new() + }; + let mut task_indices = HashSet::::new(); + for task in tasks.iter() { + let task_index = &task.index_attr.meta.index; + if !task_indices.insert(task_index.clone()) { + return Err(Error::new( + task_index.span(), + format!("duplicate task index `{}`", task_index), + )) + } + } + let mut item_impl = item_impl; + item_impl.attrs = normal_attrs; + + // we require the path on the impl to be a TypePath + let enum_path = parse2::(item_impl.self_ty.to_token_stream())?; + let segments = enum_path.path.segments.iter().collect::>(); + let (Some(last_seg), None) = (segments.get(0), segments.get(1)) else { + return Err(Error::new( + enum_path.span(), + "if specified manually, the task enum must be defined locally in this \ + pallet and cannot be a re-export", + )) + }; + let enum_ident = last_seg.ident.clone(); + let enum_arguments = last_seg.arguments.clone(); + + // We do this here because it would be improper to do something fallible like this at + // the expansion phase. Fallible stuff should happen during parsing. + let scrate = generate_access_from_frame_or_crate("frame-support")?; + + Ok(TasksDef { tasks_attr, item_impl, tasks, scrate, enum_ident, enum_arguments }) + } +} + +/// Parsing for a `#[pallet::tasks_experimental]` attr. +pub type PalletTasksAttr = PalletTaskAttr; + +/// Parsing for any of the attributes that can be used within a `#[pallet::tasks_experimental]` +/// [`ItemImpl`]. +pub type TaskAttr = PalletTaskAttr; + +/// Parsing for a `#[pallet::task_index]` attr. 
+pub type TaskIndexAttr = PalletTaskAttr; + +/// Parsing for a `#[pallet::task_condition]` attr. +pub type TaskConditionAttr = PalletTaskAttr; + +/// Parsing for a `#[pallet::task_list]` attr. +pub type TaskListAttr = PalletTaskAttr; + +/// Parsing for a `#[pallet::task_weight]` attr. +pub type TaskWeightAttr = PalletTaskAttr; + +/// Parsing for a `#[pallet:task_enum]` attr. +pub type PalletTaskEnumAttr = PalletTaskAttr; + +/// Parsing for a manually-specified (or auto-generated) task enum, optionally including the +/// attached `#[pallet::task_enum]` attribute. +#[derive(Clone, Debug)] +pub struct TaskEnumDef { + pub attr: Option, + pub item_enum: ItemEnum, + pub scrate: Path, + pub type_use_generics: TokenStream2, +} + +impl syn::parse::Parse for TaskEnumDef { + fn parse(input: ParseStream) -> Result { + let mut item_enum = input.parse::()?; + let attr = extract_pallet_attr(&mut item_enum)?; + let attr = match attr { + Some(attr) => Some(parse2(attr)?), + None => None, + }; + + // We do this here because it would be improper to do something fallible like this at + // the expansion phase. Fallible stuff should happen during parsing. + let scrate = generate_access_from_frame_or_crate("frame-support")?; + + let type_use_generics = quote!(T); + + Ok(TaskEnumDef { attr, item_enum, scrate, type_use_generics }) + } +} + +/// Represents an individual tasks within a [`TasksDef`]. +#[derive(Debug, Clone)] +pub struct TaskDef { + pub index_attr: TaskIndexAttr, + pub condition_attr: TaskConditionAttr, + pub list_attr: TaskListAttr, + pub weight_attr: TaskWeightAttr, + pub normal_attrs: Vec, + pub item: ImplItemFn, + pub arg_names: Vec, +} + +impl syn::parse::Parse for TaskDef { + fn parse(input: ParseStream) -> Result { + let item = input.parse::()?; + // we only want to activate TaskAttrType parsing errors for tasks-related attributes, + // so we filter them here + let (task_attrs, normal_attrs) = partition_task_attrs(&item); + + let task_attrs: Vec = task_attrs + .into_iter() + .map(|attr| parse2(attr.to_token_stream())) + .collect::>()?; + + let Some(index_attr) = task_attrs + .iter() + .find(|attr| matches!(attr.meta, TaskAttrMeta::TaskIndex(_))) + .cloned() + else { + return Err(Error::new( + item.sig.ident.span(), + "missing `#[pallet::task_index(..)]` attribute", + )) + }; + + let Some(condition_attr) = task_attrs + .iter() + .find(|attr| matches!(attr.meta, TaskAttrMeta::TaskCondition(_))) + .cloned() + else { + return Err(Error::new( + item.sig.ident.span(), + "missing `#[pallet::task_condition(..)]` attribute", + )) + }; + + let Some(list_attr) = task_attrs + .iter() + .find(|attr| matches!(attr.meta, TaskAttrMeta::TaskList(_))) + .cloned() + else { + return Err(Error::new( + item.sig.ident.span(), + "missing `#[pallet::task_list(..)]` attribute", + )) + }; + + let Some(weight_attr) = task_attrs + .iter() + .find(|attr| matches!(attr.meta, TaskAttrMeta::TaskWeight(_))) + .cloned() + else { + return Err(Error::new( + item.sig.ident.span(), + "missing `#[pallet::task_weight(..)]` attribute", + )) + }; + + if let Some(duplicate) = task_attrs + .iter() + .filter(|attr| matches!(attr.meta, TaskAttrMeta::TaskCondition(_))) + .collect::>() + .get(1) + { + return Err(Error::new( + duplicate.span(), + "unexpected extra `#[pallet::task_condition(..)]` attribute", + )) + } + + if let Some(duplicate) = task_attrs + .iter() + .filter(|attr| matches!(attr.meta, TaskAttrMeta::TaskList(_))) + .collect::>() + .get(1) + { + return Err(Error::new( + duplicate.span(), + "unexpected extra 
`#[pallet::task_list(..)]` attribute", + )) + } + + if let Some(duplicate) = task_attrs + .iter() + .filter(|attr| matches!(attr.meta, TaskAttrMeta::TaskIndex(_))) + .collect::>() + .get(1) + { + return Err(Error::new( + duplicate.span(), + "unexpected extra `#[pallet::task_index(..)]` attribute", + )) + } + + let mut arg_names = vec![]; + for input in item.sig.inputs.iter() { + match input { + syn::FnArg::Typed(pat_type) => match &*pat_type.pat { + syn::Pat::Ident(ident) => arg_names.push(ident.ident.clone()), + _ => return Err(Error::new(input.span(), "unexpected pattern type")), + }, + _ => return Err(Error::new(input.span(), "unexpected function argument type")), + } + } + + let index_attr = index_attr.try_into().expect("we check the type above; QED"); + let condition_attr = condition_attr.try_into().expect("we check the type above; QED"); + let list_attr = list_attr.try_into().expect("we check the type above; QED"); + let weight_attr = weight_attr.try_into().expect("we check the type above; QED"); + + Ok(TaskDef { + index_attr, + condition_attr, + list_attr, + weight_attr, + normal_attrs, + item, + arg_names, + }) + } +} + +/// The contents of a [`TasksDef`]-related attribute. +#[derive(Parse, Debug, Clone)] +pub enum TaskAttrMeta { + #[peek(keywords::task_list, name = "#[pallet::task_list(..)]")] + TaskList(TaskListAttrMeta), + #[peek(keywords::task_index, name = "#[pallet::task_index(..)")] + TaskIndex(TaskIndexAttrMeta), + #[peek(keywords::task_condition, name = "#[pallet::task_condition(..)")] + TaskCondition(TaskConditionAttrMeta), + #[peek(keywords::task_weight, name = "#[pallet::task_weight(..)")] + TaskWeight(TaskWeightAttrMeta), +} + +/// The contents of a `#[pallet::task_list]` attribute. +#[derive(Parse, Debug, Clone)] +pub struct TaskListAttrMeta { + pub task_list: keywords::task_list, + #[paren] + _paren: Paren, + #[inside(_paren)] + pub expr: Expr, +} + +/// The contents of a `#[pallet::task_index]` attribute. +#[derive(Parse, Debug, Clone)] +pub struct TaskIndexAttrMeta { + pub task_index: keywords::task_index, + #[paren] + _paren: Paren, + #[inside(_paren)] + pub index: LitInt, +} + +/// The contents of a `#[pallet::task_condition]` attribute. +#[derive(Parse, Debug, Clone)] +pub struct TaskConditionAttrMeta { + pub task_condition: keywords::task_condition, + #[paren] + _paren: Paren, + #[inside(_paren)] + pub expr: Expr, +} + +/// The contents of a `#[pallet::task_weight]` attribute. +#[derive(Parse, Debug, Clone)] +pub struct TaskWeightAttrMeta { + pub task_weight: keywords::task_weight, + #[paren] + _paren: Paren, + #[inside(_paren)] + pub expr: Expr, +} + +/// The contents of a `#[pallet::task]` attribute. 
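The four attribute metas handled above (`task_index`, `task_condition`, `task_list`, `task_weight`) are each mandatory on a task function and may appear at most once, per the "missing" and "unexpected extra" errors in `TaskDef::parse`. A compact well-formed task, adapted from the parsing test further down (storage, error, and weight expressions are illustrative placeholders):

```rust
#[pallet::tasks_experimental]
impl<T: Config> Pallet<T> {
	/// Move one stored number into the running total.
	#[pallet::task_index(0)]
	#[pallet::task_condition(|i| Numbers::<T>::contains_key(i))]
	#[pallet::task_list(Numbers::<T>::iter_keys())]
	// A real pallet would use a benchmarked weight expression here.
	#[pallet::task_weight(0)]
	pub fn add_number_into_total(i: u32) -> DispatchResult {
		let v = Numbers::<T>::take(i).ok_or(Error::<T>::NotFound)?;
		Total::<T>::mutate(|total| *total += v);
		Ok(())
	}
}
```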
+#[derive(Parse, Debug, Clone)] +pub struct PalletTaskAttr { + pub pound: Pound, + #[bracket] + _bracket: Bracket, + #[inside(_bracket)] + pub pallet: keywords::pallet, + #[inside(_bracket)] + pub colons: PathSep, + #[inside(_bracket)] + pub meta: T, +} + +impl ToTokens for TaskListAttrMeta { + fn to_tokens(&self, tokens: &mut TokenStream2) { + let task_list = self.task_list; + let expr = &self.expr; + tokens.extend(quote!(#task_list(#expr))); + } +} + +impl ToTokens for TaskConditionAttrMeta { + fn to_tokens(&self, tokens: &mut TokenStream2) { + let task_condition = self.task_condition; + let expr = &self.expr; + tokens.extend(quote!(#task_condition(#expr))); + } +} + +impl ToTokens for TaskWeightAttrMeta { + fn to_tokens(&self, tokens: &mut TokenStream2) { + let task_weight = self.task_weight; + let expr = &self.expr; + tokens.extend(quote!(#task_weight(#expr))); + } +} + +impl ToTokens for TaskIndexAttrMeta { + fn to_tokens(&self, tokens: &mut TokenStream2) { + let task_index = self.task_index; + let index = &self.index; + tokens.extend(quote!(#task_index(#index))) + } +} + +impl ToTokens for TaskAttrMeta { + fn to_tokens(&self, tokens: &mut TokenStream2) { + match self { + TaskAttrMeta::TaskList(list) => tokens.extend(list.to_token_stream()), + TaskAttrMeta::TaskIndex(index) => tokens.extend(index.to_token_stream()), + TaskAttrMeta::TaskCondition(condition) => tokens.extend(condition.to_token_stream()), + TaskAttrMeta::TaskWeight(weight) => tokens.extend(weight.to_token_stream()), + } + } +} + +impl ToTokens for PalletTaskAttr { + fn to_tokens(&self, tokens: &mut TokenStream2) { + let pound = self.pound; + let pallet = self.pallet; + let colons = self.colons; + let meta = &self.meta; + tokens.extend(quote!(#pound[#pallet #colons #meta])); + } +} + +impl TryFrom> for TaskIndexAttr { + type Error = syn::Error; + + fn try_from(value: PalletTaskAttr) -> Result { + let pound = value.pound; + let pallet = value.pallet; + let colons = value.colons; + match value.meta { + TaskAttrMeta::TaskIndex(meta) => parse2(quote!(#pound[#pallet #colons #meta])), + _ => + return Err(Error::new( + value.span(), + format!("`{:?}` cannot be converted to a `TaskIndexAttr`", value.meta), + )), + } + } +} + +impl TryFrom> for TaskConditionAttr { + type Error = syn::Error; + + fn try_from(value: PalletTaskAttr) -> Result { + let pound = value.pound; + let pallet = value.pallet; + let colons = value.colons; + match value.meta { + TaskAttrMeta::TaskCondition(meta) => parse2(quote!(#pound[#pallet #colons #meta])), + _ => + return Err(Error::new( + value.span(), + format!("`{:?}` cannot be converted to a `TaskConditionAttr`", value.meta), + )), + } + } +} + +impl TryFrom> for TaskWeightAttr { + type Error = syn::Error; + + fn try_from(value: PalletTaskAttr) -> Result { + let pound = value.pound; + let pallet = value.pallet; + let colons = value.colons; + match value.meta { + TaskAttrMeta::TaskWeight(meta) => parse2(quote!(#pound[#pallet #colons #meta])), + _ => + return Err(Error::new( + value.span(), + format!("`{:?}` cannot be converted to a `TaskWeightAttr`", value.meta), + )), + } + } +} + +impl TryFrom> for TaskListAttr { + type Error = syn::Error; + + fn try_from(value: PalletTaskAttr) -> Result { + let pound = value.pound; + let pallet = value.pallet; + let colons = value.colons; + match value.meta { + TaskAttrMeta::TaskList(meta) => parse2(quote!(#pound[#pallet #colons #meta])), + _ => + return Err(Error::new( + value.span(), + format!("`{:?}` cannot be converted to a `TaskListAttr`", value.meta), + )), + } + 
} +} + +fn extract_pallet_attr(item_enum: &mut ItemEnum) -> Result> { + let mut duplicate = None; + let mut attr = None; + item_enum.attrs = item_enum + .attrs + .iter() + .filter(|found_attr| { + let segs = found_attr + .path() + .segments + .iter() + .map(|seg| seg.ident.clone()) + .collect::>(); + let (Some(seg1), Some(_), None) = (segs.get(0), segs.get(1), segs.get(2)) else { + return true + }; + if seg1 != "pallet" { + return true + } + if attr.is_some() { + duplicate = Some(found_attr.span()); + } + attr = Some(found_attr.to_token_stream()); + false + }) + .cloned() + .collect(); + if let Some(span) = duplicate { + return Err(Error::new(span, "only one `#[pallet::_]` attribute is supported on this item")) + } + Ok(attr) +} + +fn partition_tasks_attrs(item_impl: &ItemImpl) -> (Vec, Vec) { + item_impl.attrs.clone().into_iter().partition(|attr| { + let mut path_segs = attr.path().segments.iter(); + let (Some(prefix), Some(suffix), None) = + (path_segs.next(), path_segs.next(), path_segs.next()) + else { + return false + }; + prefix.ident == "pallet" && suffix.ident == "tasks_experimental" + }) +} + +fn partition_task_attrs(item: &ImplItemFn) -> (Vec, Vec) { + item.attrs.clone().into_iter().partition(|attr| { + let mut path_segs = attr.path().segments.iter(); + let (Some(prefix), Some(suffix)) = (path_segs.next(), path_segs.next()) else { + return false + }; + // N.B: the `PartialEq` impl between `Ident` and `&str` is more efficient than + // parsing and makes no stack or heap allocations + prefix.ident == "pallet" && + (suffix.ident == "tasks_experimental" || + suffix.ident == "task_list" || + suffix.ident == "task_condition" || + suffix.ident == "task_weight" || + suffix.ident == "task_index") + }) +} + +#[test] +fn test_parse_task_list_() { + parse2::(quote!(#[pallet::task_list(Something::iter())])).unwrap(); + parse2::(quote!(#[pallet::task_list(Numbers::::iter_keys())])).unwrap(); + parse2::(quote!(#[pallet::task_list(iter())])).unwrap(); + assert_parse_error_matches!( + parse2::(quote!(#[pallet::task_list()])), + "expected an expression" + ); + assert_parse_error_matches!( + parse2::(quote!(#[pallet::task_list])), + "expected parentheses" + ); +} + +#[test] +fn test_parse_task_index() { + parse2::(quote!(#[pallet::task_index(3)])).unwrap(); + parse2::(quote!(#[pallet::task_index(0)])).unwrap(); + parse2::(quote!(#[pallet::task_index(17)])).unwrap(); + assert_parse_error_matches!( + parse2::(quote!(#[pallet::task_index])), + "expected parentheses" + ); + assert_parse_error_matches!( + parse2::(quote!(#[pallet::task_index("hey")])), + "expected integer literal" + ); + assert_parse_error_matches!( + parse2::(quote!(#[pallet::task_index(0.3)])), + "expected integer literal" + ); +} + +#[test] +fn test_parse_task_condition() { + parse2::(quote!(#[pallet::task_condition(|x| x.is_some())])).unwrap(); + parse2::(quote!(#[pallet::task_condition(|_x| some_expr())])).unwrap(); + parse2::(quote!(#[pallet::task_condition(|| some_expr())])).unwrap(); + parse2::(quote!(#[pallet::task_condition(some_expr())])).unwrap(); +} + +#[test] +fn test_parse_tasks_attr() { + parse2::(quote!(#[pallet::tasks_experimental])).unwrap(); + assert_parse_error_matches!( + parse2::(quote!(#[pallet::taskss])), + "expected `tasks_experimental`" + ); + assert_parse_error_matches!( + parse2::(quote!(#[pallet::tasks_])), + "expected `tasks_experimental`" + ); + assert_parse_error_matches!( + parse2::(quote!(#[pal::tasks])), + "expected `pallet`" + ); + assert_parse_error_matches!( + 
parse2::(quote!(#[pallet::tasks_experimental()])), + "unexpected token" + ); +} + +#[test] +fn test_parse_tasks_def_basic() { + simulate_manifest_dir("../../examples/basic", || { + let parsed = parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + /// Add a pair of numbers into the totals and remove them. + #[pallet::task_list(Numbers::::iter_keys())] + #[pallet::task_condition(|i| Numbers::::contains_key(i))] + #[pallet::task_index(0)] + #[pallet::task_weight(0)] + pub fn add_number_into_total(i: u32) -> DispatchResult { + let v = Numbers::::take(i).ok_or(Error::::NotFound)?; + Total::::mutate(|(total_keys, total_values)| { + *total_keys += i; + *total_values += v; + }); + Ok(()) + } + } + }) + .unwrap(); + assert_eq!(parsed.tasks.len(), 1); + }); +} + +#[test] +fn test_parse_tasks_def_basic_increment_decrement() { + simulate_manifest_dir("../../examples/basic", || { + let parsed = parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + /// Get the value and check if it can be incremented + #[pallet::task_index(0)] + #[pallet::task_condition(|| { + let value = Value::::get().unwrap(); + value < 255 + })] + #[pallet::task_list(Vec::>::new())] + #[pallet::task_weight(0)] + fn increment() -> DispatchResult { + let value = Value::::get().unwrap_or_default(); + if value >= 255 { + Err(Error::::ValueOverflow.into()) + } else { + let new_val = value.checked_add(1).ok_or(Error::::ValueOverflow)?; + Value::::put(new_val); + Pallet::::deposit_event(Event::Incremented { new_val }); + Ok(()) + } + } + + // Get the value and check if it can be decremented + #[pallet::task_index(1)] + #[pallet::task_condition(|| { + let value = Value::::get().unwrap(); + value > 0 + })] + #[pallet::task_list(Vec::>::new())] + #[pallet::task_weight(0)] + fn decrement() -> DispatchResult { + let value = Value::::get().unwrap_or_default(); + if value == 0 { + Err(Error::::ValueUnderflow.into()) + } else { + let new_val = value.checked_sub(1).ok_or(Error::::ValueUnderflow)?; + Value::::put(new_val); + Pallet::::deposit_event(Event::Decremented { new_val }); + Ok(()) + } + } + } + }) + .unwrap(); + assert_eq!(parsed.tasks.len(), 2); + }); +} + +#[test] +fn test_parse_tasks_def_duplicate_index() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + #[pallet::task_list(Something::iter())] + #[pallet::task_condition(|i| i % 2 == 0)] + #[pallet::task_index(0)] + #[pallet::task_weight(0)] + pub fn foo(i: u32) -> DispatchResult { + Ok(()) + } + + #[pallet::task_list(Numbers::::iter_keys())] + #[pallet::task_condition(|i| Numbers::::contains_key(i))] + #[pallet::task_index(0)] + #[pallet::task_weight(0)] + pub fn bar(i: u32) -> DispatchResult { + Ok(()) + } + } + }), + "duplicate task index `0`" + ); + }); +} + +#[test] +fn test_parse_tasks_def_missing_task_list() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + #[pallet::task_condition(|i| i % 2 == 0)] + #[pallet::task_index(0)] + pub fn foo(i: u32) -> DispatchResult { + Ok(()) + } + } + }), + r"missing `#\[pallet::task_list\(\.\.\)\]`" + ); + }); +} + +#[test] +fn test_parse_tasks_def_missing_task_condition() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! 
{ + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + #[pallet::task_list(Something::iter())] + #[pallet::task_index(0)] + pub fn foo(i: u32) -> DispatchResult { + Ok(()) + } + } + }), + r"missing `#\[pallet::task_condition\(\.\.\)\]`" + ); + }); +} + +#[test] +fn test_parse_tasks_def_missing_task_index() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + #[pallet::task_condition(|i| i % 2 == 0)] + #[pallet::task_list(Something::iter())] + pub fn foo(i: u32) -> DispatchResult { + Ok(()) + } + } + }), + r"missing `#\[pallet::task_index\(\.\.\)\]`" + ); + }); +} + +#[test] +fn test_parse_tasks_def_missing_task_weight() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + #[pallet::task_condition(|i| i % 2 == 0)] + #[pallet::task_list(Something::iter())] + #[pallet::task_index(0)] + pub fn foo(i: u32) -> DispatchResult { + Ok(()) + } + } + }), + r"missing `#\[pallet::task_weight\(\.\.\)\]`" + ); + }); +} + +#[test] +fn test_parse_tasks_def_unexpected_extra_task_list_attr() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + #[pallet::task_condition(|i| i % 2 == 0)] + #[pallet::task_index(0)] + #[pallet::task_weight(0)] + #[pallet::task_list(Something::iter())] + #[pallet::task_list(SomethingElse::iter())] + pub fn foo(i: u32) -> DispatchResult { + Ok(()) + } + } + }), + r"unexpected extra `#\[pallet::task_list\(\.\.\)\]`" + ); + }); +} + +#[test] +fn test_parse_tasks_def_unexpected_extra_task_condition_attr() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + #[pallet::task_condition(|i| i % 2 == 0)] + #[pallet::task_condition(|i| i % 4 == 0)] + #[pallet::task_index(0)] + #[pallet::task_list(Something::iter())] + #[pallet::task_weight(0)] + pub fn foo(i: u32) -> DispatchResult { + Ok(()) + } + } + }), + r"unexpected extra `#\[pallet::task_condition\(\.\.\)\]`" + ); + }); +} + +#[test] +fn test_parse_tasks_def_unexpected_extra_task_index_attr() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::tasks_experimental] + impl, I: 'static> Pallet { + #[pallet::task_condition(|i| i % 2 == 0)] + #[pallet::task_index(0)] + #[pallet::task_index(0)] + #[pallet::task_list(Something::iter())] + #[pallet::task_weight(0)] + pub fn foo(i: u32) -> DispatchResult { + Ok(()) + } + } + }), + r"unexpected extra `#\[pallet::task_index\(\.\.\)\]`" + ); + }); +} + +#[test] +fn test_parse_tasks_def_extra_tasks_attribute() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::tasks_experimental] + #[pallet::tasks_experimental] + impl, I: 'static> Pallet {} + }), + r"unexpected extra `#\[pallet::tasks_experimental\]` attribute" + ); + }); +} + +#[test] +fn test_parse_task_enum_def_basic() { + simulate_manifest_dir("../../examples/basic", || { + parse2::(quote! { + #[pallet::task_enum] + pub enum Task { + Increment, + Decrement, + } + }) + .unwrap(); + }); +} + +#[test] +fn test_parse_task_enum_def_non_task_name() { + simulate_manifest_dir("../../examples/basic", || { + parse2::(quote! 
{ + #[pallet::task_enum] + pub enum Something { + Foo + } + }) + .unwrap(); + }); +} + +#[test] +fn test_parse_task_enum_def_missing_attr_allowed() { + simulate_manifest_dir("../../examples/basic", || { + parse2::(quote! { + pub enum Task { + Increment, + Decrement, + } + }) + .unwrap(); + }); +} + +#[test] +fn test_parse_task_enum_def_missing_attr_alternate_name_allowed() { + simulate_manifest_dir("../../examples/basic", || { + parse2::(quote! { + pub enum Foo { + Red, + } + }) + .unwrap(); + }); +} + +#[test] +fn test_parse_task_enum_def_wrong_attr() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::something] + pub enum Task { + Increment, + Decrement, + } + }), + "expected `task_enum`" + ); + }); +} + +#[test] +fn test_parse_task_enum_def_wrong_item() { + simulate_manifest_dir("../../examples/basic", || { + assert_parse_error_matches!( + parse2::(quote! { + #[pallet::task_enum] + pub struct Something; + }), + "expected `enum`" + ); + }); +} diff --git a/substrate/frame/support/procedural/src/pallet/parse/tests/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/tests/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..a3661f3076d94511323e466ffe2ba6999b64f717 --- /dev/null +++ b/substrate/frame/support/procedural/src/pallet/parse/tests/mod.rs @@ -0,0 +1,264 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{panic, sync::Mutex}; +use syn::parse_quote; + +#[doc(hidden)] +pub mod __private { + pub use regex; +} + +/// Allows you to assert that the input expression resolves to an error whose string +/// representation matches the specified regex literal. +/// +/// ## Example: +/// +/// ``` +/// use super::tasks::*; +/// +/// assert_parse_error_matches!( +/// parse2::(quote! { +/// #[pallet::task_enum] +/// pub struct Something; +/// }), +/// "expected `enum`" +/// ); +/// ``` +/// +/// More complex regular expressions are also possible (anything that could pass as a regex for +/// use with the [`regex`] crate.): +/// +/// ```ignore +/// assert_parse_error_matches!( +/// parse2::(quote! { +/// #[pallet::tasks_experimental] +/// impl, I: 'static> Pallet { +/// #[pallet::task_condition(|i| i % 2 == 0)] +/// #[pallet::task_index(0)] +/// pub fn foo(i: u32) -> DispatchResult { +/// Ok(()) +/// } +/// } +/// }), +/// r"missing `#\[pallet::task_list\(\.\.\)\]`" +/// ); +/// ``` +/// +/// Although this is primarily intended to be used with parsing errors, this macro is general +/// enough that it will work with any error with a reasonable [`core::fmt::Display`] impl. +#[macro_export] +macro_rules! 
assert_parse_error_matches { + ($expr:expr, $reg:literal) => { + match $expr { + Ok(_) => panic!("Expected an `Error(..)`, but got Ok(..)"), + Err(e) => { + let error_message = e.to_string(); + let re = $crate::pallet::parse::tests::__private::regex::Regex::new($reg) + .expect("Invalid regex pattern"); + assert!( + re.is_match(&error_message), + "Error message \"{}\" does not match the pattern \"{}\"", + error_message, + $reg + ); + }, + } + }; +} + +/// Allows you to assert that an entire pallet parses successfully. A custom syntax is used for +/// specifying arguments so please pay attention to the docs below. +/// +/// The general syntax is: +/// +/// ```ignore +/// assert_pallet_parses! { +/// #[manifest_dir("../../examples/basic")] +/// #[frame_support::pallet] +/// pub mod pallet { +/// #[pallet::config] +/// pub trait Config: frame_system::Config {} +/// +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// } +/// }; +/// ``` +/// +/// The `#[manifest_dir(..)]` attribute _must_ be specified as the _first_ attribute on the +/// pallet module, and should reference the relative (to your current directory) path of a +/// directory containing containing the `Cargo.toml` of a valid pallet. Typically you will only +/// ever need to use the `examples/basic` pallet, but sometimes it might be advantageous to +/// specify a different one that has additional dependencies. +/// +/// The reason this must be specified is that our underlying parsing of pallets depends on +/// reaching out into the file system to look for particular `Cargo.toml` dependencies via the +/// [`generate_access_from_frame_or_crate`] method, so to simulate this properly in a proc +/// macro crate, we need to temporarily convince this function that we are running from the +/// directory of a valid pallet. +#[macro_export] +macro_rules! assert_pallet_parses { + ( + #[manifest_dir($manifest_dir:literal)] + $($tokens:tt)* + ) => { + { + let mut pallet: Option<$crate::pallet::parse::Def> = None; + $crate::pallet::parse::tests::simulate_manifest_dir($manifest_dir, core::panic::AssertUnwindSafe(|| { + pallet = Some($crate::pallet::parse::Def::try_from(syn::parse_quote! { + $($tokens)* + }, false).unwrap()); + })); + pallet.unwrap() + } + } +} + +/// Similar to [`assert_pallet_parses`], except this instead expects the pallet not to parse, +/// and allows you to specify a regex matching the expected parse error. +/// +/// This is identical syntactically to [`assert_pallet_parses`] in every way except there is a +/// second attribute that must be specified immediately after `#[manifest_dir(..)]` which is +/// `#[error_regex(..)]` which should contain a string/regex literal designed to match what you +/// consider to be the correct parsing error we should see when we try to parse this particular +/// pallet. +/// +/// ## Example: +/// +/// ``` +/// assert_pallet_parse_error! { +/// #[manifest_dir("../../examples/basic")] +/// #[error_regex("Missing `\\#\\[pallet::pallet\\]`")] +/// #[frame_support::pallet] +/// pub mod pallet { +/// #[pallet::config] +/// pub trait Config: frame_system::Config {} +/// } +/// } +/// ``` +#[macro_export] +macro_rules! assert_pallet_parse_error { + ( + #[manifest_dir($manifest_dir:literal)] + #[error_regex($reg:literal)] + $($tokens:tt)* + ) => { + $crate::pallet::parse::tests::simulate_manifest_dir($manifest_dir, || { + $crate::assert_parse_error_matches!( + $crate::pallet::parse::Def::try_from( + parse_quote! 
{ + $($tokens)* + }, + false + ), + $reg + ); + }); + } +} + +/// Safely runs the specified `closure` while simulating an alternative `CARGO_MANIFEST_DIR`, +/// restoring `CARGO_MANIFEST_DIR` to its original value upon completion regardless of whether +/// the closure panics. +/// +/// This is useful in tests of `Def::try_from` and other pallet-related methods that internally +/// make use of [`generate_access_from_frame_or_crate`], which is sensitive to entries in the +/// "current" `Cargo.toml` files. +/// +/// This function uses a [`Mutex`] to avoid a race condition created when multiple tests try to +/// modify and then restore the `CARGO_MANIFEST_DIR` ENV var in an overlapping way. +pub fn simulate_manifest_dir, F: FnOnce() + std::panic::UnwindSafe>( + path: P, + closure: F, +) { + use std::{env::*, path::*}; + + /// Ensures that only one thread can modify/restore the `CARGO_MANIFEST_DIR` ENV var at a time, + /// avoiding a race condition because `cargo test` runs tests in parallel. + /// + /// Although this forces all tests that use [`simulate_manifest_dir`] to run sequentially with + /// respect to each other, this is still several orders of magnitude faster than using UI + /// tests, even if they are run in parallel. + static MANIFEST_DIR_LOCK: Mutex<()> = Mutex::new(()); + + // avoid race condition when swapping out `CARGO_MANIFEST_DIR` + let guard = MANIFEST_DIR_LOCK.lock().unwrap(); + + // obtain the current/original `CARGO_MANIFEST_DIR` + let orig = PathBuf::from( + var("CARGO_MANIFEST_DIR").expect("failed to read ENV var `CARGO_MANIFEST_DIR`"), + ); + + // set `CARGO_MANIFEST_DIR` to the provided path, relative to current working dir + set_var("CARGO_MANIFEST_DIR", orig.join(path.as_ref())); + + // safely run closure catching any panics + let result = panic::catch_unwind(closure); + + // restore original `CARGO_MANIFEST_DIR` before unwinding + set_var("CARGO_MANIFEST_DIR", &orig); + + // unlock the mutex so we don't poison it if there is a panic + drop(guard); + + // unwind any panics originally encountered when running closure + result.unwrap(); +} + +mod tasks; + +#[test] +fn test_parse_minimal_pallet() { + assert_pallet_parses! { + #[manifest_dir("../../examples/basic")] + #[frame_support::pallet] + pub mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + }; +} + +#[test] +fn test_parse_pallet_missing_pallet() { + assert_pallet_parse_error! { + #[manifest_dir("../../examples/basic")] + #[error_regex("Missing `\\#\\[pallet::pallet\\]`")] + #[frame_support::pallet] + pub mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + } + } +} + +#[test] +fn test_parse_pallet_missing_config() { + assert_pallet_parse_error! { + #[manifest_dir("../../examples/basic")] + #[error_regex("Missing `\\#\\[pallet::config\\]`")] + #[frame_support::pallet] + pub mod pallet { + #[pallet::pallet] + pub struct Pallet(_); + } + } +} diff --git a/substrate/frame/support/procedural/src/pallet/parse/tests/tasks.rs b/substrate/frame/support/procedural/src/pallet/parse/tests/tasks.rs new file mode 100644 index 0000000000000000000000000000000000000000..9f143628404734d5510de9e5997deec2a63644ea --- /dev/null +++ b/substrate/frame/support/procedural/src/pallet/parse/tests/tasks.rs @@ -0,0 +1,240 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::parse_quote; + +#[test] +fn test_parse_pallet_with_task_enum_missing_impl() { + assert_pallet_parse_error! { + #[manifest_dir("../../examples/basic")] + #[error_regex("Missing `\\#\\[pallet::tasks_experimental\\]` impl")] + #[frame_support::pallet] + pub mod pallet { + #[pallet::task_enum] + pub enum Task { + Something, + } + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + } +} + +#[test] +fn test_parse_pallet_with_task_enum_wrong_attribute() { + assert_pallet_parse_error! { + #[manifest_dir("../../examples/basic")] + #[error_regex("expected one of")] + #[frame_support::pallet] + pub mod pallet { + #[pallet::wrong_attribute] + pub enum Task { + Something, + } + + #[pallet::task_list] + impl frame_support::traits::Task for Task + where + T: TypeInfo, + {} + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + } +} + +#[test] +fn test_parse_pallet_missing_task_enum() { + assert_pallet_parses! { + #[manifest_dir("../../examples/basic")] + #[frame_support::pallet] + pub mod pallet { + #[pallet::tasks_experimental] + #[cfg(test)] // aha, this means it's being eaten + impl frame_support::traits::Task for Task + where + T: TypeInfo, + {} + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + }; +} + +#[test] +fn test_parse_pallet_task_list_in_wrong_place() { + assert_pallet_parse_error! { + #[manifest_dir("../../examples/basic")] + #[error_regex("can only be used on items within an `impl` statement.")] + #[frame_support::pallet] + pub mod pallet { + pub enum MyCustomTaskEnum { + Something, + } + + #[pallet::task_list] + pub fn something() { + println!("hey"); + } + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + } +} + +#[test] +fn test_parse_pallet_manual_tasks_impl_without_manual_tasks_enum() { + assert_pallet_parse_error! { + #[manifest_dir("../../examples/basic")] + #[error_regex(".*attribute must be attached to your.*")] + #[frame_support::pallet] + pub mod pallet { + + impl frame_support::traits::Task for Task + where + T: TypeInfo, + { + type Enumeration = sp_std::vec::IntoIter>; + + fn iter() -> Self::Enumeration { + sp_std::vec![Task::increment, Task::decrement].into_iter() + } + } + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + } +} + +#[test] +fn test_parse_pallet_manual_task_enum_non_manual_impl() { + assert_pallet_parses! 
{ + #[manifest_dir("../../examples/basic")] + #[frame_support::pallet] + pub mod pallet { + pub enum MyCustomTaskEnum { + Something, + } + + #[pallet::tasks_experimental] + impl frame_support::traits::Task for MyCustomTaskEnum + where + T: TypeInfo, + {} + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + }; +} + +#[test] +fn test_parse_pallet_non_manual_task_enum_manual_impl() { + assert_pallet_parses! { + #[manifest_dir("../../examples/basic")] + #[frame_support::pallet] + pub mod pallet { + #[pallet::task_enum] + pub enum MyCustomTaskEnum { + Something, + } + + impl frame_support::traits::Task for MyCustomTaskEnum + where + T: TypeInfo, + {} + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + }; +} + +#[test] +fn test_parse_pallet_manual_task_enum_manual_impl() { + assert_pallet_parses! { + #[manifest_dir("../../examples/basic")] + #[frame_support::pallet] + pub mod pallet { + pub enum MyCustomTaskEnum { + Something, + } + + impl frame_support::traits::Task for MyCustomTaskEnum + where + T: TypeInfo, + {} + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + }; +} + +#[test] +fn test_parse_pallet_manual_task_enum_mismatch_ident() { + assert_pallet_parses! { + #[manifest_dir("../../examples/basic")] + #[frame_support::pallet] + pub mod pallet { + pub enum WrongIdent { + Something, + } + + #[pallet::tasks_experimental] + impl frame_support::traits::Task for MyCustomTaskEnum + where + T: TypeInfo, + {} + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + } + }; +} diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index fd42e18180d396bb34050b5acd310ab5888fc6d7..a5d0f4cc17a8c13ec509e0cac338069dee40bd65 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -8,12 +8,15 @@ homepage = "https://substrate.io" repository.workspace = true description = "Proc macro helpers for procedural macros" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.1" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "visit", "extra-traits"] } +syn = { version = "2.0.41", features = ["extra-traits", "full", "visit"] } frame-support-procedural-tools-derive = { path = "derive" } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index 06f8e0f3d537a956f26dd873929f87bec86ce9e0..0ccb02a3329eb27dc17d97615ff1b8bc0bc9b958 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -8,6 +8,9 @@ homepage = "https://substrate.io" repository.workspace = true description = "Use to derive parsing for parsing struct." 
+[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,4 +20,4 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.38", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.41", features = ["extra-traits", "full", "parsing", "proc-macro"] } diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index e6a090ebcae8a0526dab10896adaa968a23f53c0..4a313551aca634b88bed7d0f989943f7075a5050 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -36,7 +36,8 @@ use sp_weights::Weight; /// returned from a dispatch. pub type DispatchResultWithPostInfo = sp_runtime::DispatchResultWithInfo<crate::dispatch::PostDispatchInfo>; -/// Unaugmented version of `DispatchResultWithPostInfo` that can be returned from +#[docify::export] +/// Un-augmented version of `DispatchResultWithPostInfo` that can be returned from /// dispatchable functions and is automatically converted to the augmented type. Should be /// used whenever the `PostDispatchInfo` does not need to be overwritten. As this should /// be the common case it is the implicit return type when none is specified. @@ -54,6 +55,20 @@ pub trait Callable<T> { // https://github.com/rust-lang/rust/issues/51331 pub type CallableCallFor<A, T> = <A as Callable<T>>::RuntimeCall; +/// Means to check if the dispatchable is feeless. +/// +/// This is automatically implemented for all dispatchables during pallet expansion. +/// If a call is marked with the [`#[pallet::feeless_if]`](`macro@frame_support_procedural::feeless_if`) +/// attribute, the corresponding closure is checked. +pub trait CheckIfFeeless { + /// The Origin type of the runtime. + type Origin; + + /// Checks if the dispatchable satisfies the feeless condition as defined by + /// [`#[pallet::feeless_if]`](`macro@frame_support_procedural::feeless_if`). + fn is_feeless(&self, origin: &Self::Origin) -> bool; +} + /// Origin for the System pallet.
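As an illustration of the `CheckIfFeeless` trait introduced above, the following is a hand-written sketch (not the actual macro expansion) of the kind of implementation `#[pallet::feeless_if]` produces for a pallet's `Call` enum. The `noop` variant, its `x` argument, and the closure are hypothetical.

```rust
// Hypothetical sketch: what `#[pallet::feeless_if(|_origin, x| *x == 0)]` on a
// dispatchable `noop(origin, x: u32)` could conceptually expand to.
impl<T: Config> CheckIfFeeless for Call<T> {
    type Origin = frame_system::pallet_prelude::OriginFor<T>;

    fn is_feeless(&self, origin: &Self::Origin) -> bool {
        match self {
            // The `feeless_if` closure is evaluated against the call's arguments.
            Call::noop { x } => (|_origin: &Self::Origin, x: &u32| *x == 0)(origin, x),
            // Any dispatchable without a `feeless_if` attribute is never feeless.
            _ => false,
        }
    }
}
```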
#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub enum RawOrigin { @@ -649,7 +664,7 @@ mod weight_tests { use sp_runtime::{generic, traits::BlakeTwo256}; use sp_weights::RuntimeDbWeight; - pub use self::frame_system::{Call, Config, Pallet}; + pub use self::frame_system::{Call, Config}; fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { PostDispatchInfo { @@ -680,6 +695,7 @@ mod weight_tests { type BaseCallFilter: crate::traits::Contains; type RuntimeOrigin; type RuntimeCall; + type RuntimeTask; type PalletInfo: crate::traits::PalletInfo; type DbWeight: Get; } @@ -776,6 +792,7 @@ mod weight_tests { type BaseCallFilter = crate::traits::Everything; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; type DbWeight = DbWeight; type PalletInfo = PalletInfo; } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index a01f3a01593ad0fac8dd136b8b8eacb61caab5d3..af1f99be1031b0aa16ba60c1047d58e2c91274a3 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -47,6 +47,8 @@ pub mod __private { pub use sp_core::{OpaqueMetadata, Void}; pub use sp_core_hashing_proc_macro; pub use sp_inherents; + #[cfg(feature = "std")] + pub use sp_io::TestExternalities; pub use sp_io::{self, hashing, storage::root as storage_root}; pub use sp_metadata_ir as metadata_ir; #[cfg(feature = "std")] @@ -847,7 +849,7 @@ pub mod pallet_prelude { }, traits::{ BuildGenesisConfig, ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, - IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, TypedGet, + IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, Task, TypedGet, }, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, @@ -2226,12 +2228,159 @@ pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the pallet:: macros pub mod pallet_macros { pub use frame_support_procedural::{ - call_index, compact, composite_enum, config, disable_frame_system_supertrait_check, error, - event, extra_constants, generate_deposit, generate_store, getter, hooks, import_section, - inherent, no_default, no_default_bounds, origin, pallet_section, storage_prefix, - storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, + composite_enum, config, disable_frame_system_supertrait_check, error, event, + extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, + import_section, inherent, no_default, no_default_bounds, origin, pallet_section, + storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, + whitelist_storage, }; + /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. In + /// slightly simplified terms, this macro declares the set of "transactions" of a pallet. + /// + /// > The exact definition of **extrinsic** can be found in + /// > [`sp_runtime::generic::UncheckedExtrinsic`]. + /// + /// A **dispatchable** is a common term in FRAME, referring to process of constructing a + /// function, and dispatching it with the correct inputs. This is commonly used with + /// extrinsics, for example "an extrinsic has been dispatched". See + /// [`sp_runtime::traits::Dispatchable`] and [`crate::traits::UnfilteredDispatchable`]. 
+ /// + /// ## Call Enum + /// + /// The macro is called `call` (rather than `#[pallet::extrinsics]`) because of the + /// generation of an `enum Call`. This enum contains only the encoding of the function + /// arguments of the dispatchable, alongside the information needed to route it to the + /// correct function. + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// pub mod custom_pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// # use frame_support::traits::BuildGenesisConfig; + /// #[pallet::call] + /// impl<T: Config> Pallet<T> { + /// pub fn some_dispatchable(_origin: OriginFor<T>, _input: u32) -> DispatchResult { + /// Ok(()) + /// } + /// pub fn other(_origin: OriginFor<T>, _input: u64) -> DispatchResult { + /// Ok(()) + /// } + /// } + /// + /// // generates something like: + /// // enum Call { + /// // some_dispatchable { input: u32 } + /// // other { input: u64 } + /// // } + /// } + /// + /// fn main() { + /// # use frame_support::{derive_impl, construct_runtime}; + /// # use frame_support::__private::codec::Encode; + /// # use frame_support::__private::TestExternalities; + /// # use frame_support::traits::UnfilteredDispatchable; + /// # impl custom_pallet::Config for Runtime {} + /// # #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + /// # impl frame_system::Config for Runtime { + /// # type Block = frame_system::mocking::MockBlock<Self>; + /// # } + /// construct_runtime! { + /// pub struct Runtime { + /// System: frame_system, + /// Custom: custom_pallet + /// } + /// } + /// + /// # TestExternalities::new_empty().execute_with(|| { + /// let origin: RuntimeOrigin = frame_system::RawOrigin::Signed(10).into(); + /// // calling into a dispatchable from within the runtime is simply a function call. + /// let _ = custom_pallet::Pallet::<Runtime>::some_dispatchable(origin.clone(), 10); + /// + /// // calling into a dispatchable from the outer world involves constructing the bytes of the `Call` first. + /// let call = custom_pallet::Call::<Runtime>::some_dispatchable { input: 10 }; + /// let _ = call.clone().dispatch_bypass_filter(origin); + /// + /// // the routing of a dispatchable is simply done through encoding of the `Call` enum, + /// // which is the index of the variant, followed by the arguments. + /// assert_eq!(call.encode(), vec![0u8, 10, 0, 0, 0]); + /// + /// // notice how in the encoding of the second function, the first byte is different, + /// // referring to the second variant of `enum Call`. + /// let call = custom_pallet::Call::<Runtime>::other { input: 10 }; + /// assert_eq!(call.encode(), vec![1u8, 10, 0, 0, 0, 0, 0, 0, 0]); + /// # }); + /// } + /// ``` + /// + /// Further properties of dispatchable functions are as follows: + /// + /// - Unless annotated with `dev_mode`, it must contain [`weight`] to denote the + /// pre-dispatch weight consumed. + /// - The dispatchable must declare its index via [`call_index`], which can override the + /// position of a function in `enum Call`. + /// - The first argument is always an `OriginFor<T>` (or `T::RuntimeOrigin`). + /// - The return type is always [`crate::dispatch::DispatchResult`] (or + /// [`crate::dispatch::DispatchResultWithPostInfo`]). + /// + /// **WARNING**: modifying dispatchables, changing their order (i.e. using [`call_index`]), + /// removing some, etc., must be done with care.
This will change the encoding of the call, and + /// the call can be stored on-chain (e.g. in `pallet-scheduler`). Thus, migration might be + /// needed. This is why the use of `call_index` is mandatory by default in FRAME. + /// + /// ## Default Behavior + /// + /// If no `#[pallet::call]` exists, then a default implementation corresponding to the + /// following code is automatically generated: + /// + /// ```ignore + /// #[pallet::call] + /// impl<T: Config> Pallet<T> {} + /// ``` + pub use frame_support_procedural::call; + + /// Enforce the index of a variant in the generated `enum Call`. See [`call`] for more + /// information. + /// + /// All call indices start from 0, until a dispatchable function with a + /// defined call index is encountered. The dispatchable function that lexically follows the function with + /// a defined call index will have that call index, but incremented by 1, e.g. if there are + /// 3 dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn + /// bar` has a call index of 10, then `fn qux` will have an index of 11, instead of 2. + pub use frame_support_procedural::call_index; + + /// Declares the arguments of a [`call`] function to be encoded using + /// [`codec::Compact`]. This will result in a smaller extrinsic encoding. + /// + /// A common example of `compact` is for numeric values that are oftentimes far away + /// from their theoretical maximum. For example, in the context of a crypto-currency, the + /// balance of an individual account is often far less than what the numeric type + /// allows. In all such cases, using `compact` is sensible. + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// pub mod custom_pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// # use frame_support::traits::BuildGenesisConfig; + /// #[pallet::call] + /// impl<T: Config> Pallet<T> { + /// pub fn some_dispatchable(_origin: OriginFor<T>, #[pallet::compact] _input: u32) -> DispatchResult { + /// Ok(()) + /// } + /// } + /// } + /// ``` + pub use frame_support_procedural::compact; + + /// Allows you to define the genesis configuration for the pallet. + /// + /// Item is defined as either an enum or a struct. It needs to be public and implement the @@ -2525,6 +2674,61 @@ pub mod pallet_macros { + /// } + /// ``` + pub use frame_support_procedural::storage; + /// This attribute is attached to a function inside an `impl` block annotated with + /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a + /// given work item to be valid. + /// + /// It takes a closure as input, which is then used to define the condition. The closure + /// should have the same signature as the function it is attached to, except that it should + /// return a `bool` instead. + pub use frame_support_procedural::task_condition; + /// This attribute is attached to a function inside an `impl` block annotated with + /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given + /// work item. + /// + /// It takes an integer literal as input, which is then used to define the index. This + /// index should be unique for each function in the `impl` block.
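Putting the task attributes described above together, a minimal sketch of a `#[pallet::tasks_experimental]` block might look as follows; the `Pending` storage map and the surrounding pallet scaffolding are assumptions, and the attribute arguments mirror the `tasks_example` that appears later in this diff.

```rust
// Illustrative sketch only: one work item wired up with all four task attributes.
#[pallet::tasks_experimental]
impl<T: Config> Pallet<T> {
    /// Process (and remove) a pending item, if it exists.
    #[pallet::task_list(Pending::<T>::iter_keys())]                // enumerates work items
    #[pallet::task_condition(|id| Pending::<T>::contains_key(id))] // validity check per item
    #[pallet::task_weight(0.into())]                               // weight of one run
    #[pallet::task_index(0)]                                       // unique within this pallet
    pub fn process(id: u32) -> DispatchResult {
        Pending::<T>::remove(id);
        Ok(())
    }
}
```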
+ pub use frame_support_procedural::task_index; + /// This attribute is attached to a function inside an `impl` block annotated with + /// [`pallet::tasks_experimental`](`tasks_experimental`) to define an iterator over the + /// available work items for a task. + /// + /// It takes an iterator as input that yields a tuple with the same types as the function's + /// arguments. + pub use frame_support_procedural::task_list; + /// This attribute is attached to a function inside an `impl` block annotated with + /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the weight of a given work + /// item. + /// + /// It takes a closure as input, which should return a `Weight` value. + pub use frame_support_procedural::task_weight; + /// Allows you to define some service work that can be recognized by a script or an + /// off-chain worker. Such a script can then create and submit all such work items at any + /// given time. + /// + /// These work items are defined as instances of the [`Task`](frame_support::traits::Task) + /// trait. [`pallet::tasks_experimental`](`tasks_experimental`), when attached to an `impl` + /// block inside a pallet, will generate an enum `Task` whose variants are mapped to + /// functions inside this `impl` block. + /// + /// Each such function must have the following set of attributes: + /// + /// * [`pallet::task_list`](`task_list`) + /// * [`pallet::task_condition`](`task_condition`) + /// * [`pallet::task_weight`](`task_weight`) + /// * [`pallet::task_index`](`task_index`) + /// + /// All such tasks are then aggregated into a `RuntimeTask` by + /// [`construct_runtime`](frame_support::construct_runtime). + /// + /// Finally, the `RuntimeTask` can then be used by a script or off-chain worker to create and + /// submit such tasks via an extrinsic defined in `frame_system` called `do_task`. + /// + /// ## Example + #[doc = docify::embed!("src/tests/tasks.rs", tasks_example)] + /// Now, this can be executed as follows: + #[doc = docify::embed!("src/tests/tasks.rs", tasks_work)] + pub use frame_support_procedural::tasks_experimental; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index a9eb460421f18af1f4b5b5b94fd05cd2984fa9ad..bfd62c8611c6092e574bbb982525362f944cce53 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -224,8 +224,7 @@ impl PalletVersionToStorageVersionHelper for T { } } -/// Migrate from the `PalletVersion` struct to the new -/// [`StorageVersion`](crate::traits::StorageVersion) struct. +/// Migrate from the `PalletVersion` struct to the new [`StorageVersion`] struct. /// /// This will remove all `PalletVersion's` from the state and insert the current storage version. pub fn migrate_from_pallet_version_to_storage_version< diff --git a/substrate/frame/support/src/storage/child.rs b/substrate/frame/support/src/storage/child.rs index e54002d18db3ddf09e7ee3dba56ab6948d7192b2..76e6f4ee4023ec08c8c080929f5aba8f3bfd7509 100644 --- a/substrate/frame/support/src/storage/child.rs +++ b/substrate/frame/support/src/storage/child.rs @@ -165,9 +165,9 @@ pub fn kill_storage(child_info: &ChildInfo, limit: Option<u32>) -> KillStorageRe /// guarantee that the subsequent call is in a new block; in this case the previous call's result /// cursor need not be passed in and a `None` may be passed instead.
This exception may be useful /// then making this call solely from a block-hook such as `on_initialize`. -/// -/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the -/// resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + +/// Returns [`MultiRemovalResults`] to inform about the result. Once the resultant `maybe_cursor` +/// field is `None`, then no further items remain to be deleted. /// /// NOTE: After the initial call for any given child storage, it is important that no keys further /// keys are inserted. If so, then they may or may not be deleted by subsequent calls. diff --git a/substrate/frame/support/src/storage/generator/mod.rs b/substrate/frame/support/src/storage/generator/mod.rs index 2b2abdc2e830923c5171703f1dc9173b50726cbc..dd6d622852db161cbe686115663e00a6bd9cd4b0 100644 --- a/substrate/frame/support/src/storage/generator/mod.rs +++ b/substrate/frame/support/src/storage/generator/mod.rs @@ -63,6 +63,7 @@ mod tests { type BaseCallFilter: crate::traits::Contains; type RuntimeOrigin; type RuntimeCall; + type RuntimeTask; type PalletInfo: crate::traits::PalletInfo; type DbWeight: Get; } @@ -129,6 +130,7 @@ mod tests { type BaseCallFilter = crate::traits::Everything; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; type PalletInfo = PalletInfo; type DbWeight = (); } diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index 7f39a3fdad85e98d4a2cf1d328b55e33f7846438..c77de1f976f60f18ab87013e03b59bd6ee88db8f 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -1583,7 +1583,7 @@ pub trait StorageTryAppend: StorageDecodeLength + private::Sealed { fn bound() -> usize; } -/// Storage value that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend). +/// Storage value that is capable of [`StorageTryAppend`]. pub trait TryAppendValue, I: Encode> { /// Try and append the `item` into the storage item. /// @@ -1612,7 +1612,7 @@ where } } -/// Storage map that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend). +/// Storage map that is capable of [`StorageTryAppend`]. pub trait TryAppendMap, I: Encode> { /// Try and append the `item` into the storage map at the given `key`. /// @@ -1646,7 +1646,7 @@ where } } -/// Storage double map that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend). +/// Storage double map that is capable of [`StorageTryAppend`]. pub trait TryAppendDoubleMap, I: Encode> { /// Try and append the `item` into the storage double map at the given `key`. /// diff --git a/substrate/frame/support/src/storage/unhashed.rs b/substrate/frame/support/src/storage/unhashed.rs index aae83034ab71aab13a24f060e369e966d26f93ca..776c7d0f3c3a8d761d7c048e292d590056b374c3 100644 --- a/substrate/frame/support/src/storage/unhashed.rs +++ b/substrate/frame/support/src/storage/unhashed.rs @@ -27,8 +27,8 @@ pub fn get(key: &[u8]) -> Option { // TODO #3700: error should be handleable. 
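The cursor protocol described for the child-storage removal API a little above can be sketched as follows. The exact `clear_storage` signature (a removal limit plus an optional cursor) is assumed here, and in a real runtime each iteration would run in a later block rather than in a tight loop.

```rust
use frame_support::storage::child::{clear_storage, ChildInfo};

// Sketch under assumptions: repeatedly remove up to 100 keys of a child trie,
// feeding the returned cursor back in until `maybe_cursor` is `None`.
fn remove_child_trie_in_chunks(child: &ChildInfo) {
    let mut cursor: Option<Vec<u8>> = None;
    loop {
        let res = clear_storage(child, Some(100), cursor.as_deref());
        match res.maybe_cursor {
            // More items remain; resume from this cursor (ideally in a later block).
            Some(c) => cursor = Some(c),
            // `None` means no further items remain to be deleted.
            None => break,
        }
    }
}
```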
log::error!( target: "runtime::storage", - "Corrupted state at `{:?}: {:?}`", - key, + "Corrupted state at `{}`: {:?}", + array_bytes::bytes2hex("0x", key), e, ); None diff --git a/substrate/frame/support/src/tests/mod.rs b/substrate/frame/support/src/tests/mod.rs index 3690159c5994d98d265eadf7c0a623f4630eede3..c6a0b6cde7737e7510ab124bacef86a1a55aca96 100644 --- a/substrate/frame/support/src/tests/mod.rs +++ b/substrate/frame/support/src/tests/mod.rs @@ -16,6 +16,7 @@ // limitations under the License. use super::*; +use frame_support_procedural::import_section; use sp_io::{MultiRemovalResults, TestExternalities}; use sp_metadata_ir::{ PalletStorageMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, @@ -27,13 +28,15 @@ pub use self::frame_system::{pallet_prelude::*, Config, Pallet}; mod inject_runtime_type; mod storage_alias; +mod tasks; +#[import_section(tasks::tasks_example)] #[pallet] pub mod frame_system { #[allow(unused)] use super::{frame_system, frame_system::pallet_prelude::*}; pub use crate::dispatch::RawOrigin; - use crate::pallet_prelude::*; + use crate::{pallet_prelude::*, traits::tasks::Task as TaskTrait}; pub mod config_preludes { use super::{inject_runtime_type, DefaultConfig}; @@ -49,6 +52,8 @@ pub mod frame_system { type RuntimeCall = (); #[inject_runtime_type] type PalletInfo = (); + #[inject_runtime_type] + type RuntimeTask = (); type DbWeight = (); } } @@ -69,6 +74,8 @@ pub mod frame_system { #[pallet::no_default_bounds] type RuntimeCall; #[pallet::no_default_bounds] + type RuntimeTask: crate::traits::tasks::Task; + #[pallet::no_default_bounds] type PalletInfo: crate::traits::PalletInfo; type DbWeight: Get; } @@ -77,13 +84,33 @@ pub mod frame_system { pub enum Error { /// Required by construct_runtime CallFiltered, + /// Used in tasks example. + NotFound, + /// The specified [`Task`] is not valid. + InvalidTask, + /// The specified [`Task`] failed during execution. + FailedTask, } #[pallet::origin] pub type Origin = RawOrigin<::AccountId>; #[pallet::call] - impl Pallet {} + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(task.weight())] + pub fn do_task(_origin: OriginFor, task: T::RuntimeTask) -> DispatchResultWithPostInfo { + if !task.is_valid() { + return Err(Error::::InvalidTask.into()) + } + + if let Err(_err) = task.run() { + return Err(Error::::FailedTask.into()) + } + + Ok(().into()) + } + } #[pallet::storage] pub type Data = StorageMap<_, Twox64Concat, u32, u64, ValueQuery>; @@ -169,6 +196,14 @@ pub mod frame_system { } } + /// Some running total. + #[pallet::storage] + pub type Total = StorageValue<_, (u32, u32), ValueQuery>; + + /// Numbers to be added into the total. 
+ #[pallet::storage] + pub type Numbers = StorageMap<_, Twox64Concat, u32, u32, OptionQuery>; + pub mod pallet_prelude { pub type OriginFor = ::RuntimeOrigin; @@ -622,6 +657,24 @@ fn expected_metadata() -> PalletStorageMetadataIR { default: vec![0], docs: vec![], }, + StorageEntryMetadataIR { + name: "Total", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::<(u32, u32)>()), + default: vec![0, 0, 0, 0, 0, 0, 0, 0], + docs: vec![" Some running total."], + }, + StorageEntryMetadataIR { + name: "Numbers", + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Twox64Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: vec![0], + docs: vec![" Numbers to be added into the total."], + }, ], } } diff --git a/substrate/frame/support/src/tests/tasks.rs b/substrate/frame/support/src/tests/tasks.rs new file mode 100644 index 0000000000000000000000000000000000000000..2774c130075785844b99f35ec8b9b3937a51e6ed --- /dev/null +++ b/substrate/frame/support/src/tests/tasks.rs @@ -0,0 +1,62 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + assert_ok, + tests::{ + frame_system::{Numbers, Total}, + new_test_ext, Runtime, RuntimeOrigin, RuntimeTask, System, + }, +}; +use frame_support_procedural::pallet_section; + +#[pallet_section] +mod tasks_example { + #[docify::export(tasks_example)] + #[pallet::tasks_experimental] + impl Pallet { + /// Add a pair of numbers into the totals and remove them. 
+ #[pallet::task_list(Numbers::::iter_keys())] + #[pallet::task_condition(|i| Numbers::::contains_key(i))] + #[pallet::task_weight(0.into())] + #[pallet::task_index(0)] + pub fn add_number_into_total(i: u32) -> DispatchResult { + let v = Numbers::::take(i).ok_or(Error::::NotFound)?; + Total::::mutate(|(total_keys, total_values)| { + *total_keys += i; + *total_values += v; + }); + Ok(()) + } + } +} + +#[docify::export] +#[test] +fn tasks_work() { + new_test_ext().execute_with(|| { + Numbers::::insert(0, 1); + + let task = RuntimeTask::System(super::frame_system::Task::::AddNumberIntoTotal { + i: 0u32, + }); + + assert_ok!(System::do_task(RuntimeOrigin::signed(1), task.clone(),)); + assert_eq!(Numbers::::get(0), None); + assert_eq!(Total::::get(), (0, 1)); + }); +} diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 6362e750d2ab98e5ae08e4e03ed57b72885c1f6f..9afd9c161303410c5d21903eab6511d02f3569c9 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -123,6 +123,9 @@ pub use safe_mode::{SafeMode, SafeModeError, SafeModeNotify}; mod tx_pause; pub use tx_pause::{TransactionPause, TransactionPauseError}; +pub mod tasks; +pub use tasks::Task; + #[cfg(feature = "try-runtime")] mod try_runtime; #[cfg(feature = "try-runtime")] diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 58815b107c829845a8a1a8df6c82a7dde4203ff4..995ac4f717911195e4dba202d350c6cbc09ae340 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -82,6 +82,8 @@ pub enum ExecuteOverweightError { QueuePaused, /// An unspecified error. Other, + /// Another call is currently ongoing and prevents this call from executing. + RecursiveDisallowed, } /// Can service queues and execute overweight messages. diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 78032cc0a9407cb2505863ebac39c3f1bd0aa4f6..bf3053a3f8f59b6d55acadb208a8731c7f080e41 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -1170,17 +1170,26 @@ impl PreimageRecipient for () { fn unnote_preimage(_: &Hash) {} } -/// Trait for creating an asset account with a deposit taken from a designated depositor specified -/// by the client. +/// Trait for touching/creating an asset account with a deposit taken from a designated depositor +/// specified by the client. +/// +/// Ensures that transfers to the touched account will succeed without being denied by the account +/// creation requirements. For example, it is useful for the account creation of non-sufficient +/// assets when its system account may not have the free consumer reference required for it. If +/// there is no risk of failing to meet those requirements, the touch operation can be a no-op, as +/// is common for native assets. pub trait AccountTouch { /// The type for currency units of the deposit. type Balance; - /// The deposit amount of a native currency required for creating an account of the `asset`. + /// The deposit amount of a native currency required for touching an account of the `asset`. fn deposit_required(asset: AssetId) -> Self::Balance; + /// Check if an account for a given asset should be touched to meet the existence requirements. + fn should_touch(asset: AssetId, who: &AccountId) -> bool; + /// Create an account for `who` of the `asset` with a deposit taken from the `depositor`. 
- fn touch(asset: AssetId, who: AccountId, depositor: AccountId) -> DispatchResult; + fn touch(asset: AssetId, who: &AccountId, depositor: &AccountId) -> DispatchResult; } #[cfg(test)] diff --git a/substrate/frame/support/src/traits/tasks.rs b/substrate/frame/support/src/traits/tasks.rs new file mode 100644 index 0000000000000000000000000000000000000000..24f3430cf50b5a23175c4c586c1642da580cf862 --- /dev/null +++ b/substrate/frame/support/src/traits/tasks.rs @@ -0,0 +1,87 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains the [`Task`] trait, which defines a general-purpose way for defining and executing +//! service work, and supporting types. + +use codec::FullCodec; +use scale_info::TypeInfo; +use sp_runtime::DispatchError; +use sp_std::{fmt::Debug, iter::Iterator, vec, vec::IntoIter}; +use sp_weights::Weight; + +/// Contains re-exports of all the supporting types for the [`Task`] trait. Used in the macro +/// expansion of `RuntimeTask`. +#[doc(hidden)] +pub mod __private { + pub use codec::FullCodec; + pub use scale_info::TypeInfo; + pub use sp_runtime::DispatchError; + pub use sp_std::{fmt::Debug, iter::Iterator, vec, vec::IntoIter}; + pub use sp_weights::Weight; +} + +/// A general-purpose trait which defines a type of service work (i.e., work to be performed by an +/// off-chain worker) including methods for enumerating, validating, indexing, and running +/// tasks of this type. +pub trait Task: Sized + FullCodec + TypeInfo + Clone + Debug + PartialEq + Eq { + /// An [`Iterator`] over tasks of this type used as the return type for `iter`. + type Enumeration: Iterator<Item = Self>; + + /// Inspects the pallet's state and enumerates tasks of this type. + fn iter() -> Self::Enumeration; + + /// Checks if a particular instance of this `Task` variant is a valid piece of work. + fn is_valid(&self) -> bool; + + /// Performs the work for this particular `Task` variant. + fn run(&self) -> Result<(), DispatchError>; + + /// Returns the weight of executing this `Task`. + fn weight(&self) -> Weight; + + /// A unique value representing this `Task` within the current pallet. Analogous to + /// `call_index`, but for tasks. + /// + /// This value should be unique within the current pallet and can overlap with task indices + /// in other pallets.
+ fn task_index(&self) -> u32; +} + +impl Task for () { + type Enumeration = IntoIter; + + fn iter() -> Self::Enumeration { + vec![].into_iter() + } + + fn is_valid(&self) -> bool { + true + } + + fn run(&self) -> Result<(), DispatchError> { + Ok(()) + } + + fn weight(&self) -> Weight { + Weight::default() + } + + fn task_index(&self) -> u32 { + 0 + } +} diff --git a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs index 995797bc8f66b10861a68831984302057ecfbe34..0e25102197007e7ce3f38eef7417425f09dbcd2f 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -20,8 +20,9 @@ use super::{super::Imbalance as ImbalanceT, Balanced, *}; use crate::traits::{ + fungibles, misc::{SameOrOther, TryDrop}, - tokens::Balance, + tokens::{AssetId, Balance}, }; use frame_support_procedural::{EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use sp_runtime::traits::Zero; @@ -87,6 +88,11 @@ impl, OppositeOnDrop: HandleImbalance pub(crate) fn new(amount: B) -> Self { Self { amount, _phantom: PhantomData } } + + /// Forget the imbalance without invoking the on-drop handler. + pub(crate) fn forget(imbalance: Self) { + sp_std::mem::forget(imbalance); + } } impl, OppositeOnDrop: HandleImbalanceDrop> @@ -149,6 +155,27 @@ impl, OppositeOnDrop: HandleImbalance } } +/// Converts a `fungibles` `imbalance` instance to an instance of a `fungible` imbalance type. +/// +/// This function facilitates imbalance conversions within the implementations of +/// [`frame_support::traits::fungibles::UnionOf`], [`frame_support::traits::fungible::UnionOf`], and +/// [`frame_support::traits::fungible::ItemOf`] adapters. It is intended only for internal use +/// within the current crate. +pub(crate) fn from_fungibles< + A: AssetId, + B: Balance, + OnDropIn: fungibles::HandleImbalanceDrop, + OppositeIn: fungibles::HandleImbalanceDrop, + OnDropOut: HandleImbalanceDrop, + OppositeOut: HandleImbalanceDrop, +>( + imbalance: fungibles::Imbalance, +) -> Imbalance { + let new = Imbalance::new(imbalance.peek()); + fungibles::Imbalance::forget(imbalance); + new +} + /// Imbalance implying that the total_issuance value is less than the sum of all account balances. pub type Debt = Imbalance< >::Balance, diff --git a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs index 636866ab93c9ba76379a9fe6b43a371e0819ea9c..fe252c6b0893d05ddbdd33d561e0388daddee307 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs @@ -17,14 +17,16 @@ //! Adapter to use `fungibles::*` implementations as `fungible::*`. -use sp_core::Get; -use sp_runtime::{DispatchError, DispatchResult}; - use super::*; -use crate::traits::tokens::{ - fungibles, DepositConsequence, Fortitude, Imbalance as ImbalanceT, Precision, Preservation, - Provenance, Restriction, WithdrawConsequence, +use crate::traits::{ + fungible::imbalance, + tokens::{ + fungibles, DepositConsequence, Fortitude, Precision, Preservation, Provenance, Restriction, + WithdrawConsequence, + }, }; +use sp_core::Get; +use sp_runtime::{DispatchError, DispatchResult}; /// Convert a `fungibles` trait implementation into a `fungible` trait implementation by identifying /// a single item. 
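To make the adapter concrete, here is a hedged sketch of how `ItemOf` is typically declared in a runtime; `Runtime`, `pallet_assets`, the asset id `1984`, and the `AccountId` alias are illustrative assumptions, not part of this diff.

```rust
use frame_support::traits::{tokens::fungible::ItemOf, ConstU32};

// Expose a single asset of a `fungibles` implementation as a plain `fungible`.
type AccountId = u64; // placeholder account id type for the sketch
type Usdt = ItemOf<
    pallet_assets::Pallet<Runtime>, // the `fungibles` provider (assumed)
    ConstU32<1984>,                 // which asset of that set to pin to (assumed id)
    AccountId,                      // the account identifier type
>;
```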
@@ -381,35 +383,38 @@ impl< precision: Precision, ) -> Result, DispatchError> { >::deposit(A::get(), who, value, precision) - .map(|debt| Imbalance::new(debt.peek())) + .map(imbalance::from_fungibles) } fn issue(amount: Self::Balance) -> Credit { - Imbalance::new(>::issue(A::get(), amount).peek()) + let credit = >::issue(A::get(), amount); + imbalance::from_fungibles(credit) } fn pair(amount: Self::Balance) -> (Debt, Credit) { let (a, b) = >::pair(A::get(), amount); - (Imbalance::new(a.peek()), Imbalance::new(b.peek())) + (imbalance::from_fungibles(a), imbalance::from_fungibles(b)) } fn rescind(amount: Self::Balance) -> Debt { - Imbalance::new(>::rescind(A::get(), amount).peek()) + let debt = >::rescind(A::get(), amount); + imbalance::from_fungibles(debt) } fn resolve( who: &AccountId, credit: Credit, ) -> Result<(), Credit> { - let credit = fungibles::Imbalance::new(A::get(), credit.peek()); + let credit = fungibles::imbalance::from_fungible(credit, A::get()); >::resolve(who, credit) - .map_err(|credit| Imbalance::new(credit.peek())) + .map_err(imbalance::from_fungibles) } fn settle( who: &AccountId, debt: Debt, preservation: Preservation, ) -> Result, Debt> { - let debt = fungibles::Imbalance::new(A::get(), debt.peek()); - >::settle(who, debt, preservation) - .map(|credit| Imbalance::new(credit.peek())) - .map_err(|debt| Imbalance::new(debt.peek())) + let debt = fungibles::imbalance::from_fungible(debt, A::get()); + >::settle(who, debt, preservation).map_or_else( + |d| Err(imbalance::from_fungibles(d)), + |c| Ok(imbalance::from_fungibles(c)), + ) } fn withdraw( who: &AccountId, @@ -426,7 +431,7 @@ impl< preservation, force, ) - .map(|credit| Imbalance::new(credit.peek())) + .map(imbalance::from_fungibles) } } @@ -443,7 +448,7 @@ impl< ) -> (Credit, Self::Balance) { let (credit, amount) = >::slash(A::get(), reason, who, amount); - (Imbalance::new(credit.peek()), amount) + (imbalance::from_fungibles(credit), amount) } } diff --git a/substrate/frame/support/src/traits/tokens/fungible/mod.rs b/substrate/frame/support/src/traits/tokens/fungible/mod.rs index 61b75fd6563c84659ca19adeb39217feb91524a8..ba4a2e5e21a2c9f5115ca6331e85df0fea58bb74 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/mod.rs @@ -41,9 +41,10 @@ pub mod conformance_tests; pub mod freeze; pub mod hold; -mod imbalance; +pub(crate) mod imbalance; mod item_of; mod regular; +mod union_of; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support_procedural::{CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; @@ -67,6 +68,7 @@ pub use regular::{ use sp_arithmetic::traits::Zero; use sp_core::Get; use sp_runtime::{traits::Convert, DispatchError}; +pub use union_of::{NativeFromLeft, NativeOrWithId, UnionOf}; use crate::{ ensure, diff --git a/substrate/frame/support/src/traits/tokens/fungible/regular.rs b/substrate/frame/support/src/traits/tokens/fungible/regular.rs index f2fb5c5f7c24e4fa3517c0f4dd331e874512e36e..aece73777d280ee40e70a2270bae7abe9189f774 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/regular.rs @@ -514,3 +514,47 @@ pub trait Balanced: Inspect + Unbalanced { fn done_deposit(_who: &AccountId, _amount: Self::Balance) {} fn done_withdraw(_who: &AccountId, _amount: Self::Balance) {} } + +/// Dummy implementation of [`Inspect`] +#[cfg(feature = "std")] +impl Inspect for () { + type Balance = u32; + fn total_issuance() -> Self::Balance { + 0 + } + 
fn minimum_balance() -> Self::Balance { + 0 + } + fn total_balance(_: &AccountId) -> Self::Balance { + 0 + } + fn balance(_: &AccountId) -> Self::Balance { + 0 + } + fn reducible_balance(_: &AccountId, _: Preservation, _: Fortitude) -> Self::Balance { + 0 + } + fn can_deposit(_: &AccountId, _: Self::Balance, _: Provenance) -> DepositConsequence { + DepositConsequence::Success + } + fn can_withdraw(_: &AccountId, _: Self::Balance) -> WithdrawConsequence { + WithdrawConsequence::Success + } +} + +/// Dummy implementation of [`Unbalanced`] +#[cfg(feature = "std")] +impl Unbalanced for () { + fn handle_dust(_: Dust) {} + fn write_balance( + _: &AccountId, + _: Self::Balance, + ) -> Result, DispatchError> { + Ok(None) + } + fn set_total_issuance(_: Self::Balance) {} +} + +/// Dummy implementation of [`Mutate`] +#[cfg(feature = "std")] +impl Mutate for () {} diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs new file mode 100644 index 0000000000000000000000000000000000000000..86505befc05f75c052f8d33f0f495ba6768d71ba --- /dev/null +++ b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs @@ -0,0 +1,924 @@ +// This file is part of Substrate. + +// Copyright (Criterion) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types to combine some `fungible::*` and `fungibles::*` implementations into one union +//! `fungibles::*` implementation. + +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::{ + fungible::imbalance, + tokens::{ + fungible, fungibles, AssetId, DepositConsequence, Fortitude, Precision, Preservation, + Provenance, Restriction, WithdrawConsequence, + }, + AccountTouch, +}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::Convert, + DispatchError, DispatchResult, Either, + Either::{Left, Right}, + RuntimeDebug, +}; +use sp_std::cmp::Ordering; + +/// The `NativeOrWithId` enum classifies an asset as either `Native` to the current chain or as an +/// asset with a specific ID. +#[derive(Decode, Encode, Default, MaxEncodedLen, TypeInfo, Clone, RuntimeDebug, Eq)] +pub enum NativeOrWithId +where + AssetId: Ord, +{ + /// Represents the native asset of the current chain. + /// + /// E.g., DOT for the Polkadot Asset Hub. + #[default] + Native, + /// Represents an asset identified by its underlying `AssetId`. 
+ WithId(AssetId), +} +impl From for NativeOrWithId { + fn from(asset: AssetId) -> Self { + Self::WithId(asset) + } +} +impl Ord for NativeOrWithId { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::Native, Self::Native) => Ordering::Equal, + (Self::Native, Self::WithId(_)) => Ordering::Less, + (Self::WithId(_), Self::Native) => Ordering::Greater, + (Self::WithId(id1), Self::WithId(id2)) => ::cmp(id1, id2), + } + } +} +impl PartialOrd for NativeOrWithId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(::cmp(self, other)) + } +} +impl PartialEq for NativeOrWithId { + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == Ordering::Equal + } +} + +/// Criterion for [`UnionOf`] where a set for [`NativeOrWithId::Native`] asset located from the left +/// and for [`NativeOrWithId::WithId`] from the right. +pub struct NativeFromLeft; +impl Convert, Either<(), AssetId>> for NativeFromLeft { + fn convert(asset: NativeOrWithId) -> Either<(), AssetId> { + match asset { + NativeOrWithId::Native => Either::Left(()), + NativeOrWithId::WithId(id) => Either::Right(id), + } + } +} + +/// Type to combine some `fungible::*` and `fungibles::*` implementations into one union +/// `fungibles::*` implementation. +/// +/// ### Parameters: +/// - `Left` is `fungible::*` implementation that is incorporated into the resulting union. +/// - `Right` is `fungibles::*` implementation that is incorporated into the resulting union. +/// - `Criterion` determines whether the `AssetKind` belongs to the `Left` or `Right` set. +/// - `AssetKind` is a superset type encompassing asset kinds from `Left` and `Right` sets. +/// - `AccountId` is an account identifier type. +pub struct UnionOf( + sp_std::marker::PhantomData<(Left, Right, Criterion, AssetKind, AccountId)>, +); + +impl< + Left: fungible::Inspect, + Right: fungibles::Inspect, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::Inspect for UnionOf +{ + type AssetId = AssetKind; + type Balance = Left::Balance; + + fn total_issuance(asset: Self::AssetId) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::total_issuance(), + Right(a) => >::total_issuance(a), + } + } + fn active_issuance(asset: Self::AssetId) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::active_issuance(), + Right(a) => >::active_issuance(a), + } + } + fn minimum_balance(asset: Self::AssetId) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::minimum_balance(), + Right(a) => >::minimum_balance(a), + } + } + fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::balance(who), + Right(a) => >::balance(a, who), + } + } + fn total_balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::total_balance(who), + Right(a) => >::total_balance(a, who), + } + } + fn reducible_balance( + asset: Self::AssetId, + who: &AccountId, + preservation: Preservation, + force: Fortitude, + ) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => + >::reducible_balance(who, preservation, force), + Right(a) => >::reducible_balance( + a, + who, + preservation, + force, + ), + } + } + fn can_deposit( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + provenance: Provenance, + ) -> DepositConsequence { + match Criterion::convert(asset) { + Left(()) => + >::can_deposit(who, amount, provenance), + Right(a) => + >::can_deposit(a, who, amount, 
provenance), + } + } + fn can_withdraw( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + match Criterion::convert(asset) { + Left(()) => >::can_withdraw(who, amount), + Right(a) => >::can_withdraw(a, who, amount), + } + } + fn asset_exists(asset: Self::AssetId) -> bool { + match Criterion::convert(asset) { + Left(()) => true, + Right(a) => >::asset_exists(a), + } + } +} + +impl< + Left: fungible::InspectHold, + Right: fungibles::InspectHold, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::InspectHold for UnionOf +{ + type Reason = Left::Reason; + + fn reducible_total_balance_on_hold( + asset: Self::AssetId, + who: &AccountId, + force: Fortitude, + ) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => + >::reducible_total_balance_on_hold( + who, force, + ), + Right(a) => + >::reducible_total_balance_on_hold( + a, who, force, + ), + } + } + fn hold_available(asset: Self::AssetId, reason: &Self::Reason, who: &AccountId) -> bool { + match Criterion::convert(asset) { + Left(()) => >::hold_available(reason, who), + Right(a) => + >::hold_available(a, reason, who), + } + } + fn total_balance_on_hold(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::total_balance_on_hold(who), + Right(a) => >::total_balance_on_hold(a, who), + } + } + fn balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + ) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::balance_on_hold(reason, who), + Right(a) => + >::balance_on_hold(a, reason, who), + } + } + fn can_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> bool { + match Criterion::convert(asset) { + Left(()) => >::can_hold(reason, who, amount), + Right(a) => + >::can_hold(a, reason, who, amount), + } + } +} + +impl< + Left: fungible::InspectFreeze, + Right: fungibles::InspectFreeze, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::InspectFreeze for UnionOf +{ + type Id = Left::Id; + fn balance_frozen(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::balance_frozen(id, who), + Right(a) => >::balance_frozen(a, id, who), + } + } + fn balance_freezable(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::balance_freezable(who), + Right(a) => >::balance_freezable(a, who), + } + } + fn can_freeze(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> bool { + match Criterion::convert(asset) { + Left(()) => >::can_freeze(id, who), + Right(a) => >::can_freeze(a, id, who), + } + } +} + +impl< + Left: fungible::Unbalanced, + Right: fungibles::Unbalanced, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::Unbalanced for UnionOf +{ + fn handle_dust(dust: fungibles::Dust) + where + Self: Sized, + { + match Criterion::convert(dust.0) { + Left(()) => + >::handle_dust(fungible::Dust(dust.1)), + Right(a) => + >::handle_dust(fungibles::Dust(a, dust.1)), + } + } + fn write_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result, DispatchError> { + match Criterion::convert(asset) { + Left(()) => >::write_balance(who, amount), + Right(a) => >::write_balance(a, who, amount), + } + } + fn set_total_issuance(asset: Self::AssetId, amount: Self::Balance) -> () { + match Criterion::convert(asset) { + Left(()) => 
>::set_total_issuance(amount), + Right(a) => >::set_total_issuance(a, amount), + } + } + fn decrease_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::decrease_balance( + who, + amount, + precision, + preservation, + force, + ), + Right(a) => >::decrease_balance( + a, + who, + amount, + precision, + preservation, + force, + ), + } + } + fn increase_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => + >::increase_balance(who, amount, precision), + Right(a) => >::increase_balance( + a, who, amount, precision, + ), + } + } +} + +impl< + Left: fungible::UnbalancedHold, + Right: fungibles::UnbalancedHold, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::UnbalancedHold for UnionOf +{ + fn set_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(()) => >::set_balance_on_hold( + reason, who, amount, + ), + Right(a) => >::set_balance_on_hold( + a, reason, who, amount, + ), + } + } + fn decrease_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::decrease_balance_on_hold( + reason, who, amount, precision, + ), + Right(a) => >::decrease_balance_on_hold( + a, reason, who, amount, precision, + ), + } + } + fn increase_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::increase_balance_on_hold( + reason, who, amount, precision, + ), + Right(a) => >::increase_balance_on_hold( + a, reason, who, amount, precision, + ), + } + } +} + +impl< + Left: fungible::Mutate, + Right: fungibles::Mutate, + Criterion: Convert>, + AssetKind: AssetId, + AccountId: Eq, + > fungibles::Mutate for UnionOf +{ + fn mint_into( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::mint_into(who, amount), + Right(a) => >::mint_into(a, who, amount), + } + } + fn burn_from( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => + >::burn_from(who, amount, precision, force), + Right(a) => + >::burn_from(a, who, amount, precision, force), + } + } + fn shelve( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::shelve(who, amount), + Right(a) => >::shelve(a, who, amount), + } + } + fn restore( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::restore(who, amount), + Right(a) => >::restore(a, who, amount), + } + } + fn transfer( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + preservation: Preservation, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => + >::transfer(source, dest, amount, preservation), + Right(a) => >::transfer( + a, + source, + dest, + amount, + preservation, + ), + 
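// -----------------------------------------------------------------------------------------
// Illustrative sketch (not part of the diff): a standalone model of how the `Criterion`
// parameter of the `UnionOf` adapter defined above routes a superset asset kind to either
// the `fungible::*` (native) side or the `fungibles::*` side, and how `NativeOrWithId`
// orders `Native` before every `WithId` asset. The types below are local stand-ins for
// `NativeOrWithId`, `Either<(), AssetId>` and `NativeFromLeft`; they only mirror the
// behaviour described in the doc comments above and are not the frame_support definitions.
use std::cmp::Ordering;

#[derive(PartialEq, Eq)]
enum NativeOrWithId<AssetId> {
	Native,
	WithId(AssetId),
}

impl<AssetId: Ord> Ord for NativeOrWithId<AssetId> {
	fn cmp(&self, other: &Self) -> Ordering {
		match (self, other) {
			(Self::Native, Self::Native) => Ordering::Equal,
			(Self::Native, Self::WithId(_)) => Ordering::Less,
			(Self::WithId(_), Self::Native) => Ordering::Greater,
			(Self::WithId(a), Self::WithId(b)) => a.cmp(b),
		}
	}
}

impl<AssetId: Ord> PartialOrd for NativeOrWithId<AssetId> {
	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
		Some(self.cmp(other))
	}
}

// Stand-in for `Either<(), AssetId>`: `Left(())` selects the `fungible` (native) set,
// `Right(id)` selects the `fungibles` set.
enum Side<AssetId> {
	Left(()),
	Right(AssetId),
}

// Stand-in for the `NativeFromLeft` criterion: the native asset is located from the left,
// every other asset from the right.
fn native_from_left<AssetId>(asset: NativeOrWithId<AssetId>) -> Side<AssetId> {
	match asset {
		NativeOrWithId::Native => Side::Left(()),
		NativeOrWithId::WithId(id) => Side::Right(id),
	}
}

fn main() {
	// `Native` sorts before any `WithId` asset.
	assert!(NativeOrWithId::<u32>::Native < NativeOrWithId::WithId(0));
	// Routing: the union dispatches each call to exactly one of its two backends.
	match native_from_left(NativeOrWithId::WithId(7u32)) {
		Side::Left(()) => println!("dispatch to the native `fungible::*` implementation"),
		Side::Right(id) => println!("dispatch to the `fungibles::*` implementation for asset {id}"),
	}
}
// -----------------------------------------------------------------------------------------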
} + } + + fn set_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::set_balance(who, amount), + Right(a) => >::set_balance(a, who, amount), + } + } +} + +impl< + Left: fungible::MutateHold, + Right: fungibles::MutateHold, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::MutateHold for UnionOf +{ + fn hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(()) => >::hold(reason, who, amount), + Right(a) => >::hold(a, reason, who, amount), + } + } + fn release( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => + >::release(reason, who, amount, precision), + Right(a) => >::release( + a, reason, who, amount, precision, + ), + } + } + fn burn_held( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::burn_held( + reason, who, amount, precision, force, + ), + Right(a) => >::burn_held( + a, reason, who, amount, precision, force, + ), + } + } + fn transfer_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + precision: Precision, + mode: Restriction, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::transfer_on_hold( + reason, source, dest, amount, precision, mode, force, + ), + Right(a) => >::transfer_on_hold( + a, reason, source, dest, amount, precision, mode, force, + ), + } + } + fn transfer_and_hold( + asset: Self::AssetId, + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(()) => >::transfer_and_hold( + reason, + source, + dest, + amount, + precision, + preservation, + force, + ), + Right(a) => >::transfer_and_hold( + a, + reason, + source, + dest, + amount, + precision, + preservation, + force, + ), + } + } +} + +impl< + Left: fungible::MutateFreeze, + Right: fungibles::MutateFreeze, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::MutateFreeze for UnionOf +{ + fn set_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(()) => >::set_freeze(id, who, amount), + Right(a) => + >::set_freeze(a, id, who, amount), + } + } + fn extend_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(()) => >::extend_freeze(id, who, amount), + Right(a) => + >::extend_freeze(a, id, who, amount), + } + } + fn thaw(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> DispatchResult { + match Criterion::convert(asset) { + Left(()) => >::thaw(id, who), + Right(a) => >::thaw(a, id, who), + } + } +} + +pub struct ConvertImbalanceDropHandler< + Left, + Right, + Criterion, + AssetKind, + Balance, + AssetId, + AccountId, +>(sp_std::marker::PhantomData<(Left, Right, Criterion, AssetKind, Balance, AssetId, AccountId)>); + +impl< + Left: fungible::HandleImbalanceDrop, + Right: 
fungibles::HandleImbalanceDrop, + Criterion: Convert>, + AssetKind, + Balance, + AssetId, + AccountId, + > fungibles::HandleImbalanceDrop + for ConvertImbalanceDropHandler +{ + fn handle(asset: AssetKind, amount: Balance) { + match Criterion::convert(asset) { + Left(()) => Left::handle(amount), + Right(a) => Right::handle(a, amount), + } + } +} + +impl< + Left: fungible::Balanced, + Right: fungibles::Balanced, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::Balanced for UnionOf +{ + type OnDropDebt = ConvertImbalanceDropHandler< + Left::OnDropDebt, + Right::OnDropDebt, + Criterion, + AssetKind, + Left::Balance, + Right::AssetId, + AccountId, + >; + type OnDropCredit = ConvertImbalanceDropHandler< + Left::OnDropCredit, + Right::OnDropCredit, + Criterion, + AssetKind, + Left::Balance, + Right::AssetId, + AccountId, + >; + + fn deposit( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + precision: Precision, + ) -> Result, DispatchError> { + match Criterion::convert(asset.clone()) { + Left(()) => >::deposit(who, value, precision) + .map(|d| fungibles::imbalance::from_fungible(d, asset)), + Right(a) => + >::deposit(a, who, value, precision) + .map(|d| fungibles::imbalance::from_fungibles(d, asset)), + } + } + fn issue(asset: Self::AssetId, amount: Self::Balance) -> fungibles::Credit { + match Criterion::convert(asset.clone()) { + Left(()) => { + let credit = >::issue(amount); + fungibles::imbalance::from_fungible(credit, asset) + }, + Right(a) => { + let credit = >::issue(a, amount); + fungibles::imbalance::from_fungibles(credit, asset) + }, + } + } + fn pair( + asset: Self::AssetId, + amount: Self::Balance, + ) -> (fungibles::Debt, fungibles::Credit) { + match Criterion::convert(asset.clone()) { + Left(()) => { + let (a, b) = >::pair(amount); + ( + fungibles::imbalance::from_fungible(a, asset.clone()), + fungibles::imbalance::from_fungible(b, asset), + ) + }, + Right(a) => { + let (a, b) = >::pair(a, amount); + ( + fungibles::imbalance::from_fungibles(a, asset.clone()), + fungibles::imbalance::from_fungibles(b, asset), + ) + }, + } + } + fn rescind(asset: Self::AssetId, amount: Self::Balance) -> fungibles::Debt { + match Criterion::convert(asset.clone()) { + Left(()) => { + let debt = >::rescind(amount); + fungibles::imbalance::from_fungible(debt, asset) + }, + Right(a) => { + let debt = >::rescind(a, amount); + fungibles::imbalance::from_fungibles(debt, asset) + }, + } + } + fn resolve( + who: &AccountId, + credit: fungibles::Credit, + ) -> Result<(), fungibles::Credit> { + let asset = credit.asset(); + match Criterion::convert(asset.clone()) { + Left(()) => { + let credit = imbalance::from_fungibles(credit); + >::resolve(who, credit) + .map_err(|credit| fungibles::imbalance::from_fungible(credit, asset)) + }, + Right(a) => { + let credit = fungibles::imbalance::from_fungibles(credit, a); + >::resolve(who, credit) + .map_err(|credit| fungibles::imbalance::from_fungibles(credit, asset)) + }, + } + } + fn settle( + who: &AccountId, + debt: fungibles::Debt, + preservation: Preservation, + ) -> Result, fungibles::Debt> { + let asset = debt.asset(); + match Criterion::convert(asset.clone()) { + Left(()) => { + let debt = imbalance::from_fungibles(debt); + match >::settle(who, debt, preservation) { + Ok(c) => Ok(fungibles::imbalance::from_fungible(c, asset)), + Err(d) => Err(fungibles::imbalance::from_fungible(d, asset)), + } + }, + Right(a) => { + let debt = fungibles::imbalance::from_fungibles(debt, a); + match >::settle(who, debt, preservation) { + 
Ok(c) => Ok(fungibles::imbalance::from_fungibles(c, asset)), + Err(d) => Err(fungibles::imbalance::from_fungibles(d, asset)), + } + }, + } + } + fn withdraw( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result, DispatchError> { + match Criterion::convert(asset.clone()) { + Left(()) => >::withdraw( + who, + value, + precision, + preservation, + force, + ) + .map(|c| fungibles::imbalance::from_fungible(c, asset)), + Right(a) => >::withdraw( + a, + who, + value, + precision, + preservation, + force, + ) + .map(|c| fungibles::imbalance::from_fungibles(c, asset)), + } + } +} + +impl< + Left: fungible::BalancedHold, + Right: fungibles::BalancedHold, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::BalancedHold for UnionOf +{ + fn slash( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> (fungibles::Credit, Self::Balance) { + match Criterion::convert(asset.clone()) { + Left(()) => { + let (credit, amount) = + >::slash(reason, who, amount); + (fungibles::imbalance::from_fungible(credit, asset), amount) + }, + Right(a) => { + let (credit, amount) = + >::slash(a, reason, who, amount); + (fungibles::imbalance::from_fungibles(credit, asset), amount) + }, + } + } +} + +impl< + Left: fungible::Inspect, + Right: fungibles::Inspect + fungibles::Create, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::Create for UnionOf +{ + fn create( + asset: AssetKind, + admin: AccountId, + is_sufficient: bool, + min_balance: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + // no-op for `Left` since `Create` trait is not defined within `fungible::*`. + Left(()) => Ok(()), + Right(a) => >::create( + a, + admin, + is_sufficient, + min_balance, + ), + } + } +} + +impl< + Left: fungible::Inspect + + AccountTouch<(), AccountId, Balance = >::Balance>, + Right: fungibles::Inspect + + AccountTouch< + Right::AssetId, + AccountId, + Balance = >::Balance, + >, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > AccountTouch for UnionOf +{ + type Balance = >::Balance; + + fn deposit_required(asset: AssetKind) -> Self::Balance { + match Criterion::convert(asset) { + Left(()) => >::deposit_required(()), + Right(a) => >::deposit_required(a), + } + } + + fn should_touch(asset: AssetKind, who: &AccountId) -> bool { + match Criterion::convert(asset) { + Left(()) => >::should_touch((), who), + Right(a) => >::should_touch(a, who), + } + } + + fn touch(asset: AssetKind, who: &AccountId, depositor: &AccountId) -> DispatchResult { + match Criterion::convert(asset) { + Left(()) => >::touch((), who, depositor), + Right(a) => + >::touch(a, who, depositor), + } + } +} diff --git a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs index 7c0d7721a2e60908f1a5a4be8a8c876ff2bd1ae2..54c1e900b6e3664b6fbf3470e41d17eaae37e635 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -20,8 +20,9 @@ use super::*; use crate::traits::{ + fungible, misc::{SameOrOther, TryDrop}, - tokens::{AssetId, Balance}, + tokens::{imbalance::Imbalance as ImbalanceT, AssetId, Balance}, }; use frame_support_procedural::{EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use sp_runtime::traits::Zero; @@ -93,6 +94,11 @@ impl< Self { asset, amount, _phantom: PhantomData } 
} + /// Forget the imbalance without invoking the on-drop handler. + pub(crate) fn forget(imbalance: Self) { + sp_std::mem::forget(imbalance); + } + pub fn drop_zero(self) -> Result<(), Self> { if self.amount.is_zero() { sp_std::mem::forget(self); @@ -168,6 +174,52 @@ impl< } } +/// Converts a `fungible` `imbalance` instance to an instance of a `fungibles` imbalance type using +/// a specified `asset`. +/// +/// This function facilitates imbalance conversions within the implementations of +/// [`frame_support::traits::fungibles::UnionOf`], [`frame_support::traits::fungible::UnionOf`], and +/// [`frame_support::traits::fungible::ItemOf`] adapters. It is intended only for internal use +/// within the current crate. +pub(crate) fn from_fungible< + A: AssetId, + B: Balance, + OnDropIn: fungible::HandleImbalanceDrop, + OppositeIn: fungible::HandleImbalanceDrop, + OnDropOut: HandleImbalanceDrop, + OppositeOut: HandleImbalanceDrop, +>( + imbalance: fungible::Imbalance, + asset: A, +) -> Imbalance { + let new = Imbalance::new(asset, imbalance.peek()); + fungible::Imbalance::forget(imbalance); + new +} + +/// Converts a `fungibles` `imbalance` instance of one type to another using a specified `asset`. +/// +/// This function facilitates imbalance conversions within the implementations of +/// [`frame_support::traits::fungibles::UnionOf`], [`frame_support::traits::fungible::UnionOf`], and +/// [`frame_support::traits::fungible::ItemOf`] adapters. It is intended only for internal use +/// within the current crate. +pub(crate) fn from_fungibles< + A: AssetId, + B: Balance, + OnDropIn: HandleImbalanceDrop, + OppositeIn: HandleImbalanceDrop, + AssetOut: AssetId, + OnDropOut: HandleImbalanceDrop, + OppositeOut: HandleImbalanceDrop, +>( + imbalance: Imbalance, + asset: AssetOut, +) -> Imbalance { + let new = Imbalance::new(asset, imbalance.peek()); + Imbalance::forget(imbalance); + new +} + /// Imbalance implying that the total_issuance value is less than the sum of all account balances. 
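// -----------------------------------------------------------------------------------------
// Illustrative sketch (not part of the diff): a standalone model of the conversion pattern
// used by the `from_fungible` / `from_fungibles` helpers added above. An imbalance normally
// invokes an on-drop handler when it is dropped; when converting it into another imbalance
// type, the helpers read the amount with `peek`, build the new instance, and `mem::forget`
// the old one so only the new instance's drop handler ever runs. The `TrackedImbalance`
// type and the `DROPPED` counter below are invented purely for this example.
use std::mem;
use std::sync::atomic::{AtomicU64, Ordering};

static DROPPED: AtomicU64 = AtomicU64::new(0);

struct TrackedImbalance {
	amount: u64,
}

impl TrackedImbalance {
	fn new(amount: u64) -> Self {
		Self { amount }
	}
	fn peek(&self) -> u64 {
		self.amount
	}
}

impl Drop for TrackedImbalance {
	// Stand-in for the `HandleImbalanceDrop` hook: record that the handler ran.
	fn drop(&mut self) {
		DROPPED.fetch_add(self.amount, Ordering::Relaxed);
	}
}

// Convert without triggering the source's drop handler, mirroring
// `Imbalance::new(asset, imbalance.peek())` followed by `forget(imbalance)`.
fn convert(source: TrackedImbalance) -> TrackedImbalance {
	let new = TrackedImbalance::new(source.peek());
	mem::forget(source);
	new
}

fn main() {
	let converted = convert(TrackedImbalance::new(100));
	// The source was forgotten, so no drop handler has fired yet.
	assert_eq!(DROPPED.load(Ordering::Relaxed), 0);
	drop(converted);
	// Only the converted instance's handler runs, exactly once.
	assert_eq!(DROPPED.load(Ordering::Relaxed), 100);
}
// -----------------------------------------------------------------------------------------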
pub type Debt = Imbalance< >::AssetId, diff --git a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs index 4fd6ef43a15fafe91730fd1c5e01b2ca14bfd817..1db0706ba4fde4c3b53dbcdd5ea850a37752b1e9 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs @@ -21,11 +21,12 @@ pub mod approvals; mod enumerable; pub mod freeze; pub mod hold; -mod imbalance; +pub(crate) mod imbalance; mod lifetime; pub mod metadata; mod regular; pub mod roles; +mod union_of; pub use enumerable::Inspect as InspectEnumerable; pub use freeze::{Inspect as InspectFreeze, Mutate as MutateFreeze}; @@ -38,3 +39,4 @@ pub use lifetime::{Create, Destroy}; pub use regular::{ Balanced, DecreaseIssuance, Dust, IncreaseIssuance, Inspect, Mutate, Unbalanced, }; +pub use union_of::UnionOf; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs index a2fc4e55095222257953099af7b5795da9a2321a..41ef4b40c75b389971db43d8013072b269a30a39 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs @@ -593,3 +593,66 @@ pub trait Balanced: Inspect + Unbalanced { fn done_deposit(_asset: Self::AssetId, _who: &AccountId, _amount: Self::Balance) {} fn done_withdraw(_asset: Self::AssetId, _who: &AccountId, _amount: Self::Balance) {} } + +/// Dummy implementation of [`Inspect`] +#[cfg(feature = "std")] +impl Inspect for () { + type AssetId = u32; + type Balance = u32; + fn total_issuance(_: Self::AssetId) -> Self::Balance { + 0 + } + fn minimum_balance(_: Self::AssetId) -> Self::Balance { + 0 + } + fn total_balance(_: Self::AssetId, _: &AccountId) -> Self::Balance { + 0 + } + fn balance(_: Self::AssetId, _: &AccountId) -> Self::Balance { + 0 + } + fn reducible_balance( + _: Self::AssetId, + _: &AccountId, + _: Preservation, + _: Fortitude, + ) -> Self::Balance { + 0 + } + fn can_deposit( + _: Self::AssetId, + _: &AccountId, + _: Self::Balance, + _: Provenance, + ) -> DepositConsequence { + DepositConsequence::Success + } + fn can_withdraw( + _: Self::AssetId, + _: &AccountId, + _: Self::Balance, + ) -> WithdrawConsequence { + WithdrawConsequence::Success + } + fn asset_exists(_: Self::AssetId) -> bool { + false + } +} + +/// Dummy implementation of [`Unbalanced`] +#[cfg(feature = "std")] +impl Unbalanced for () { + fn handle_dust(_: Dust) {} + fn write_balance( + _: Self::AssetId, + _: &AccountId, + _: Self::Balance, + ) -> Result, DispatchError> { + Ok(None) + } + fn set_total_issuance(_: Self::AssetId, _: Self::Balance) {} +} + +/// Dummy implementation of [`Mutate`] +#[cfg(feature = "std")] +impl Mutate for () {} diff --git a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs new file mode 100644 index 0000000000000000000000000000000000000000..3619db3a37b63ac527a7038b4c99bbb2c4bed7c5 --- /dev/null +++ b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs @@ -0,0 +1,897 @@ +// This file is part of Substrate. + +// Copyright (Criterion) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Type to combine two `fungibles::*` implementations into one union `fungibles::*` implementation. + +use frame_support::traits::{ + tokens::{ + fungibles, fungibles::imbalance, AssetId, DepositConsequence, Fortitude, Precision, + Preservation, Provenance, Restriction, WithdrawConsequence, + }, + AccountTouch, +}; +use sp_runtime::{ + traits::Convert, + DispatchError, DispatchResult, Either, + Either::{Left, Right}, +}; + +/// Type to combine two `fungibles::*` implementations into one union `fungibles::*` implementation. +/// +/// ### Parameters: +/// - `Left` is `fungibles::*` implementation that is incorporated into the resulting union. +/// - `Right` is `fungibles::*` implementation that is incorporated into the resulting union. +/// - `Criterion` determines whether the `AssetKind` belongs to the `Left` or `Right` set. +/// - `AssetKind` is a superset type encompassing asset kinds from `Left` and `Right` sets. +/// - `AccountId` is an account identifier type. +pub struct UnionOf( + sp_std::marker::PhantomData<(Left, Right, Criterion, AssetKind, AccountId)>, +); + +impl< + Left: fungibles::Inspect, + Right: fungibles::Inspect, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::Inspect for UnionOf +{ + type AssetId = AssetKind; + type Balance = Left::Balance; + + fn total_issuance(asset: Self::AssetId) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::total_issuance(a), + Right(a) => >::total_issuance(a), + } + } + fn active_issuance(asset: Self::AssetId) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::active_issuance(a), + Right(a) => >::active_issuance(a), + } + } + fn minimum_balance(asset: Self::AssetId) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::minimum_balance(a), + Right(a) => >::minimum_balance(a), + } + } + fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::balance(a, who), + Right(a) => >::balance(a, who), + } + } + fn total_balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::total_balance(a, who), + Right(a) => >::total_balance(a, who), + } + } + fn reducible_balance( + asset: Self::AssetId, + who: &AccountId, + preservation: Preservation, + force: Fortitude, + ) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::reducible_balance( + a, + who, + preservation, + force, + ), + Right(a) => >::reducible_balance( + a, + who, + preservation, + force, + ), + } + } + fn can_deposit( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + provenance: Provenance, + ) -> DepositConsequence { + match Criterion::convert(asset) { + Left(a) => + >::can_deposit(a, who, amount, provenance), + Right(a) => + >::can_deposit(a, who, amount, provenance), + } + } + fn can_withdraw( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + match Criterion::convert(asset) { + Left(a) => >::can_withdraw(a, who, amount), + Right(a) => >::can_withdraw(a, who, amount), + } + } + fn 
asset_exists(asset: Self::AssetId) -> bool { + match Criterion::convert(asset) { + Left(a) => >::asset_exists(a), + Right(a) => >::asset_exists(a), + } + } +} + +impl< + Left: fungibles::InspectHold, + Right: fungibles::InspectHold, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::InspectHold for UnionOf +{ + type Reason = Left::Reason; + + fn reducible_total_balance_on_hold( + asset: Self::AssetId, + who: &AccountId, + force: Fortitude, + ) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => + >::reducible_total_balance_on_hold( + a, who, force, + ), + Right(a) => + >::reducible_total_balance_on_hold( + a, who, force, + ), + } + } + fn hold_available(asset: Self::AssetId, reason: &Self::Reason, who: &AccountId) -> bool { + match Criterion::convert(asset) { + Left(a) => >::hold_available(a, reason, who), + Right(a) => + >::hold_available(a, reason, who), + } + } + fn total_balance_on_hold(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::total_balance_on_hold(a, who), + Right(a) => >::total_balance_on_hold(a, who), + } + } + fn balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + ) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::balance_on_hold(a, reason, who), + Right(a) => + >::balance_on_hold(a, reason, who), + } + } + fn can_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> bool { + match Criterion::convert(asset) { + Left(a) => + >::can_hold(a, reason, who, amount), + Right(a) => + >::can_hold(a, reason, who, amount), + } + } +} + +impl< + Left: fungibles::InspectFreeze, + Right: fungibles::InspectFreeze, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::InspectFreeze for UnionOf +{ + type Id = Left::Id; + fn balance_frozen(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::balance_frozen(a, id, who), + Right(a) => >::balance_frozen(a, id, who), + } + } + fn balance_freezable(asset: Self::AssetId, who: &AccountId) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::balance_freezable(a, who), + Right(a) => >::balance_freezable(a, who), + } + } + fn can_freeze(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> bool { + match Criterion::convert(asset) { + Left(a) => >::can_freeze(a, id, who), + Right(a) => >::can_freeze(a, id, who), + } + } +} + +impl< + Left: fungibles::Unbalanced, + Right: fungibles::Unbalanced, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::Unbalanced for UnionOf +{ + fn handle_dust(dust: fungibles::Dust) + where + Self: Sized, + { + match Criterion::convert(dust.0) { + Left(a) => + >::handle_dust(fungibles::Dust(a, dust.1)), + Right(a) => + >::handle_dust(fungibles::Dust(a, dust.1)), + } + } + fn write_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result, DispatchError> { + match Criterion::convert(asset) { + Left(a) => >::write_balance(a, who, amount), + Right(a) => >::write_balance(a, who, amount), + } + } + fn set_total_issuance(asset: Self::AssetId, amount: Self::Balance) -> () { + match Criterion::convert(asset) { + Left(a) => >::set_total_issuance(a, amount), + Right(a) => >::set_total_issuance(a, amount), + } + } + fn decrease_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + preservation: Preservation, + force: 
Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::decrease_balance( + a, + who, + amount, + precision, + preservation, + force, + ), + Right(a) => >::decrease_balance( + a, + who, + amount, + precision, + preservation, + force, + ), + } + } + fn increase_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::increase_balance( + a, who, amount, precision, + ), + Right(a) => >::increase_balance( + a, who, amount, precision, + ), + } + } +} + +impl< + Left: fungibles::UnbalancedHold, + Right: fungibles::UnbalancedHold, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::UnbalancedHold for UnionOf +{ + fn set_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => >::set_balance_on_hold( + a, reason, who, amount, + ), + Right(a) => >::set_balance_on_hold( + a, reason, who, amount, + ), + } + } + fn decrease_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::decrease_balance_on_hold( + a, reason, who, amount, precision, + ), + Right(a) => >::decrease_balance_on_hold( + a, reason, who, amount, precision, + ), + } + } + fn increase_balance_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::increase_balance_on_hold( + a, reason, who, amount, precision, + ), + Right(a) => >::increase_balance_on_hold( + a, reason, who, amount, precision, + ), + } + } +} + +impl< + Left: fungibles::Mutate, + Right: fungibles::Mutate, + Criterion: Convert>, + AssetKind: AssetId, + AccountId: Eq, + > fungibles::Mutate for UnionOf +{ + fn mint_into( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::mint_into(a, who, amount), + Right(a) => >::mint_into(a, who, amount), + } + } + fn burn_from( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => + >::burn_from(a, who, amount, precision, force), + Right(a) => + >::burn_from(a, who, amount, precision, force), + } + } + fn shelve( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::shelve(a, who, amount), + Right(a) => >::shelve(a, who, amount), + } + } + fn restore( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::restore(a, who, amount), + Right(a) => >::restore(a, who, amount), + } + } + fn transfer( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + preservation: Preservation, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::transfer( + a, + source, + dest, + amount, + preservation, + ), + Right(a) => >::transfer( + a, + source, + dest, + amount, + preservation, + ), + } + } + + fn set_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::set_balance(a, who, amount), + Right(a) 
=> >::set_balance(a, who, amount), + } + } +} + +impl< + Left: fungibles::MutateHold, + Right: fungibles::MutateHold, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::MutateHold for UnionOf +{ + fn hold( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => >::hold(a, reason, who, amount), + Right(a) => >::hold(a, reason, who, amount), + } + } + fn release( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::release( + a, reason, who, amount, precision, + ), + Right(a) => >::release( + a, reason, who, amount, precision, + ), + } + } + fn burn_held( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + precision: Precision, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::burn_held( + a, reason, who, amount, precision, force, + ), + Right(a) => >::burn_held( + a, reason, who, amount, precision, force, + ), + } + } + fn transfer_on_hold( + asset: Self::AssetId, + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + precision: Precision, + mode: Restriction, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::transfer_on_hold( + a, reason, source, dest, amount, precision, mode, force, + ), + Right(a) => >::transfer_on_hold( + a, reason, source, dest, amount, precision, mode, force, + ), + } + } + fn transfer_and_hold( + asset: Self::AssetId, + reason: &Self::Reason, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result { + match Criterion::convert(asset) { + Left(a) => >::transfer_and_hold( + a, + reason, + source, + dest, + amount, + precision, + preservation, + force, + ), + Right(a) => >::transfer_and_hold( + a, + reason, + source, + dest, + amount, + precision, + preservation, + force, + ), + } + } +} + +impl< + Left: fungibles::MutateFreeze, + Right: fungibles::MutateFreeze, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::MutateFreeze for UnionOf +{ + fn set_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => >::set_freeze(a, id, who, amount), + Right(a) => + >::set_freeze(a, id, who, amount), + } + } + fn extend_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &AccountId, + amount: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => + >::extend_freeze(a, id, who, amount), + Right(a) => + >::extend_freeze(a, id, who, amount), + } + } + fn thaw(asset: Self::AssetId, id: &Self::Id, who: &AccountId) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => >::thaw(a, id, who), + Right(a) => >::thaw(a, id, who), + } + } +} + +pub struct ConvertImbalanceDropHandler< + Left, + Right, + LeftAssetId, + RightAssetId, + Criterion, + AssetKind, + Balance, + AccountId, +>( + sp_std::marker::PhantomData<( + Left, + Right, + LeftAssetId, + RightAssetId, + Criterion, + AssetKind, + Balance, + AccountId, + )>, +); + +impl< + Left: fungibles::HandleImbalanceDrop, + Right: fungibles::HandleImbalanceDrop, + LeftAssetId, + RightAssetId, + Criterion: Convert>, + AssetKind, + Balance, + AccountId, + > 
fungibles::HandleImbalanceDrop + for ConvertImbalanceDropHandler< + Left, + Right, + LeftAssetId, + RightAssetId, + Criterion, + AssetKind, + Balance, + AccountId, + > +{ + fn handle(asset: AssetKind, amount: Balance) { + match Criterion::convert(asset) { + Left(a) => Left::handle(a, amount), + Right(a) => Right::handle(a, amount), + } + } +} + +impl< + Left: fungibles::Balanced, + Right: fungibles::Balanced, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::Balanced for UnionOf +{ + type OnDropDebt = ConvertImbalanceDropHandler< + Left::OnDropDebt, + Right::OnDropDebt, + Left::AssetId, + Right::AssetId, + Criterion, + AssetKind, + Left::Balance, + AccountId, + >; + type OnDropCredit = ConvertImbalanceDropHandler< + Left::OnDropCredit, + Right::OnDropCredit, + Left::AssetId, + Right::AssetId, + Criterion, + AssetKind, + Left::Balance, + AccountId, + >; + + fn deposit( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + precision: Precision, + ) -> Result, DispatchError> { + match Criterion::convert(asset.clone()) { + Left(a) => >::deposit(a, who, value, precision) + .map(|debt| imbalance::from_fungibles(debt, asset)), + Right(a) => + >::deposit(a, who, value, precision) + .map(|debt| imbalance::from_fungibles(debt, asset)), + } + } + fn issue(asset: Self::AssetId, amount: Self::Balance) -> fungibles::Credit { + match Criterion::convert(asset.clone()) { + Left(a) => { + let credit = >::issue(a, amount); + imbalance::from_fungibles(credit, asset) + }, + Right(a) => { + let credit = >::issue(a, amount); + imbalance::from_fungibles(credit, asset) + }, + } + } + fn pair( + asset: Self::AssetId, + amount: Self::Balance, + ) -> (fungibles::Debt, fungibles::Credit) { + match Criterion::convert(asset.clone()) { + Left(a) => { + let (a, b) = >::pair(a, amount); + (imbalance::from_fungibles(a, asset.clone()), imbalance::from_fungibles(b, asset)) + }, + Right(a) => { + let (a, b) = >::pair(a, amount); + (imbalance::from_fungibles(a, asset.clone()), imbalance::from_fungibles(b, asset)) + }, + } + } + fn rescind(asset: Self::AssetId, amount: Self::Balance) -> fungibles::Debt { + match Criterion::convert(asset.clone()) { + Left(a) => { + let debt = >::rescind(a, amount); + imbalance::from_fungibles(debt, asset) + }, + Right(a) => { + let debt = >::rescind(a, amount); + imbalance::from_fungibles(debt, asset) + }, + } + } + fn resolve( + who: &AccountId, + credit: fungibles::Credit, + ) -> Result<(), fungibles::Credit> { + let asset = credit.asset(); + match Criterion::convert(asset.clone()) { + Left(a) => { + let credit = imbalance::from_fungibles(credit, a); + >::resolve(who, credit) + .map_err(|credit| imbalance::from_fungibles(credit, asset)) + }, + Right(a) => { + let credit = imbalance::from_fungibles(credit, a); + >::resolve(who, credit) + .map_err(|credit| imbalance::from_fungibles(credit, asset)) + }, + } + } + fn settle( + who: &AccountId, + debt: fungibles::Debt, + preservation: Preservation, + ) -> Result, fungibles::Debt> { + let asset = debt.asset(); + match Criterion::convert(asset.clone()) { + Left(a) => { + let debt = imbalance::from_fungibles(debt, a); + match >::settle(who, debt, preservation) { + Ok(credit) => Ok(imbalance::from_fungibles(credit, asset)), + Err(debt) => Err(imbalance::from_fungibles(debt, asset)), + } + }, + Right(a) => { + let debt = imbalance::from_fungibles(debt, a); + match >::settle(who, debt, preservation) { + Ok(credit) => Ok(imbalance::from_fungibles(credit, asset)), + Err(debt) => Err(imbalance::from_fungibles(debt, 
asset)), + } + }, + } + } + fn withdraw( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + precision: Precision, + preservation: Preservation, + force: Fortitude, + ) -> Result, DispatchError> { + match Criterion::convert(asset.clone()) { + Left(a) => >::withdraw( + a, + who, + value, + precision, + preservation, + force, + ) + .map(|credit| imbalance::from_fungibles(credit, asset)), + Right(a) => >::withdraw( + a, + who, + value, + precision, + preservation, + force, + ) + .map(|credit| imbalance::from_fungibles(credit, asset)), + } + } +} + +impl< + Left: fungibles::BalancedHold, + Right: fungibles::BalancedHold, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::BalancedHold for UnionOf +{ + fn slash( + asset: Self::AssetId, + reason: &Self::Reason, + who: &AccountId, + amount: Self::Balance, + ) -> (fungibles::Credit, Self::Balance) { + match Criterion::convert(asset.clone()) { + Left(a) => { + let (credit, amount) = + >::slash(a, reason, who, amount); + (imbalance::from_fungibles(credit, asset), amount) + }, + Right(a) => { + let (credit, amount) = + >::slash(a, reason, who, amount); + (imbalance::from_fungibles(credit, asset), amount) + }, + } + } +} + +impl< + Left: fungibles::Inspect + fungibles::Create, + Right: fungibles::Inspect + fungibles::Create, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::Create for UnionOf +{ + fn create( + asset: AssetKind, + admin: AccountId, + is_sufficient: bool, + min_balance: Self::Balance, + ) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => + >::create(a, admin, is_sufficient, min_balance), + Right(a) => >::create( + a, + admin, + is_sufficient, + min_balance, + ), + } + } +} + +impl< + Left: fungibles::Inspect + AccountTouch, + Right: fungibles::Inspect + + AccountTouch< + Right::AssetId, + AccountId, + Balance = >::Balance, + >, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > AccountTouch for UnionOf +{ + type Balance = >::Balance; + + fn deposit_required(asset: AssetKind) -> Self::Balance { + match Criterion::convert(asset) { + Left(a) => >::deposit_required(a), + Right(a) => >::deposit_required(a), + } + } + + fn should_touch(asset: AssetKind, who: &AccountId) -> bool { + match Criterion::convert(asset) { + Left(a) => >::should_touch(a, who), + Right(a) => >::should_touch(a, who), + } + } + + fn touch(asset: AssetKind, who: &AccountId, depositor: &AccountId) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => >::touch(a, who, depositor), + Right(a) => + >::touch(a, who, depositor), + } + } +} diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index fc10725e814972a5040e3f3da45f00fbf3a041c0..e0c263fb4a1a6939aa1b00faed2540461ff49825 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -8,36 +8,39 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" -serde = { version = "1.0.188", default-features = false, features = ["derive"] } +serde = { version = "1.0.193", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = 
["current"] } -sp-api = { path = "../../../primitives/api", default-features = false} -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false} +sp-api = { path = "../../../primitives/api", default-features = false } +sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } sp-io = { path = "../../../primitives/io", default-features = false } -sp-state-machine = { path = "../../../primitives/state-machine", optional = true} -frame-support = { path = "..", default-features = false} -frame-benchmarking = { path = "../../benchmarking", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} -sp-version = { path = "../../../primitives/version", default-features = false} -sp-metadata-ir = { path = "../../../primitives/metadata-ir", default-features = false} -trybuild = { version = "1.0.74", features = [ "diff" ] } +sp-state-machine = { path = "../../../primitives/state-machine", optional = true } +frame-support = { path = "..", default-features = false } +frame-benchmarking = { path = "../../benchmarking", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-version = { path = "../../../primitives/version", default-features = false } +sp-metadata-ir = { path = "../../../primitives/metadata-ir", default-features = false } +trybuild = { version = "1.0.74", features = ["diff"] } pretty_assertions = "1.3.0" rustversion = "1.0.6" -frame-system = { path = "../../system", default-features = false} -frame-executive = { path = "../../executive", default-features = false} +frame-system = { path = "../../system", default-features = false } +frame-executive = { path = "../../executive", default-features = false } # The "std" feature for this pallet is never activated on purpose, in order to test construct_runtime error message -test-pallet = { package = "frame-support-test-pallet", path = "pallet", default-features = false} +test-pallet = { package = "frame-support-test-pallet", path = "pallet", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", @@ -58,7 +61,10 @@ std = [ "sp-version/std", "test-pallet/std", ] -experimental = [ "frame-support/experimental" ] +experimental = [ + "frame-support/experimental", + "frame-system/experimental", +] try-runtime = [ "frame-executive/try-runtime", "frame-support/try-runtime", @@ -72,4 +78,4 @@ frame-feature-testing = [] frame-feature-testing-2 = [] # Disable ui tests disable-ui-tests = [] -no-metadata-docs = [ "frame-support/no-metadata-docs" ] +no-metadata-docs = ["frame-support/no-metadata-docs"] diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 19465d924ec0c678d3a3059b302078e45dbae672..0617aa105a21f1f3a583404465da97f0af79b176 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -8,20 +8,23 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { 
package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -renamed-frame-support = { package = "frame-support", path = "../..", default-features = false} -renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false} -sp-core = { path = "../../../../primitives/core", default-features = false} -sp-runtime = { path = "../../../../primitives/runtime", default-features = false} -sp-version = { path = "../../../../primitives/version", default-features = false} +renamed-frame-support = { package = "frame-support", path = "../..", default-features = false } +renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false } +sp-core = { path = "../../../../primitives/core", default-features = false } +sp-runtime = { path = "../../../../primitives/runtime", default-features = false } +sp-version = { path = "../../../../primitives/version", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "renamed-frame-support/std", diff --git a/substrate/frame/support/test/compile_pass/src/lib.rs b/substrate/frame/support/test/compile_pass/src/lib.rs index 6ea37fb27e72f31fe0e88e1f35815675bc33660e..b304dfcb2823e15fedbe7d01bc3cc15b18a1b57f 100644 --- a/substrate/frame/support/test/compile_pass/src/lib.rs +++ b/substrate/frame/support/test/compile_pass/src/lib.rs @@ -22,7 +22,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use renamed_frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{ConstU16, ConstU32, ConstU64, Everything}, }; use sp_core::{sr25519, H256}; @@ -51,6 +51,7 @@ parameter_types! 
{ pub const Version: RuntimeVersion = VERSION; } +#[derive_impl(renamed_frame_system::config_preludes::TestDefaultConfig as renamed_frame_system::DefaultConfig)] impl renamed_frame_system::Config for Runtime { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index c96e22ff1abd88a727b4467365e7e02aed6a183a..493c305cb202d0f23ebf0a391db5fd4d8d469b67 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -8,19 +8,22 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive"] } -frame-support = { path = "../..", default-features = false} -frame-system = { path = "../../../system", default-features = false} -sp-runtime = { path = "../../../../primitives/runtime", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["derive"] } +frame-support = { path = "../..", default-features = false } +frame-system = { path = "../../../system", default-features = false } +sp-runtime = { path = "../../../../primitives/runtime", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/support/test/src/lib.rs b/substrate/frame/support/test/src/lib.rs index 6b38d42d33d0d06b0115ad887825ab31a294a70d..a8a723375033a48e10e3f7f2941bb025991ac869 100644 --- a/substrate/frame/support/test/src/lib.rs +++ b/substrate/frame/support/test/src/lib.rs @@ -50,6 +50,8 @@ pub mod pallet { + From>; /// The runtime call type. type RuntimeCall; + /// Contains an aggregation of all tasks in this runtime. + type RuntimeTask; /// The runtime event type. 
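// -----------------------------------------------------------------------------------------
// Illustrative sketch (not part of the diff): the test `Config` trait above gains a
// `RuntimeTask` associated type, so every implementation now has to name a concrete type
// for it; the updated UI-test output later in this diff suggests `()` when no tasks exist.
// The trait and runtime below are invented stand-ins, not the frame_system definitions.
trait Config {
	/// Aggregation of all tasks known to the runtime (stand-in for `RuntimeTask`).
	type RuntimeTask;
	/// A pre-existing associated type, kept only to show the trait shape.
	type RuntimeEvent;
}

struct Runtime;

impl Config for Runtime {
	// A unit type is enough when the runtime defines no tasks.
	type RuntimeTask = ();
	type RuntimeEvent = ();
}

fn main() {
	// Nothing to run: the point is that the impl above would no longer compile
	// if the newly added associated type were omitted.
	let _ = Runtime;
}
// -----------------------------------------------------------------------------------------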
type RuntimeEvent: Parameter + Member diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 0f9617c0368762588f7e9e66ed4662f5e8be6b3c..632ea4e794f6f8edb46e57d190be108219935ea0 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -8,14 +8,17 @@ publish = false homepage = "https://substrate.io" repository.workspace = true +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -frame = { path = "../../..", default-features = false, features = ["runtime", "experimental"]} +frame = { path = "../../..", default-features = false, features = ["experimental", "runtime"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } [features] -default = [ "std" ] -std = [ "codec/std", "frame/std", "scale-info/std" ] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/construct_runtime.rs index 9ad51ad530ebd2606f6b5cb6858b57e164b68830..b8341b25cb0985915706ccb68b870474118d33cb 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/construct_runtime.rs @@ -27,13 +27,13 @@ use frame_support::{ }; use frame_system::limits::{BlockLength, BlockWeights}; use scale_info::TypeInfo; -use sp_api::RuntimeVersion; use sp_core::{sr25519, ConstU64}; use sp_runtime::{ generic, traits::{BlakeTwo256, Verify}, DispatchError, ModuleError, }; +use sp_version::RuntimeVersion; parameter_types! { pub static IntegrityTestExec: u32 = 0; diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 08954bb6ab5c57982b99432ca1ea888d2e70332c..b08efb3a84213e107916dee6c43c62d9ee90e2d4 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -17,6 +17,7 @@ error: use of deprecated constant `WhereSection::_w`: | |_^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` = note: this error originates in the macro `frame_support::match_and_insert` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied @@ -97,7 +98,7 @@ note: required because it appears within the type `RuntimeEvent` | ||_- in this macro invocation ... | note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.4/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -129,7 +130,7 @@ note: required because it appears within the type `RuntimeEvent` | ||_- in this macro invocation ... 
| note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.4/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` @@ -301,7 +302,7 @@ note: required because it appears within the type `RuntimeCall` | ||_- in this macro invocation ... | note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.4/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -334,7 +335,7 @@ note: required because it appears within the type `RuntimeCall` | ||_- in this macro invocation ... | note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.4/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr index bf53f43b9ba7bccdc6e1ec0cf720bc6725a6717c..8458de97f6d3e60590f4138002702ac1d62b4afb 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr @@ -1,4 +1,4 @@ -error: `Call` is not allowed to have generics. Only the following pallets are allowed to have generics: `Event`, `Error`, `Origin`, `Config`. +error: `Call` is not allowed to have generics. Only the following pallets are allowed to have generics: `Event`, `Error`, `Origin`, `Config`, `Task`. --> tests/construct_runtime_ui/generics_in_invalid_module.rs:24:36 | 24 | Balance: balances::::{Call, Origin}, diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr index ad631de204e6766cc5d0d61106c23fc4f3fdbcb9..feb61793151da4ccbb78e53d83a648b7f2159f4f 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr @@ -1,4 +1,4 @@ -error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Error`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Error`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `Task`, `LockId`, `SlashReason` --> tests/construct_runtime_ui/invalid_module_details_keyword.rs:23:20 | 23 | system: System::{enum}, diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr index b5b89a5a270c903b0e61198a8c05a0781cd2c7a5..97943dfc1763a014621a491578f5cffca90822fa 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr @@ -1,4 +1,4 @@ -error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Error`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Error`, `Config`, `Origin`, 
`Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `Task`, `LockId`, `SlashReason` --> tests/construct_runtime_ui/invalid_module_entry.rs:24:23 | 24 | Balance: balances::{Unexpected}, diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs index ea52293a6732534923485afbb720621dc2ba1d5e..78ae6f57f087c4a652a75b8636c4cbd15ab7b35f 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:66:2 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:67:2 | -66 | pub struct Runtime +67 | pub struct Runtime | ^^^ error[E0412]: cannot find type `RuntimeCall` in this scope @@ -22,42 +22,70 @@ error[E0412]: cannot find type `Runtime` in this scope | ^^^^^^^ not found in this scope error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:31 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:40:31 | -39 | impl frame_system::Config for Runtime { +40 | impl frame_system::Config for Runtime { | ^^^^^^^ not found in this scope error[E0412]: cannot find type `RuntimeOrigin` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:41:23 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:42:23 | -41 | type RuntimeOrigin = RuntimeOrigin; - | ^^^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeOrigin` +42 | type RuntimeOrigin = RuntimeOrigin; + | ^^^^^^^^^^^^^ + | +help: you might have meant to use the associated type + | +42 | type RuntimeOrigin = Self::RuntimeOrigin; + | ++++++ error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:43:21 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:44:21 + | +44 | type RuntimeCall = RuntimeCall; + | ^^^^^^^^^^^ | -43 | type RuntimeCall = RuntimeCall; - | ^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeCall` +help: you might have meant to use the associated type + | +44 | type RuntimeCall = Self::RuntimeCall; + | ++++++ error[E0412]: cannot find type `RuntimeEvent` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:49:22 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:50:22 + | +50 | type RuntimeEvent = RuntimeEvent; + | ^^^^^^^^^^^^ | -49 | type RuntimeEvent = RuntimeEvent; - | ^^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeEvent` +help: you might have meant to use the associated type + | +50 | type RuntimeEvent = Self::RuntimeEvent; + | ++++++ error[E0412]: cannot find type `PalletInfo` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:55:20 + --> 
tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:56:20 | -55 | type PalletInfo = PalletInfo; +56 | type PalletInfo = PalletInfo; | ^^^^^^^^^^ | help: you might have meant to use the associated type | -55 | type PalletInfo = Self::PalletInfo; - | ~~~~~~~~~~~~~~~~ +56 | type PalletInfo = Self::PalletInfo; + | ++++++ help: consider importing one of these items | 18 + use frame_benchmarking::__private::traits::PalletInfo; | 18 + use frame_support::traits::PalletInfo; | + +error[E0412]: cannot find type `RuntimeTask` in this scope + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:1 + | +39 | #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the macro `frame_system::config_preludes::TestDefaultConfig` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info) +help: you might have meant to use the associated type + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | type Self::RuntimeTask = (); + | ++++++ diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs b/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs index 2834b5b8f2a8bc9c0fe6dcb58d01ff8bb778b2d4..d3e519af55150818c6073d7b35f845226ae1edf3 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -61,6 +61,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic tests/construct_runtime_ui/pallet_error_too_large.rs:90:1 + --> tests/construct_runtime_ui/pallet_error_too_large.rs:91:1 | -90 | / construct_runtime! { -91 | | pub struct Runtime -92 | | { -93 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, -94 | | Pallet: pallet::{Pallet}, -95 | | } -96 | | } - | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:90:1 +91 | / construct_runtime! 
{ +92 | | pub struct Runtime +93 | | { +94 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +95 | | Pallet: pallet::{Pallet}, +96 | | } +97 | | } + | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:91:1 | = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs index 62c4b1327e0a16b4ee6e6efe3715f2b9efab38e3..8193d12120c95ed70d1e8b2f7faab12ac19555e1 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet::{Pallet, Call}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet::{Pallet, Call}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs index 893690501a8351d07e78f9b4fa9b7daab7413790..ef3a790b61a189b4356f6ddb4a951aad7cc94574 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Event}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Event}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Event` in module `pallet` - --> tests/construct_runtime_ui/undefined_event_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_event_part.rs:66:1 | -65 | / construct_runtime! 
{ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Event}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Event}, +71 | | } +72 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs index a3501ca31a3714ca129070df05695076df728c7e..b4dd41750c881607c352e657281dd0fa517aacee 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Config}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Config}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `GenesisConfig` in module `pallet` - --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:66:1 | -65 | / construct_runtime! { -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Config}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Config}, +71 | | } +72 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs index e22745930d69a00c789075092d7a5aff11f09fb8..5e0b8f3c44f5635bf6d1040513a5454d7e4c7e15 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `create_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `create_inherent` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! { | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -37,19 +37,19 @@ error[E0599]: no function or associated item named `create_inherent` found for s = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `is_inherent` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! 
{ | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -58,19 +58,19 @@ error[E0599]: no function or associated item named `is_inherent` found for struc = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `check_inherent` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! { | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -79,19 +79,19 @@ error[E0599]: no function or associated item named `check_inherent` found for st = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- associated item `INHERENT_IDENTIFIER` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! { | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -100,19 +100,19 @@ error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `p = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent_required` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `is_inherent_required` not found for this struct ... 
-65 | construct_runtime! { +66 | construct_runtime! { | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs index 656365279b8867867b7ff2d2b64b055b77eead0a..40a4a1ebcb5d2003a549d5197578a4154c233ea6 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Origin}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Origin}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Origin` in module `pallet` - --> tests/construct_runtime_ui/undefined_origin_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_origin_part.rs:66:1 | -65 | / construct_runtime! { -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Origin}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Origin}, +71 | | } +72 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs index 05545821ab0283faafae8734b2344e9fe3aad811..be9e4ac2c30d44cc107cf8af0713728573cd0253 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet::{Pallet, ValidateUnsigned}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:69:3 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:70:3 | -65 | // construct_runtime! { -66 | || pub struct Runtime -67 | || { -68 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -69 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +66 | // construct_runtime! { +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, | || -^^^^^^ variant or associated item not found in `RuntimeCall` | ||________| | | ... | error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `pre_dispatch` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! { | __^ | | _| | || -66 | || pub struct Runtime -67 | || { -68 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -69 | || Pallet: pallet::{Pallet, ValidateUnsigned}, -70 | || } -71 | || } +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | || } +72 | || } | ||_- in this macro invocation ... | | @@ -54,21 +54,21 @@ error[E0599]: no function or associated item named `pre_dispatch` found for stru = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `validate_unsigned` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `validate_unsigned` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! 
{ | __^ | | _| | || -66 | || pub struct Runtime -67 | || { -68 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -69 | || Pallet: pallet::{Pallet, ValidateUnsigned}, -70 | || } -71 | || } +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | || } +72 | || } | ||_- in this macro invocation ... | | diff --git a/substrate/frame/support/test/tests/derive_impl.rs b/substrate/frame/support/test/tests/derive_impl.rs new file mode 100644 index 0000000000000000000000000000000000000000..675e85f4bfce5388ae0bf790a2355bff242cd477 --- /dev/null +++ b/substrate/frame/support/test/tests/derive_impl.rs @@ -0,0 +1,52 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::derive_impl; + +trait Shape { + fn area(&self) -> u32; +} + +struct SomeRectangle {} + +#[frame_support::register_default_impl(SomeRectangle)] +impl Shape for SomeRectangle { + #[cfg(not(feature = "feature-frame-testing"))] + fn area(&self) -> u32 { + 10 + } + + #[cfg(feature = "feature-frame-testing")] + fn area(&self) -> u32 { + 0 + } +} + +struct SomeSquare {} + +#[derive_impl(SomeRectangle)] +impl Shape for SomeSquare {} + +#[test] +fn test_feature_parsing() { + let square = SomeSquare {}; + #[cfg(not(feature = "feature-frame-testing"))] + assert_eq!(square.area(), 10); + + #[cfg(feature = "feature-frame-testing")] + assert_eq!(square.area(), 0); +} diff --git a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr index f3ac6b2328110f0e2c6e95c7c6f995b3dea2d1dc..58aae713f752d8b99a8b814a47606237b431bc3f 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr +++ b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr @@ -2,9 +2,13 @@ error[E0412]: cannot find type `RuntimeCall` in this scope --> tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.rs:30:10 | 30 | type RuntimeCall = (); - | ^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeCall` + | ^^^^^^^^^^^ ... 
35 | #[derive_impl(Pallet)] // Injects type RuntimeCall = RuntimeCall; | ---------------------- in this macro invocation | = note: this error originates in the macro `Pallet` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info) +help: you might have meant to use the associated type + | +30 | type Self::RuntimeCall = (); + | ++++++ diff --git a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_invalid.stderr b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_invalid.stderr index 501aad0419f8e85a67f1b9cde11d5c5452feaff0..cda20288984ae535c0755fd1e6814384d124c515 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_invalid.stderr +++ b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_invalid.stderr @@ -1,4 +1,4 @@ -error: `#[inject_runtime_type]` can only be attached to `RuntimeCall`, `RuntimeEvent`, `RuntimeOrigin` or `PalletInfo` +error: `#[inject_runtime_type]` can only be attached to `RuntimeCall`, `RuntimeEvent`, `RuntimeTask`, `RuntimeOrigin` or `PalletInfo` --> tests/derive_impl_ui/inject_runtime_type_invalid.rs:32:5 | 32 | type RuntimeInfo = (); diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 00e7adafb0b704251359afdab89190da8b5c039c..0223979d7f0e2420d67871ec799272ad017f25f2 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -16,7 +16,7 @@ // limitations under the License. use frame_support::{ - assert_ok, + assert_ok, derive_impl, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Parameter, Pays}, dispatch_context::with_context, pallet_prelude::{StorageInfoTrait, ValueQuery}, @@ -257,6 +257,13 @@ pub mod pallet { pub fn check_for_dispatch_context(_origin: OriginFor) -> DispatchResult { with_context::<(), _>(|_| ()).ok_or_else(|| DispatchError::Unavailable) } + + #[cfg(feature = "frame-feature-testing")] + #[pallet::call_index(5)] + #[pallet::weight({1})] + pub fn foo_feature_test(_origin: OriginFor) -> DispatchResult { + Ok(()) + } } #[pallet::error] @@ -269,6 +276,8 @@ pub mod pallet { #[codec(skip)] Skipped(u128), CompactU8(#[codec(compact)] u8), + #[cfg(feature = "frame-feature-testing")] + FeatureTest, } #[pallet::event] @@ -682,6 +691,7 @@ frame_support::parameter_types!( pub const MyGetParam3: u32 = 12; ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; @@ -795,6 +805,7 @@ fn call_expand() { } ); assert_eq!(call_foo.get_call_name(), "foo"); + #[cfg(not(feature = "frame-feature-testing"))] assert_eq!( pallet::Call::::get_call_names(), &[ @@ -805,9 +816,24 @@ fn call_expand() { "check_for_dispatch_context" ], ); + #[cfg(feature = "frame-feature-testing")] + assert_eq!( + pallet::Call::::get_call_names(), + &[ + "foo", + "foo_storage_layer", + "foo_index_out_of_order", + "foo_no_post_info", + "check_for_dispatch_context", + "foo_feature_test" + ], + ); assert_eq!(call_foo.get_call_index(), 0u8); - assert_eq!(pallet::Call::::get_call_indices(), &[0u8, 1u8, 4u8, 2u8, 3u8]) + #[cfg(not(feature = "frame-feature-testing"))] + assert_eq!(pallet::Call::::get_call_indices(), &[0u8, 1u8, 4u8, 2u8, 3u8]); + #[cfg(feature = "frame-feature-testing")] + 
assert_eq!(pallet::Call::::get_call_indices(), &[0u8, 1u8, 4u8, 2u8, 3u8, 5u8]); } #[test] @@ -815,7 +841,10 @@ fn call_expand_index() { let call_foo = pallet::Call::::foo_index_out_of_order {}; assert_eq!(call_foo.get_call_index(), 4u8); - assert_eq!(pallet::Call::::get_call_indices(), &[0u8, 1u8, 4u8, 2u8, 3u8]) + #[cfg(not(feature = "frame-feature-testing"))] + assert_eq!(pallet::Call::::get_call_indices(), &[0u8, 1u8, 4u8, 2u8, 3u8]); + #[cfg(feature = "frame-feature-testing")] + assert_eq!(pallet::Call::::get_call_indices(), &[0u8, 1u8, 4u8, 2u8, 3u8, 5u8]); } #[test] @@ -837,6 +866,8 @@ fn error_expand() { }), ); assert_eq!( as PalletError>::MAX_ENCODED_SIZE, 3); + #[cfg(feature = "frame-feature-testing")] + assert_eq!(format!("{:?}", pallet::Error::::FeatureTest), String::from("FeatureTest"),); } #[test] @@ -1270,52 +1301,6 @@ fn pallet_hooks_expand() { }) } -#[test] -fn all_pallets_type_reversed_order_is_correct() { - TestExternalities::default().execute_with(|| { - frame_system::Pallet::::set_block_number(1); - - #[allow(deprecated)] - { - assert_eq!( - AllPalletsWithoutSystemReversed::on_initialize(1), - Weight::from_parts(10, 0) - ); - AllPalletsWithoutSystemReversed::on_finalize(1); - - assert_eq!( - AllPalletsWithoutSystemReversed::on_runtime_upgrade(), - Weight::from_parts(30, 0) - ); - } - - assert_eq!( - frame_system::Pallet::::events()[0].event, - RuntimeEvent::Example2(pallet2::Event::Something(11)), - ); - assert_eq!( - frame_system::Pallet::::events()[1].event, - RuntimeEvent::Example(pallet::Event::Something(10)), - ); - assert_eq!( - frame_system::Pallet::::events()[2].event, - RuntimeEvent::Example2(pallet2::Event::Something(21)), - ); - assert_eq!( - frame_system::Pallet::::events()[3].event, - RuntimeEvent::Example(pallet::Event::Something(20)), - ); - assert_eq!( - frame_system::Pallet::::events()[4].event, - RuntimeEvent::Example2(pallet2::Event::Something(31)), - ); - assert_eq!( - frame_system::Pallet::::events()[5].event, - RuntimeEvent::Example(pallet::Event::Something(30)), - ); - }) -} - #[test] fn pallet_on_genesis() { TestExternalities::default().execute_with(|| { @@ -2185,31 +2170,6 @@ fn test_storage_info() { ); } -#[test] -fn assert_type_all_pallets_reversed_with_system_first_is_correct() { - // Just ensure the 2 types are same. - #[allow(deprecated)] - fn _a(_t: AllPalletsReversedWithSystemFirst) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (System, Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (System, Example4, Example3, Example2, Example)) { - _a(t) - } - - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (System, Example5, Example4, Example2, Example)) { - _a(t) - } - - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (System, Example5, Example4, Example3, Example2, Example)) { - _a(t) - } -} - #[test] fn assert_type_all_pallets_with_system_is_correct() { // Just ensure the 2 types are same. @@ -2254,52 +2214,6 @@ fn assert_type_all_pallets_without_system_is_correct() { } } -#[test] -fn assert_type_all_pallets_with_system_reversed_is_correct() { - // Just ensure the 2 types are same. 
- #[allow(deprecated)] - fn _a(_t: AllPalletsWithSystemReversed) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example2, Example, System)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example3, Example2, Example, System)) { - _a(t) - } - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example2, Example, System)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example3, Example2, Example, System)) { - _a(t) - } -} - -#[test] -fn assert_type_all_pallets_without_system_reversed_is_correct() { - // Just ensure the 2 types are same. - #[allow(deprecated)] - fn _a(_t: AllPalletsWithoutSystemReversed) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example3, Example2, Example)) { - _a(t) - } - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example3, Example2, Example)) { - _a(t) - } -} - #[test] fn test_storage_alias() { use frame_support::Twox64Concat; @@ -2495,3 +2409,33 @@ fn test_dispatch_context() { .dispatch(RuntimeOrigin::root())); }); } + +#[test] +fn test_call_feature_parsing() { + let call = pallet::Call::::check_for_dispatch_context {}; + match call { + pallet::Call::::check_for_dispatch_context {} | + pallet::Call::::foo { .. } | + pallet::Call::foo_storage_layer { .. } | + pallet::Call::foo_index_out_of_order {} | + pallet::Call::foo_no_post_info {} => (), + #[cfg(feature = "frame-feature-testing")] + pallet::Call::foo_feature_test {} => (), + pallet::Call::__Ignore(_, _) => (), + } +} + +#[test] +fn test_error_feature_parsing() { + let err = pallet::Error::::InsufficientProposersBalance; + match err { + pallet::Error::InsufficientProposersBalance | + pallet::Error::NonExistentStorageValue | + pallet::Error::Code(_) | + pallet::Error::Skipped(_) | + pallet::Error::CompactU8(_) => (), + #[cfg(feature = "frame-feature-testing")] + pallet::Error::FeatureTest => (), + pallet::Error::__Ignore(_, _) => (), + } +} diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index 724734ec4fc9dbe7eab0c6e26fc221f165a0e8c9..e9ac03302b21435ff7305cbcb25be3fcf0d15496 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -16,6 +16,7 @@ // limitations under the License. 
use frame_support::{ + derive_impl, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, pallet_prelude::ValueQuery, parameter_types, @@ -292,6 +293,7 @@ pub mod pallet2 { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs index a8250f8b15325cf2b2a149f2c8b0414d6308395a..79e9d6786717a5a27717cda754575c2ecd478aa3 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -90,6 +90,12 @@ fn module_error_outer_enum_expand_explicit() { frame_system::Error::NonDefaultComposite => (), frame_system::Error::NonZeroRefCount => (), frame_system::Error::CallFiltered => (), + #[cfg(feature = "experimental")] + frame_system::Error::InvalidTask => (), + #[cfg(feature = "experimental")] + frame_system::Error::FailedTask => (), + frame_system::Error::NothingAuthorized => (), + frame_system::Error::Unauthorized => (), frame_system::Error::__Ignore(_, _) => (), }, diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 191f095f5d78d4c1a1276326451b1afed674989a..4bd8ee0bb39a574b2ff3f59b571c1209fc675da4 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -90,6 +90,12 @@ fn module_error_outer_enum_expand_implicit() { frame_system::Error::NonDefaultComposite => (), frame_system::Error::NonZeroRefCount => (), frame_system::Error::CallFiltered => (), + #[cfg(feature = "experimental")] + frame_system::Error::InvalidTask => (), + #[cfg(feature = "experimental")] + frame_system::Error::FailedTask => (), + frame_system::Error::NothingAuthorized => (), + frame_system::Error::Unauthorized => (), frame_system::Error::__Ignore(_, _) => (), }, diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 08ea7c0bec3a5dfca33ff497b2981e9acb17f294..40f8f129830496df6d9b03c0ed505b9bbd4958cb 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -10,6 +10,7 @@ error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: | ^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> tests/pallet_ui/call_argument_invalid_bound.rs:38:36 diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 80316fcd2489747ca34a9b030474c453e7f3d08d..5744c636235081449f6f12767821016a70179f4e 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -10,6 +10,7 @@ error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: | ^ | = note: `-D deprecated` implied by `-D warnings` + = help: to 
override `-D warnings` add `#[allow(deprecated)]` error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:36 @@ -45,9 +46,9 @@ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is = note: required for `::Bar` to implement `Encode` error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied - --> tests/pallet_ui/call_argument_invalid_bound_2.rs:34:12 + --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:42 | -34 | #[pallet::call] - | ^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` +38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` | = note: required for `::Bar` to implement `Decode` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index d45b74bad8428d57273e0cb4f0d304706dca9fd1..b58e4516bceb975ea4937f90ac15acec796829cc 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -10,6 +10,7 @@ error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: | ^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` error[E0277]: `Bar` doesn't implement `std::fmt::Debug` --> tests/pallet_ui/call_argument_invalid_bound_3.rs:40:36 diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs new file mode 100644 index 0000000000000000000000000000000000000000..08aaf06a7ef25645f0ed673bae8168e5b11be2db --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr new file mode 100644 index 0000000000000000000000000000000000000000..9c13d59d79320e10fae958585aed1571723e01c2 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure must have same number of arguments as the dispatchable function + --> tests/pallet_ui/call_feeless_invalid_closure_arg1.rs:31:24 + | +31 | #[pallet::feeless_if(|| -> bool { true })] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs new file mode 100644 index 0000000000000000000000000000000000000000..b16b4b3ffd94038546533d19feba0e7eb75206ff --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: bool| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1c38ec2368361c455cde5328e326cbeabbdcec6a --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr @@ -0,0 +1,5 @@ +error: Invalid type: expected `&OriginFor` + --> tests/pallet_ui/call_feeless_invalid_closure_arg2.rs:31:28 + | +31 | #[pallet::feeless_if(|_: bool| -> bool { true })] + | ^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs new file mode 100644 index 0000000000000000000000000000000000000000..5f2230744ff87b5463a212a4e4a0735b9e1d8227 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor, _s: &u32| -> bool { true })] + pub fn foo(_: OriginFor, _something: u64) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1ad9588cead63366913090f730cc92a538964f37 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure argument must have a reference to the same type as the dispatchable function argument + --> tests/pallet_ui/call_feeless_invalid_closure_arg3.rs:31:43 + | +31 | #[pallet::feeless_if(|_: &OriginFor, _s: &u32| -> bool { true })] + | ^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs new file mode 100644 index 0000000000000000000000000000000000000000..1f0399a123ca6f160302b533db43470d1f3b5acd --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor| -> u32 { 0 })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr new file mode 100644 index 0000000000000000000000000000000000000000..a8c05242bde838dbb6d57ecaf811c3f6012bc1be --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure must return `bool` + --> tests/pallet_ui/call_feeless_invalid_closure_return.rs:31:43 + | +31 | #[pallet::feeless_if(|_: &OriginFor| -> u32 { 0 })] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..26bd8a600ab9d4def80868405c8a519802affb3e --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(0)] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr new file mode 100644 index 0000000000000000000000000000000000000000..add3decbf16020e4ff3680e8a422ebabad22ff1d --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr @@ -0,0 +1,11 @@ +error: Invalid feeless_if attribute: expected a closure + --> tests/pallet_ui/call_feeless_invalid_type.rs:31:24 + | +31 | #[pallet::feeless_if(0)] + | ^ + +error: expected `|` + --> tests/pallet_ui/call_feeless_invalid_type.rs:31:24 + | +31 | #[pallet::feeless_if(0)] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr index eec5e33ccbd937d370cf379c38f8a4fcecfa526d..1809fcb6ed998a916356c750ca3129388d1b4f61 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr @@ -1,4 +1,4 @@ -error: expected `weight` or `call_index` +error: expected one of: `weight`, `call_index`, `feeless_if` --> tests/pallet_ui/call_invalid_attr.rs:31:13 | 31 | #[pallet::weird_attr] diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr index 99146c0563a98ad9c1b89ad5109849dacdab3cf2..1f814eaa4077e5a16ec43ca3c3e8e2aa8664bdb7 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr @@ -1,10 +1,4 @@ -error: Invalid type: expected `OriginFor` - --> tests/pallet_ui/call_invalid_origin_type.rs:34:22 - | -34 | pub fn foo(origin: u8) {} - | ^^ - -error: expected `OriginFor` +error: Invalid type: expected `OriginFor` or `T::RuntimeOrigin` --> tests/pallet_ui/call_invalid_origin_type.rs:34:22 | 34 | pub fn foo(origin: u8) {} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_missing_index.stderr b/substrate/frame/support/test/tests/pallet_ui/call_missing_index.stderr index 4d55ef798569bbd6dfa013dec3de081c6bbfa246..ba06285bdeeabbd6d6b45f18e75c3c622fbffe7b 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_missing_index.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_missing_index.stderr @@ -11,6 +11,7 @@ error: use of deprecated constant `pallet::warnings::ImplicitCallIndex_0::_w`: | ^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` error: use of deprecated constant `pallet::warnings::ImplicitCallIndex_1::_w`: It is deprecated to use implicit call indices. 
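For orientation, the new `call_feeless_invalid_*` UI tests above pin down the shape the `#[pallet::feeless_if]` attribute expects: a closure (not any other expression) whose parameters mirror the dispatchable's signature by reference, starting with `&OriginFor<T>`, and which returns `bool`. Below is a minimal sketch of an annotation that satisfies those checks; it is illustrative only and not part of this patch, and the parameter name `_score` is made up.

#[frame_support::pallet(dev_mode)]
mod pallet {
	use frame_support::pallet_prelude::DispatchResult;
	use frame_system::pallet_prelude::OriginFor;

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	#[pallet::pallet]
	pub struct Pallet<T>(core::marker::PhantomData<T>);

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		// Same arity as `foo`, first argument `&OriginFor<T>`, every other argument a
		// reference to the corresponding call argument type, return type `bool`:
		// the call is treated as feeless whenever the closure evaluates to `true`.
		#[pallet::feeless_if(|_origin: &OriginFor<T>, _score: &u32| -> bool { *_score == 0 })]
		pub fn foo(_origin: OriginFor<T>, _score: u32) -> DispatchResult {
			Ok(())
		}
	}
}

fn main() {}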
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.stderr b/substrate/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.stderr index cf23a76f8ea055ce4c4b0e55f228185fc4a2961b..4b5abdcd0e6d2056281af00c76508678906f8098 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_argument_has_suffix.stderr @@ -18,3 +18,4 @@ error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: | ^^^^^^^^^^^^^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_const_warning.stderr b/substrate/frame/support/test/tests/pallet_ui/call_weight_const_warning.stderr index ccd5a935773c31d69b0b9043cf3d56d3150c7000..d399df4d85bd41f8b7d4f2eeeeb8f1be3e9ff506 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_weight_const_warning.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_const_warning.stderr @@ -10,3 +10,4 @@ error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: | ^^^^^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.stderr b/substrate/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.stderr index aadb939b6454f76e042437b2427d5c950dfb3b08..d7e4951e49fcf5042bd0b450a6fd42c7ee92a3a8 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_const_warning_twice.stderr @@ -18,6 +18,7 @@ error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: | ^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` error: use of deprecated constant `pallet::warnings::ConstantWeight_1::_w`: It is deprecated to use hard-coded constant as call weight. 
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.stderr index e8e6f2fe6df06dec235227b06001ab191be4bb46..339551d9811c50511f5454a23dd36e0c14c50398 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid3.stderr @@ -17,3 +17,4 @@ error: unused import: `frame_system::pallet_prelude::*` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: `-D unused-imports` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_imports)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr index 89fc1e0820f5ea0969b05b00efee7ca70c95e593..33302a2a0278a12c4037e4eb194bf7032f311f08 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr @@ -10,3 +10,4 @@ error: use of deprecated constant `pallet::warnings::UncheckedWeightWitness_0::_ | ^^^^^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.stderr b/substrate/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.stderr index cdc8f623142b16118d23d87165ea80562dd79900..8de9c8990b00d3cd76c847be625fc1848e432206 100644 --- a/substrate/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/composite_enum_unsupported_identifier.stderr @@ -1,4 +1,4 @@ -error: expected one of: `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` +error: expected one of: `FreezeReason`, `HoldReason`, `LockId`, `SlashReason`, `Task` --> tests/pallet_ui/composite_enum_unsupported_identifier.rs:27:11 | 27 | pub enum HoldReasons {} diff --git a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr index 942db0ab4699f779f8ec83b5e932a818483fe508..e227033d3646bac74b23267531730e0a59ce06fb 100644 --- a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr @@ -7,3 +7,4 @@ error: use of deprecated struct `pallet::_::Store`: | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.stderr b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.stderr index bcfe43d008f8c43045a90f45cdd27ae031a4e12a..01254584c62693cd3665dc8f64e9e37e79da3b6c 100644 --- a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.stderr @@ -11,6 +11,7 @@ error: use of deprecated constant `pallet::warnings::ImplicitCallIndex_0::_w`: | ^^^^^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: It is deprecated to use hard-coded 
constant as call weight. diff --git a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index 531e8bdffeb0c0f073e1775cdc0f683beb3d198d..02ead305dd81ad3d3dd0bdab9b74d2a255e8b61f 100644 --- a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -11,6 +11,7 @@ error: use of deprecated constant `pallet::warnings::ImplicitCallIndex_0::_w`: | ^^^^^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: It is deprecated to use hard-coded constant as call weight. @@ -26,8 +27,15 @@ error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: error[E0277]: the trait bound `Vec<u8>: MaxEncodedLen` is not satisfied --> tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs:28:12 | -28 | #[pallet::pallet] - | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Vec<u8>` +28 | #[pallet::pallet] + | _______________^ +29 | | pub struct Pallet<T>(_); +30 | | +31 | | // Your Pallet's configuration trait, representing custom external types and interfaces. +... | +35 | | #[pallet::storage] +36 | | type MyStorage<T: Config> = StorageValue<_, Vec<u8>>; + | |__________________^ the trait `MaxEncodedLen` is not implemented for `Vec<u8>` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr index ea1d0ed99cd39747bfc1f94a9e3433809baa82b5..9cefd2f4899a159c2b0e2962fb4fb542d000672c 100644 --- a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr @@ -1,8 +1,8 @@ error[E0277]: the trait bound `MyError: PalletError` is not satisfied - --> tests/pallet_ui/error_does_not_derive_pallet_error.rs:18:1 + --> tests/pallet_ui/error_does_not_derive_pallet_error.rs:28:15 | -18 | #[frame_support::pallet] - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `PalletError` is not implemented for `MyError` +28 | CustomError(crate::MyError), + | ^^^^^^^^^^^^^^ the trait `PalletError` is not implemented for `MyError` | = help: the following other types implement trait `PalletError`: bool @@ -14,4 +14,3 @@ error[E0277]: the trait bound `MyError: PalletError` is not satisfied u8 u16 and $N others - = note: this error originates in the derive macro `frame_support::PalletError` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs b/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs index bf26cfd95b19f861ace29855799925eb725c6aba..4dc33991b1260d3afb5969bcd37d070f192a0268 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs @@ -17,7 +17,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::traits::ConstU32; +use frame_support::{derive_impl, traits::ConstU32}; pub use pallet::*; @@ -70,6 +70,7 @@ pub mod pallet { impl<T: Config> Pallet<T> {} } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs b/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs new file mode 100644 index 0000000000000000000000000000000000000000..566b7c65cc71005a75b07f2cb9a79bf24f087e19 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet<T>(core::marker::PhantomData<T>); + + #[pallet::call] + impl<T: Config> Pallet<T> { + #[pallet::feeless_if(|_: &OriginFor<T>| -> bool { true })] + pub fn foo(_: OriginFor<T>) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs b/substrate/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs index 9ab486c718c413761f23242a2ea09fa08443255e..de856ddcd3e92e26914ac25e47984fff7d25b563 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
-use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -27,6 +27,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|i, j| i == 0u32 && j == 2u64)] + #[pallet::task_list(vec![(0u32, 2u64), (2u32, 4u64)].iter())] + #[pallet::task_weight(0.into())] + fn foo(i: u32, j: u64) -> DispatchResult { + ensure!(i == 0, "i must be 0"); + ensure!(j == 2, "j must be 2"); + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 7375bcd2f16af03cfa76aab8c9779d8b26554c8f..4229d1e8a5458470493a2ec0003736ea6064bdc0 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -1,13 +1,20 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:27:12 | -27 | #[pallet::without_storage_info] - | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +27 | #[pallet::without_storage_info] + | _______________^ +28 | | pub struct Pallet(core::marker::PhantomData); +29 | | +30 | | #[pallet::hooks] +... | +38 | | #[pallet::storage] +39 | | type Foo = StorageValue; + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -16,8 +23,15 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:27:12 | -27 | #[pallet::without_storage_info] - | ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +27 | #[pallet::without_storage_info] + | _______________^ +28 | | pub struct Pallet(core::marker::PhantomData); +29 | | +30 | | #[pallet::hooks] +... | +38 | | #[pallet::storage] +39 | | type Foo = StorageValue; + | |____________^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: @@ -36,16 +50,23 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:27:12 | -27 | #[pallet::without_storage_info] - | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +27 | #[pallet::without_storage_info] + | _______________^ +28 | | pub struct Pallet(core::marker::PhantomData); +29 | | +30 | | #[pallet::hooks] +... 
| +38 | | #[pallet::storage] +39 | | type Foo = StorageValue; + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: Box bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -57,8 +78,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:38:12 | -38 | #[pallet::storage] - | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue; + | |____________^ the trait `TypeInfo` is not implemented for `Bar` | = help: the following other types implement trait `TypeInfo`: bool @@ -76,13 +99,15 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:38:12 | -38 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue; + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -91,8 +116,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:38:12 | -38 | #[pallet::storage] - | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue; + | |____________^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: @@ -111,16 +138,18 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:38:12 | -38 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue; + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: Box bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -130,26 +159,29 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:18:1 + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:38:12 | -18 | 
#[frame_support::pallet] - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue; + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` - = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:18:1 + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:38:12 | -18 | #[frame_support::pallet] - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue; + | |____________^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: @@ -164,21 +196,22 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` - = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:18:1 + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:38:12 | -18 | #[frame_support::pallet] - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue; + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: Box bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -186,4 +219,3 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` - = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 3a0a25712aafc3587fb5cc290c7e7f9773b35eba..855d289d0a160e08e45fa0c3b9b50616fb9c8ade 100644 --- 
a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -1,13 +1,20 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:27:12 | -27 | #[pallet::without_storage_info] - | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +27 | #[pallet::without_storage_info] + | _______________^ +28 | | pub struct Pallet(core::marker::PhantomData); +29 | | +30 | | #[pallet::hooks] +... | +38 | | #[pallet::storage] +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -16,8 +23,15 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:27:12 | -27 | #[pallet::without_storage_info] - | ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +27 | #[pallet::without_storage_info] + | _______________^ +28 | | pub struct Pallet(core::marker::PhantomData); +29 | | +30 | | #[pallet::hooks] +... | +38 | | #[pallet::storage] +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: @@ -36,16 +50,23 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:27:12 | -27 | #[pallet::without_storage_info] - | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +27 | #[pallet::without_storage_info] + | _______________^ +28 | | pub struct Pallet(core::marker::PhantomData); +29 | | +30 | | #[pallet::hooks] +... 
| +38 | | #[pallet::storage] +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: Box bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -57,8 +78,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:38:12 | -38 | #[pallet::storage] - | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `TypeInfo` is not implemented for `Bar` | = help: the following other types implement trait `TypeInfo`: bool @@ -76,13 +99,15 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:38:12 | -38 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -91,8 +116,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:38:12 | -38 | #[pallet::storage] - | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: @@ -111,16 +138,18 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:38:12 | -38 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: Box bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -130,26 +159,29 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:18:1 + --> 
tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:38:12 | -18 | #[frame_support::pallet] - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` - = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:18:1 + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:38:12 | -18 | #[frame_support::pallet] - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `EncodeLike` is not implemented for `Bar` | = help: the following other types implement trait `EncodeLike`: @@ -164,21 +196,22 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` - = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:18:1 + --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:38:12 | -18 | #[frame_support::pallet] - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +38 | #[pallet::storage] + | _______________^ +39 | | type Foo = StorageValue<_, Bar>; + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = help: the following other types implement trait `WrapperTypeEncode`: Box bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -186,4 +219,3 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` - = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index e04de98800ec20fc449666f671bcd91058fb3335..504db21feeb2b226da122e4a1719e935145d0c31 
100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -1,8 +1,15 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied --> tests/pallet_ui/storage_info_unsatisfied.rs:26:12 | -26 | #[pallet::pallet] - | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` +26 | #[pallet::pallet] + | _______________^ +27 | | pub struct Pallet<T>(core::marker::PhantomData<T>); +28 | | +29 | | #[pallet::hooks] +... | +38 | | #[pallet::storage] +39 | | type Foo<T> = StorageValue<_, Bar>; + | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 31fe3b57338968822c8f49837c5176aa859be2cf..6fd0b1959c860affc94b56c63b76392a310f2eea 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -1,8 +1,15 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied --> tests/pallet_ui/storage_info_unsatisfied_nmap.rs:29:12 | -29 | #[pallet::pallet] - | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` +29 | #[pallet::pallet] + | _______________^ +30 | | pub struct Pallet<T>(core::marker::PhantomData<T>); +31 | | +32 | | #[pallet::hooks] +... | +41 | | #[pallet::storage] +42 | | type Foo<T> = StorageNMap<_, Key<Twox64Concat, Bar>, u32>; + | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr b/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr index 20144d825e83248b97c5c53053276aab002aa4ac..ccb55122e8169e84f776dd41504738ab0f1772f5 100644 --- a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr @@ -7,6 +7,7 @@ error: use of deprecated struct `pallet::_::Store`: | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: `-D deprecated` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(deprecated)]` error[E0446]: private type `_GeneratedPrefixForStorageFoo` in public interface --> tests/pallet_ui/store_trait_leak_private.rs:28:37 diff --git a/substrate/frame/support/test/tests/pallet_ui/task_can_only_be_attached_to_impl.rs b/substrate/frame/support/test/tests/pallet_ui/task_can_only_be_attached_to_impl.rs new file mode 100644 index 0000000000000000000000000000000000000000..95f5655af198e65e7ae39a4c89534cfe7e30f55e --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_can_only_be_attached_to_impl.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + pub struct Task; +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_can_only_be_attached_to_impl.stderr b/substrate/frame/support/test/tests/pallet_ui/task_can_only_be_attached_to_impl.stderr new file mode 100644 index 0000000000000000000000000000000000000000..eaa8e718840e6dae8fabc70dcd9490786edfa8b1 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_can_only_be_attached_to_impl.stderr @@ -0,0 +1,5 @@ +error: expected `impl` + --> tests/pallet_ui/task_can_only_be_attached_to_impl.rs:30:5 + | +30 | pub struct Task; + | ^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_condition_invalid_arg.rs b/substrate/frame/support/test/tests/pallet_ui/task_condition_invalid_arg.rs new file mode 100644 index 0000000000000000000000000000000000000000..1db96869155bb1a590d60206472b8045c40ff66c --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_condition_invalid_arg.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|flag: bool| flag)] + #[pallet::task_list(vec![1, 2].iter())] + #[pallet::task_weight(0.into())] + fn foo(_i: u32) -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_condition_invalid_arg.stderr b/substrate/frame/support/test/tests/pallet_ui/task_condition_invalid_arg.stderr new file mode 100644 index 0000000000000000000000000000000000000000..9c7bad8119f54d53f10970870bc1363613faf366 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_condition_invalid_arg.stderr @@ -0,0 +1,23 @@ +error: unused import: `frame_system::pallet_prelude::OriginFor` + --> tests/pallet_ui/task_condition_invalid_arg.rs:21:6 + | +21 | use frame_system::pallet_prelude::OriginFor; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `-D unused-imports` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_imports)]` + +error[E0308]: mismatched types + --> tests/pallet_ui/task_condition_invalid_arg.rs:35:10 + | +32 | #[pallet::task_condition(|flag: bool| flag)] + | ----------------- arguments to this function are incorrect +... +35 | fn foo(_i: u32) -> DispatchResult { + | ^^ expected `bool`, found `u32` + | +note: closure parameter defined here + --> tests/pallet_ui/task_condition_invalid_arg.rs:32:29 + | +32 | #[pallet::task_condition(|flag: bool| flag)] + | ^^^^^^^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_condition.rs b/substrate/frame/support/test/tests/pallet_ui/task_invalid_condition.rs new file mode 100644 index 0000000000000000000000000000000000000000..6875bc13b8fa00c3540a4d15f20084ed427981c0 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_condition.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(0)] + #[pallet::task_list(vec![1, 2].iter())] + #[pallet::task_weight(0.into())] + fn foo() -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_condition.stderr b/substrate/frame/support/test/tests/pallet_ui/task_invalid_condition.stderr new file mode 100644 index 0000000000000000000000000000000000000000..05c0ba5eecf26ccd204532023ef5d12a2e367505 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_condition.stderr @@ -0,0 +1,28 @@ +error: unused import: `frame_system::pallet_prelude::OriginFor` + --> tests/pallet_ui/task_invalid_condition.rs:21:6 + | +21 | use frame_system::pallet_prelude::OriginFor; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `-D unused-imports` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_imports)]` + +error[E0308]: mismatched types + --> tests/pallet_ui/task_invalid_condition.rs:18:1 + | +18 | #[frame_support::pallet(dev_mode)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | expected integer, found `()` + | expected due to this + | + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0618]: expected function, found `{integer}` + --> tests/pallet_ui/task_invalid_condition.rs:32:28 + | +18 | #[frame_support::pallet(dev_mode)] + | ---------------------------------- call expression requires function +... +32 | #[pallet::task_condition(0)] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_index.rs b/substrate/frame/support/test/tests/pallet_ui/task_invalid_index.rs new file mode 100644 index 0000000000000000000000000000000000000000..2a4b40523a6868b32ff011115f201fb45b6e0722 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_index.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index("0")] + fn foo() -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_index.stderr b/substrate/frame/support/test/tests/pallet_ui/task_invalid_index.stderr new file mode 100644 index 0000000000000000000000000000000000000000..d33600455bf8ae836f9e6c2a046e095006d7b346 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_index.stderr @@ -0,0 +1,5 @@ +error: expected integer literal + --> tests/pallet_ui/task_invalid_index.rs:31:24 + | +31 | #[pallet::task_index("0")] + | ^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_list.rs b/substrate/frame/support/test/tests/pallet_ui/task_invalid_list.rs new file mode 100644 index 0000000000000000000000000000000000000000..bb6438aaf10505c1dac79daa7d5258be41f34e7d --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_list.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|| true)] + #[pallet::task_list(0)] + #[pallet::task_weight(0.into())] + fn foo() -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_list.stderr b/substrate/frame/support/test/tests/pallet_ui/task_invalid_list.stderr new file mode 100644 index 0000000000000000000000000000000000000000..536d02610cb9f0f4a2aca736dbdf1ad65f356416 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_list.stderr @@ -0,0 +1,20 @@ +error: unused import: `frame_system::pallet_prelude::OriginFor` + --> tests/pallet_ui/task_invalid_list.rs:21:6 + | +21 | use frame_system::pallet_prelude::OriginFor; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `-D unused-imports` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_imports)]` + +error[E0689]: can't call method `map` on ambiguous numeric type `{integer}` + --> tests/pallet_ui/task_invalid_list.rs:18:1 + | +18 | #[frame_support::pallet(dev_mode)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) +help: you must specify a concrete type for this numeric value, like `i32` + | +33 | #[pallet::task_list(0_i32)] + | ~~~~~ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_weight.rs b/substrate/frame/support/test/tests/pallet_ui/task_invalid_weight.rs new file mode 100644 index 0000000000000000000000000000000000000000..a0c4040347a07f3d2fa9a923daa19fe0e5673bc1 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_weight.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|| true)] + #[pallet::task_list(vec![1, 2].iter())] + #[pallet::task_weight("0")] + fn foo() -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_weight.stderr b/substrate/frame/support/test/tests/pallet_ui/task_invalid_weight.stderr new file mode 100644 index 0000000000000000000000000000000000000000..24e925a069920537d3c80275a18d7818e5845ef3 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_weight.stderr @@ -0,0 +1,28 @@ +error: unused import: `frame_system::pallet_prelude::OriginFor` + --> tests/pallet_ui/task_invalid_weight.rs:21:6 + | +21 | use frame_system::pallet_prelude::OriginFor; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `-D unused-imports` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_imports)]` + +error[E0308]: mismatched types + --> tests/pallet_ui/task_invalid_weight.rs:18:1 + | +18 | #[frame_support::pallet(dev_mode)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | expected integer, found `()` + | expected due to this + | + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/pallet_ui/task_invalid_weight.rs:34:25 + | +18 | #[frame_support::pallet(dev_mode)] + | ---------------------------------- expected `Weight` because of return type +... +34 | #[pallet::task_weight("0")] + | ^^^ expected `Weight`, found `&str` diff --git a/substrate/frame/support/test/tests/pallet_ui/task_missing_condition.rs b/substrate/frame/support/test/tests/pallet_ui/task_missing_condition.rs new file mode 100644 index 0000000000000000000000000000000000000000..6ca6e37a5bdb58ec980723650a88e9a3fdd690ec --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_missing_condition.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index(0)] + fn foo() -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_missing_condition.stderr b/substrate/frame/support/test/tests/pallet_ui/task_missing_condition.stderr new file mode 100644 index 0000000000000000000000000000000000000000..c709ec7eac94daaa56bea48683a1a5ff5236ae19 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_missing_condition.stderr @@ -0,0 +1,5 @@ +error: missing `#[pallet::task_condition(..)]` attribute + --> tests/pallet_ui/task_missing_condition.rs:32:6 + | +32 | fn foo() -> DispatchResult { + | ^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_missing_index.rs b/substrate/frame/support/test/tests/pallet_ui/task_missing_index.rs new file mode 100644 index 0000000000000000000000000000000000000000..ed98d229f18b718163661fbda4673e8c727b452d --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_missing_index.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + fn foo() -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_missing_index.stderr b/substrate/frame/support/test/tests/pallet_ui/task_missing_index.stderr new file mode 100644 index 0000000000000000000000000000000000000000..ba3c9d132b810a526d4e184a22cecd5dd140fe48 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_missing_index.stderr @@ -0,0 +1,5 @@ +error: missing `#[pallet::task_index(..)]` attribute + --> tests/pallet_ui/task_missing_index.rs:31:6 + | +31 | fn foo() -> DispatchResult { + | ^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_missing_list.rs b/substrate/frame/support/test/tests/pallet_ui/task_missing_list.rs new file mode 100644 index 0000000000000000000000000000000000000000..427efe127634ca6bba6c92be4bdf5f5737c72884 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_missing_list.rs @@ -0,0 +1,40 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|| true)] + fn foo() -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_missing_list.stderr b/substrate/frame/support/test/tests/pallet_ui/task_missing_list.stderr new file mode 100644 index 0000000000000000000000000000000000000000..f4ae26a75add85f9c6708584ba7ad574885b9ebe --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_missing_list.stderr @@ -0,0 +1,5 @@ +error: missing `#[pallet::task_list(..)]` attribute + --> tests/pallet_ui/task_missing_list.rs:33:6 + | +33 | fn foo() -> DispatchResult { + | ^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_missing_weight.rs b/substrate/frame/support/test/tests/pallet_ui/task_missing_weight.rs new file mode 100644 index 0000000000000000000000000000000000000000..704be1f1e0b8bc3c3d80b86a97183202831d8426 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_missing_weight.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::tasks_experimental] + impl Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|| true)] + #[pallet::task_list(vec![1, 2].iter())] + fn foo() -> DispatchResult { + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_missing_weight.stderr b/substrate/frame/support/test/tests/pallet_ui/task_missing_weight.stderr new file mode 100644 index 0000000000000000000000000000000000000000..de7b2eb172058e91077be7d02856cab4c98b8850 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_missing_weight.stderr @@ -0,0 +1,5 @@ +error: missing `#[pallet::task_weight(..)]` attribute + --> tests/pallet_ui/task_missing_weight.rs:34:6 + | +34 | fn foo() -> DispatchResult { + | ^^^ diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index a545735f2b1e8bb8496ef12360fdd1c289ef0f2b..bb7f7d2822e7cf1a15c41de1ca47686dd8440bfc 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::traits::ConstU32; +use frame_support::{derive_impl, traits::ConstU32}; use scale_info::{form::MetaForm, meta_type}; use sp_metadata_ir::{ RuntimeApiMetadataIR, RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, @@ -27,6 +27,7 @@ pub type Header = sp_runtime::generic::Header; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); diff --git a/substrate/frame/support/test/tests/storage_layers.rs b/substrate/frame/support/test/tests/storage_layers.rs index b825c85f9564c12924222af10baa1d6e5e1e7e35..a6d16e0d66d930188199a544411ec1763b08af65 100644 --- a/substrate/frame/support/test/tests/storage_layers.rs +++ b/substrate/frame/support/test/tests/storage_layers.rs @@ -16,8 +16,8 @@ // limitations under the License. 
use frame_support::{ - assert_noop, assert_ok, dispatch::DispatchResult, ensure, pallet_prelude::ConstU32, - storage::with_storage_layer, + assert_noop, assert_ok, derive_impl, dispatch::DispatchResult, ensure, + pallet_prelude::ConstU32, storage::with_storage_layer, }; use pallet::*; use sp_io::TestExternalities; @@ -64,6 +64,7 @@ pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index f7733e312c3b84e472b6fe4c615a23223ea04cdc..c64c32b4575b7a8e8fde1e66796ff488fa772d58 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME system module" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,14 +20,15 @@ cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } -frame-support = { path = "../support", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } +frame-support = { path = "../support", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } +docify = "0.2.6" [dev-dependencies] criterion = "0.4.0" @@ -32,7 +36,7 @@ sp-externalities = { path = "../../primitives/externalities" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", @@ -51,8 +55,8 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = [ "frame-support/try-runtime", "sp-runtime/try-runtime" ] -experimental = [] +try-runtime = ["frame-support/try-runtime", "sp-runtime/try-runtime"] +experimental = ["frame-support/experimental"] [[bench]] name = "bench" diff --git a/substrate/frame/system/benches/bench.rs b/substrate/frame/system/benches/bench.rs index da8bb10fd4e42a17c995a12b39370c9004777c20..79d5a2d8689011839bf46184877e31732b729c1d 100644 --- a/substrate/frame/system/benches/bench.rs +++ b/substrate/frame/system/benches/bench.rs @@ -16,7 +16,10 @@ // limitations under the License. 
use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -57,6 +60,8 @@ frame_support::parameter_types! { 4 * 1024 * 1024, Perbill::from_percent(75), ); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index c1d241f4bec1641f65700ff6876b9f36beb441fb..8b9873f44b861d23fc367f6e610f0a8369292f82 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -9,18 +9,21 @@ repository.workspace = true description = "FRAME System benchmarking" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "..", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "..", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] sp-io = { path = "../../../primitives/io" } @@ -28,7 +31,7 @@ sp-externalities = { path = "../../../primitives/externalities" } sp-version = { path = "../../../primitives/version" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index d85b631af01850265bbf25404715ed86be90370d..18bfb85f52dfd4c2b6a5c8a57e78494d97fbdeeb 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -21,10 +21,7 @@ #![cfg(feature = "runtime-benchmarks")] use codec::Encode; -use frame_benchmarking::{ - v1::{benchmarks, whitelisted_caller}, - BenchmarkError, -}; +use frame_benchmarking::{impl_benchmark_test_suite, v2::*}; use frame_support::{dispatch::DispatchClass, storage, traits::Get}; use frame_system::{Call, Pallet as System, RawOrigin}; use sp_core::storage::well_known_keys; @@ -55,69 +52,104 @@ pub trait Config: frame_system::Config { } } -benchmarks! { - remark { - let b in 0 .. 
*T::BlockLength::get().max.get(DispatchClass::Normal) as u32; +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn remark( + b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>, + ) -> Result<(), BenchmarkError> { let remark_message = vec![1; b as usize]; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), remark_message) - remark_with_event { - let b in 0 .. *T::BlockLength::get().max.get(DispatchClass::Normal) as u32; + #[extrinsic_call] + remark(RawOrigin::Signed(caller), remark_message); + + Ok(()) + } + + #[benchmark] + fn remark_with_event( + b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>, + ) -> Result<(), BenchmarkError> { let remark_message = vec![1; b as usize]; - let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), remark_message) + let caller: T::AccountId = whitelisted_caller(); + let hash = T::Hashing::hash(&remark_message[..]); - set_heap_pages { - }: _(RawOrigin::Root, Default::default()) + #[extrinsic_call] + remark_with_event(RawOrigin::Signed(caller.clone()), remark_message); - set_code { + System::::assert_last_event( + frame_system::Event::::Remarked { sender: caller, hash }.into(), + ); + Ok(()) + } + + #[benchmark] + fn set_heap_pages() -> Result<(), BenchmarkError> { + #[extrinsic_call] + set_heap_pages(RawOrigin::Root, Default::default()); + + Ok(()) + } + + #[benchmark] + fn set_code() -> Result<(), BenchmarkError> { let runtime_blob = T::prepare_set_code_data(); T::setup_set_code_requirements(&runtime_blob)?; - }: _(RawOrigin::Root, runtime_blob) - verify { - T::verify_set_code() + + #[extrinsic_call] + set_code(RawOrigin::Root, runtime_blob); + + T::verify_set_code(); + Ok(()) } - #[extra] - set_code_without_checks { + #[benchmark(extra)] + fn set_code_without_checks() -> Result<(), BenchmarkError> { // Assume Wasm ~4MB let code = vec![1; 4_000_000 as usize]; T::setup_set_code_requirements(&code)?; - }: _(RawOrigin::Root, code) - verify { - let current_code = storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?; + + #[block] + { + System::::set_code_without_checks(RawOrigin::Root.into(), code)?; + } + + let current_code = + storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?; assert_eq!(current_code.len(), 4_000_000 as usize); + Ok(()) } - #[skip_meta] - set_storage { - let i in 0 .. 1000; - + #[benchmark(skip_meta)] + fn set_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { // Set up i items to add let mut items = Vec::new(); - for j in 0 .. i { + for j in 0..i { let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); items.push((hash.clone(), hash.clone())); } let items_to_verify = items.clone(); - }: _(RawOrigin::Root, items) - verify { + + #[extrinsic_call] + set_storage(RawOrigin::Root, items); + // Verify that they're actually in the storage. for (item, _) in items_to_verify { let value = storage::unhashed::get_raw(&item).ok_or("No value stored")?; assert_eq!(value, *item); } + Ok(()) } - #[skip_meta] - kill_storage { - let i in 0 .. 1000; - + #[benchmark(skip_meta)] + fn kill_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { // Add i items to storage let mut items = Vec::with_capacity(i as usize); - for j in 0 .. i { + for j in 0..i { let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); storage::unhashed::put_raw(&hash, &hash); items.push(hash); @@ -130,22 +162,23 @@ benchmarks! 
{ } let items_to_verify = items.clone(); - }: _(RawOrigin::Root, items) - verify { + + #[extrinsic_call] + kill_storage(RawOrigin::Root, items); + // Verify that they're not in the storage anymore. for item in items_to_verify { assert!(storage::unhashed::get_raw(&item).is_none()); } + Ok(()) } - #[skip_meta] - kill_prefix { - let p in 0 .. 1000; - + #[benchmark(skip_meta)] + fn kill_prefix(p: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { let prefix = p.using_encoded(T::Hashing::hash).as_ref().to_vec(); let mut items = Vec::with_capacity(p as usize); // add p items that share a prefix - for i in 0 .. p { + for i in 0..p { let hash = (p, i).using_encoded(T::Hashing::hash).as_ref().to_vec(); let key = [&prefix[..], &hash[..]].concat(); storage::unhashed::put_raw(&key, &key); @@ -157,12 +190,45 @@ benchmarks! { let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; assert_eq!(value, *item); } - }: _(RawOrigin::Root, prefix, p) - verify { + + #[extrinsic_call] + kill_prefix(RawOrigin::Root, prefix, p); + // Verify that they're not in the storage anymore. for item in items { assert!(storage::unhashed::get_raw(&item).is_none()); } + Ok(()) + } + + #[benchmark] + fn authorize_upgrade() -> Result<(), BenchmarkError> { + let runtime_blob = T::prepare_set_code_data(); + T::setup_set_code_requirements(&runtime_blob)?; + let hash = T::Hashing::hash(&runtime_blob); + + #[extrinsic_call] + authorize_upgrade(RawOrigin::Root, hash); + + assert!(System::::authorized_upgrade().is_some()); + Ok(()) + } + + #[benchmark] + fn apply_authorized_upgrade() -> Result<(), BenchmarkError> { + let runtime_blob = T::prepare_set_code_data(); + T::setup_set_code_requirements(&runtime_blob)?; + let hash = T::Hashing::hash(&runtime_blob); + // Will be heavier when it needs to do verification (i.e. don't use `...without_checks`). + System::::authorize_upgrade(RawOrigin::Root.into(), hash)?; + + #[extrinsic_call] + apply_authorized_upgrade(RawOrigin::Root, runtime_blob); + + // Can't check for `CodeUpdated` in parachain upgrades. Just check that the authorization is + // gone. + assert!(System::::authorized_upgrade().is_none()); + Ok(()) } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs index 4e6b1221da35613a242f3b0388ba8f26d7f3b537..9a81cddca142dc540de1ef18b046b26da76ad5d1 100644 --- a/substrate/frame/system/benchmarking/src/mock.rs +++ b/substrate/frame/system/benchmarking/src/mock.rs @@ -20,6 +20,7 @@ #![cfg(test)] use codec::Encode; +use frame_support::derive_impl; use sp_runtime::{traits::IdentityLookup, BuildStorage}; type AccountId = u64; @@ -34,6 +35,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index 81b6d946d462f863f417b5c354226c3ab8176957..8cec5de8d1e527862c0a91316f6c84bb2ec89b22 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -9,13 +9,16 @@ repository.workspace = true description = "Runtime API definition required by System RPC extensions." 
readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -sp-api = { path = "../../../../primitives/api", default-features = false} +sp-api = { path = "../../../../primitives/api", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "sp-api/std" ] +default = ["std"] +std = ["codec/std", "sp-api/std"] diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 0e394a110411d1dab3952b211de3fdf7372b6a6d..069217bcee46b4663c836a750bfd90a568c1a4d2 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -17,27 +17,60 @@ //! # System Pallet //! -//! The System pallet provides low-level access to core types and cross-cutting utilities. -//! It acts as the base layer for other pallets to interact with the Substrate framework components. +//! The System pallet provides low-level access to core types and cross-cutting utilities. It acts +//! as the base layer for other pallets to interact with the Substrate framework components. //! //! - [`Config`] //! //! ## Overview //! -//! The System pallet defines the core data types used in a Substrate runtime. -//! It also provides several utility functions (see [`Pallet`]) for other FRAME pallets. +//! The System pallet defines the core data types used in a Substrate runtime. It also provides +//! several utility functions (see [`Pallet`]) for other FRAME pallets. //! -//! In addition, it manages the storage items for extrinsics data, indexes, event records, and -//! digest items, among other things that support the execution of the current block. +//! In addition, it manages the storage items for extrinsic data, indices, event records, and digest +//! items, among other things that support the execution of the current block. //! -//! It also handles low-level tasks like depositing logs, basic set up and take down of -//! temporary storage entries, and access to previous block hashes. +//! It also handles low-level tasks like depositing logs, basic set up and take down of temporary +//! storage entries, and access to previous block hashes. //! //! ## Interface //! //! ### Dispatchable Functions //! -//! The System pallet does not implement any dispatchable functions. +//! The System pallet provides dispatchable functions that, with the exception of `remark`, manage +//! low-level or privileged functionality of a Substrate-based runtime. +//! +//! - `remark`: Make some on-chain remark. +//! - `set_heap_pages`: Set the number of pages in the WebAssembly environment's heap. +//! - `set_code`: Set the new runtime code. +//! - `set_code_without_checks`: Set the new runtime code without any checks. +//! - `set_storage`: Set some items of storage. +//! - `kill_storage`: Kill some items from storage. +//! - `kill_prefix`: Kill all storage items with a key that starts with the given prefix. +//! - `remark_with_event`: Make some on-chain remark and emit an event. +//! - `do_task`: Do some specified task. +//! - `authorize_upgrade`: Authorize new runtime code. +//! - `authorize_upgrade_without_checks`: Authorize new runtime code and an upgrade sans +//! verification. +//! - `apply_authorized_upgrade`: Provide new, already-authorized runtime code. +//! +//! #### A Note on Upgrades +//! +//! The pallet provides two primary means of upgrading the runtime, a single-phase means using +//! 
`set_code` and a two-phase means using `authorize_upgrade` followed by +//! `apply_authorized_upgrade`. The first will directly attempt to apply the provided `code` +//! (application may have to be scheduled, depending on the context and implementation of the +//! `OnSetCode` trait). +//! +//! The `authorize_upgrade` route allows the authorization of a runtime's code hash. Once +//! authorized, anyone may upload the correct runtime to apply the code. This pattern is useful when +//! providing the runtime ahead of time may be unwieldy, for example when a large preimage (the +//! code) would need to be stored on-chain or sent over a message transport protocol such as a +//! bridge. +//! +//! The `*_without_checks` variants do not perform any version checks, so using them runs the risk +//! of applying a downgrade or entirely other chain specification. They will still validate that the +//! `code` meets the authorized hash. //! //! ### Public Functions //! @@ -59,7 +92,7 @@ //! - [`CheckTxVersion`]: Checks that the transaction version is the same as the one used to sign //! the transaction. //! -//! Lookup the runtime aggregator file (e.g. `node/runtime`) to see the full list of signed +//! Look up the runtime aggregator file (e.g. `node/runtime`) to see the full list of signed //! extensions included in a chain. #![cfg_attr(not(feature = "std"), no_std)] @@ -77,6 +110,10 @@ use sp_runtime::{ Hash, Header, Lookup, LookupError, MaybeDisplay, MaybeSerializeDeserialize, Member, One, Saturating, SimpleBitOps, StaticLookup, Zero, }, + transaction_validity::{ + InvalidTransaction, TransactionLongevity, TransactionSource, TransactionValidity, + ValidTransaction, + }, DispatchError, RuntimeDebug, }; #[cfg(any(feature = "std", test))] @@ -90,9 +127,10 @@ use frame_support::traits::BuildGenesisConfig; use frame_support::{ dispatch::{ extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, - DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, + DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, PostDispatchInfo, }, - impl_ensure_origin_with_arg_ignoring_arg, + ensure, impl_ensure_origin_with_arg_ignoring_arg, + pallet_prelude::Pays, storage::{self, StorageStreamIter}, traits::{ ConstU32, Contains, EnsureOrigin, EnsureOriginWithArg, Get, HandleLifetime, @@ -198,6 +236,20 @@ impl, MaxOverflow: Get> ConsumerLimits for (MaxNormal, } } +/// Information needed when a new runtime binary is submitted and needs to be authorized before +/// replacing the current runtime. +#[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct CodeUpgradeAuthorization +where + T: Config, +{ + /// Hash of the new runtime binary. + code_hash: T::Hash, + /// Whether or not to carry out version checks. + check_version: bool, +} + #[frame_support::pallet] pub mod pallet { use crate::{self as frame_system, pallet_prelude::*, *}; @@ -206,6 +258,7 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. 
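Editorial aside: the two-phase upgrade described in the note above can be driven entirely through the two new calls. A hedged sketch of how a test or tool might exercise it against any runtime `T` that includes `frame_system`; only the two `Pallet::<T>` calls come from this diff, the helper itself is illustrative.

use frame_system::RawOrigin;
use sp_runtime::traits::Hash;
use sp_std::vec::Vec;

/// Drive the two-phase upgrade end to end (illustrative helper only).
fn two_phase_upgrade<T: frame_system::Config>(
	new_code: Vec<u8>,
) -> frame_support::dispatch::DispatchResultWithPostInfo {
	// Phase 1: a privileged origin authorizes only the *hash* of the new runtime.
	let code_hash = T::Hashing::hash(&new_code);
	frame_system::Pallet::<T>::authorize_upgrade(RawOrigin::<T::AccountId>::Root.into(), code_hash)?;

	// Phase 2: any origin may later supply the matching preimage. The hash is re-checked and,
	// unless `authorize_upgrade_without_checks` was used, `can_set_code` still applies.
	frame_system::Pallet::<T>::apply_authorized_upgrade(RawOrigin::<T::AccountId>::None.into(), new_code)
}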
pub mod config_preludes { use super::{inject_runtime_type, DefaultConfig}; + use frame_support::derive_impl; /// Provides a viable default config that can be used with /// [`derive_impl`](`frame_support::derive_impl`) to derive a testing pallet config @@ -240,6 +293,8 @@ pub mod pallet { type RuntimeCall = (); #[inject_runtime_type] type PalletInfo = (); + #[inject_runtime_type] + type RuntimeTask = (); type BaseCallFilter = frame_support::traits::Everything; type BlockHashCount = frame_support::traits::ConstU64<10>; type OnSetCode = (); @@ -258,39 +313,102 @@ pub mod pallet { /// if you use `pallet-balances` or similar. /// * Make sure to overwrite [`DefaultConfig::Version`]. /// * 2s block time, and a default 5mb block size is used. - #[cfg(feature = "experimental")] pub struct SolochainDefaultConfig; - #[cfg(feature = "experimental")] #[frame_support::register_default_impl(SolochainDefaultConfig)] impl DefaultConfig for SolochainDefaultConfig { + /// The default type for storing how many extrinsics an account has signed. type Nonce = u32; + + /// The default type for hashing blocks and tries. type Hash = sp_core::hash::H256; + + /// The default hashing algorithm used. type Hashing = sp_runtime::traits::BlakeTwo256; + + /// The default identifier used to distinguish between accounts. type AccountId = sp_runtime::AccountId32; + + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = sp_runtime::traits::AccountIdLookup; + + /// The maximum number of consumers allowed on a single account. Using 128 as default. type MaxConsumers = frame_support::traits::ConstU32<128>; + + /// The default data to be stored in an account. type AccountData = crate::AccountInfo; + + /// What to do if a new account is created. type OnNewAccount = (); + + /// What to do if an account is fully reaped from the system. type OnKilledAccount = (); + + /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); + + /// This is used as an identifier of the chain. type SS58Prefix = (); + + /// Version of the runtime. type Version = (); + + /// Block & extrinsics weights: base values and limits. type BlockWeights = (); + + /// The maximum length of a block (in bytes). type BlockLength = (); + + /// The weight of database operations that the runtime can invoke. type DbWeight = (); + + /// The ubiquitous event type injected by `construct_runtime!`. #[inject_runtime_type] type RuntimeEvent = (); + + /// The ubiquitous origin type injected by `construct_runtime!`. #[inject_runtime_type] type RuntimeOrigin = (); + + /// The aggregated dispatch type available for extrinsics, injected by + /// `construct_runtime!`. #[inject_runtime_type] type RuntimeCall = (); + + /// The aggregated Task type, injected by `construct_runtime!`. + #[inject_runtime_type] + type RuntimeTask = (); + + /// Converts a module to the index of the module, injected by `construct_runtime!`. #[inject_runtime_type] type PalletInfo = (); + + /// The basic call filter to use in dispatchable. Supports everything as the default. type BaseCallFilter = frame_support::traits::Everything; + + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + /// Using 256 as default. type BlockHashCount = frame_support::traits::ConstU32<256>; + + /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); } + + /// Default configurations of this pallet in a relay-chain environment. 
+ pub struct RelayChainDefaultConfig; + + /// It currently uses the same configuration as `SolochainDefaultConfig`. + #[derive_impl(SolochainDefaultConfig as DefaultConfig, no_aggregated_types)] + #[frame_support::register_default_impl(RelayChainDefaultConfig)] + impl DefaultConfig for RelayChainDefaultConfig {} + + /// Default configurations of this pallet in a parachain environment. + pub struct ParaChainDefaultConfig; + + /// It currently uses the same configuration as `SolochainDefaultConfig`. + #[derive_impl(SolochainDefaultConfig as DefaultConfig, no_aggregated_types)] + #[frame_support::register_default_impl(ParaChainDefaultConfig)] + impl DefaultConfig for ParaChainDefaultConfig {} } /// System configuration trait. Implemented by runtime. @@ -340,6 +458,10 @@ pub mod pallet { + Debug + From>; + /// The aggregated `RuntimeTask` type. + #[pallet::no_default_bounds] + type RuntimeTask: Task; + /// This stores the number of previous transactions associated with a sender account. type Nonce: Parameter + Member @@ -568,6 +690,79 @@ pub mod pallet { Self::deposit_event(Event::Remarked { sender: who, hash }); Ok(().into()) } + + #[cfg(feature = "experimental")] + #[pallet::call_index(8)] + #[pallet::weight(task.weight())] + pub fn do_task(origin: OriginFor, task: T::RuntimeTask) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + + if !task.is_valid() { + return Err(Error::::InvalidTask.into()) + } + + Self::deposit_event(Event::TaskStarted { task: task.clone() }); + if let Err(err) = task.run() { + Self::deposit_event(Event::TaskFailed { task, err }); + return Err(Error::::FailedTask.into()) + } + + // Emit a success event, if your design includes events for this pallet. + Self::deposit_event(Event::TaskCompleted { task }); + + // Return success. + Ok(().into()) + } + + /// Authorize an upgrade to a given `code_hash` for the runtime. The runtime can be supplied + /// later. + /// + /// This call requires Root origin. + #[pallet::call_index(9)] + #[pallet::weight((T::SystemWeightInfo::authorize_upgrade(), DispatchClass::Operational))] + pub fn authorize_upgrade(origin: OriginFor, code_hash: T::Hash) -> DispatchResult { + ensure_root(origin)?; + Self::do_authorize_upgrade(code_hash, true); + Ok(()) + } + + /// Authorize an upgrade to a given `code_hash` for the runtime. The runtime can be supplied + /// later. + /// + /// WARNING: This authorizes an upgrade that will take place without any safety checks, for + /// example that the spec name remains the same and that the version number increases. Not + /// recommended for normal use. Use `authorize_upgrade` instead. + /// + /// This call requires Root origin. + #[pallet::call_index(10)] + #[pallet::weight((T::SystemWeightInfo::authorize_upgrade(), DispatchClass::Operational))] + pub fn authorize_upgrade_without_checks( + origin: OriginFor, + code_hash: T::Hash, + ) -> DispatchResult { + ensure_root(origin)?; + Self::do_authorize_upgrade(code_hash, false); + Ok(()) + } + + /// Provide the preimage (runtime binary) `code` for an upgrade that has been authorized. + /// + /// If the authorization required a version check, this call will ensure the spec name + /// remains unchanged and that the spec version has increased. + /// + /// Depending on the runtime's `OnSetCode` configuration, this function may directly apply + /// the new `code` in the same block or attempt to schedule the upgrade. + /// + /// All origins are allowed. 
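Editorial aside: "all origins are allowed" here is safe because unsigned submissions of this call are filtered by the `ValidateUnsigned` implementation added further down in this file. A sketch of exercising that validation directly, e.g. from a test running inside externalities; the helper name is illustrative.

use sp_runtime::{
	traits::ValidateUnsigned,
	transaction_validity::{TransactionSource, TransactionValidity},
};
use sp_std::vec::Vec;

/// Ask `frame_system` whether an unsigned `apply_authorized_upgrade(code)` would be accepted.
fn validate_unsigned_upgrade<T: frame_system::Config>(code: Vec<u8>) -> TransactionValidity {
	let call = frame_system::Call::<T>::apply_authorized_upgrade { code };
	// Valid only if the hash of `code` matches a stored `CodeUpgradeAuthorization`.
	<frame_system::Pallet<T> as ValidateUnsigned>::validate_unsigned(TransactionSource::External, &call)
}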
+ #[pallet::call_index(11)] + #[pallet::weight((T::SystemWeightInfo::apply_authorized_upgrade(), DispatchClass::Operational))] + pub fn apply_authorized_upgrade( + _: OriginFor, + code: Vec, + ) -> DispatchResultWithPostInfo { + let post = Self::do_apply_authorize_upgrade(code)?; + Ok(post) + } } /// Event for the System pallet. @@ -585,6 +780,17 @@ pub mod pallet { KilledAccount { account: T::AccountId }, /// On on-chain remark happened. Remarked { sender: T::AccountId, hash: T::Hash }, + #[cfg(feature = "experimental")] + /// A [`Task`] has started executing + TaskStarted { task: T::RuntimeTask }, + #[cfg(feature = "experimental")] + /// A [`Task`] has finished executing. + TaskCompleted { task: T::RuntimeTask }, + #[cfg(feature = "experimental")] + /// A [`Task`] failed during execution. + TaskFailed { task: T::RuntimeTask, err: DispatchError }, + /// An upgrade was authorized. + UpgradeAuthorized { code_hash: T::Hash, check_version: bool }, } /// Error for the System pallet @@ -606,6 +812,16 @@ pub mod pallet { NonZeroRefCount, /// The origin filter prevent the call to be dispatched. CallFiltered, + #[cfg(feature = "experimental")] + /// The specified [`Task`] is not valid. + InvalidTask, + #[cfg(feature = "experimental")] + /// The specified [`Task`] failed during execution. + FailedTask, + /// No upgrade authorized. + NothingAuthorized, + /// The submitted code is not authorized. + Unauthorized, } /// Exposed trait-generic origin type. @@ -721,6 +937,12 @@ pub mod pallet { #[pallet::whitelist_storage] pub(super) type ExecutionPhase = StorageValue<_, Phase>; + /// `Some` if a code upgrade has been authorized. + #[pallet::storage] + #[pallet::getter(fn authorized_upgrade)] + pub(super) type AuthorizedUpgrade = + StorageValue<_, CodeUpgradeAuthorization, OptionQuery>; + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] pub struct GenesisConfig { @@ -740,6 +962,25 @@ pub mod pallet { sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); } } + + #[pallet::validate_unsigned] + impl sp_runtime::traits::ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::apply_authorized_upgrade { ref code } = call { + if let Ok(hash) = Self::validate_authorized_upgrade(&code[..]) { + return Ok(ValidTransaction { + priority: 100, + requires: Vec::new(), + provides: vec![hash.as_ref().to_vec()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } + } + Err(InvalidTransaction::Call.into()) + } + } } pub type Key = Vec; @@ -1018,6 +1259,7 @@ impl_ensure_origin_with_arg_ignoring_arg! { {} } +#[docify::export] /// Ensure that the origin `o` represents a signed extrinsic (i.e. transaction). /// Returns `Ok` with the account that signed the extrinsic or an `Err` otherwise. pub fn ensure_signed(o: OuterOrigin) -> Result @@ -1094,6 +1336,25 @@ pub enum DecRefStatus { } impl Pallet { + /// Returns the `spec_version` of the last runtime upgrade. + /// + /// This function is useful for writing guarded runtime migrations in the runtime. A runtime + /// migration can use the `spec_version` to ensure that it isn't applied twice. This works + /// similar as the storage version for pallets. + /// + /// This functions returns the `spec_version` of the last runtime upgrade while executing the + /// runtime migrations + /// [`on_runtime_upgrade`](frame_support::traits::OnRuntimeUpgrade::on_runtime_upgrade) + /// function. 
After all migrations are executed, this will return the `spec_version` of the + /// current runtime until there is another runtime upgrade. + /// + /// Example: + #[doc = docify::embed!("src/tests.rs", last_runtime_upgrade_spec_version_usage)] + pub fn last_runtime_upgrade_spec_version() -> u32 { + LastRuntimeUpgrade::::get().map_or(0, |l| l.spec_version.0) + } + + /// Returns true if the given account exists. pub fn account_exists(who: &T::AccountId) -> bool { Account::::contains_key(who) } @@ -1353,6 +1614,7 @@ impl Pallet { /// NOTE: Events not registered at the genesis block and quietly omitted. pub fn deposit_event_indexed(topics: &[T::Hash], event: T::RuntimeEvent) { let block_number = Self::block_number(); + // Don't populate events on genesis. if block_number.is_zero() { return @@ -1536,12 +1798,7 @@ impl Pallet { /// NOTE: Events not registered at the genesis block and quietly omitted. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] pub fn events() -> Vec> { - debug_assert!( - !Self::block_number().is_zero(), - "events not registered at the genesis block" - ); - // Dereferencing the events here is fine since we are not in the - // memory-restricted runtime. + // Dereferencing the events here is fine since we are not in the memory-restricted runtime. Self::read_events_no_consensus().map(|e| *e).collect() } @@ -1562,6 +1819,21 @@ impl Pallet { Events::::stream_iter() } + /// Read and return the events of a specific pallet, as denoted by `E`. + /// + /// This is useful for a pallet that wishes to read only the events it has deposited into + /// `frame_system` using the standard `fn deposit_event`. + pub fn read_events_for_pallet() -> Vec + where + T::RuntimeEvent: TryInto, + { + Events::::get() + .into_iter() + .map(|er| er.event) + .filter_map(|e| e.try_into().ok()) + .collect::<_>() + } + /// Set the block number to something in particular. Can be used as an alternative to /// `initialize` for tests that don't need to bother with the other environment entries. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] @@ -1733,6 +2005,41 @@ impl Pallet { } } } + + /// To be called after any origin/privilege checks. Put the code upgrade authorization into + /// storage and emit an event. Infallible. + pub fn do_authorize_upgrade(code_hash: T::Hash, check_version: bool) { + AuthorizedUpgrade::::put(CodeUpgradeAuthorization { code_hash, check_version }); + Self::deposit_event(Event::UpgradeAuthorized { code_hash, check_version }); + } + + /// Apply an authorized upgrade, performing any validation checks, and remove the authorization. + /// Whether or not the code is set directly depends on the `OnSetCode` configuration of the + /// runtime. + pub fn do_apply_authorize_upgrade(code: Vec) -> Result { + Self::validate_authorized_upgrade(&code[..])?; + T::OnSetCode::set_code(code)?; + AuthorizedUpgrade::::kill(); + let post = PostDispatchInfo { + // consume the rest of the block to prevent further transactions + actual_weight: Some(T::BlockWeights::get().max_block), + // no fee for valid upgrade + pays_fee: Pays::No, + }; + Ok(post) + } + + /// Check that provided `code` can be upgraded to. Namely, check that its hash matches an + /// existing authorization and that it meets the specification requirements of `can_set_code`. 
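Editorial aside: a sketch of how the new `read_events_for_pallet` helper might be used from a test. It assumes the usual mock items (`Test`, `System`, `new_test_ext`) and that the aggregated `RuntimeEvent` provides the `TryInto<frame_system::Event<Test>>` conversion required by the helper's bound; treat that conversion as an assumption of this sketch.

#[test]
fn reads_only_system_events() {
	new_test_ext().execute_with(|| {
		System::set_block_number(1);
		System::deposit_event(frame_system::Event::<Test>::CodeUpdated);

		// Events deposited by other pallets are dropped by the `try_into` filter in the helper.
		let events = System::read_events_for_pallet::<frame_system::Event<Test>>();
		assert!(matches!(events.last(), Some(frame_system::Event::CodeUpdated)));
	});
}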
+ pub fn validate_authorized_upgrade(code: &[u8]) -> Result { + let authorization = AuthorizedUpgrade::::get().ok_or(Error::::NothingAuthorized)?; + let actual_hash = T::Hashing::hash(code); + ensure!(actual_hash == authorization.code_hash, Error::::Unauthorized); + if authorization.check_version { + Self::can_set_code(code)? + } + Ok(actual_hash) + } } /// Returns a 32 byte datum which is guaranteed to be universally unique. `entropy` is provided @@ -1788,6 +2095,11 @@ impl BlockNumberProvider for Pallet { fn current_block_number() -> Self::BlockNumber { Pallet::::block_number() } + + #[cfg(feature = "runtime-benchmarks")] + fn set_block_number(n: BlockNumberFor) { + Self::set_block_number(n) + } } /// Implement StoredMap for a simple single-item, provide-when-not-default system. This works fine diff --git a/substrate/frame/system/src/mock.rs b/substrate/frame/system/src/mock.rs index c016ea9e1cd14969e5dac8d626687ee4a0553acf..e33ac2f56c875b3ef513dd947b717e4ab57e8ce6 100644 --- a/substrate/frame/system/src/mock.rs +++ b/substrate/frame/system/src/mock.rs @@ -17,7 +17,7 @@ use crate::{self as frame_system, *}; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_core::H256; @@ -85,6 +85,7 @@ impl OnKilledAccount for RecordKilled { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index 165df688b1c2c2915bfdb932a9e1d0448d31b680..053cec24f89ce13109e591b1f3d7a1d998a49332 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -19,7 +19,7 @@ use crate::*; use frame_support::{ assert_noop, assert_ok, dispatch::{Pays, PostDispatchInfo, WithPostDispatchInfo}, - traits::WhitelistedStorageKeys, + traits::{OnRuntimeUpgrade, WhitelistedStorageKeys}, }; use std::collections::BTreeSet; @@ -675,6 +675,46 @@ fn set_code_with_real_wasm_blob() { }); } +#[test] +fn set_code_via_authorization_works() { + let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); + ext.execute_with(|| { + System::set_block_number(1); + assert!(System::authorized_upgrade().is_none()); + + let runtime = substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(); + let hash = ::Hashing::hash(&runtime); + + // Can't apply before authorization + assert_noop!( + System::apply_authorized_upgrade(RawOrigin::None.into(), runtime.clone()), + Error::::NothingAuthorized, + ); + + // Can authorize + assert_ok!(System::authorize_upgrade(RawOrigin::Root.into(), hash)); + System::assert_has_event( + SysEvent::UpgradeAuthorized { code_hash: hash, check_version: true }.into(), + ); + assert!(System::authorized_upgrade().is_some()); + + // Can't be sneaky + let mut bad_runtime = substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(); + bad_runtime.extend(b"sneaky"); + assert_noop!( + System::apply_authorized_upgrade(RawOrigin::None.into(), bad_runtime), + Error::::Unauthorized, + ); + + // Can apply correct runtime + assert_ok!(System::apply_authorized_upgrade(RawOrigin::None.into(), runtime)); + System::assert_has_event(SysEvent::CodeUpdated.into()); + assert!(System::authorized_upgrade().is_none()); + }); +} + #[test] fn 
runtime_upgraded_with_set_storage() { let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); @@ -773,3 +813,26 @@ pub fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispatchInfo { PostDispatchInfo { actual_weight: ref_time.map(|t| Weight::from_all(t)), pays_fee } } + +#[docify::export] +#[test] +fn last_runtime_upgrade_spec_version_usage() { + struct Migration; + + impl OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> Weight { + // Ensure to compare the spec version against some static version to prevent applying + // the same migration multiple times. + // + // `1337` here is the spec version of the runtime running on chain. If there is maybe + // a runtime upgrade in the pipeline of being applied, you should use the spec version + // of this upgrade. + if System::last_runtime_upgrade_spec_version() > 1337 { + return Weight::zero(); + } + + // Do the migration. + Weight::zero() + } + } +} diff --git a/substrate/frame/system/src/weights.rs b/substrate/frame/system/src/weights.rs index b79db3654b9f7172eab2a520151e0ab470dc7527..41807dea1c55f9cd246db231fc9102d4b9eb5c4f 100644 --- a/substrate/frame/system/src/weights.rs +++ b/substrate/frame/system/src/weights.rs @@ -57,6 +57,8 @@ pub trait WeightInfo { fn set_storage(i: u32, ) -> Weight; fn kill_storage(i: u32, ) -> Weight; fn kill_prefix(p: u32, ) -> Weight; + fn authorize_upgrade() -> Weight; + fn apply_authorized_upgrade() -> Weight; } /// Weights for frame_system using the Substrate node and recommended hardware. @@ -149,6 +151,33 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. 
+ Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } } // For backwards compatibility and tests @@ -240,4 +269,31 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. + Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(3)) + } } diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index e23ded725d8910d9f608cd6c4d8aa0d82d12ca57..bcf26d622b08066f233e1c688861309cb19b454a 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -10,6 +10,9 @@ description = "FRAME Timestamp Module" documentation = "https://docs.rs/pallet-timestamp" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,15 +20,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-inherents = { path = "../../primitives/inherents", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false, optional = true} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-storage = { path = "../../primitives/storage", default-features = false} -sp-timestamp = { path = "../../primitives/timestamp", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-inherents = { path = "../../primitives/inherents", default-features = false } +sp-io = { path = "../../primitives/io", 
default-features = false, optional = true } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-storage = { path = "../../primitives/storage", default-features = false } +sp-timestamp = { path = "../../primitives/timestamp", default-features = false } docify = "0.2.6" @@ -34,7 +37,7 @@ sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/timestamp/src/mock.rs b/substrate/frame/timestamp/src/mock.rs index 418d257b3f0050d33a9690a068b5669ce055c1e9..b75bcaeb0e037c60dba6fbf976c1cda9d96f03fa 100644 --- a/substrate/frame/timestamp/src/mock.rs +++ b/substrate/frame/timestamp/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_timestamp; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_core::H256; @@ -42,6 +42,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index 6df886b93d71cdafa0ab89034755a406ff3649bc..fbd6404d785645af4f066bca0b4ee13eddb7c5ba 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet to manage tips" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,22 +19,22 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-treasury = { path = "../treasury", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +serde = { version = "1.0.193", features = ["derive"], optional = true } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-treasury = { path = "../treasury", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-storage = { path = "../../primitives/storage" } [features] -default = [ "std" ] +default = ["std"] std = [ 
"codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/tips/src/migrations/v4.rs b/substrate/frame/tips/src/migrations/v4.rs index 35569633d1bb85e831180775124fc52223337ad1..2404c6de1a16bb0657bd7771a3019186db8d0976 100644 --- a/substrate/frame/tips/src/migrations/v4.rs +++ b/substrate/frame/tips/src/migrations/v4.rs @@ -90,7 +90,7 @@ pub fn migrate for Runtime { parameter_types! { pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); - pub storage AllowMultiAssetPools: bool = false; - // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero pub storage LiquidityWithdrawalFee: Permill = Permill::from_percent(0); pub const MaxSwapPathLength: u32 = 4; + pub const Native: NativeOrWithId = NativeOrWithId::Native; } ord_parameter_types! { @@ -235,28 +241,26 @@ ord_parameter_types! { impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type AssetBalance = ::Balance; - type AssetId = u32; + type Balance = Balance; + type HigherPrecisionBalance = u128; + type AssetKind = NativeOrWithId; + type Assets = UnionOf, AccountId>; + type PoolId = (Self::AssetKind, Self::AssetKind); + type PoolLocator = Chain< + WithFirstAsset>, + Ascending>, + >; type PoolAssetId = u32; - type Assets = Assets; type PoolAssets = PoolAssets; + type PoolSetupFee = ConstU64<100>; // should be more or equal to the existential deposit + type PoolSetupFeeAsset = Native; + type PoolSetupFeeTarget = ResolveAssetTo; type PalletId = AssetConversionPalletId; - type WeightInfo = (); type LPFee = ConstU32<3>; // means 0.3% - type PoolSetupFee = ConstU64<100>; // should be more or equal to the existential deposit - type PoolSetupFeeReceiver = AssetConversionOrigin; type LiquidityWithdrawalFee = LiquidityWithdrawalFee; - type AllowMultiAssetPools = AllowMultiAssetPools; type MaxSwapPathLength = MaxSwapPathLength; type MintMinLiquidity = ConstU64<100>; // 100 is good enough when the main currency has 12 decimals. - - type Balance = u64; - type HigherPrecisionBalance = u128; - - type MultiAssetId = NativeOrAssetId; - type MultiAssetIdConverter = NativeOrAssetIdConverter; - + type WeightInfo = (); pallet_asset_conversion::runtime_benchmarks_enabled! { type BenchmarkHelper = (); } @@ -265,5 +269,5 @@ impl pallet_asset_conversion::Config for Runtime { impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; - type OnChargeAssetTransaction = AssetConversionAdapter; + type OnChargeAssetTransaction = AssetConversionAdapter; } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs index 0d090211d035218b3bdea39fc31a58d9f7b68744..f2f2c57bb376d9b81d04f3e39a77d27c0652be20 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs @@ -24,7 +24,7 @@ use frame_support::{ }; use pallet_asset_conversion::Swap; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, Zero}, + traits::{DispatchInfoOf, Get, PostDispatchInfoOf, Zero}, transaction_validity::InvalidTransaction, Saturating, }; @@ -76,16 +76,17 @@ pub trait OnChargeAssetTransaction { /// Implements the asset transaction for a balance to asset converter (implementing [`Swap`]). /// /// The converter is given the complete fee in terms of the asset used for the transaction. 
-pub struct AssetConversionAdapter(PhantomData<(C, CON)>); +pub struct AssetConversionAdapter(PhantomData<(C, CON, N)>); /// Default implementation for a runtime instantiating this pallet, an asset to native swapper. -impl OnChargeAssetTransaction for AssetConversionAdapter +impl OnChargeAssetTransaction for AssetConversionAdapter where + N: Get, T: Config, C: Inspect<::AccountId>, - CON: Swap, - T::HigherPrecisionBalance: From> + TryInto>, - T::MultiAssetId: From>, + CON: Swap, AssetKind = T::AssetKind>, + BalanceOf: Into>, + T::AssetKind: From>, BalanceOf: IsType<::AccountId>>::Balance>, { type Balance = BalanceOf; @@ -116,23 +117,19 @@ where let asset_consumed = CON::swap_tokens_for_exact_tokens( who.clone(), - vec![asset_id.into(), T::MultiAssetIdConverter::get_native()], - T::HigherPrecisionBalance::from(native_asset_required), + vec![asset_id.into(), N::get()], + native_asset_required, None, who.clone(), true, ) .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))?; - let asset_consumed = asset_consumed - .try_into() - .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))?; - ensure!(asset_consumed > Zero::zero(), InvalidTransaction::Payment); // charge the fee in native currency ::withdraw_fee(who, call, info, fee, tip) - .map(|r| (r, native_asset_required, asset_consumed)) + .map(|r| (r, native_asset_required, asset_consumed.into())) } /// Correct the fee and swap the refund back to asset. @@ -172,11 +169,10 @@ where match CON::swap_exact_tokens_for_tokens( who.clone(), // we already deposited the native to `who` vec![ - T::MultiAssetIdConverter::get_native(), // we provide the native - asset_id.into(), // we want asset_id back + N::get(), // we provide the native + asset_id.into(), // we want asset_id back ], - T::HigherPrecisionBalance::from(swap_back), /* amount of the native asset to - * convert to `asset_id` */ + swap_back, // amount of the native asset to convert to `asset_id` None, // no minimum amount back who.clone(), // we will refund to `who` false, // no need to keep alive diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs index 9e9b74a0ddb2e598863565bc2c8906825b7f5acb..62faed269d377cc1dc2b75091d79210401cd0a26 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -19,12 +19,14 @@ use frame_support::{ assert_ok, dispatch::{DispatchInfo, PostDispatchInfo}, pallet_prelude::*, - traits::{fungible::Inspect, fungibles::Mutate}, + traits::{ + fungible::{Inspect, NativeOrWithId}, + fungibles::{Inspect as FungiblesInspect, Mutate}, + }, weights::Weight, }; use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; -use pallet_asset_conversion::NativeOrAssetId; use pallet_balances::Call as BalancesCall; use sp_runtime::{traits::StaticLookup, BuildStorage}; @@ -110,22 +112,32 @@ fn default_post_info() -> PostDispatchInfo { fn setup_lp(asset_id: u32, balance_factor: u64) { let lp_provider = 5; + let ed = Balances::minimum_balance(); + let ed_asset = Assets::minimum_balance(asset_id); assert_ok!(Balances::force_set_balance( RuntimeOrigin::root(), lp_provider, - 10_000 * balance_factor + 10_000 * balance_factor + ed, )); let lp_provider_account = ::Lookup::unlookup(lp_provider); - assert_ok!(Assets::mint_into(asset_id.into(), &lp_provider_account, 10_000 * balance_factor)); + 
assert_ok!(Assets::mint_into( + asset_id.into(), + &lp_provider_account, + 10_000 * balance_factor + ed_asset + )); - let token_1 = NativeOrAssetId::Native; - let token_2 = NativeOrAssetId::Asset(asset_id); - assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(lp_provider), token_1, token_2)); + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(asset_id); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); assert_ok!(AssetConversion::add_liquidity( RuntimeOrigin::signed(lp_provider), - token_1, - token_2, + Box::new(token_1), + Box::new(token_2), 1_000 * balance_factor, // 1 desired 10_000 * balance_factor, // 2 desired 1, // 1 min @@ -215,8 +227,8 @@ fn transaction_payment_in_asset_possible() { let fee_in_native = base_weight + tx_weight + len as u64; let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(asset_id), - NativeOrAssetId::Native, + NativeOrWithId::WithId(asset_id), + NativeOrWithId::Native, fee_in_native, true, ); @@ -324,8 +336,8 @@ fn transaction_payment_without_fee() { let len = 10; let fee_in_native = base_weight + weight + len as u64; let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(asset_id), - NativeOrAssetId::Native, + NativeOrWithId::WithId(asset_id), + NativeOrWithId::Native, fee_in_native, true, ); @@ -342,8 +354,8 @@ fn transaction_payment_without_fee() { assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); let refund = AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(asset_id), + NativeOrWithId::Native, + NativeOrWithId::WithId(asset_id), fee_in_native, true, ) @@ -399,8 +411,8 @@ fn asset_transaction_payment_with_tip_and_refund() { let len = 10; let fee_in_native = base_weight + weight + len as u64 + tip; let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(asset_id), - NativeOrAssetId::Native, + NativeOrWithId::WithId(asset_id), + NativeOrWithId::Native, fee_in_native, true, ); @@ -415,8 +427,8 @@ fn asset_transaction_payment_with_tip_and_refund() { let final_weight = 50; let expected_fee = fee_in_native - final_weight - tip; let expected_token_refund = AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(asset_id), + NativeOrWithId::Native, + NativeOrWithId::WithId(asset_id), fee_in_native - expected_fee - tip, true, ) @@ -480,8 +492,8 @@ fn payment_from_account_with_only_assets() { let fee_in_native = base_weight + weight + len as u64; let ed = Balances::minimum_balance(); let fee_in_asset = AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(asset_id), - NativeOrAssetId::Native, + NativeOrWithId::WithId(asset_id), + NativeOrWithId::Native, fee_in_native + ed, true, ) @@ -496,8 +508,8 @@ fn payment_from_account_with_only_assets() { assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); let refund = AssetConversion::quote_price_exact_tokens_for_tokens( - NativeOrAssetId::Native, - NativeOrAssetId::Asset(asset_id), + NativeOrWithId::Native, + NativeOrWithId::WithId(asset_id), ed, true, ) @@ -572,8 +584,8 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // validate even a small fee gets converted to asset. 
let fee_in_native = base_weight + weight + len as u64; let fee_in_asset = AssetConversion::quote_price_tokens_for_exact_tokens( - NativeOrAssetId::Asset(asset_id), - NativeOrAssetId::Native, + NativeOrWithId::WithId(asset_id), + NativeOrWithId::Native, fee_in_native, true, ) diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index ae236728cd5816bc6993dfbac46339084006a338..be1bd36231cb542a364332bca0270f7ecaaf12a6 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -9,37 +9,40 @@ repository.workspace = true description = "pallet to manage transaction payments in assets" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-core = { path = "../../../primitives/core", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-transaction-payment = { path = "..", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-transaction-payment = { path = "..", default-features = false } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } [dev-dependencies] serde_json = "1.0.108" -sp-storage = { path = "../../../primitives/storage", default-features = false} +sp-storage = { path = "../../../primitives/storage", default-features = false } pallet-assets = { path = "../../assets" } pallet-authorship = { path = "../../authorship" } pallet-balances = { path = "../../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index 5fa8a4ab27dd114f555646e9a3a84d01e056ee55..c9b00be8e2ce5cb89aa5c07840e7fca7c3ddfe7f 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -18,6 +18,7 @@ use crate as pallet_asset_tx_payment; use codec; use frame_support::{ + derive_impl, dispatch::DispatchClass, pallet_prelude::*, parameter_types, @@ -70,6 +71,7 @@ parameter_types! 
{ pub static TransactionByteFee: u64 = 1; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/transaction-payment/rpc/Cargo.toml b/substrate/frame/transaction-payment/rpc/Cargo.toml index 8a0052e0337e7ca1d1761b30b61f8471e34a9f3e..5a574a944d82f2ba2c0936528749a946b3abbe65 100644 --- a/substrate/frame/transaction-payment/rpc/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/Cargo.toml @@ -9,12 +9,15 @@ repository.workspace = true description = "RPC interface for the transaction payment pallet." readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } pallet-transaction-payment-rpc-runtime-api = { path = "runtime-api" } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml index af098fd34edc4b6ac53922a25cc457f6c1a2cb32..e384fcef692e43217f3675c2304323db3d3ae13d 100644 --- a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -9,18 +9,21 @@ repository.workspace = true description = "RPC runtime API for transaction payment FRAME pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -pallet-transaction-payment = { path = "../..", default-features = false} -sp-api = { path = "../../../../primitives/api", default-features = false} -sp-runtime = { path = "../../../../primitives/runtime", default-features = false} -sp-weights = { path = "../../../../primitives/weights", default-features = false} +pallet-transaction-payment = { path = "../..", default-features = false } +sp-api = { path = "../../../../primitives/api", default-features = false } +sp-runtime = { path = "../../../../primitives/runtime", default-features = false } +sp-weights = { path = "../../../../primitives/weights", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "pallet-transaction-payment/std", diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..0e3744626d3f7cbfafd4c8e9661c4ec92e3cab39 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "pallet-skip-feeless-payment" +version = "1.0.0-dev" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +description = "Pallet to skip payments for calls annotated with `feeless_if` if the respective conditions are satisfied." 
+
+[lints]
+workspace = true
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+# Substrate dependencies
+sp-runtime = { path = "../../../primitives/runtime", default-features = false }
+sp-std = { path = "../../../primitives/std", default-features = false }
+
+frame-support = { path = "../../support", default-features = false }
+frame-system = { path = "../../system", default-features = false }
+
+# Other dependencies
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
+
+[features]
+default = ["std"]
+std = [
+    "codec/std",
+    "frame-support/std",
+    "frame-system/std",
+    "scale-info/std",
+    "sp-runtime/std",
+    "sp-std/std",
+]
+runtime-benchmarks = [
+    "frame-support/runtime-benchmarks",
+    "frame-system/runtime-benchmarks",
+    "sp-runtime/runtime-benchmarks",
+]
+try-runtime = [
+    "frame-support/try-runtime",
+    "frame-system/try-runtime",
+    "sp-runtime/try-runtime",
+]
diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6c34c26ce9236dd76ff90224d3ce2d59dbd3157a
--- /dev/null
+++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
@@ -0,0 +1,155 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//! # Skip Feeless Payment Pallet
+//!
+//! This pallet allows runtimes that include it to skip payment of transaction fees for
+//! dispatchables marked by [`#[pallet::feeless_if]`](`macro@
+//! frame_support::pallet_prelude::feeless_if`).
+//!
+//! ## Overview
+//!
+//! It does this by wrapping an existing [`SignedExtension`] implementation (e.g.
+//! [`pallet-transaction-payment`]) and checking if the dispatchable is feeless before applying the
+//! wrapped extension. If the dispatchable is indeed feeless, the extension is skipped and a custom
+//! event is emitted instead. Otherwise, the extension is applied as usual.
+//!
+//! ## Integration
+//!
+//! This pallet wraps an existing transaction payment pallet. This means you should include both
+//! pallets in your `construct_runtime` macro and use this pallet's
+//! [`SignedExtension`] ([`SkipCheckIfFeeless`]), which takes the existing one as an argument.
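As a rough illustration of that integration step (not part of this diff; `Runtime` and the neighbouring extensions are placeholder names), a runtime's `SignedExtra` tuple might wrap the fee-charging extension like this:

```rust
// Hypothetical runtime wiring; `Runtime` and the other extensions are illustrative.
pub type SignedExtra = (
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    // Wrap the fee-charging extension so that calls annotated with
    // `#[pallet::feeless_if]` skip it and emit `Event::FeeSkipped` instead.
    pallet_skip_feeless_payment::SkipCheckIfFeeless<
        Runtime,
        pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
    >,
);
```

Because `SkipCheckIfFeeless` forwards both `TypeInfo` and `IDENTIFIER` to the wrapped extension (see below), the wrapper stays invisible in metadata, so wallets continue to see only the familiar `ChargeTransactionPayment` extension.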
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use codec::{Decode, Encode};
+use frame_support::{
+    dispatch::{CheckIfFeeless, DispatchResult},
+    traits::{IsType, OriginTrait},
+};
+use scale_info::{StaticTypeInfo, TypeInfo};
+use sp_runtime::{
+    traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension},
+    transaction_validity::TransactionValidityError,
+};
+
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+
+pub use pallet::*;
+
+#[frame_support::pallet]
+pub mod pallet {
+    use super::*;
+
+    #[pallet::config]
+    pub trait Config: frame_system::Config {
+        /// The overarching event type.
+        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+    }
+
+    #[pallet::pallet]
+    pub struct Pallet<T>(_);
+
+    #[pallet::event]
+    #[pallet::generate_deposit(pub(super) fn deposit_event)]
+    pub enum Event<T: Config> {
+        /// A transaction fee was skipped.
+        FeeSkipped { who: T::AccountId },
+    }
+}
+
+/// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless.
+#[derive(Encode, Decode, Clone, Eq, PartialEq)]
+pub struct SkipCheckIfFeeless<T, S>(pub S, sp_std::marker::PhantomData<T>);
+
+// Make this extension "invisible" from the outside (i.e. metadata type information)
+impl<T, S: StaticTypeInfo> TypeInfo for SkipCheckIfFeeless<T, S> {
+    type Identity = S;
+    fn type_info() -> scale_info::Type {
+        S::type_info()
+    }
+}
+
+impl<T, S: Encode> sp_std::fmt::Debug for SkipCheckIfFeeless<T, S> {
+    #[cfg(feature = "std")]
+    fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
+        write!(f, "SkipCheckIfFeeless<{:?}>", self.0.encode())
+    }
+    #[cfg(not(feature = "std"))]
+    fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
+        Ok(())
+    }
+}
+
+impl<T, S> From<S> for SkipCheckIfFeeless<T, S> {
+    fn from(s: S) -> Self {
+        Self(s, sp_std::marker::PhantomData)
+    }
+}
+
+impl<T: Config + Send + Sync, S: SignedExtension<AccountId = T::AccountId>> SignedExtension
+    for SkipCheckIfFeeless<T, S>
+where
+    S::Call: CheckIfFeeless<Origin = frame_system::pallet_prelude::OriginFor<T>>,
+{
+    type AccountId = T::AccountId;
+    type Call = S::Call;
+    type AdditionalSigned = S::AdditionalSigned;
+    type Pre = (Self::AccountId, Option<<S as SignedExtension>::Pre>);
+    // From the outside this extension should be "invisible", because it just extends the wrapped
+    // extension with an extra check in `pre_dispatch` and `post_dispatch`. Thus, we should forward
+    // the identifier of the wrapped extension so that wallets see this extension as if it were only
+    // the wrapped extension itself.
+    const IDENTIFIER: &'static str = S::IDENTIFIER;
+
+    fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
+        self.0.additional_signed()
+    }
+
+    fn pre_dispatch(
+        self,
+        who: &Self::AccountId,
+        call: &Self::Call,
+        info: &DispatchInfoOf<Self::Call>,
+        len: usize,
+    ) -> Result<Self::Pre, TransactionValidityError> {
+        if call.is_feeless(&<T as frame_system::Config>::RuntimeOrigin::signed(who.clone())) {
+            Ok((who.clone(), None))
+        } else {
+            Ok((who.clone(), Some(self.0.pre_dispatch(who, call, info, len)?)))
+        }
+    }
+
+    fn post_dispatch(
+        pre: Option<Self::Pre>,
+        info: &DispatchInfoOf<Self::Call>,
+        post_info: &PostDispatchInfoOf<Self::Call>,
+        len: usize,
+        result: &DispatchResult,
+    ) -> Result<(), TransactionValidityError> {
+        if let Some(pre) = pre {
+            if let Some(pre) = pre.1 {
+                S::post_dispatch(Some(pre), info, post_info, len, result)?;
+            } else {
+                Pallet::<T>::deposit_event(Event::<T>::FeeSkipped { who: pre.0 });
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5c540c3e45955f3385462efd2d68a8c394617644
--- /dev/null
+++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs
@@ -0,0 +1,92 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::*;
+use crate as pallet_skip_feeless_payment;
+
+use frame_support::{derive_impl, parameter_types};
+use frame_system as system;
+
+type Block = frame_system::mocking::MockBlock<Runtime>;
+type AccountId = u64;
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+impl frame_system::Config for Runtime {
+    type Block = Block;
+}
+
+impl Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+}
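An aside on the `#[derive_impl(...)]` attribute used in the mock above, and added to several other test configs in this diff: it fills every `frame_system::Config` associated type not written out explicitly from the referenced `TestDefaultConfig` prelude, so a mock only needs to spell out what actually differs. A minimal sketch of the pattern, assuming an illustrative `Test` runtime declared elsewhere with `construct_runtime!`:

```rust
use frame_support::derive_impl;

// Anything not listed in the impl body (account type, lookup, hashing, limits, ...)
// is taken from frame_system::config_preludes::TestDefaultConfig.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
    // Only the associated types that differ from the test defaults are overridden here.
    type Block = frame_system::mocking::MockBlock<Test>;
}
```

Associated types that a mock still writes out, as the asset-tx-payment and treasury mocks in this diff do for `BaseCallFilter` or `BlockWeights`, simply take precedence over the derived defaults.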
+parameter_types! {
+    pub static PreDispatchCount: u32 = 0;
+}
+
+#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)]
+pub struct DummyExtension;
+
+impl SignedExtension for DummyExtension {
+    type AccountId = AccountId;
+    type Call = RuntimeCall;
+    type AdditionalSigned = ();
+    type Pre = ();
+    const IDENTIFIER: &'static str = "DummyExtension";
+    fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> {
+        Ok(())
+    }
+    fn pre_dispatch(
+        self,
+        _who: &Self::AccountId,
+        _call: &Self::Call,
+        _info: &DispatchInfoOf<Self::Call>,
+        _len: usize,
+    ) -> Result<Self::Pre, TransactionValidityError> {
+        PreDispatchCount::mutate(|c| *c += 1);
+        Ok(())
+    }
+}
+
+#[frame_support::pallet(dev_mode)]
+pub mod pallet_dummy {
+    use frame_support::pallet_prelude::*;
+    use frame_system::pallet_prelude::*;
+
+    #[pallet::pallet]
+    pub struct Pallet<T>(_);
+
+    #[pallet::config]
+    pub trait Config: frame_system::Config {}
+
+    #[pallet::call]
+    impl<T: Config> Pallet<T> {
+        #[pallet::feeless_if(|_origin: &OriginFor<T>, data: &u32| -> bool {
+            *data == 0
+        })]
+        pub fn aux(_origin: OriginFor<T>, #[pallet::compact] _data: u32) -> DispatchResult {
+            unreachable!()
+        }
+    }
+}
+
+impl pallet_dummy::Config for Runtime {}
+
+frame_support::construct_runtime!(
+    pub struct Runtime {
+        System: system,
+        SkipFeeless: pallet_skip_feeless_payment,
+        DummyPallet: pallet_dummy,
+    }
+);
diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4b4dd6997418f9c9e39ebe0ab294051a688f2f87
--- /dev/null
+++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs
@@ -0,0 +1,33 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +use super::*; +use crate::mock::{pallet_dummy::Call, DummyExtension, PreDispatchCount, Runtime, RuntimeCall}; +use frame_support::dispatch::DispatchInfo; + +#[test] +fn skip_feeless_payment_works() { + let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); + SkipCheckIfFeeless::::from(DummyExtension) + .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(PreDispatchCount::get(), 1); + + let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); + SkipCheckIfFeeless::::from(DummyExtension) + .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(PreDispatchCount::get(), 1); +} diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 419989bef12a02b7f6fa52f9cbedd95caf99dd45..d6686d44c8019e8d491e25870d7df42844ee18b4 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -22,6 +22,7 @@ use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; use frame_support::{ + derive_impl, dispatch::DispatchClass, parameter_types, traits::{ConstU32, ConstU64, Imbalance, OnUnbalanced}, @@ -69,6 +70,7 @@ parameter_types! { pub static OperationalFeeMultiplier: u8 = 5; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index e90f063427b847555858d2c8b638451b0aeb0993..f2c65e3b8a51e7ddb81ae1aa9a7c8d4648c6ec9d 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "Storage chain pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,24 +19,24 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-balances = { path = "../balances", default-features = false} -sp-inherents = { path = "../../primitives/inherents", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = false} +serde = { version = "1.0.193", optional = true } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-balances = { path = "../balances", default-features = false } +sp-inherents = { path = "../../primitives/inherents", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { 
path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false} -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = true} +sp-core = { path = "../../primitives/core", default-features = false } +sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = true } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "array-bytes", "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 6fb23380f82107d2aa9e08ed8299006130ef14a1..3286f4d7f34dc9e27053fdeb9379da72f47d1c9a 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME pallet to manage treasury" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,14 +23,14 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = docify = "0.2.0" impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-balances = { path = "../balances", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false, optional = true} +serde = { version = "1.0.193", features = ["derive"], optional = true } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-balances = { path = "../balances", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false, optional = true } [dev-dependencies] sp-io = { path = "../../primitives/io" } @@ -35,7 +38,7 @@ pallet-utility = { path = "../utility" } sp-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index 61fe29dafcae53b9b572243ca6b28da1c5a46bf0..0b9999e37fbea566e7ec74f61357104b9b5d6376 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -78,8 +78,7 @@ fn create_approved_proposals, I: 'static>(n: u32) -> Result<(), &'s #[allow(deprecated)] Treasury::::propose_spend(RawOrigin::Signed(caller).into(), value, lookup)?; let proposal_id = >::get() - 1; - 
#[allow(deprecated)] - Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; + Approvals::::try_append(proposal_id).unwrap(); } ensure!(>::get().len() == n as usize, "Not all approved"); Ok(()) @@ -163,6 +162,8 @@ mod benchmarks { fn approve_proposal( p: Linear<0, { T::MaxApprovals::get() - 1 }>, ) -> Result<(), BenchmarkError> { + let approve_origin = + T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; create_approved_proposals::(p)?; let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); #[allow(deprecated)] @@ -172,8 +173,6 @@ mod benchmarks { beneficiary_lookup, )?; let proposal_id = Treasury::::proposal_count() - 1; - let approve_origin = - T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] _(approve_origin as T::RuntimeOrigin, proposal_id); @@ -191,8 +190,7 @@ mod benchmarks { beneficiary_lookup, )?; let proposal_id = Treasury::::proposal_count() - 1; - #[allow(deprecated)] - Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; + Approvals::::try_append(proposal_id).unwrap(); let reject_origin = T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index 522ecf6b18fc384d8014ead260a374c7935131a3..093757b277019608abb4df7a77e3ef52df036f57 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -27,7 +27,7 @@ use sp_runtime::{ }; use frame_support::{ - assert_err_ignore_postinfo, assert_noop, assert_ok, + assert_err_ignore_postinfo, assert_noop, assert_ok, derive_impl, pallet_prelude::Pays, parameter_types, traits::{ @@ -54,6 +54,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/try-runtime/Cargo.toml b/substrate/frame/try-runtime/Cargo.toml index 1bb3283b1de19196657d2117db1c7fae3e4b8dba..1d036e004476a78361ee2ecc98d5b8f44b2c55f9 100644 --- a/substrate/frame/try-runtime/Cargo.toml +++ b/substrate/frame/try-runtime/Cargo.toml @@ -8,18 +8,21 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for democracy" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"]} -frame-support = { path = "../support", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +frame-support = { path = "../support", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", @@ -27,4 +30,4 @@ std = [ "sp-runtime/std", "sp-std/std", ] -try-runtime = [ "frame-support/try-runtime", "sp-runtime/try-runtime" ] +try-runtime = 
["frame-support/try-runtime", "sp-runtime/try-runtime"] diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 9af424f541cd740a950e421452e6c9ff6bfe1ff4..5958dcc2c30dbf27c6a1ff393cbe18a8350b39ae 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -8,20 +8,24 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME transaction pause pallet" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +docify = "0.2.6" +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-utility = { path = "../utility", default-features = false, optional = true } -pallet-proxy = { path = "../proxy", default-features = false, optional = true } +pallet-utility = { path = "../utility", default-features = false, optional = true } +pallet-proxy = { path = "../proxy", default-features = false, optional = true } [dev-dependencies] sp-core = { path = "../../primitives/core" } @@ -31,7 +35,7 @@ pallet-utility = { path = "../utility" } pallet-proxy = { path = "../proxy" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/tx-pause/src/lib.rs b/substrate/frame/tx-pause/src/lib.rs index a3be0f50172707cc8c87b1dbe263350072c1c23d..31be575fba7cb9e6d5335d9e1bbec3b4f9d4e723 100644 --- a/substrate/frame/tx-pause/src/lib.rs +++ b/substrate/frame/tx-pause/src/lib.rs @@ -15,6 +15,62 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! # Transaction Pause +//! +//! Allows dynamic, chain-state-based pausing and unpausing of specific extrinsics via call filters. +//! +//! ## WARNING +//! +//! NOT YET AUDITED. DO NOT USE IN PRODUCTION. +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events, and errors. +//! +//! ## Overview +//! +//! A dynamic call filter that can be controlled with extrinsics. +//! +//! Pausing an extrinsic means that the extrinsic CANNOT be called again until it is unpaused. +//! The exception is calls that use `dispatch_bypass_filter`, typically only with the root origin. +//! +//! ### Primary Features +//! +//! - Calls that should never be paused can be added to a whitelist. +//! - Separate origins are configurable for pausing and pausing. +//! - Pausing is triggered using the string representation of the call. +//! 
- Pauses can target a single extrinsic or an entire pallet. +//! - Pauses can target future extrinsics or pallets. +//! +//! ### Example +//! +//! Configuration of call filters: +//! +//! ```ignore +//! impl frame_system::Config for Runtime { +//! // … +//! type BaseCallFilter = InsideBoth; +//! // … +//! } +//! ``` +//! +//! Pause specific all: +#![doc = docify::embed!("src/tests.rs", can_pause_specific_call)] +//! +//! Unpause specific all: +#![doc = docify::embed!("src/tests.rs", can_unpause_specific_call)] +//! +//! Pause all calls in a pallet: +#![doc = docify::embed!("src/tests.rs", can_pause_all_calls_in_pallet_except_on_whitelist)] +//! +//! ## Low Level / Implementation Details +//! +//! ### Use Cost +//! +//! A storage map (`PausedCalls`) is used to store currently paused calls. +//! Using the call filter will require a db read of that storage on each extrinsic. + #![cfg_attr(not(feature = "std"), no_std)] #![deny(rustdoc::broken_intra_doc_links)] diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 66218c8c015cb83ea90d281723d9f740edf24e95..4f1c981abc6f6281850c28b2ef925baceb97623b 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -23,7 +23,7 @@ use super::*; use crate as pallet_tx_pause; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU64, Everything, InsideBoth, InstanceFilter}, }; use frame_system::EnsureSignedBy; @@ -36,6 +36,7 @@ use sp_runtime::{ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = InsideBoth; type BlockWeights = (); diff --git a/substrate/frame/tx-pause/src/tests.rs b/substrate/frame/tx-pause/src/tests.rs index a71ff3439d902b47036632f923f944e7dd68317d..823abf9d9c43cae48e28bf5bb0fd194431fa345f 100644 --- a/substrate/frame/tx-pause/src/tests.rs +++ b/substrate/frame/tx-pause/src/tests.rs @@ -25,6 +25,7 @@ use sp_runtime::DispatchError; // GENERAL SUCCESS/POSITIVE TESTS --------------------- +#[docify::export] #[test] fn can_pause_specific_call() { new_test_ext().execute_with(|| { @@ -43,6 +44,7 @@ fn can_pause_specific_call() { }); } +#[docify::export] #[test] fn can_pause_all_calls_in_pallet_except_on_whitelist() { new_test_ext().execute_with(|| { @@ -64,6 +66,7 @@ fn can_pause_all_calls_in_pallet_except_on_whitelist() { }); } +#[docify::export] #[test] fn can_unpause_specific_call() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index 4c1bcca573dc1281d9958c330069cb8ddc62d24e..218b4ffe4c054e48f5ee91b840898966eead67f2 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true description = "FRAME NFT asset management pallet" readme = "README.md" +[lints] +workspace = true + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,11 +19,11 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = 
false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -29,7 +32,7 @@ sp-io = { path = "../../primitives/io" } sp-std = { path = "../../primitives/std" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/uniques/src/benchmarking.rs b/substrate/frame/uniques/src/benchmarking.rs index 821ca1794b865483d8dc3edbb7298fa15a2dd31c..80d02f1362189d34491975918b5d14ce28bc3ab0 100644 --- a/substrate/frame/uniques/src/benchmarking.rs +++ b/substrate/frame/uniques/src/benchmarking.rs @@ -431,9 +431,9 @@ benchmarks_instance_pallet! { let buyer_lookup = T::Lookup::unlookup(buyer.clone()); let price = ItemPrice::::from(0u32); let origin = SystemOrigin::Signed(seller.clone()).into(); - Uniques::::set_price(origin, collection.clone(), item, Some(price.clone()), Some(buyer_lookup))?; + Uniques::::set_price(origin, collection.clone(), item, Some(price), Some(buyer_lookup))?; T::Currency::make_free_balance_be(&buyer, DepositBalanceOf::::max_value()); - }: _(SystemOrigin::Signed(buyer.clone()), collection.clone(), item, price.clone()) + }: _(SystemOrigin::Signed(buyer.clone()), collection.clone(), item, price) verify { assert_last_event::(Event::ItemBought { collection: collection.clone(), diff --git a/substrate/frame/uniques/src/lib.rs b/substrate/frame/uniques/src/lib.rs index 8334a8d943e19a6aea1e6973955c6a4fc44a514f..f7cc6b044d7298704d528d9cea5de7c102cf1f24 100644 --- a/substrate/frame/uniques/src/lib.rs +++ b/substrate/frame/uniques/src/lib.rs @@ -165,7 +165,7 @@ pub mod pallet { #[pallet::storage] #[pallet::storage_prefix = "Class"] /// Details of a collection. - pub(super) type Collection, I: 'static = ()> = StorageMap< + pub type Collection, I: 'static = ()> = StorageMap< _, Blake2_128Concat, T::CollectionId, @@ -174,7 +174,7 @@ pub mod pallet { #[pallet::storage] /// The collection, if any, of which an account is willing to take ownership. - pub(super) type OwnershipAcceptance, I: 'static = ()> = + pub type OwnershipAcceptance, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::AccountId, T::CollectionId>; #[pallet::storage] @@ -208,7 +208,7 @@ pub mod pallet { #[pallet::storage] #[pallet::storage_prefix = "Asset"] /// The items in existence and their ownership details. - pub(super) type Item, I: 'static = ()> = StorageDoubleMap< + pub type Item, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, T::CollectionId, @@ -257,7 +257,7 @@ pub mod pallet { #[pallet::storage] /// Price of an asset instance. 
- pub(super) type ItemPriceOf, I: 'static = ()> = StorageDoubleMap< + pub type ItemPriceOf, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, T::CollectionId, @@ -856,34 +856,37 @@ pub mod pallet { pub fn transfer_ownership( origin: OriginFor, collection: T::CollectionId, - owner: AccountIdLookupOf, + new_owner: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; - let owner = T::Lookup::lookup(owner)?; + let new_owner = T::Lookup::lookup(new_owner)?; - let acceptable_collection = OwnershipAcceptance::::get(&owner); + let acceptable_collection = OwnershipAcceptance::::get(&new_owner); ensure!(acceptable_collection.as_ref() == Some(&collection), Error::::Unaccepted); Collection::::try_mutate(collection.clone(), |maybe_details| { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; ensure!(origin == details.owner, Error::::NoPermission); - if details.owner == owner { + if details.owner == new_owner { return Ok(()) } // Move the deposit to the new owner. T::Currency::repatriate_reserved( &details.owner, - &owner, + &new_owner, details.total_deposit, Reserved, )?; + CollectionAccount::::remove(&details.owner, &collection); - CollectionAccount::::insert(&owner, &collection, ()); - details.owner = owner.clone(); - OwnershipAcceptance::::remove(&owner); + CollectionAccount::::insert(&new_owner, &collection, ()); + + details.owner = new_owner.clone(); + OwnershipAcceptance::::remove(&new_owner); + frame_system::Pallet::::dec_consumers(&new_owner); - Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); + Self::deposit_event(Event::OwnerChanged { collection, new_owner }); Ok(()) }) } @@ -1430,8 +1433,8 @@ pub mod pallet { maybe_collection: Option, ) -> DispatchResult { let who = ensure_signed(origin)?; - let old = OwnershipAcceptance::::get(&who); - match (old.is_some(), maybe_collection.is_some()) { + let exists = OwnershipAcceptance::::contains_key(&who); + match (exists, maybe_collection.is_some()) { (false, true) => { frame_system::Pallet::::inc_consumers(&who)?; }, diff --git a/substrate/frame/uniques/src/migration.rs b/substrate/frame/uniques/src/migration.rs index 6c92b753b4ac2ab29a0c0fdaeab10ba69e37eb7d..6b2bbf375e7541d58a30af413b6aad288d77e655 100644 --- a/substrate/frame/uniques/src/migration.rs +++ b/substrate/frame/uniques/src/migration.rs @@ -17,38 +17,39 @@ //! Various pieces of common functionality. use super::*; -use frame_support::traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}; - -/// Migrate the pallet storage to v1. -pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfoAccess>( -) -> frame_support::weights::Weight { - let on_chain_storage_version =